Dataset schema:
  query            string (length 9 to 3.4k)
  document         string (length 9 to 87.4k)
  metadata         dict
  negatives        sequence (4 to 101 items)
  negative_scores  sequence (4 to 101 items)
  document_score   string (length 3 to 10)
  document_rank    string (102 distinct values)
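The columns above form one row per retrieval example (the metadata field records a query/document/negatives triplet objective). Below is a minimal sketch of a row-level sanity check, assuming each row is available as a Python dict with exactly these field names, for example from a local JSONL export; the file name, helper name, and the integer-like rank assumption are illustrative, not part of the dataset:

import json

def check_row(row):
    # query and document are free-form strings (see length ranges above)
    assert isinstance(row["query"], str) and isinstance(row["document"], str)
    # negatives and negative_scores are checked as parallel lists of 4 to 101 entries,
    # as their matching length ranges suggest
    assert len(row["negatives"]) == len(row["negative_scores"])
    # document_score and document_rank are stored as strings in this dump;
    # rank is assumed here to be an integer-like string such as "0"
    return float(row["document_score"]), int(row["document_rank"])

# illustrative usage: validate every row of a hypothetical local export
with open("rows.jsonl") as fh:
    for line in fh:
        score, rank = check_row(json.loads(line))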
Add an isosurface to this item.
def addIsosurface(self, level, color):
    isosurface = self._Isosurface(parent=self)
    isosurface.setColor(color)
    if callable(level):
        isosurface.setAutoLevelFunction(level)
    else:
        isosurface.setLevel(level)
    isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)

    self._isosurfaces.append(isosurface)

    self._updateIsosurfaces()

    self.sigIsosurfaceAdded.emit(isosurface)
    return isosurface
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_surface(self,s):\n self.surfaces.append(s)\n s.system=self.surfaces", "def _addClicked(self):\n volume = self.volume()\n if volume is not None:\n dataRange = volume.getDataRange()\n if dataRange is None:\n dataRange = 0., 1.\n\n volume.addIsosurface(\n numpy.mean((dataRange[0], dataRange[-1])),\n '#0000FF')", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def isosurface(self):\n return self._isosurface()", "def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def add_surf(self, surf: Surface, pos: Tuple[int, int]):\n self.manual_surfaces.append((pos, surf))", "def get_fsurface(self, path):\n raise NotImplementedError", "def add_stock(self, symbol, quantity, unit_price):\n # TODO write SQL statement to grab unit_price\n stock_price_total = quantity * unit_price # TODO write SQL statement\n # TODO deduct stock quantity from market ??\n self.portfolios.append((symbol, quantity, unit_price))\n self.value += stock_price_total", "def add(self, data, **kwargs):\n if not isinstance(data, (FDNetwork)):\n return super(Viewer, self).add(data, **kwargs)\n\n if kwargs.get(\"as_wireframe\"):\n del kwargs[\"as_wireframe\"]\n return super(Viewer, self).add(data, **kwargs)\n\n artist = Artist(data, viewer=self, context=\"Viewer\", **kwargs)\n self.artists.append(artist)\n artist.draw()\n artist.add()", "def addWireframe(self, name, wireframe):\n\n self.wireframes[name] = wireframe", "def addLayer(self, layer):\n self.layers.append(layer)", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def addStockType(self, item):\n # TODO\n # hint: Add an item to this.stocklist\n # No. 
6\n self.stocklist.append(item)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def addStock(self,Stock):\n self.DoS[Stock.get_Symbol()]=Stock", "def add_layer(self, layer):\n self.__layers.append(layer)", "def add_station(self, station):\n self.__stations.append(station)", "def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])", "def add(self, output_svg: Drawing) -> None:\n pass", "def add_area_element(self, obj, typ_sofi, layer):\n\n qd = AreaElement(obj)\n\n pts = rs.SurfacePoints(obj)\n\n qd.n1 = self.nodes.add(Node(None, pts[0]))\n qd.n2 = self.nodes.add(Node(None, pts[1]))\n qd.n3 = self.nodes.add(Node(None, pts[3]))\n qd.n4 = self.nodes.add(Node(None, pts[2]))\n\n qd.layer = layer\n\n self.area_elements.add(qd)", "def add_layer(self, *args):\n\n nm = None\n\n #check to see if we're sending an already formed layer to add - used for data file\n if len(args) == 1 & isinstance(args[0], QgsVectorLayer):\n print('Importing {} as a vector'.format(args[0]))\n self.project.addMapLayer(args[0])\n nm = args[0].name()\n\n elif len(args) > 1:\n print('Importing {} as a vector'.format(args[0]))\n print(args)\n self.project.addMapLayer(QgsVectorLayer(*args))\n nm = args[1]\n\n if nm:\n self.get_layer(nm)\n\n else:\n print()\n print('***Bad map layer for {}***'.format(str(args)))\n print()", "def add_layer(self, layer):\n\n self._layers.append(layer)", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", "def add_vertex(self, item: Any, kind: str) -> None:\n if item not in self._vertices:\n self._vertices[item] = _Vertex(item, kind)", "def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")", "def AddLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_AddLayer(self, *args)", "def _removeClicked(self):\n isosurface = self.isosurface()\n if isosurface is not None:\n volume = isosurface.parent()\n if volume is not None:\n volume.removeIsosurface(isosurface)", "def add_stock(self, symbol):\n verbose_message(\"Adding \" + symbol + \"...\")\n if symbol not in self.stocks:\n self.stocks += [symbol]\n\n data = StockData()\n\n data.name = StockDataCollection.get_stock_name(symbol)\n data.symbol = symbol\n data.market = StockDataCollection.get_market_data(symbol,\n str(self.start_date)[:USEFUL_TIMESTAMP_CHARS],\n str(self.end_date)[:USEFUL_TIMESTAMP_CHARS])\n\n # create a list of dates in the YYYY-MM-DD format\n data.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(data.market.index)]\n data.dates = data.market.index\n\n for i in data.dates:\n if i not in self.dates:\n self.dates += [i]\n self.dates.sort()\n self.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(self.dates)]\n\n for collection_function in self.features:\n collection_function(data)\n\n data.position = []\n for _ in data.dates:\n data.position += [0]\n if type(self.cash) is not pd.DataFrame:\n self.cash += [self.starting_capital]\n\n data.position = pd.DataFrame({\"Position\": data.position}).set_index(data.dates)\n if type(self.cash) is not pd.DataFrame:\n self.cash = pd.DataFrame({\"cash\": self.cash}).set_index(data.dates)\n debug_message(data)\n self.shuffled_data_reset()\n self.stock_data[symbol] = data", "def add_polyline(self, layer_to_use,poly,open):\n if type(poly) is not list:\n toplot = [poly]\n else:\n 
toplot = poly\n\n for y in toplot:\n\n polyline = self.msp.add_polyline2d(\n points=[],\n dxfattribs={'layer': layer_to_use['name']})\n\n if open==True:\n polyline.close(False)\n else:\n polyline.close(True)\n y = np.round(100*y)/100\n if layer_to_use['inversion']==0:\n polyline.append_vertices(y)\n else:\n polyline.append_vertices(-y)", "def plot_fft_isosurface(title: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurface: {title}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n\n fig = go.Figure()\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n )\n )\n fig.update_layout(\n title_text=title,\n scene_xaxis_title_text='omega_x',\n scene_yaxis_title_text='omega_y',\n scene_zaxis_title_text='omega_z',\n )\n pio.write_html(fig, filename)", "def add_interface(self, interface):\n logger.info('adding interface: %s' % interface.name)\n data = self._add_common(interface)\n logger.debug('interface data: %s' % data)\n self.interface_data[interface.name] = data\n if interface.routes:\n self._add_routes(interface.name, interface.routes)\n\n if interface.renamed:\n logger.info(\"Interface %s being renamed to %s\"\n % (interface.hwname, interface.name))\n self.renamed_interfaces[interface.hwname] = interface.name", "def append(self, layer):\n self.layers.append(layer)", "def plot_fft_isosurfaces(description: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurfaces: {description}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n n = len(omega)\n\n num_slices = ut.shape[0]\n # We only want to plot the first, middle, and last time slices.\n slices = [0, num_slices//2, num_slices-1]\n\n titles = [f'{description}: slice {slice}' for slice in slices]\n\n num_rows = 1\n num_cols = len(slices)\n fig = make_subplots(\n rows=num_rows, \n cols=num_cols,\n specs=[\n [{'is_3d': True}]*num_cols,\n ]*num_rows,\n subplot_titles=titles,\n )\n for s in range(len(slices)):\n ut_slice = np.reshape(ut[slices[s],:], (n, n, n))\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut_slice).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n ),\n row=1,\n col=s+1\n )\n fig.update_layout(\n scene_xaxis_title_text=\"omega_x\",\n scene_yaxis_title_text=\"omega_y\",\n scene_zaxis_title_text=\"omega_z\",\n scene2_xaxis_title_text=\"omega_x\",\n scene2_yaxis_title_text=\"omega_y\",\n scene2_zaxis_title_text=\"omega_z\",\n scene3_xaxis_title_text=\"omega_x\",\n scene3_yaxis_title_text=\"omega_y\",\n scene3_zaxis_title_text=\"omega_z\",\n )\n pio.write_html(fig, filename)", "def service_execute(self, url, s_id):\n url_param = 'type=xyz&url={}'.format(url)\n\n rlayer = QgsRasterLayer(url_param, 'OpenEO-{}'.format(s_id), 'wms')\n\n if rlayer.isValid():\n QgsProject.instance().addMapLayer(rlayer)\n else:\n warning(self.iface, 'invalid layer')", "def curveOnSurface(*args, append: bool=True, degree: float=3, knot: Union[float,\n List[float]]=0.0, name: AnyStr=\"\", periodic: bool=True, positionUV:\n Union[List[float, float], List[List[float, float]]]=None, replace: bool=True,\n **kwargs)->AnyStr:\n pass", "def 
add_symbol(self, x, is_global=False):\n if is_global:\n self.global_symbols[x.name] = x\n self.global_symbols[x.name].addr = self.global_addr\n self.global_symbols[x.name].isglobal = True\n if x.type != 'procedure':\n self.global_addr += x.size\n else:\n addr = self.local_symbols_size() + self.local_param_size()\n self.symbols[-1][x.name] = x\n self.symbols[-1][x.name].addr = addr", "def add_draw(self, draw):\n self.draws.append(draw)", "def addShipOrder(self, amount, shipDesignID, systemID):\n try:\n dOrder = {'type':'Add Ship', 'value':'%s-%s' % (str(amount), shipDesignID),\n 'system':systemID, 'round':self.game.myGalaxy['currentRound']}\n serverResult = self.game.server.addIndustryOrder(self.game.authKey, dOrder)\n if serverResult <> 1:\n self.modeMsgBox(serverResult)\n else:\n self.refreshIndustryOrder(systemID)\n except:\n self.modeMsgBox('addShipOrder->Connection to Server Lost, Login Again')", "def add(self, layer):\n if len(self.layers) == 0:\n if not layer.n_inputs:\n raise Exception('Need to have n_inputs for layer.')\n else:\n layer.n_inputs = self.layers[-1].units\n self.layers.append(layer)", "def test_avargrp_areaonsurface_withsurface(self):\n import omtk\n from omtk.modules import rigHead\n from omtk.modules import rigFaceAvarGrps\n\n # Create a base rig\n rig = omtk.create()\n rig.add_module(rigHead.Head([pymel.PyNode('jnt_head')]))\n rig.add_module(\n rigFaceAvarGrps.AvarGrpOnSurface(\n pymel.ls('jnt_lip*', type='joint') + [pymel.PyNode('surface_lips'), pymel.PyNode('pSphereShape1')]\n )\n )\n\n # Validate the state of the scene before testing.\n self.assertEqual(self._get_scene_surface_count(), 1)\n\n rig.build(strict=True)\n\n # Ensure there's one one nurbsSurface in the scene.\n self.assertEqual(self._get_scene_surface_count(), 1)\n\n rig.unbuild(strict=True)\n\n # Ensure there's still one nurbsSurface in the scene.\n self.assertEqual(self._get_scene_surface_count(), 1)\n\n # Remove all surfaces\n pymel.delete(pymel.ls(type='nurbsSurface'))\n self.assertEqual(self._get_scene_surface_count(), 0)\n\n # Re-created the rig and ensure the new surface was correctly created.\n rig.build(strict=True)\n\n # Ensure there's still one nurbsSurface in the scene.\n self.assertEqual(self._get_scene_surface_count(), 1)", "def add(self, layer):\n self._top = layer(self._top)\n layer_name_ = layer.__class__.__name__\n layer_params_ = layer.params\n self._info.append((layer_name_, layer_params_))", "def create_item_surface(self):\n # Create the surface the subsurfaces will be blitted to.\n self._item_surface = pygame.Surface((240, 160))\n self._item_surface.fill((255, 255, 254))\n self._item_surface.set_colorkey((255, 255, 254))\n\n # Text maker used to make subsurfaces.\n text_maker = TextMaker(join(\"fonts\", \"party_txt_font.png\"))\n\n # Height variable used to dynamically change where subsurfaces are\n # blitted to.\n height = 15\n for item in self._inventory_list[self.start_item_index:\n self.start_item_index + 6]:\n\n # If the item is not cancel then also display its price.\n if item != \"CANCEL\":\n price_surf = \\\n text_maker.get_surface(f\"~{self._inventory[item]}\")\n self._item_surface.blit(price_surf,\n end_at(price_surf, (223, height)))\n # Display item name.\n self._item_surface.blit(text_maker.get_surface(item.name),\n (97, height))\n else:\n # Display item name.\n self._item_surface.blit(text_maker.get_surface(item),\n (97, height))\n\n height += 16", "def addPolygon(self, verts, color=[220,0,0], thickness=1.0, alpha=255,\n linestyle='=', fill=None, selectable=True, 
movable=False,\n selectThickness=2.0, selectColor=None, closed=True,\n name='QIVPolygon', noadd=False, isCosmetic=False):\n\n # create the polygon object\n polygon = QIVPolygon(verts, color=color, thickness=thickness,\n alpha=alpha, linestyle=linestyle, fill=fill, selectable=selectable,\n movable=movable, closed=closed, view=self, name=name,isCosmetic=isCosmetic)\n\n if (not noadd):\n # add the polygon to the scene\n self.scene.addItem(polygon)\n\n # and add it to our list of items\n self.sceneItems.append(polygon)\n\n return polygon", "def add_fx(self, name, even_if_exists=True):\n index = RPR.TakeFX_AddByName(\n self.id, name, 1 - 2*even_if_exists\n )\n if index == -1:\n raise ValueError(\"Can't find FX named {}\".format(name))\n fx = reapy.FX(self, index)\n return fx", "def addIon(self, name, Z, data, attr=list()):\n self.ions.append(IonSpecies(name=name, Z=Z, data=data, grid=self.grid, output=self.output, attr=attr))", "def add_planet(self,x,y,z):\n\t\tself.ob.append(vis.sphere(x=x, y=y, z=z, radius = 0.1))", "def add_asset(self, asset, replace=False):\n assert replace or asset.short_name() not in self._assets, (\n f'Attempting to add duplicate Asset: {asset.short_name()}')\n self._assets[asset.short_name()] = asset\n return self", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def Surface(self, *args):\n return _Adaptor3d.Adaptor3d_HSurface_Surface(self, *args)", "def add(self, service: AbstractService):\n self.services.append(service)", "def add_poly_to_scene(self, polygon, point_marker_dict=None, curve_marker_dict=None, hole_mode=False):\n if hole_mode:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(255, 255, 255)))\n poly.setZValue(1)\n self.poly_list.append(poly)\n self.hole_list.append(poly)\n else:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(0, 0, 0, 50)))\n self.poly_list.append(poly)\n self.add_poly_corners(poly, point_marker_dict)\n self.add_poly_edges(poly, curve_marker_dict)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\n return poly", "def add_surface(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ) -> layers.Surface:\n layer = layers.Surface(\n data,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer", "def add_layer(self, layer):\n\t\tif isinstance(layer, Layer):\n\t\t\tif layer != self:\n\t\t\t\tself.sublayers.append(layer)\n\t\t\t\tlayer.superlayer = self\n\t\telse:\n\t\t\traise TypeError('Invalid layer object')", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def addWireframe(self, wireframe):\n self.wireframe = wireframe\n self.tf_wireframe = wireframe.copy()", "def add(self, quadkey, asset):\n self.tiles[quadkey] = self.tiles.get(quadkey, set())\n self.tiles[quadkey].add(asset)", "def add_solid_input(self, solid_name, input_name, value, is_kwargs=False, chk_dict=None):\n # add_solid_input('read_csv', 'csv_path', 'cereal.csv')\n # results in\n # environment_dict = {\n # 
'solids': {\n # 'read_csv': {\n # 'inputs': {\n # 'csv_path': {'value': 'cereal.csv'}\n # }\n # }\n # }\n # }\n name_check = self._solid_name_check(solid_name, chk_dict=chk_dict)\n if name_check == EnvironmentDict._VALID or name_check == EnvironmentDict._EXISTS:\n if chk_dict is None:\n chk_dict = self._e_dict\n if name_check == EnvironmentDict._VALID:\n chk_dict['solids'][solid_name] = {'inputs': {}}\n EnvironmentDict.__add_solid_input(chk_dict['solids'][solid_name]['inputs'],\n input_name, value, is_kwargs=is_kwargs)\n return self", "def plot_surface(self, varname):\n\n if self.is_vr:\n self._plot_vr_surface(varname)\n else:\n self._plot_sr_surface(varname)", "def set_surface(self, surface:str, image:pygame.image) -> None:\n self.surface = surface\n self.__set_image__(image)", "def _createShip(self):\n self._ship=Ship()", "def add(self, obj):\n if isinstance(obj, Drawable):\n self._drawables.add(obj)\n if isinstance(obj, Updateable):\n self._updateables.add(obj)\n if isinstance(obj, Collidable) and not isinstance(obj, Projectile):\n self._collidables.add(obj)\n if isinstance(obj, Collidable) and isinstance(obj, Projectile):\n self._projectiles.add(obj)\n if isinstance(obj, Textbox):\n self._textboxes.add(obj)\n # Always make sure the newest textbox is on top.\n obj.z = zlayer.TEXT + max(t.z for t in self._textboxes) + 1\n self.__len__.cache_clear()", "def addShip(self,start,stop,ship):\n\n if (start[0] == stop[0]):\n for y in range (start[1], stop[1]+1):\n self.ships[(start[0],y)] = ship\n else:\n for x in range (start[0],stop[0]+1):\n self.ships[(x,start[1])] = ship\n self.fleetSize +=1\n return self.fleetSize", "def add_input(self, sinput):\r\n self.sinputs.append(sinput)\r\n self.variables.append(sinput.variable)", "def add(self, layer):\n layer.set_dtype(self.dtype)\n self.layers = np.append(self.layers, layer)", "def add(self, *drawables):\n self.drawables.extend(drawables)", "def add_solid(self, solid_name):\n return self.__add_solid(solid_name)", "def add_synth(self, wave, synth, iteration=0):\n self.fig.add_scatter(x=wave, y=synth, name=f\"Iteration {iteration}\")", "def add_layer(self, layer: layers.Layer) -> layers.Layer:\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n return layer", "def add_item(self, order_item):\n self.order_items.append(order_item)", "def add_feature(self, feat: Feature) -> None:\n self.data_features.append(feat)", "def add_filter(self, fs, sos=None, ba=None, zpk=None):\n\n if fs in self.filters.keys():\n raise KeyError(f\"There is already a filter for {fs=}\")\n\n nones = len(list(filter(lambda x: x is None, [sos, ba, zpk])))\n if nones != 2:\n raise RuntimeError(\"Must provide exactly one of sos, ba, zpk\")\n\n if ba is not None:\n sos = scipy.signal.tf2sos(*ba)\n elif zpk is not None:\n sos = scipy.signal.zpk2sos(*zpk)\n\n self.filters[fs] = sos", "def _updated(self, 
event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def add_item(self, sid, title, src):\n item = (sid, title, src)\n self.items.append(item)", "def add_layer(self, layer_name, layer_def):\n\n layer_idx, datatype = layer_def.split(\"/\")\n layer_idx = int(layer_idx)\n datatype = int(datatype)\n self.layers[layer_name] = LayerInfo(layer_idx, datatype, layer_name)", "def add_played_disk(self, x, y, player):\n self.played_disks.append((x, y, player))", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def add_layer(self, layer, membership):\n # TODO: Check if membership is allowed to add layer\n return InitiativeLayer.objects.create(initiative=self, layer=layer, membership=membership)", "def addInterface(self, iTag, iType, clsName, addr):\r\n try:\r\n validateName(iTag)\r\n except IllegalName:\r\n raise InvalidRequest('Interface tag is not a valid.')\r\n\r\n if iTag in self._interfaces:\r\n raise InvalidRequest(\"Can not use the same interface tag '{0}' \"\r\n 'in the same container twice.'.format(iTag))\r\n\r\n try:\r\n iType = Types.encode(iType)\r\n except TypeError:\r\n raise InvalidRequest('Interface type is invalid (Unknown prefix).')\r\n\r\n interface = self._obj.createInterface(iType, clsName, addr)\r\n interface = Interface(interface, iType, clsName)\r\n self._interfaces[iTag] = interface\r\n interface.notifyOnDeath(self._interfaceDied)", "def addInterface(self, iTag, iType, clsName):\r\n try:\r\n validateName(iTag)\r\n except IllegalName as e:\r\n raise InvalidRequest('Interface tag is invalid: {0}'.format(e))\r\n\r\n if iTag in self._interfaces:\r\n raise InvalidRequest(\"Can not use the same interface tag '{0}' \"\r\n 'in the same robot twice.'.format(iTag))\r\n\r\n try:\r\n iType = Types.encode(iType)\r\n except TypeError:\r\n raise InvalidRequest('Interface type is invalid.')\r\n\r\n interface = self._obj.createInterface(iType, clsName, iTag)\r\n interface = Interface(interface, iType, clsName)\r\n self._interfaces[iTag] = interface\r\n interface.notifyOnDeath(self._interfaceDied)", "def addIrisToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif not sha.a.iris_Occ.exists:\n\t\t\t\toccText = sha.a.iris_Occ.add( at='bool' )\n\t\t\tsha.a.iris_Occ.v = True", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def add_satellite(ax, coo_x, 
coo_y, coo_z):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n tr = np.transpose(np.vstack((coo_x.cartesian.xyz.value, coo_y.cartesian.xyz.value, coo_z.cartesian.xyz.value)))\n\n alpha_czti = 0.5\n alpha_radiator = 0.5\n alpha_sat = 0.3\n\n color_czti = 'yellow'\n color_radiator = 'black'\n color_sat = 'green'\n\n c_w2 = 0.15 # czti half-width\n c_h = 0.30 # czti height\n c_hr = 0.40 # czti radiator height\n sat_w = 0.6\n\n # For each surface, do the following:\n # verts = []\n # verts.append([tuple(tr.dot(np.array[cx, cy, cz]))])\n # surf = Poly3DCollection(verts)\n # surf.set_alpha()\n # surf.set_color()\n # ax.add_collection3d(surf)\n \n # +x rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # +y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n\n # -y rect\n verts = []\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_h]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_czti)\n surf.set_color(color_czti)\n ax.add_collection3d(surf)\n \n # -x radiator plate\n verts = []\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, c_hr]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_radiator)\n surf.set_color(color_radiator)\n ax.add_collection3d(surf)\n\n # # Bottom CZTI only\n # verts = []\n # verts.append(tuple(tr.dot(np.array([c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n # verts.append(tuple(tr.dot(np.array([c_w2, -c_w2, 0]))))\n # surf = Poly3DCollection([verts])\n # surf.set_alpha(alpha_czti)\n # surf.set_color(color_czti)\n # ax.add_collection3d(surf)\n\n # Satellite top\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite bottom\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n\n ax.add_collection3d(surf)\n\n # Satellite back (radiator side)\n verts 
= []\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite front (opposite radiator side)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n #dpix_mask Satellite right (-y, common to czti)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, -c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n # Satellite left (+y)\n verts = []\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, 0]))))\n verts.append(tuple(tr.dot(np.array([sat_w-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, -sat_w]))))\n verts.append(tuple(tr.dot(np.array([-c_w2, sat_w-c_w2, 0]))))\n surf = Poly3DCollection([verts])\n surf.set_alpha(alpha_sat)\n surf.set_color(color_sat)\n ax.add_collection3d(surf)\n\n return", "def add_newInventory(id, title, artist, table):\r\n dicRow = {'ID': id, 'Title': title, 'Artist': artist}\r\n table.append(dicRow)", "def addContour(self, coordinates):\r\n\r\n # instantiate a graphics item\r\n contour = gc.GraphicsCollection()\r\n # make it polygon type and populate its points\r\n points = [QtCore.QPointF(x, y) for x, y in zip(*coordinates)]\r\n contour.Polygon(QtGui.QPolygonF(points), self.name)\r\n # set its properties\r\n contour.pen.setColor(self.pencolor)\r\n contour.pen.setWidth(self.penwidth)\r\n contour.pen.setCosmetic(True) # no pen thickness change when zoomed\r\n contour.brush.setColor(self.brushcolor)\r\n\r\n # add contour as a GraphicsItem to the scene\r\n # these are the objects which are drawn in the GraphicsView\r\n self.contour_item = PGraphicsItem.GraphicsItem(contour, self.scene)\r\n\r\n # add the contour as item to the scene\r\n self.scene.addItem(self.contour_item)", "def drawShip(self,view):\r\n if not self.getShip() is None:\r\n self.getShip().draw(view)", "def add_sphere(self):\n self.scenes[self.current_scene].add_object(Sphere())\n self.redraw()", "def addSymbol(self, id, symbol):\n env = self\n if id in env.variables:\n return None\n env.variables[id] = symbol\n return symbol", "def AddSquad(self):\n if self.squad.squad_type == \"Troop\":\n self.parent._army.AddTroop(self.squad)\n\n if self.squad.squad_type == \"HQ\":\n self.parent._army.AddHq(self.squad)\n\n if self.squad.squad_type == \"Elite\":\n self.parent._army.AddElite(self.squad)\n\n if self.squad.squad_type == \"Heavy Support\":\n self.parent._army.AddHeavy(self.squad)\n\n if self.squad.squad_type == \"Fast Attack\":\n self.parent._army.AddFast(self.squad)\n\n self.parent.updateArmy()\n \n self.__mainWindow.destroy()", "def add_figure(self, ref, Fig):\n\n self.pytex.add_created(Fig.file_name)\n\n 
self._figure_registry[ref] = {'number': self.fig_count, 'Figure': Fig}\n self.fig_count += 1", "def addChannel(self, *args):\n return _osgAnimation.Animation_addChannel(self, *args)", "def add(self, item):\n self.contents.append(item)", "def __init__(self, obj, solids):\n # Add an unique property to identify the Ship instances\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"True if it is a valid ship instance, False otherwise\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyBool\",\n \"IsShip\",\n \"Ship\",\n tooltip).IsShip = True\n # Add the main dimensions\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Ship length [m]\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyLength\",\n \"Length\",\n \"Ship\",\n tooltip).Length = 0.0\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Ship breadth [m]\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyLength\",\n \"Breadth\",\n \"Ship\",\n tooltip).Breadth = 0.0\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Ship draft [m]\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyLength\",\n \"Draft\",\n \"Ship\",\n tooltip).Draft = 0.0\n # Add the subshapes\n obj.Shape = Part.makeCompound(solids)\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Set of external faces of the ship hull\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"Part::PropertyPartShape\",\n \"ExternalFaces\",\n \"Ship\",\n tooltip)\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Set of weight instances\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyStringList\",\n \"Weights\",\n \"Ship\",\n tooltip).Weights = []\n tooltip = str(QtGui.QApplication.translate(\n \"Ship\",\n \"Set of tank instances\",\n None,\n QtGui.QApplication.UnicodeUTF8))\n obj.addProperty(\"App::PropertyStringList\",\n \"Tanks\",\n \"Ship\",\n tooltip).Tanks = []\n\n obj.Proxy = self", "def draw_item(self, icon_character, surface, x, y):\n img_path = os.path.join('images/decor', icon_character)\n character_image = pygame.image.load(img_path).convert_alpha()\n surface.blit(character_image, (x, y))", "def add(self, factory):\n self._factories.append(factory)", "def add_filter(self, dimension_filter):\n\n self['dimensionFilterClauses'][0]['filters'] += [dimension_filter]\n\n return self", "def add_object(self, name, obj):\n if not isinstance(obj, SceneObject):\n raise ValueError('obj must be an object of type SceneObject')\n self._objects[name] = obj\n self.close_renderer()", "def add_layer(self, data, name=None, vis_url=None, **kwargs):\n # Make sure we pass in kernel_id to the layer, then to the vis_server\n # Otherwise we cant generate the coorect vis_url.\n\n layer_type = kwargs.get('layer_type', None)\n\n kwargs['kernel_id'] = self.kernel_id\n\n if layer_type != 'annotation':\n kwargs['zIndex'] = len(self.layers)\n\n # HACK: figure out a way to do this without so many conditionals\n if isinstance(data, RasterData):\n # TODO verify layer exists in geoserver?\n name = data.name if name is None else name\n\n layer = SimpleLayer(\n name, self._remote, data=data, vis_url=vis_url, **kwargs\n )\n elif isinstance(data, RasterDataCollection):\n assert name is not None, \\\n RuntimeError(\"RasterDataCollection layers require a 'name'\")\n\n layer = TimeSeriesLayer(\n name, self._remote, data=data, vis_url=vis_url, **kwargs\n )\n elif isinstance(data, VectorData):\n layer = 
VectorLayer(\n name, self._remote, self.layers, data=data, **kwargs\n )\n else:\n assert name is not None, \\\n RuntimeError(\"Non data layers require a 'name'\")\n if layer_type == 'annotation':\n layer = AnnotationLayer(\n name, self._remote, self.layers, **kwargs\n )\n else:\n layer = NoDataLayer(\n name, self._remote, vis_url=vis_url, **kwargs\n )\n\n def _add_layer(layer_name):\n self.layers.append(layer)\n\n return self._remote.add_layer(layer.name, layer.vis_url,\n layer.vis_options.serialize(),\n layer.query_params) \\\n .then(_add_layer, self.rpc_error) \\\n .catch(self.callback_error)" ]
[ "0.6740248", "0.62558913", "0.60210866", "0.5904212", "0.5893358", "0.577374", "0.53729206", "0.52408725", "0.519066", "0.5183285", "0.5161759", "0.5116448", "0.5109245", "0.5073585", "0.5032729", "0.5032729", "0.5009403", "0.4923389", "0.49232364", "0.4879343", "0.48575088", "0.48441488", "0.48291886", "0.47914204", "0.47569522", "0.47477052", "0.467216", "0.46614528", "0.46555722", "0.4654333", "0.46250927", "0.46244487", "0.4610453", "0.459935", "0.45937717", "0.45871162", "0.4583692", "0.45714197", "0.4555511", "0.45504606", "0.45320237", "0.45299134", "0.4519168", "0.45065182", "0.44990823", "0.44985774", "0.44959906", "0.44782957", "0.44499126", "0.443693", "0.44332227", "0.4432674", "0.43822443", "0.43804038", "0.43759385", "0.4364247", "0.4364164", "0.43624824", "0.43623692", "0.4361506", "0.4356891", "0.43542632", "0.4350047", "0.43431622", "0.4343065", "0.434277", "0.43381068", "0.43347853", "0.43265784", "0.4317662", "0.43170065", "0.4305509", "0.42883843", "0.42807013", "0.42765042", "0.42737377", "0.42733055", "0.42673102", "0.4267166", "0.4261723", "0.4260261", "0.42556438", "0.42539847", "0.4247839", "0.4247571", "0.42471674", "0.42448565", "0.424164", "0.42388132", "0.42310435", "0.4227317", "0.42259407", "0.4225748", "0.42215586", "0.42205444", "0.42159495", "0.42140052", "0.4212639", "0.42062664", "0.4204835" ]
0.72267526
0
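For readability, a minimal usage sketch of the positive document above, assuming a volume-like object that exposes getDataRange() and addIsosurface(level, color) as shown in this row's document and its _addClicked negative; the helper name and colors are illustrative:

import numpy

def add_default_isosurfaces(volume):
    # Sketch only: `volume` is assumed to expose getDataRange() and
    # addIsosurface(level, color) as in the snippets above.
    data_range = volume.getDataRange()
    if data_range is None:
        data_range = 0.0, 1.0
    # Fixed level at the middle of the data range (mirrors _addClicked).
    fixed = volume.addIsosurface(
        numpy.mean((data_range[0], data_range[-1])), '#0000FF')
    # Auto level: addIsosurface also accepts a callable that computes the
    # level from the data (the callable(level) branch in the document).
    auto = volume.addIsosurface(lambda data: float(numpy.median(data)), '#FF0000')
    return fixed, auto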
Remove an isosurface from this item.
def removeIsosurface(self, isosurface):
    if isosurface not in self.getIsosurfaces():
        _logger.warning(
            "Try to remove isosurface that is not in the list: %s",
            str(isosurface))
    else:
        isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)
        self._isosurfaces.remove(isosurface)
        self._updateIsosurfaces()
        self.sigIsosurfaceRemoved.emit(isosurface)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _removeClicked(self):\n isosurface = self.isosurface()\n if isosurface is not None:\n volume = isosurface.parent()\n if volume is not None:\n volume.removeIsosurface(isosurface)", "def isosurface(self):\n return self._isosurface()", "def remove(self):\r\n game_ref.remove(self)", "def remove_layer(self, layer_pos):\n self.stack.pop(layer_pos)\n return", "def remove_stock(self, stock):\n if stock in self.stocks:\n self.stocks.remove(stock)\n if stock in self.stock_data.keys():\n del self.stock_data[stock]", "def remove_stock(self, symbol, quantity):\n for p_symbol, p_quantity, p_unit_price in self.portfolios:\n if p_symbol == symbol:\n logging.debug(\"Found %s, %s, %s\" %\n (p_symbol, p_quantity, p_unit_price))\n # First delete completely\n self.portfolios.remove((p_symbol,\n p_quantity,\n p_unit_price))\n # Check if some quantity of stocks should remain\n if quantity < p_quantity:\n # Keep remainder\n self.portfolios.append((p_symbol,\n p_quantity-quantity,\n p_unit_price))\n # Reduce value of portfolio by value of stocks removed\n total_price = quantity * p_unit_price\n self.value -= total_price", "def remove_curve(self, name):\n self._curve_reg.__delitem__(name)", "def removeItem(self, item):\n # remove this item from our list\n if item in self.sceneItems:\n self.sceneItems.remove(item)\n\n # remove it from the scene\n self.scene.removeItem(item)\n\n # update the viewport\n self.viewport().update()", "def removeProjection(self, iremove):\n # check that dims of Projections and Markers are the same\n nprojProjs = len(self._ProjectionList._list)\n nprojMarker = len(self.Markers[0].xProj)\n if (nprojProjs != nprojMarker):\n \"Houston: we have a problem!\"\n \"Numbers of projections in Markers and Projections do not match\"\n kk = -1\n for proj in self._ProjectionList._list:\n kk = kk + 1\n ii = proj._index\n if (ii == iremove):\n break\n self._ProjectionList._list.pop(kk)\n self._projIndices.remove(iremove)\n for Marker in self.Markers:\n Marker.xProj.pop(kk)\n Marker.yProj.pop(kk)\n Marker._projIndices.remove(iremove)\n if self.verbose:\n print((\"Projection \" + str(iremove) + \" removed from TiltSeries\"))", "def RemoveLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_RemoveLayer(self, *args)", "def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)", "def _clearLayer(self, layer=0):\n for i in self._existingLayerItems(layer):\n self._plt.removeItem(i)", "def remove_object(self, name):\n if name in self._objects:\n del self._objects[name]\n else:\n raise ValueError('Object {} not in scene!'.format(name))\n self.close_renderer()", "def removeShip(self, shipID):\n myShip = self.ships[shipID]\n # remove captain first\n myCaptain = myShip.myCaptain\n self.removeCaptain(myCaptain.id)\n # remove ship\n del self.ships[shipID]", "def remove_from_hand(self):\n pass", "def remove(self) -> None:\n self.map.remove_brush(self)", "def removeScene(self):\n del self.scene, self.imgPixmapItem", "def remove_stock(self, item_id : int):\n removal_flag = False\n for item in self._item:\n if item.id == item_id:\n self._item.remove(item)\n removal_flag = True\n break\n\n if removal_flag == True:\n break\n\n if removal_flag == False:\n raise ItemNotFound(item_id)", "def deleteLayer(self, id):\n\n # just in case we got None\n if id is None:\n return\n\n # see if what we are about to remove might 
be visible\n layer = self.layer_mapping[id]\n visible = layer.visible\n\n del layer\n self.layer_z_order.remove(id)\n\n # if layer was visible, refresh display\n if visible:\n self.Refresh()", "def unload(self):\n if self.material_background:\n self.parent.removeItem(self.material_background)\n self.material_background = None\n if self.mod_background:\n self.parent.removeItem(self.mod_background)\n self.mod_background = None\n if self.material_foreground:\n self.parent.removeItem(self.material_foreground)\n self.material_foreground = None\n if self.mod_foreground:\n self.parent.removeItem(self.mod_foreground)\n self.mod_foreground = None\n if self.liquid:\n self.parent.removeItem(self.liquid)\n self.liquid = None", "def removeIrisToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif sha.a.iris_Occ.exists:\n\t\t\t\tsha.a.iris_Occ.delete()", "def removeItem(self, item):\n if item.type not in self.__inventory__:\n return\n for i in range(0, len(self.__inventory__[item.type])):\n if self.__inventory__[item.type][i].id == item.id:\n self.__inventory__[item.type].pop(i)\n return", "def remove_layer(self, layer_key_name):\n del(self.config.layers[layer_key_name])", "def remove_station(self, station):\n self.__stations.remove(station)", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def remove_layer(self, layer_pos):\n\n # If not within feasible bounds, return\n if layer_pos <= 1 or layer_pos > self.number_hidden_layers:\n return\n\n # We set the number of input and output dimensions for the layer to be\n # added and for the ones in the architecture that will be connected to it\n\n # We delete the layer in pos layer_pos\n self.dims = np.delete(self.dims, layer_pos)\n self.init_functions = np.delete(self.init_functions, layer_pos)\n self.act_functions = np.delete(self.act_functions, layer_pos)\n self.batch_norm = np.delete(self.batch_norm, layer_pos)\n self.dropout = np.delete(self.dropout, layer_pos)\n self.dropout_probs = np.delete(self.dropout_probs, layer_pos)\n\n # Finally the number of hidden layers is updated\n self.number_hidden_layers = self.number_hidden_layers - 1", "def remove(self, i):\n assert self.apply_remove_point_rules((self._ys[i], self._xs[i])), 'Removal rules are not satisfied'\n\n if len(self.get_raw_xs()) > 5:\n if self.is_settable:\n self._remove_xs(i)\n self._remove_ys(i)\n self.is_changed = True\n else:\n raise ValueError('graph '+str(self.name)+' is not is_settable')\n elif not self.is_raw_data:\n raise ValueError('Must be at least 5 points for interpolation.')", "def removeFluxSurfaces(self):\n if self._fluxOverlayHandles is not None:\n for h in self._fluxOverlayHandles:\n h.remove()\n\n self._fluxOverlayHandles = []\n self.overlayFluxSurfaces = False", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]", "def remove(self, game_obj):\r\n self.game_objects_for_removal.append(game_obj)", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def __delitem__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint2___delitem__(self, *args)", "def removeInterface(self, iTag):\r\n try:\r\n self._interfaces.pop(iTag).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent interface '\r\n \"'{0}' from the robot.\".format(iTag))", "def remove(cls, ob):\n return cls._remove_avos(cls.__name__, ob)", "def remove_visualization(self, visualization_id):\n for i, panel in enumerate(self.panels):\n if 
panel['id'] == visualization_id:\n del self.panels[i]", "def remove(self):\n\n if self.selected_point is None:\n RosProxy().notify(\"No calibration point selected\", STATE.ERROR)\n return\n\n if len(self.poses) == 0:\n RosProxy().notify(\"No calibration point added\", STATE.ERROR)\n return\n\n self.poses.remove(self.poses[self.selected_point])\n\n if len(self.poses) == 0:\n self.selected_point = None\n else:\n self.selected_point = min(len(self.poses) - 1, self.selected_point)\n\n self.calibration_changed()", "def remove(self, name):\n if hasattr(self, name):\n site = getattr(self, name)\n if isinstance(site, IconSite):\n delattr(self, name)\n self._typeDict[site.type].remove(name)", "def remove(self):\n raise NotImplementedError", "def remove(self):\n raise NotImplementedError", "def remove(self, done=False, verbose=True):\n return _image.image_remove(self, done, verbose)", "def RemoveLayer(self, name, idx):\n name = self.layerName[name]\n self.map.RemoveLayer(name = name)\n del self.layerName[name]\n self.toolbar.choice.Delete(idx)\n if not self.toolbar.choice.IsEmpty():\n self.toolbar.choice.SetSelection(0)\n\n self.frame.GetWindow().UpdateMap(render = True, renderVector = False) \n #self.frame.Render(self.mapWindow)", "def __delitem__(self, feature):\n self[feature] = None", "def remove(self):\n self.model_or_sim.remove_package(self)", "def __delitem__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___delitem__(self, *args)", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.train.pop(name)\n self.test.pop(name)", "def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")", "def remove_from_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] -= 1\n\t\t\tif self.inventory[item] == 0:\n\t\t\t\tdel self.inventory[item]", "def removeInterface(self, iTag):\r\n try:\r\n self._interfaces.pop(iTag).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent interface '\r\n \"'{0}' from the container.\".format(iTag))", "def remove(self, feature_type):\n with self._map_lock.write_lock():\n del self._feature2memory[feature_type]", "def __delitem__(self, key):\n self.deleteCurve(key)", "def removeClientInterface(self, interface):\n if interface == self.potentialclient:\n self.potentialclient = None\n if interface in self.clients:\n self.clients.remove(interface)", "def erase(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint2_erase(self, *args)", "def get_fsurface(self, path):\n raise NotImplementedError", "def erase(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3_erase(self, *args)", "def remove(self, *args):\n return _libsbml.ListOfFluxBounds_remove(self, *args)", "def remove(self) -> None:\n self.map.cameras.remove(self)\n if self.is_active():\n self.set_inactive_all()", "def RemoveShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_RemoveShape(self, *args)", "def remove_asset(self, name):\n if name in self.assets:\n del self.assets[name]", "def delX(self):\n del self.components[0]", "def delX(self):\n del self.components[0]", "def __delitem__(self, key):\n\n del self._vertices[key]", "def delete_layer(self, index) :\n \n # Remove the actor, delete the list item, and update the other layers.\n self._renderer.RemoveActor(self._layers[index].actor)\n del self._layers[index]\n self._update_layers_positions()", "def 
remove(self) -> None:\n self.map.remove_ent(self)", "def remove_wearable(self, pid: str):\n if pid in self.wearables:\n del self.wearables[pid]", "def remove(self):\n self._world.remove_mob(self)", "def remove(self, i=None):\n if i > len(self.x):\n print 'index out of range'\n return\n if i==None:\n self.x.pop()\n self.y.pop()\n else:\n del self.x[i]\n del self.y[i]\n del self.active[i]\n for j in range(self.dims):\n del self.errors[j][i]\n return", "def forceRemove( self ):\n scene = self.scene()\n if ( scene ):\n scene.forceRemove(self)", "def removeDictItem(self, key):\n if key in self._dentsvertsdata:\n self._dentsvertsdata[key].free()\n del self._dentsvertsdata[key]", "def remove(self):\n for artist in self._artists:\n artist.remove()", "def remove(self, *args):\n return _libsbml.ListOfInSpeciesTypeBonds_remove(self, *args)", "def destroy(self):\n self.__overlayList.removeListener('overlays', self.__name)\n base.Action.destroy(self)", "def remove(self, x):\n del self.d[x]", "def remove_card(self):\n return self.hand.pop()", "def remove_animation(self, animation):\n\t\tdel self.animations[animation.attribute]", "def InterfaceRemoved(self, interface_name):\n pass", "def removeSndFile(self, filename):\n try:\n sndfile = self.sndfiles[filename]\n except KeyError:\n return\n for ch in range(sndfile.getChannels()):\n w = self.grid.getWaveform(sndfile, ch)\n self.sb.unregisterWaveform(w)\n i = self.grid.getRowIndex(sndfile, ch)\n if i is not None:\n self.grid.removeRow(i)\n self.player.stop()\n self.player.removeSndFile(sndfile)\n del self.sndfiles[filename]", "def remove(self):\n if self.removed:\n return\n self._remove()\n self.removed = True", "def _onremove(self):\n self._channellist.remove(self)\n self.deleteLater()", "def remove_animal(self, i):\n self.__list_animaux.pop(i)", "def _on_project_remove_gadget(self, project, gadget):\n iter = self._find_iter_by_widget(gadget)\n if iter:\n self._model.remove(iter)", "def delete(self):\n del self.shx.atoms[self.index]", "def remove(self):\n\t\treturn self._flist.remove(self)", "def removeLatticeFrame(self):\n self.latticeFrame.remove()", "def remove_curve(self, pv_name):\n curve = self.chart.findCurve(pv_name)\n if curve:\n self.chart.removeYChannel(curve)\n del self.channel_map[pv_name]\n self.chart.removeLegendItem(pv_name)\n\n widgets = self.findChildren((QCheckBox, QLabel, QPushButton, QGroupBox), pv_name)\n for w in widgets:\n w.deleteLater()\n\n if len(self.chart.getCurves()) < 1:\n self.enable_chart_control_buttons(False)\n self.show_legend_chk.setChecked(False)", "def remove(self):\n self.workspace.client._perform_empty(\n \"DELETE\", \"/workspaces/%s/objects/%s\" % (self.workspace.workspace_key, self.data['id']))", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def remove(self, s):\n if s in self.outputs:\n self.outputs.remove(s)\n self.inputs.remove(s)\n del self.conns[s]\n s.close()", "def SetRemoveFaceMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveInternalWires_SetRemoveFaceMode(self, *args)", "def remove_from_basket(self, item):\n self._products.pop(item)", "def del_item(self, item):\n index = self.board[item.pos[0]][item.pos[1]].index(item)\n del self.board[item.pos[0]][item.pos[1]][index]", "def _itemRemoved(self, item):\n group = self.item()\n if group is None:\n return\n\n # Find item\n 
for row in self.children():\n if isinstance(row, Item3DRow) and row.item() is item:\n self.removeRow(row)\n break # Got it\n else:\n raise RuntimeError(\"Model does not correspond to scene content\")", "def removeSpeciesFeature(self, *args):\n return _libsbml.MultiSpeciesPlugin_removeSpeciesFeature(self, *args)", "def remove_obj(self, obj_name):\n self.scene.remove_world_object(obj_name)", "def remove(self, *args):\n return _libsbml.ListOfGraphicalObjects_remove(self, *args)", "def RemoveComponent(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_RemoveComponent(self, *args)", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def __del__(self) -> None:\n self.map.face_id.discard(self.id)", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def delete_polygon(self, poly: QGraphicsPolygonItem, delete_from_coord_list=False):\n\n self.poly_list.remove(poly)\n\n if poly in self.hole_list:\n self.hole_list.remove(poly)\n\n for item in poly.childItems():\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n self.edge_list.remove(item)\n if item in self.potential_edge_splitters:\n self.potential_edge_splitters.remove(item)\n\n if delete_from_coord_list:\n for point in self.poly_to_list(poly, \"Global\"):\n self.point_coord_list = np.delete(self.point_coord_list, np.where(\n np.all(self.point_coord_list == [[point.x(), point.y()]], axis=1))[0][0], axis=0)\n\n poly.hide()" ]
[ "0.7447585", "0.58254415", "0.5618442", "0.557807", "0.5543547", "0.54468757", "0.5415708", "0.54150975", "0.53558755", "0.533654", "0.53338057", "0.5329659", "0.53128797", "0.5312818", "0.5301393", "0.52970207", "0.52678525", "0.5265467", "0.52498937", "0.5239983", "0.5233106", "0.52129185", "0.5190236", "0.51683474", "0.5150279", "0.51365364", "0.51352006", "0.5130295", "0.5126869", "0.5109566", "0.5096612", "0.5077429", "0.50588953", "0.5053321", "0.505167", "0.5051484", "0.5050487", "0.5049329", "0.5042727", "0.5042727", "0.50424916", "0.5036388", "0.5031667", "0.5013883", "0.5000333", "0.49929336", "0.4992867", "0.49924904", "0.4981768", "0.49706012", "0.4969668", "0.49679568", "0.4963437", "0.4960849", "0.49551496", "0.49503034", "0.49499506", "0.4945393", "0.49376997", "0.49330193", "0.49330193", "0.49290183", "0.49283955", "0.49281004", "0.4919027", "0.4888689", "0.48811913", "0.4873417", "0.4873356", "0.48709035", "0.48695004", "0.48679796", "0.48574084", "0.48546726", "0.48542", "0.48537716", "0.48409438", "0.48400745", "0.4838111", "0.48289424", "0.4824143", "0.48202184", "0.48184586", "0.4817741", "0.48167124", "0.48157567", "0.48089042", "0.48068276", "0.48059162", "0.47999772", "0.47995868", "0.47992578", "0.4795482", "0.4791733", "0.4789839", "0.47880223", "0.47879016", "0.47867197", "0.4779346", "0.4774012" ]
0.8135996
0
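Combining the documents of the two rows above, a minimal sketch of swapping one isosurface for another on the same item; `volume` is assumed to expose getIsosurfaces(), removeIsosurface() and addIsosurface() as in those rows, and the helper name is illustrative:

def replace_isosurface(volume, old_isosurface, level, color):
    # Remove the old isosurface only if it is still attached to this item,
    # mirroring the membership check inside removeIsosurface above.
    if old_isosurface in volume.getIsosurfaces():
        volume.removeIsosurface(old_isosurface)
    # Then add the replacement (level may be a number or a callable).
    return volume.addIsosurface(level, color)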
Handle update of isosurfaces upon level changed
def _isosurfaceItemChanged(self, event):
    if event == Item3DChangedType.ISO_LEVEL:
        self._updateIsosurfaces()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def UpdateLayers(self):\n pass", "def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface", "def 
update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, 
symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def update_fov(self) -> None:\n self.game_map.visible[:] = compute_fov(\n self.game_map.tiles[\"transparent\"],\n (self.player.x, self.player.y),\n radius=8,\n )\n # If a tile is \"visible\" it should be added to \"explored\".\n self.game_map.explored |= self.game_map.visible", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def level_upgrade(self, lvl):\n\t\tpass", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def _update_objects(self):\n\t\tself.clouds.update()\n\t\tif self.is_play:\n\t\t\tself.floor.update()\n\t\t\tself.bolan.update()\n\t\t\tself.obstacles.update()\n\t\t\tself.scoreboard.update()", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)", "def update(self, *args):\n\n\t\t# Update Bullets\n\t\tif self.power == 'bulletup' and self.level >= 2:\n\t\t\tself.angle_bullets(self.level)\n\t\t\n\t\t# Update Lazer\n\t\tif self.power == 'lazerup' and self.level > 0:\n\n\t\t\tself.index += 1\n\t\t\tif self.index % 12:\n\t\t\t\tself.step += 1\n\t\t\t\n\t\t\tself.y -= self.speed\n\n\n\t\t\tself.rect.y = self.y\n\t\t\tself.rect.x = self.x\n\n\t\t\t# print(\"SLOPE??? 
\", self.slope)\n\t\t\tself.sheet.blitme(self.screen, self.step % self.sheet.totalCells, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.rect.x, self.rect.y)\n\n\t\t# Update Bombs\n\t\tif self.power == 'bombup' and self.level > 0:\n\t\t\tself.bomb_vector()\n\n\t\t# Update Default\n\t\telse:\n\t\t\tself.y -= self.speed\n\t\t\tself.rect.y = self.y\n\n\t\tpygame.display.flip()", "def on_update(self):\n \n # update physics engine\n \n \n # use code from pick up coins lab to pick up coins\n # you don't need all of the code from that lab(no gameover or reset)", "def update_visualization(self) -> None:\n pass", "def update():", "def update():", "def _zincSceneviewerEvent(self, event):\n if event.getChangeFlags() & Sceneviewerevent.CHANGE_FLAG_REPAINT_REQUIRED:\n QtCore.QTimer.singleShot(0, self.updateGL)", "def __call__(self, level):\r\n import time\r\n\r\n currentVolume = self.__findCurrentVolumeLevel()[0]\r\n\r\n assert isinstance(level, int), \"Given volume level is not integer (instead %s)\" % type(level)\r\n if not (level <= self.currentMaximumVolume and level >= 0):\r\n self.phone.fail(\"adjustVolume: given level is not valid. Valid ones for this specific volume bar are 0 - %s)\" % self.currentMaximumVolume)\r\n\r\n self.phone.comment(\"adjustVolume(%s)\" % level)\r\n\r\n if level < currentVolume:\r\n while level < currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_DOWN')\r\n currentVolume -= 1\r\n\r\n elif level > currentVolume:\r\n while level > currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_UP')\r\n currentVolume += 1\r\n\r\n else: # volume level is now ok, pass\r\n pass\r\n\r\n return True\r\n\r\n ## TODO: current volume level should be asked from yapas\r\n\r\n #doCheck = False\r\n\r\n #if doCheck:\r\n #\r\n # currentVolume = self.getCurrentVolumeLevel()\r\n # assert currentVolume == level, \"Adjusted volume, but the volume level is %s when it should be %s\" % (currentVolume, level)\r\n\r\n # debug.brf(\"Selected volume level %s and verified from UI\" % level)\r\n # return True\r\n #else:\r\n # debug.brf(\"Selected volume level %s\" % level)\r\n # return True\r", "def update(self):\n self.data.update()\n for sensor in self.data.daikinskyport.get_sensors(self._index):\n if sensor[\"type\"] == self._type and self._sensor_name == sensor[\"name\"]:\n self._state = sensor[\"value\"]", "def updateLayerData(self, **kwargs):\n self.currentLayerData = self.layers[self.getCurrentRow()]\n self.currentLayerData.update(**kwargs)\n self.layers[self.getCurrentRow()] = self.currentLayerData\n self.updateSelectedLayer()", "def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()", "def update(self):\n print(\"sensorState Update\")", "def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)", "def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n 
modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)", "def notify(self):\n _mesh = self._data.mesh\n log_gui.debug(\"notify mesh is valided %s\", _mesh)\n if(_mesh != None):\n umesh, ugeom = self._use_mesh, self._use_geom\n umesh.setEnabled(True)\n #umesh.setChecked(True)\n ugeom.setEnabled(False)\n if _mesh.has_geom():\n ugeom.setEnabled(True)\n #ugeom.setChecked(True)\n else:\n self.reset()", "def plane_update(self):\n self.plane.update()", "def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def ToggleLayerManager(self, event):\n pass", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def change_level(self):\n new_level = GameLevel[self.scoreboard.current_level]\n self.greeterboard.reset(level=new_level, msg='')\n self.end_game(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def update(self):\n #update position\n trans = self.buffer.lookup_transform(\"map\", \"base_footprint\", rospy.Time(),rospy.Duration(1))\n self.position = (trans.transform.translation.x,trans.transform.translation.y)\n #update map\n \n #update map\n self.map_callback(self.get_map().map)\n\n #update forntiers\n frontier_map = frontier(self.map,self.map_info,self.position)\n pos = frontier_map.frontier_world\n #set goal\n self.set_goal(pos)\n\n #check if there are any frontiers left\n return frontier_map.counter", "def changes(screen):\r\n global inventory\r\n screen.blit(bg, (0, 0))\r\n Sprites.draw(screen)\r\n Sprites.update()\r\n ammo.draw(screen)\r\n ammo.update()\r\n prt.check_trying_using()\r\n if inventory.count('score {}'.format(id(score_0))) == 0:\r\n score_0.pick(inventory, player, Sprites, score)\r\n if inventory.count('score {}'.format(id(score_1))) == 0:\r\n score_1.pick(inventory, player, Sprites, score)\r\n if inventory.count('boost {}'.format(id(sp))) == 0:\r\n sp.boost(inventory, player, Sprites)\r\n borders(WIDTH, HEIGHT)\r\n see_score(score)\r\n key_0.pick(inventory, player, Sprites)\r\n pygame.display.flip()", "def updateVisualization(self):\n\t\tif self.visualization:\n\t\t\tif self.fixedVisualization:\n\t\t\t\tself.visualization.setFixedVisualization(self.fixedVisualization)\n\t\t\tif self.movingVisualization:\n\t\t\t\tself.visualization.setMovingVisualization(self.movingVisualization)\n\t\tself.multiRenderWidget.setVolumeVisualization(self.visualization)\n\t\tself.visualizationUpdated.emit(self.visualization)", "def drawIsoSurfaces0(self):\r\n # research\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n v = vtk.vtkAppendPolyData()\r\n\r\n for modelNode in modelNodes.values():\r\n if modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n v.AddInput(modelNode.GetPolyData())\r\n\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInput(v.GetOutput())\r\n modeller.SetSampleDimensions(self.dim.value, self.dim.value, self.dim.value)\r\n modeller.SetCapping(0)\r\n modeller.SetAdjustBounds(self.abonds.value)\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(self.adist.value / 100)\r\n modeller.SetMaximumDistance(self.maxdist.value / 100)\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(self.nb.value)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(self.contour.value, self.contourValue.value)\r\n contourFilter.SetValue(self.contour2.value, self.contourValue2.value)\r\n contourFilter.SetValue(self.contour3.value, self.contourValue3.value)\r\n contourFilter.SetValue(self.contour4.value, self.contourValue4.value)\r\n contourFilter.SetValue(self.contour5.value, self.contourValue5.value)\r\n\r\n isoSurface = contourFilter.GetOutput()\r\n self.AddContour(isoSurface)", "def update(self) -> None:\n state = int(self._light.is_on())\n self._state = bool(state)\n self._brightness = to_hass_level(state)", "def update_world(self):\n pass", "def draw_level(self, surface):\n 
surface.blit(self.background, (0, 0))\n surface.blit(self.player.image, self.player.rect)\n surface.blit(self.message_box.image, self.message_box.rect)\n surface.blit(self.arrow.image, self.arrow.rect)\n surface.blit(self.transition_surface, (0, 0))", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )", "def on_update(self, delta_time: float) -> None:\n #inventory of items \"picked up\"\n hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.levels[self.current_level].item_list)\n for item in hit_list:\n item.remove_from_sprite_lists()\n self.inventory += 1\n\n #update player sprite \"outfit\" is sword item is picked up\n self.player_list.update()\n self.player_list.update_animation(self.inventory)\n\n #update physics engine for player sprite and walls\n self.physics_engine.update()\n\n #go to next level\n #level 2 blocked if coin item is not picked up\n if self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory >= 1: \n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory == 0: \n self.player_sprite.center_y = settings.HEIGHT\n\n #level 3 blocked if sword item is not picked up\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory >= 2:\n self.current_level = 2\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory == 1:\n self.player_sprite.center_y = settings.HEIGHT\n\n #go up to empty level after winning game\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 2:\n self.current_level = 3\n\n #go down levels\n elif self.player_sprite.center_y < 0 and self.current_level == 1:\n self.current_level = 0\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT\n elif self.player_sprite.center_y < 0 and self.current_level == 2:\n self.current_level = 1\n self.physics_engine = 
arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT", "def update(self) -> None:\n self._light.update()\n self._state = self._light.is_on()\n self._brightness = self._light.brightness", "def update(self):\n #update checkboxes\n self.average_check_box.SetValue(self.parent.fftsink.average)\n self.use_persistence_check_box.SetValue(self.parent.fftsink.use_persistence)\n self.peak_hold_check_box.SetValue(self.parent.fftsink.peak_hold)\n #update radio buttons\n try:\n index = list(DIV_LEVELS).index(self.parent.fftsink.y_per_div)\n self.radio_buttons[index].SetValue(True)\n except: pass", "def update(self):\r\n if self._block.info_values is not None:\r\n self._state = self._block.info_values.get(self._sensor_name, None)", "def update_H(self):", "def drawIsoSurfaces( self ):\n #research\n profprint()\n\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n \n v= vtk.vtkAppendPolyData()\n canContinue = 0\n for modelNode in modelNodes.values():\n print \"for\"\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n canContinue = 1\n v.AddInputData(modelNode.GetPolyData())\n \n if canContinue ==1:\n modeller = vtk.vtkImplicitModeller()\n modeller.SetInputConnection(v.GetOutputPort())\n modeller.SetSampleDimensions(60,60,60)\n modeller.SetCapping(0)\n modeller.AdjustBoundsOn()\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(1)\n modeller.SetMaximumDistance(1.0)\n modeller.Update()\n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(1)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(1,10)\n # contourFilter.SetValue(2,13)\n # contourFilter.SetValue(3,15)\n # contourFilter.SetValue(4,20)\n # contourFilter.SetValue(5,25)\n contourFilter.Update()\n isoSurface = contourFilter.GetOutputDataObject(0)\n\n self.AddContour(isoSurface)", "def __update_portfolio_handler(self, msg):\n pass", "def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)", "def update(self):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.update(speed=self.speed)\n self.IA.O_ATUAL.update()\n self._desintegrator.update()", "def update(self):\n self.sensor.update()", "def update():\n global current_level\n # Initialization (only runs on start/restart)\n player = Player()\n\n walls, goals, start = parse_level(levels[current_level])\n player.centerx = start[0]\n player.centery = start[1]\n\n # Main update loop\n while True:\n update_player(player, delta())\n draw_player(player)\n\n for wall in walls:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(100, 100, 100), wall)\n\n player_vel, wall_vel, overlap = solve_rect_overlap(player,\n wall,\n player.velocity,\n mass_b=0,\n bounce=0.1)\n player.velocity = player_vel\n\n for goal in goals:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(20, 100, 20), goal)\n\n normal, depth = overlap_data(player, goal)\n if depth > 0:\n current_level = (current_level + 1) % len(levels)\n restart()\n\n draw_text(f\"Level: {current_level + 1}\", (0, 0))\n\n # Main loop ends here, put your code above this line\n yield", "def update(self, 
level, camera=None):\n if self.frozen: return\n\n dx = 8 if self.moving_right else -8\n self.rect.move_ip(dx, 0)\n center = self.rect.center\n angle = 180 + self.counter*10 if self.moving_right else 180 - self.counter*10\n self.image = pygame.transform.rotate(self.orig_image, angle)\n self.rect.w = self.image.get_rect().w\n self.rect.h = self.image.get_rect().h\n self.rect.center = center\n self.counter += 1\n\n hit = any(self.rect.colliderect(block) for block in level)\n left, top = camera.pos.topleft if camera else (0, 0)\n return hit or self.rect.x > -left + self.screen.w or self.rect.right < -left", "def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False", "def isosurface(self):\n return self._isosurface()", "def test_update_impact_level(self):\n pass", "def updateLayer(self):\n if self.num_layers == 0:\n self.box[0].setDisabled(False)\n for i in range(1,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 1:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n for i in range(2,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 2:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(True)\n else:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(False)", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def updateWorld(self):\n pass", "def update_E(self):", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def handleLevelChangeCallback(self, level):\n\n if self.change_level_callback:\n self.change_level_callback(level)", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def update_gauge(self):\n pass # Do nothing", "def refreshStock(self, level : int = -1):\n self.shipsStock.clear()\n self.weaponsStock.clear()\n self.modulesStock.clear()\n self.turretsStock.clear()\n # self.currentTechLevel = random.randint(bbConfig.minTechLevel, bbConfig.maxTechLevel)\n if level == -1:\n self.currentTechLevel = bbConfig.pickRandomShopTL()\n else:\n if level not in range(bbConfig.minTechLevel, bbConfig.maxTechLevel + 1):\n raise ValueError(\"Attempted to refresh a shop at tech level \" + str(level) + \". 
must be within the range \" + str(bbConfig.minTechLevel) + \" to \" + str(bbConfig.maxTechLevel))\n self.currentTechLevel = level\n \n for i in range(self.maxShips):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.shipKeysByTL[itemTL - 1]) != 0:\n self.shipsStock.addItem(bbShip.bbShip.fromDict(bbData.builtInShipData[random.choice(bbData.shipKeysByTL[itemTL - 1])]))\n\n for i in range(self.maxModules):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.moduleObjsByTL[itemTL - 1]) != 0:\n self.modulesStock.addItem(random.choice(bbData.moduleObjsByTL[itemTL - 1]))\n\n for i in range(self.maxWeapons):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.weaponObjsByTL[itemTL - 1]) != 0:\n self.weaponsStock.addItem(random.choice(bbData.weaponObjsByTL[itemTL - 1]))\n\n # if random.randint(1, 100) <= bbConfig.turretSpawnProbability:\n for i in range(self.maxTurrets):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.turretObjsByTL[itemTL - 1]) != 0:\n self.turretsStock.addItem(random.choice(bbData.turretObjsByTL[itemTL - 1]))", "def update_trace(Y, X, win, name):\n global vis\n vis.updateTrace(X, Y, win=win, name=name)", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def filterLevelSlot(self, level, shown):\r\n\r\n if shown:\r\n self.model.removeFilter(level)\r\n else:\r\n self.model.addFilter(level)", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def update(self):\n\n # check if gain information is available, if not, update config\n if \"d2d\" not in self.config:\n self.setup_d2d()\n\n for channel in self.light_channels:\n # turn on the light\n self.light_control(channel, 1)\n\n d_print(\"Letting gains settle for the {} channel...\".format(channel), 1)\n\n with picamera.PiCamera() as sensor:\n # set up the sensor with all its settings\n sensor.resolution = self.settings.resolution\n sensor.framerate = self.settings.framerate[channel]\n sensor.shutter_speed = self.settings.shutter_speed[channel]\n\n sensor.awb_mode = \"off\"\n sensor.awb_gains = (self.config[\"wb\"][channel][\"r\"], self.config[\"wb\"][channel][\"b\"])\n\n time.sleep(30)\n\n sensor.exposure_mode = self.settings.exposure_mode\n\n # set the analog and digital gain\n ag = float(sensor.analog_gain)\n dg = float(sensor.digital_gain)\n\n self.config[\"d2d\"][channel][\"digital-gain\"] = dg\n self.config[\"d2d\"][channel][\"analog-gain\"] = ag\n\n d_print(\"Measured ag: {} and dg: {} for channel {}\".format(ag, dg, channel), 1)\n d_print(\"Saved ag: {} and dg: {} for channel {}\".format(self.config[\"d2d\"][channel][\"analog-gain\"], self.config[\"d2d\"][channel][\"digital-gain\"], channel), 1)\n\n # turn the light off\n self.light_control(channel, 0)\n\n # update timestamp\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n # save the new configuration to file\n self.save_config_to_file()", "def drawIsoSurfaces(self):\r\n # research\r\n profprint()\r\n\r\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n\r\n v = vtk.vtkAppendPolyData()\r\n canContinue = 0\r\n for modelNode in modelNodes.values():\r\n print \"for\"\r\n if 
modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n canContinue = 1\r\n v.AddInputData(modelNode.GetPolyData())\r\n\r\n if canContinue == 1:\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInputConnection(v.GetOutputPort())\r\n modeller.SetSampleDimensions(60, 60, 60)\r\n modeller.SetCapping(0)\r\n modeller.AdjustBoundsOn()\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(1)\r\n modeller.SetMaximumDistance(1.0)\r\n modeller.Update()\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(1)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(1, 10)\r\n # contourFilter.SetValue(2,13)\r\n # contourFilter.SetValue(3,15)\r\n # contourFilter.SetValue(4,20)\r\n # contourFilter.SetValue(5,25)\r\n contourFilter.Update()\r\n isoSurface = contourFilter.GetOutputDataObject(0)\r\n\r\n self.AddContour(isoSurface)", "def upgrade(self):\n if self.level < len(self.tower_images):\n self.level_up_animation = True\n self.level += 1\n self.base_damage += 3\n self.damage = self.base_damage\n\n #Since level does not upgrade in menu we have to manually do it here\n self.menu.tower_level += 1", "def update(self, *args):\n return _osgAnimation.Channel_update(self, *args)", "def update(self):\n changes = {}\n for coord in INDICES: # the need for two for loops is necessary\n if self.chart[coord] == ALIVE and (\n self.number_of_neighbors(coord) < 2 or self.number_of_neighbors(coord) > 3):\n changes[coord] = KILL\n elif self.number_of_neighbors(coord) == 3:\n changes[coord] = REVIVE\n for coord in changes.keys(): # because the evolution is discrete\n if changes[coord] == KILL:\n self.kill(coord)\n elif changes[coord] == REVIVE:\n self.givebirth(coord)", "def OnSysColourChanged(self, event):\r\n \r\n # This event is probably triggered by a theme change \r\n # so we have to re-init the art provider.\r\n if self._art:\r\n self._art.Init()\r\n\r\n if self._frame:\r\n self.Update()\r\n self._frame.Refresh()", "def __change_level(self, level):\n self.level = level", "def on_flags_update(self, event):\n self.entity.on_flags_update(event)", "def update(self, new_gameStateData):\r\n pass", "def _update(self):\n self.cv.update()", "def updateWeaponStatus(self):\n if self.myGalaxy.shipSelected == self:\n for position in self.positions:\n myQuad = self.quads[position] \n for id in funcs.sortStringList(myQuad.weapons.keys()):\n myWeapon = myQuad.weapons[id]\n self.updateMyGUIValue('%sweapon%sStatus' % (position,id), myWeapon.operational)\n self.updateMyGUIValue('%sweapon%sLock' % (position,id), myWeapon.currentLock)\n self.updateMyGUIValue('%sweapon%sPower' % (position,id), myWeapon.currentPower)\n if myWeapon.myWeaponData.ammo == 1 or myWeapon.droneID != '':\n self.updateMyGUIValue('%sweapon%sAmmo' % (position,id), myWeapon.availAmmo)" ]
[ "0.6973583", "0.6578195", "0.6466973", "0.62059677", "0.6089591", "0.5909208", "0.5899945", "0.5825903", "0.57568187", "0.5703271", "0.5698263", "0.5687134", "0.56855094", "0.5576002", "0.55106765", "0.547519", "0.54643226", "0.54612565", "0.5458047", "0.5452608", "0.54472446", "0.54448307", "0.5433167", "0.5410528", "0.539943", "0.5392605", "0.5375215", "0.53721255", "0.5331784", "0.53085095", "0.5291183", "0.5270107", "0.5269863", "0.5269863", "0.5266598", "0.5265461", "0.52608424", "0.52591413", "0.5256453", "0.5248227", "0.5244211", "0.5226966", "0.521634", "0.52128714", "0.51968485", "0.5193245", "0.5189567", "0.51873976", "0.5186236", "0.5179149", "0.5177387", "0.51651144", "0.5163327", "0.5152428", "0.51519895", "0.51494604", "0.51489764", "0.5148867", "0.5140647", "0.5134432", "0.513317", "0.51268435", "0.5123776", "0.5123462", "0.5121879", "0.5121775", "0.5118916", "0.51130694", "0.5111088", "0.51092607", "0.5107527", "0.5104091", "0.51033294", "0.5102839", "0.5098403", "0.5097332", "0.5093626", "0.50903034", "0.5088432", "0.508521", "0.5079002", "0.5077787", "0.50646687", "0.50573784", "0.5055454", "0.50526595", "0.50496584", "0.5046617", "0.5044741", "0.5043705", "0.5043234", "0.50407493", "0.5040118", "0.5036818", "0.5032975", "0.50303495", "0.5028555", "0.5025838", "0.5016217", "0.5015901" ]
0.81053317
0
Handle updates of isosurfaces level and add/remove
def _updateIsosurfaces(self): # Sorting using minus, this supposes data 'object' to be max values sortedIso = sorted(self.getIsosurfaces(), key=lambda isosurface: - isosurface.getLevel()) self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def UpdateLayers(self):\n pass", "def update_flags(self):\n # view mode, 
filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. 
superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def level_upgrade(self, lvl):\n\t\tpass", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def refreshStock(self, level : int = -1):\n self.shipsStock.clear()\n self.weaponsStock.clear()\n self.modulesStock.clear()\n self.turretsStock.clear()\n # self.currentTechLevel = random.randint(bbConfig.minTechLevel, bbConfig.maxTechLevel)\n if level == -1:\n self.currentTechLevel = bbConfig.pickRandomShopTL()\n else:\n if level not in range(bbConfig.minTechLevel, bbConfig.maxTechLevel + 1):\n raise ValueError(\"Attempted to refresh a shop at tech level \" + str(level) + \". must be within the range \" + str(bbConfig.minTechLevel) + \" to \" + str(bbConfig.maxTechLevel))\n self.currentTechLevel = level\n \n for i in range(self.maxShips):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.shipKeysByTL[itemTL - 1]) != 0:\n self.shipsStock.addItem(bbShip.bbShip.fromDict(bbData.builtInShipData[random.choice(bbData.shipKeysByTL[itemTL - 1])]))\n\n for i in range(self.maxModules):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.moduleObjsByTL[itemTL - 1]) != 0:\n self.modulesStock.addItem(random.choice(bbData.moduleObjsByTL[itemTL - 1]))\n\n for i in range(self.maxWeapons):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.weaponObjsByTL[itemTL - 1]) != 0:\n self.weaponsStock.addItem(random.choice(bbData.weaponObjsByTL[itemTL - 1]))\n\n # if random.randint(1, 100) <= bbConfig.turretSpawnProbability:\n for i in range(self.maxTurrets):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.turretObjsByTL[itemTL - 1]) != 0:\n self.turretsStock.addItem(random.choice(bbData.turretObjsByTL[itemTL - 1]))", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()", "def add_surface(self,s):\n 
self.surfaces.append(s)\n s.system=self.surfaces", "def _addClicked(self):\n volume = self.volume()\n if volume is not None:\n dataRange = volume.getDataRange()\n if dataRange is None:\n dataRange = 0., 1.\n\n volume.addIsosurface(\n numpy.mean((dataRange[0], dataRange[-1])),\n '#0000FF')", "def level_fix(planet, used_planets, planet_levels, left_control_keys, \n planet_control, level, planet_level_dict):\n planet_value = planet_control[planet]\n if len(planet_levels) < (level+1):\n planet_levels.append([])\n if (planet_value == 'Vul') or (planet_value == 'Ear'):\n if level == 0:\n used_planets.append(planet_value) # Add to used planet list\n planet_levels[level].append(planet_value) #Add planet to the level \n planet_level_dict[planet_value] = level \n else:\n used_planets.append(planet) # Add planet to the used planet list\n planet_levels[level].append(planet) # Add planet to the level\n planet_level_dict[planet] = level \n if planet in left_control_keys:\n left_control_keys.remove(planet) #Delete from left planet list\n else:\n used_planets.append(planet)\n planet_levels[level].append(planet) # Add planet to the level\n planet_level_dict[planet] = level\n if planet in left_control_keys:\n left_control_keys.remove(planet) # Delete from left planet list", "def process(self):\n\n\n index = self.dlg.ui.layerCombo.currentIndex() \n if index < 0: \n # it may occur if there's no layer in the combo/legend \n pass\n else: \n layer = self.dlg.ui.layerCombo.itemData(index) \n # layer = QgsVectorLayer(self.fileName, \"layer_name\", \"ogr\")\n \n\n nFeat = layer.featureCount()\n layer.startEditing()\n\n \n\n # Should really put these in a function\n\n index = layer.fieldNameIndex(\"_lts\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_num_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_num_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n index = layer.fieldNameIndex(\"_protected\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_protected\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_bike_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_bike_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"CROSSINGME\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"CROSSINGME\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts11\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts11\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts12\")\n if index == -1: # field doesn't exist\n caps = 
layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts12\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts13\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts13\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts_woX\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts_woX\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"LTS\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"LTS\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n\n\n i=1\n featid_lts ={}\n for feature in layer.getFeatures():\n street = street_link_object()\n street.path_width = feature['PATHWIDTH']\n street.park_width = feature['PARKWIDTH']\n street.num_lane = feature['NUMLANE']\n street.f_code = feature['ROADCLASS']\n street.foc_width = feature['FOC_WIDTH']\n # street.median = feature['MEDIAN']\n street.speed_limit = feature['SPD_LIM']\n # street.pocket_lane = feature['RTLANE']\n street.illegial_parking = feature['ILLPARKING']\n street.center_line = feature['CL']\n street.net_type = feature['NET_TYPE']\n street.right_turn_speed=feature['RTSPEED']\n street.pocket_lane_shift = feature['RTLANSHIFT']\n street.right_turn_lane_length = feature['RTPOCKLENG']\n street.one_way = feature['ONEWAY']\n street.raw_cross_stress = feature['_rawCrossS']\n street.cross_treat = feature['CrossTreat']\n\n street.calculate_crossing_me(street.num_lane) # has to always be before computing lts\n street.compute_LTS()\n if street.LTS != None :\n i+=1\n j=ceil(i/(nFeat/100))\n self.dlg.ui.progress_bar.setValue(j)\n feature[\"_lts_woX\"] = street.LTS\n feature[\"_lts\"] = street.LTS\n feature[\"_lts11\"] = street.lts11\n feature[\"_lts12\"] = street.lts12\n feature[\"_lts13\"] = street.lts13\n feature[\"_num_lane\"] = street.num_lane\n feature[\"_bike_lane\"] = street.bike_lane\n feature[\"_protected\"] = street.protected\n feature[\"CROSSINGME\"] = street.crossing_me\n layer.updateFeature(feature)\n # layer.updateFields()\n # QMessageBox.information(self.dlg, (\"WAIT\"), (\"Please wait!\"))\n layer.commitChanges()\n # layer.commitChanges()\n QMessageBox.information(self.dlg, (\"Successful\"), (\"LTS has been computed!\")) \n\n self.dlg.close()", "def _update_objects(self):\n\t\tself.clouds.update()\n\t\tif self.is_play:\n\t\t\tself.floor.update()\n\t\t\tself.bolan.update()\n\t\t\tself.obstacles.update()\n\t\t\tself.scoreboard.update()", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def on_update(self, delta_time: float) -> None:\n #inventory of items \"picked up\"\n hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.levels[self.current_level].item_list)\n for item in hit_list:\n item.remove_from_sprite_lists()\n self.inventory += 1\n\n #update player sprite \"outfit\" is sword item is picked up\n self.player_list.update()\n self.player_list.update_animation(self.inventory)\n\n #update physics engine for player sprite and walls\n self.physics_engine.update()\n\n #go to 
next level\n #level 2 blocked if coin item is not picked up\n if self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory >= 1: \n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory == 0: \n self.player_sprite.center_y = settings.HEIGHT\n\n #level 3 blocked if sword item is not picked up\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory >= 2:\n self.current_level = 2\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory == 1:\n self.player_sprite.center_y = settings.HEIGHT\n\n #go up to empty level after winning game\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 2:\n self.current_level = 3\n\n #go down levels\n elif self.player_sprite.center_y < 0 and self.current_level == 1:\n self.current_level = 0\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT\n elif self.player_sprite.center_y < 0 and self.current_level == 2:\n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def updateWeaponStatus(self):\n if self.myGalaxy.shipSelected == self:\n for position in self.positions:\n myQuad = self.quads[position] \n for id in funcs.sortStringList(myQuad.weapons.keys()):\n myWeapon = myQuad.weapons[id]\n self.updateMyGUIValue('%sweapon%sStatus' % (position,id), myWeapon.operational)\n self.updateMyGUIValue('%sweapon%sLock' % (position,id), myWeapon.currentLock)\n self.updateMyGUIValue('%sweapon%sPower' % (position,id), myWeapon.currentPower)\n if myWeapon.myWeaponData.ammo == 1 or myWeapon.droneID != '':\n self.updateMyGUIValue('%sweapon%sAmmo' % (position,id), myWeapon.availAmmo)", "def update():", "def update():", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def addLevel(self):\n pass", "def update(self):\n if self.__first:\n self.__first = False\n self.__map_data = self.__gui_handler.get_map_data()\n self.__next_data = self.__gui_handler.get_entities()\n labels = []\n\n # Découverte du terrain\n for terrain in SimUtils.get_terrains():\n self.__terrain.append(terrain.color)\n labels.append(StatItem(terrain.name, \"\", terrain.color))\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n # Ajout des labels de terrain\n for label in labels:\n self.__gui_handler.add_stat(label)\n\n # 
Remplissage de la carte avec les terrains.\n for i in range(0, self.__width):\n for j in range(0, self.__height):\n # Affichage du point.\n color = QColor(self.__terrain[self.__map_data.get_terrain_type(i,j)])\n self.__image.setPixel(i,j,color.rgb())\n\n # Permet de faire le tri entre les entités déjà rencontrées et les\n # autres.\n entity_types = {}\n\n # Liste des futurs labels\n labels = []\n\n # Découverte des entités - affectation des couleurs\n for entity in self.__next_data:\n # Ajout des labels de couleur pour les entités\n if not entity_types.has_key(entity.__name__):\n entity_types[entity.__name__] = True\n\n for label, color in entity._labels.iteritems():\n labels.append(StatItem(label, \"\", color))\n\n # Affichage de l'entité.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n\n for label in labels:\n self.__gui_handler.add_stat(label)\n else:\n # Mise à jour du rendu\n for entity in self.__next_data:\n # Cas d'une entité désactivée (morte)\n remove_entity = not entity._is_active()\n if id(entity) not in self.positions:\n # Ajout de l'entité en cours de simulation\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x,entity._y]\n\n # Le simulateur demande de repeindre l'entité\n old_points = self.positions[id(entity)]\n\n if not remove_entity:\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # On remet la couleur du terrain.\n color = QColor(self.__terrain[self.__map_data.get_terrain_type(old_points[0], old_points[1])])\n self.__image.setPixel(old_points[0], old_points[1], color.rgb())\n\n if not remove_entity:\n # Ajout des paramètres de setPixel dans une liste pour être ploté après.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())", "def update(self):\n #update position\n trans = self.buffer.lookup_transform(\"map\", \"base_footprint\", rospy.Time(),rospy.Duration(1))\n self.position = (trans.transform.translation.x,trans.transform.translation.y)\n #update map\n \n #update map\n self.map_callback(self.get_map().map)\n\n #update forntiers\n frontier_map = frontier(self.map,self.map_info,self.position)\n pos = frontier_map.frontier_world\n #set goal\n self.set_goal(pos)\n\n #check if there are any frontiers left\n return frontier_map.counter", "def __update_portfolio_handler(self, msg):\n pass", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def refreshLayerLists(self):\n self.layers = self.iface.legendInterface().layers()\n self.lineLayerIndexMap = dict()\n self.pointLayerIndexMap = dict()\n self.lineLayerList = [] # holds the filtered layer names\n self.pointLayerList = [] # holds the filtered layer names\n for i, layer in enumerate(self.layers):\n try:\n if layer.geometryType() == 0: # 0: point, 1: line\n self.pointLayerIndexMap[len(self.pointLayerList)] = i # put the index pair in the dictionary\n self.pointLayerList.append(layer.name()) # add the layer name to the list\n elif layer.geometryType() == 1: # 0: point, 1: line\n self.lineLayerIndexMap[len(self.lineLayerList)] = i # put the index pair in the dictionary\n self.lineLayerList.append(layer.name()) # add the layer name to the list\n except AttributeError:\n # if the above checks failed, i.e. 
because of a raster layer, skip it\n continue", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)", "def update_status_bars():\n for status_bar_name in init.game_state.status_bars.keys():\n gradual_status_bar_fluctuation(status_bar_name)", "def draw_level(self, surface):\n surface.blit(self.background, (0, 0))\n surface.blit(self.player.image, self.player.rect)\n surface.blit(self.message_box.image, self.message_box.rect)\n surface.blit(self.arrow.image, self.arrow.rect)\n surface.blit(self.transition_surface, (0, 0))", "def test_update_impact_level(self):\n pass", "def update_visualization(self) -> None:\n pass", "def updateLayerData(self, **kwargs):\n self.currentLayerData = self.layers[self.getCurrentRow()]\n self.currentLayerData.update(**kwargs)\n self.layers[self.getCurrentRow()] = self.currentLayerData\n self.updateSelectedLayer()", "def changes(screen):\r\n global inventory\r\n screen.blit(bg, (0, 0))\r\n Sprites.draw(screen)\r\n Sprites.update()\r\n ammo.draw(screen)\r\n ammo.update()\r\n prt.check_trying_using()\r\n if inventory.count('score {}'.format(id(score_0))) == 0:\r\n score_0.pick(inventory, player, Sprites, score)\r\n if inventory.count('score {}'.format(id(score_1))) == 0:\r\n score_1.pick(inventory, player, Sprites, score)\r\n if inventory.count('boost {}'.format(id(sp))) == 0:\r\n sp.boost(inventory, player, Sprites)\r\n borders(WIDTH, HEIGHT)\r\n see_score(score)\r\n key_0.pick(inventory, player, Sprites)\r\n pygame.display.flip()", "def updateMap(self,map):\n if not self.opened:\n col = int( self.world_rect.left / map.header_data['tilewidth'])\n row = int( self.world_rect.top / map.header_data['tileheight'])\n layerIndex = len(map.layer_data)-1\n while(layerIndex > 0):\n layer = map.layer_data[layerIndex]\n if(layer[row][col] > 1):\n layer[row][col] = 0\n break\n layerIndex -= 1\n for g in self.groups():\n g.remove(self)", "def add_extra_level(self, variable, xlevel):\n \n if variable not in [\"geopotential\", \"temperature\"]:\n raise Exception(\"variable should be one of [geopotential,temperature]\")\n \n if variable == \"geopotential\":\n # geopotential \n A = self.z.z[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.z = (xarray.concat([self.z, A], dim=\"level\"))\n \n # convert pressure to geopotential\n self.z.z[0, -1, :, :] = pres2alt(xlevel * 100) * g\n \n else: \n # temperature\n A = self.t.t[:, -1, :, :].to_dataset() # copy lowest pressure level\n A[\"level\"] = xlevel\n self.t = (xarray.concat([self.t, A], dim=\"level\"))", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n 
self.mesh.update()\n else:\n self.mesh.visible = False", "def updateAllGUIValues(self):\n if self.myGalaxy.shipSelected == self:\n d = {'shipISP':self.currentISP,\n 'shipStrength':self.strength,\n 'shipAccel':self.accel,\n 'shipRotation':self.rotation,\n 'shipPower':self.currentPower,\n 'shipBattery':self.currentBattery,\n 'maxAssault':self.maxAssault}\n for position in self.positions:\n myQuad = self.quads[position]\n d[position+'Shields'] = myQuad.currentSP\n d[position+'Armor'] = myQuad.currentAP\n d[position+'Comps'] = myQuad.currentComps\n self.myGalaxy.shipInfo.updateAttributes(d)", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def update(self, *args):\n\n\t\t# Update Bullets\n\t\tif self.power == 'bulletup' and self.level >= 2:\n\t\t\tself.angle_bullets(self.level)\n\t\t\n\t\t# Update Lazer\n\t\tif self.power == 'lazerup' and self.level > 0:\n\n\t\t\tself.index += 1\n\t\t\tif self.index % 12:\n\t\t\t\tself.step += 1\n\t\t\t\n\t\t\tself.y -= self.speed\n\n\n\t\t\tself.rect.y = self.y\n\t\t\tself.rect.x = self.x\n\n\t\t\t# print(\"SLOPE??? \", self.slope)\n\t\t\tself.sheet.blitme(self.screen, self.step % self.sheet.totalCells, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.rect.x, self.rect.y)\n\n\t\t# Update Bombs\n\t\tif self.power == 'bombup' and self.level > 0:\n\t\t\tself.bomb_vector()\n\n\t\t# Update Default\n\t\telse:\n\t\t\tself.y -= self.speed\n\t\t\tself.rect.y = self.y\n\n\t\tpygame.display.flip()", "def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)", "def updateScreenTiling(self,level):\n\n self.tile_list=[]\n self.objList=[]\n self.level=level\n\n self.rowCount=0\n \n for row in worldData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.tilType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.tile_list.append(tile)\n self.colCount+=1\n self.rowCount+=1\n \n self.rowCount=0\n for row in objectData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.objType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.objList.append(tile)\n self.colCount+=1\n self.rowCount+=1", "def level_data(self):\n self.level(self.data)", "def updateData( Tables, Graph, LayersInfo, WarningMessage ):\n\n # clean the warning message\n LayersInfo.clean()\n WarningMessage.clean()\n\n LayerThicknessBuffer = Tables[ \"GeometryProperties\" ].getValue( 0, 2 )\n try:\n\n\n Layers = getLayersFromString( Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) )\n\n LayersInfo.printMessage( str( len( Layers ) ) )\n\n # Homogenize the input data\n if len(Layers) != 1:\n\n makeMultiLayerMask( Tables )\n\n HomogenizedData = homogenize( Tables[ \"ElasticModulus\" ].getData( )[ 0 ],\n Tables[ \"ShearModulus\" ].getData( )[ 0 ],\n Tables[ \"PoissonRatios\" ].getData( ),\n Layers )\n\n #cangeMode( Tables, WarningMessage, Graph.getMode( ) )\n\n Tables[ \"ElasticModulus\" ].assignValuesSet( 
HomogenizedData[ \"ElasticModulus\" ] )\n Tables[ \"ShearModulus\" ].assignValuesSet( HomogenizedData[ \"ShearModulus\" ] )\n Tables[ \"PoissonRatios\" ].assignValuesSet( HomogenizedData[ \"PoissonRatios\" ] )\n Tables[ \"GeometryProperties\" ].assignValue( 0, 2, HomogenizedData[ \"TotalThickness\" ] )\n\n\n # Part of error handling.Function \"isInputNegative\" throws an error\n # if there is an element with its negetive value.\n isInputNegative( Tables [ \"ElasticModulus\" ].getData( ) )\n isInputNegative( Tables [ \"ShearModulus\" ].getData( ) )\n isInputNegative( Tables [ \"PoissonRatios\" ].getData( ) )\n isInputNegative( Tables [ \"MaterialProperties\" ].getData( ) )\n isInputNegative( Tables [ \"GeometryProperties\" ].getData( ) )\n\n # update the tables buffers\n makeMask( Tables, Graph.getMode() )\n\n # before calling user-define functions check the current mode\n cangeMode( Tables, WarningMessage, Graph.getMode() )\n\n precomputePoissonRatios( Tables )\n\n # get data from the corresponding tables\n ElasticModulusData = Tables [ \"ElasticModulus\" ].getData( )\n ShearModulusData = Tables [ \"ShearModulus\" ].getData( )\n PoissonRatiosData = Tables [ \"PoissonRatios\" ].getData( )\n MaterialPropertiesData = Tables [ \"MaterialProperties\" ].getData( )\n GeometryPropertiesData = Tables [ \"GeometryProperties\" ].getData( )\n\n\n #################### CALL USER-SPECIFIC FUNCTION ##########################\n\n testInputData( Graph.getMode(), PoissonRatiosData )\n\n Graph.Containers[ \"WaveVelocity\" ] = wave_speeds(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n\n Graph.Containers[ \"ModesInBand\" ] = ModesInBand(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalDensity\" ] = ModaleDichte(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_L\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_S\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_g_eff\" ],\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalOverlapFactor\" ] = ModalOverlapFactor(\n MaterialPropertiesData,\n Graph.Containers[ \"ModalDensity\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"MaxElementSize\" ] = MaximumElementSize(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"EigenFrequency\" ] = EigenfrequenciesPlate(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n # Update the current graph with new data\n updateGraph( Graph, Graph.getCurrentGraphNumber( ) )\n\n WarningMessage.clean()\n\n\n except VibroP_DataCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n Tables[ \"GeometryProperties\" ].setValue( 0, 2, LayerThicknessBuffer, \"\" )\n\n\n except VibroP_WrongLayersThikness as Error:\n WarningMessage.printMessage( str(Error) )\n\n\n except VibroP_TableCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n\n #'''\n except:\n Message = \"Error: Unexpected error. 
Please, refer to the code\"\n WarningMessage.printMessage( Message )\n #'''", "def update( ):\r\n pass", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def update():\n global current_level\n # Initialization (only runs on start/restart)\n player = Player()\n\n walls, goals, start = parse_level(levels[current_level])\n player.centerx = start[0]\n player.centery = start[1]\n\n # Main update loop\n while True:\n update_player(player, delta())\n draw_player(player)\n\n for wall in walls:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(100, 100, 100), wall)\n\n player_vel, wall_vel, overlap = solve_rect_overlap(player,\n wall,\n player.velocity,\n mass_b=0,\n bounce=0.1)\n player.velocity = player_vel\n\n for goal in goals:\n window = pg.display.get_surface()\n pg.draw.rect(window, pg.Color(20, 100, 20), goal)\n\n normal, depth = overlap_data(player, goal)\n if depth > 0:\n current_level = (current_level + 1) % len(levels)\n restart()\n\n draw_text(f\"Level: {current_level + 1}\", (0, 0))\n\n # Main loop ends here, put your code above this line\n yield", "def update(self):\n if pygame.time.get_ticks() - self.start_time > const.LEVEL_WAITING:\n self.player.update()\n self.platform_list.update()\n self.platform_grass_list.update()\n self.platform_stone_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite.update()\n self.enemy_bubble_list.update()\n self.fruit_list.update()\n for bullet in self.bullet_list:\n if bullet.rect.x > const.SCREEN_WIDTH + 10 or bullet.rect.x < -10:\n self.bullet_list.remove(bullet)\n self.active_sprite.remove(bullet)\n\n\n for guy in self.enemy_list:\n enemy_hit_list = pygame.sprite.spritecollide(guy, self.bullet_list, False, pygame.sprite.collide_circle)\n for hit in enemy_hit_list:\n bub_enemy= enemy.Enemy_bubble(guy)\n self.enemy_list.remove(guy)\n\n self.bullet_list.remove(hit)\n self.active_sprite.add(bub_enemy)\n self.active_sprite.remove(hit)\n self.active_sprite.remove(guy)\n\n self.enemy_bubble_list.add(bub_enemy)\n\n\n if len(self.enemy_list) == 0 and len(self.enemy_bubble_list) == 0 and self.close_time == 0:\n self.close_time=pygame.time.get_ticks()\n\n if self.close_time > 0 and pygame.time.get_ticks()-self.close_time > 2000:\n self.close = True", "def write_surface_info(surface_info):\n with open(os.path.join(PLUGIN_DIR, 'surface_info.json'), 'w') as outfile:\n json.dump(surface_info, outfile)", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def drawIsoSurfaces( self ):\n #research\n profprint()\n\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n \n v= vtk.vtkAppendPolyData()\n canContinue = 0\n for modelNode in modelNodes.values():\n print \"for\"\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n canContinue = 1\n v.AddInputData(modelNode.GetPolyData())\n \n if canContinue ==1:\n modeller = vtk.vtkImplicitModeller()\n modeller.SetInputConnection(v.GetOutputPort())\n modeller.SetSampleDimensions(60,60,60)\n modeller.SetCapping(0)\n modeller.AdjustBoundsOn()\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(1)\n modeller.SetMaximumDistance(1.0)\n modeller.Update()\n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(1)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(1,10)\n # contourFilter.SetValue(2,13)\n # contourFilter.SetValue(3,15)\n # contourFilter.SetValue(4,20)\n # contourFilter.SetValue(5,25)\n contourFilter.Update()\n isoSurface = contourFilter.GetOutputDataObject(0)\n\n self.AddContour(isoSurface)", "def update(self, new_gameStateData):\r\n pass", "def Update(self, mode = UPDATE_MODE.all):\r\n aux_versions = dstore.Get(\"versions\")\r\n \r\n if(aux_versions['hw'] != None): \r\n Ui().lineHwVersion.setText(str(aux_versions['hw'])) \r\n else:\r\n Ui().lineHwVersion.setText(\"- -\")\r\n \r\n if(aux_versions['fw'] != None): \r\n Ui().lineFwVersion.setText(str(aux_versions['fw'])) \r\n else:\r\n Ui().lineFwVersion.setText(\"- -\") \r\n \r\n \r\n \r\n \"\"\" TERMINAL INFO \"\"\"\r\n aux_terminal_info = dstore.Get(\"terminal_info\", \"GET\")\r\n \r\n \"\"\" number of cells \"\"\"\r\n if(aux_terminal_info['number_of_cells'] != None):\r\n Ui().lineCells.setText(str(aux_terminal_info['number_of_cells'])) \r\n else:\r\n Ui().lineCells.setText(\"-\") \r\n \r\n \r\n \"\"\" battery \"\"\"\r\n if(aux_terminal_info['battery'] != None):\r\n Ui().lineBattery.setText(str(aux_terminal_info['battery'])+\" %\") \r\n else:\r\n Ui().lineBattery.setText(\"-- %\") \r\n \r\n \"\"\" speaker \"\"\" \r\n if(aux_terminal_info['speaker']['keys'] == True):\r\n Ui().lineSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['keys'] == False):\r\n Ui().lineSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerKeys.setText(\"- -\")\r\n Ui().pushSpeakerKeys.setText(\"- -\")\r\n \r\n if(aux_terminal_info['speaker']['system'] == True):\r\n Ui().lineSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['system'] == False):\r\n Ui().lineSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['timing'] == True):\r\n Ui().lineSpeakerTiming.setText(\"ON\")\r\n 
Ui().pushSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['timing'] == False):\r\n Ui().lineSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else: \r\n Ui().lineSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['keys'] == None or aux_terminal_info['speaker']['timing']==None or aux_terminal_info['speaker']['system']==None): \r\n Ui().pushSpeakerKeys.setEnabled(False)\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n else:\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n \r\n \r\n return True", "def update_E(self):", "def update(self):\n changes = {}\n for coord in INDICES: # the need for two for loops is necessary\n if self.chart[coord] == ALIVE and (\n self.number_of_neighbors(coord) < 2 or self.number_of_neighbors(coord) > 3):\n changes[coord] = KILL\n elif self.number_of_neighbors(coord) == 3:\n changes[coord] = REVIVE\n for coord in changes.keys(): # because the evolution is discrete\n if changes[coord] == KILL:\n self.kill(coord)\n elif changes[coord] == REVIVE:\n self.givebirth(coord)", "def update_H(self):", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def setPlayerStates(self, updates):\r\n for upd in updates:\r\n print \"UPD player %s\" % upd['player']\r\n player = self.players[upd['player']]\r\n player.setStatus(upd['status'], upd['jump'], upd['charge'])\r\n\r\n player.health = upd['health']\r\n if player == self.myPlayer:\r\n #self.healthbar.setValue(self.myPlayer.health)\r\n pass", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def update_trace(Y, X, win, name):\n global vis\n vis.updateTrace(X, Y, win=win, name=name)", "def updateActionsAndMenus(self):\n self.app.actions.getAction(\"save_CAlpha\").setEnabled(self.loaded)\n self.app.actions.getAction(\"unload_CAlpha\").setEnabled(self.loaded)", "def update(self):", "def update(self):", "def update(self):", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def __init__(self, layer, parent):\r\n super(DialogLayerProperties, self).__init__(parent)\r\n self.layer = layer\r\n self.main = parent\r\n self.dialogTitle = 'LUMENS Layer Properties - ' + self.layer.name()\r\n self.layerSymbolFillColor = self.styleCategorizedColor = self.styleGraduatedColor = self.styleRuleBasedColor = self.labelColor = QtGui.QColor(0, 0, 0) # black\r\n \r\n if self.main.appSettings['debug']:\r\n print 'DEBUG: DialogLayerProperties init'\r\n self.logger = logging.getLogger(type(self).__name__)\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n ch = logging.StreamHandler()\r\n ch.setFormatter(formatter)\r\n fh = logging.FileHandler(os.path.join(self.main.appSettings['appDir'], 'logs', type(self).__name__ + '.log'))\r\n fh.setFormatter(formatter)\r\n self.logger.addHandler(ch)\r\n self.logger.addHandler(fh)\r\n self.logger.setLevel(logging.DEBUG)\r\n \r\n self.setupUi(self)\r\n \r\n self.loadLayerSettings()\r\n \r\n self.comboBoxStyleType.currentIndexChanged.connect(self.handlerChangeStyleType)\r\n self.handlerChangeStyleType(0)\r\n self.buttonAddStyleCategorized.clicked.connect(self.handlerAddStyleCategorized)\r\n 
self.buttonAddStyleGraduated.clicked.connect(self.handlerAddStyleGraduated)\r\n self.buttonAddStyleRuleBased.clicked.connect(self.handlerAddStyleRuleBased)\r\n self.buttonDeleteStyleCategorized.clicked.connect(self.handlerDeleteStyleCategorized)\r\n self.buttonDeleteStyleGraduated.clicked.connect(self.handlerDeleteStyleGraduated)\r\n self.buttonDeleteStyleRuleBased.clicked.connect(self.handlerDeleteStyleRuleBased)\r\n self.buttonDeleteAllStyleCategorized.clicked.connect(self.handlerDeleteAllStyleCategorized)\r\n self.buttonDeleteAllStyleGraduated.clicked.connect(self.handlerDeleteAllStyleGraduated)\r\n self.buttonDeleteAllStyleRuleBased.clicked.connect(self.handlerDeleteAllStyleRuleBased)\r\n self.sliderLayerTransparency.sliderMoved.connect(self.handlerSliderLayerTransparencyMoved)\r\n self.spinBoxLayerTransparency.valueChanged.connect(self.handlerSpinBoxLayerTransparencyValueChanged)\r\n self.buttonLayerSymbolFillColor.clicked.connect(self.handlerSelectFillColor)\r\n self.buttonStyleCategorizedFillColor.clicked.connect(self.handlerSelectFillColor)\r\n self.buttonStyleGraduatedFillColor.clicked.connect(self.handlerSelectFillColor)\r\n self.buttonStyleRuleBasedFillColor.clicked.connect(self.handlerSelectFillColor)\r\n self.buttonExpressionBuilderDialog.clicked.connect(self.handlerExpressionBuilderDialog)\r\n self.buttonLabelColor.clicked.connect(self.handlerSelectLabelColor)\r\n self.buttonBox.accepted.connect(self.accept)\r\n self.buttonBox.rejected.connect(self.reject)", "def update_drawings_from_structure_volume(self, name_u, side):\n\n volume, bbox = self.structure_volumes[name_u]\n xmin, xmax, ymin, ymax, zmin, zmax = bbox\n print 'volume', volume.shape, xmin, xmax, ymin, ymax, zmin, zmax\n\n volume_downsample_factor = self.gui.volume_downsample_factor\n # xmin_lossless, xmax_lossless, ymin_lossless, ymax_lossless, zmin_lossless, zmax_lossless = np.array(bbox) * downsample\n bbox_lossless = np.array(bbox) * volume_downsample_factor\n\n downsample = self.data_feeder.downsample\n\n if volume_downsample_factor <= downsample:\n\n volume_downsampled = volume[::downsample/volume_downsample_factor, ::downsample/volume_downsample_factor, ::downsample/volume_downsample_factor]\n xmin_ds, xmax_ds, ymin_ds, ymax_ds, zmin_ds, zmax_ds = np.array(bbox_lossless) / downsample\n\n print 'volume_downsampled', volume_downsampled.shape, xmin_ds, xmax_ds, ymin_ds, ymax_ds, zmin_ds, zmax_ds\n\n matched_confirmed_polygons = [(i, p) for i, polygons in self.drawings.iteritems()\n for p in polygons if p.label == name_u and p.type != 'interpolated' and p.side == side]\n\n # matched_unconfirmed_polygons = [(i, p) for i, polygons in self.drawings.iteritems() for p in polygons if p.label == name_u and p.type == 'interpolated']\n # for i, p in matched_unconfirmed_polygons:\n # if i == self.active_i:\n # self.removeItem(p)\n # self.drawings[i].remove(p)\n\n # if self.data_feeder.orientation == 'sagittal':\n if hasattr(self.data_feeder, 'sections'):\n assert self.data_feeder.orientation == 'sagittal'\n # sec_min = DataManager.convert_z_to_section(stack=self.data_feeder.stack, z=zmin, downsample=downsample)\n # matched_confirmed_sections = [self.data_feeder.sections[i] for i, p in matched_confirmed_polygons]\n matched_confirmed_sections = [self.data_feeder.sections[i] for i, p in matched_confirmed_polygons]\n\n if len(matched_confirmed_sections) > 0:\n min_sec = np.min(matched_confirmed_sections)\n max_sec = np.max(matched_confirmed_sections)\n else:\n min_sec = 
DataManager.convert_z_to_section(stack=self.data_feeder.stack, z=zmin, downsample=volume_downsample_factor)\n max_sec = DataManager.convert_z_to_section(stack=self.data_feeder.stack, z=zmax, downsample=volume_downsample_factor)\n\n for sec in range(min_sec, max_sec+1):\n\n # remove if this section has interpolated polygon\n # if sec not in self.data_feeder.sections:\n if sec not in self.data_feeder.sections:\n sys.stderr.write('Section %d is not loaded.\\n' % sec)\n continue\n\n # i = self.data_feeder.sections.index(sec)\n i = self.data_feeder.sections.index(sec)\n matched_unconfirmed_polygons_to_remove = [p for p in self.drawings[i] if p.label == name_u and p.type == 'interpolated' and p.side == side]\n for p in matched_unconfirmed_polygons_to_remove:\n self.drawings[i].remove(p)\n if i == self.active_i:\n self.removeItem(p)\n\n if sec in matched_confirmed_sections:\n continue\n\n\n z0, z1 = DataManager.convert_section_to_z(stack=self.data_feeder.stack, sec=sec, downsample=downsample)\n # z_currResol = int(np.round((z0 + z1)/2))\n z_currResol = .5 * z0 + .5 * z1\n z_volResol = int(np.round(z_currResol * downsample / volume_downsample_factor))\n # (int(np.ceil(z0)) + int(np.floor(z1))) / 2\n # z_volResol = z_currResol * downsample / volume_downsample_factor\n print sec, z0, z1, z_currResol, z_volResol, zmin\n # if downsample == 32:\n cnts_volResol = find_contour_points(volume[:, :, z_volResol - zmin].astype(np.uint8), sample_every=20)\n\n # print cnts_volResol\n\n if len(cnts_volResol) > 0 and 1 in cnts_volResol:\n # print x_ds\n xys_volResol = np.array(cnts_volResol[1][0])\n gscene_xs_volResol = xys_volResol[:,0] + xmin # the coordinate on gscene's x axis\n gscene_ys_volResol = xys_volResol[:,1] + ymin\n gscene_points_volResol = np.c_[gscene_xs_volResol, gscene_ys_volResol]\n gscene_points_currResol = gscene_points_volResol * volume_downsample_factor / downsample\n self.add_polygon_with_circles_and_label(path=vertices_to_path(gscene_points_currResol),\n label=name_u, linecolor='g', section=sec, type='interpolated',\n side=side,\n side_manually_assigned=False)\n else:\n # raise Exception('Sagittal interpolation on volume data is not implemented.')\n\n matched_confirmed_positions = [i for i, p in matched_confirmed_polygons]\n\n if self.data_feeder.orientation == 'sagittal':\n posmin_ds = zmin_ds\n posmax_ds = zmin_ds + volume_downsampled.shape[2] - 1\n elif self.data_feeder.orientation == 'coronal':\n posmin_ds = xmin_ds\n posmax_ds = xmin_ds + volume_downsampled.shape[1] - 1\n elif self.data_feeder.orientation == 'horizontal':\n posmin_ds = ymin_ds\n posmax_ds = ymin_ds + volume_downsampled.shape[0] - 1\n\n for pos_ds in range(posmin_ds, posmax_ds+1):\n\n # if pos_ds in matched_confirmed_positions:\n # continue\n\n # remove if this section has interpolated polygon\n matched_unconfirmed_polygons_to_remove = [p for p in self.drawings[pos_ds] if p.label == name_u and p.type == 'interpolated' and p.side == side]\n for p in matched_unconfirmed_polygons_to_remove:\n self.drawings[pos_ds].remove(p)\n if pos_ds == self.active_i:\n self.removeItem(p)\n\n if self.data_feeder.orientation == 'sagittal':\n raise Exception('Not implemented.')\n\n elif self.data_feeder.orientation == 'coronal':\n\n cnts = find_contour_points(volume_downsampled[:, pos_ds-posmin_ds, :].astype(np.uint8), sample_every=max(20/downsample, 10))\n if len(cnts) == 0 or 1 not in cnts:\n sys.stderr.write('%s: Contour not found with reconstructed volume.\\n' % self.id)\n continue\n # Contour for label 1 (which is the only label in 
the boolean volume)\n zys = np.array(cnts[1][0])\n gscene_xs = self.data_feeder.z_dim - 1 - (zys[:,0] + zmin_ds) # the coordinate on gscene's x axis\n gscene_ys = zys[:,1] + ymin_ds\n\n elif self.data_feeder.orientation == 'horizontal':\n\n cnts = find_contour_points(volume_downsampled[pos_ds-posmin_ds, :, :].astype(np.uint8), sample_every=max(20/downsample, 10))\n if len(cnts) == 0 or 1 not in cnts:\n sys.stderr.write('%s: Contour not found with reconstructed volume.\\n' % self.id)\n continue\n\n zxs = np.array(cnts[1][0])\n gscene_xs = zxs[:,1] + xmin_ds\n gscene_ys = self.data_feeder.z_dim - 1 - (zxs[:,0] + zmin_ds) # the coordinate on gscene's x axis\n\n pts_on_gscene = np.c_[gscene_xs, gscene_ys]\n self.add_polygon_with_circles_and_label(path=vertices_to_path(pts_on_gscene), label=name_u,\n linecolor='g', vertex_radius=1, linewidth=2, index=pos_ds,\n type='interpolated',\n side=side,\n side_manually_assigned=False)\n\n\n # elif self.data_feeder.orientation == 'coronal':\n # # x = self.active_i * self.data_feeder.downsample\n #\n # matched_confirmed_positions = [i for i, p in matched_confirmed_polygons]\n #\n # # for x_ds in range(xmin_ds, xmax_ds + 1):\n # for x_ds in range(xmin_ds, xmin_ds + volume_downsampled.shape[1]):\n #\n # if x_ds in matched_confirmed_positions:\n # continue\n #\n # # remove if this section has interpolated polygon\n # matched_unconfirmed_polygons_to_remove = [p for p in self.drawings[x_ds] if p.label == name_u and p.type == 'interpolated']\n # for p in matched_unconfirmed_polygons_to_remove:\n # self.drawings[x_ds].remove(p)\n # if x_ds == self.active_i:\n # self.removeItem(p)\n #\n # # print x_ds\n # cnts = find_contour_points(volume_downsampled[:, x_ds-xmin_ds, :].astype(np.uint8), sample_every=max(20/downsample, 10))\n # if len(cnts) > 0 and 1 in cnts: # Contour for label 1 (which is the only label in the boolean volume)\n # # print x_ds\n # zys = np.array(cnts[1][0])\n # gscene_xs = self.data_feeder.z_dim - 1 - (zys[:,0] + zmin_ds) # the coordinate on gscene's x axis\n # gscene_ys = zys[:,1] + ymin_ds\n # pts_on_gscene = np.c_[gscene_xs, gscene_ys]\n # # print pts_on_gscene\n # self.add_polygon_with_circles_and_label(path=vertices_to_path(pts_on_gscene), label=name_u,\n # linecolor='g', vertex_radius=1, linewidth=2, index=x_ds,\n # type='interpolated')\n #\n # elif self.data_feeder.orientation == 'horizontal':\n #\n # matched_confirmed_positions = [i for i, p in matched_confirmed_polygons]\n #\n # for y_ds in range(ymin_ds, ymin_ds + volume_downsampled.shape[0]):\n #\n # if x_ds in matched_confirmed_positions:\n # continue\n #\n # # remove if this section has interpolated polygon\n # matched_unconfirmed_polygons_to_remove = [p for p in self.drawings[y_ds] if p.label == name_u and p.type == 'interpolated']\n # for p in matched_unconfirmed_polygons_to_remove:\n # self.drawings[y_ds].remove(p)\n # if y_ds == self.active_i:\n # self.removeItem(p)\n #\n # cnts = find_contour_points(volume_downsampled[y_ds-ymin_ds, :, :].astype(np.uint8), sample_every=max(20/downsample, 10))\n # if len(cnts) > 0 and 1 in cnts:\n # # print y_ds\n # zxs = np.array(cnts[1][0])\n # gscene_xs = zxs[:,1] + xmin_ds\n # gscene_ys = self.data_feeder.z_dim - 1 - (zxs[:,0] + zmin_ds) # the coordinate on gscene's x axis\n # pts_on_gscene = np.c_[gscene_xs, gscene_ys]\n # self.add_polygon_with_circles_and_label(path=vertices_to_path(pts_on_gscene), label=name_u,\n # linecolor='g', vertex_radius=1, linewidth=2, index=y_ds,\n # type='interpolated')\n\n # elif self.data_feeder.orientation == 
'sagittal':\n # z = self.active_i\n # yxs = find_contour_points(volume[:,:,z-zmin])\n # self.add_polygon_with_circles_and_label(path=vertices_to_path(yxs[:, ::-1]+(xmin, ymin)), label=name_u, index=z)", "def update(self, dt):\n self.level.update(self.keys, dt)", "def handle_layers(context, model, toplayer, layerids, materials, update, import_hidden=False):\n #setup main container to hold all layer collections\n layer_col_id=\"Layers\"\n if not layer_col_id in context.blend_data.collections:\n layer_col = context.blend_data.collections.new(name=layer_col_id)\n try:\n toplayer.children.link(layer_col)\n except Exception:\n pass\n else:\n #If \"Layers\" collection is in place, we assume the plugin had imported 3dm before\n layer_col = context.blend_data.collections[layer_col_id]\n\n # build lookup table for LayerTable index\n # from GUID, create collection for each\n # layer\n for lid, l in enumerate(model.Layers):\n if not l.Visible and not import_hidden:\n continue\n lcol = utils.get_iddata(context.blend_data.collections, l.Id, l.Name, None)\n layerids[str(l.Id)] = (lid, lcol)\n utils.tag_data(layerids[str(l.Id)][1], l.Id, l.Name)\n '''\n matname = l.Name + \"+\" + str(l.Id)\n if matname not in materials:\n laymat = utils.get_iddata(context.blend_data.materials, l.Id, l.Name, None)\n if update:\n\t laymat.use_nodes = True\n\t r, g, b, _ = l.Color\n\t principled = PrincipledBSDFWrapper(laymat, is_readonly=False)\n\t principled.base_color = (r/255.0, g/255.0, b/255.0)\n materials[matname] = laymat\n '''\n # second pass so we can link layers to each other\n for l in model.Layers:\n # link up layers to their parent layers\n if str(l.ParentLayerId) in layerids:\n parentlayer = layerids[str(l.ParentLayerId)][1]\n try:\n parentlayer.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass\n # or to the top collection if no parent layer was found\n else:\n try:\n layer_col.children.link(layerids[str(l.Id)][1])\n except Exception:\n pass", "def updateVisualization(self):\n\t\tif self.visualization:\n\t\t\tif self.fixedVisualization:\n\t\t\t\tself.visualization.setFixedVisualization(self.fixedVisualization)\n\t\t\tif self.movingVisualization:\n\t\t\t\tself.visualization.setMovingVisualization(self.movingVisualization)\n\t\tself.multiRenderWidget.setVolumeVisualization(self.visualization)\n\t\tself.visualizationUpdated.emit(self.visualization)", "def setMyStatus(self):\n self.clearMyStatus()\n self.mass = self.myShipHull.mass\n for position, myQuad in self.quads.iteritems():\n self.maxBattery += myQuad.maxBattery\n self.currentPower += myQuad.maxPower\n self.thrust += myQuad.thrust\n self.rotation += myQuad.rotation\n self.radar += myQuad.radar\n self.jamming += myQuad.jamming\n self.repair += myQuad.repair\n self.mass += myQuad.mass\n self.maxAssault += myQuad.maxAssault\n\n # scale back attributes if internal structure has been hit\n ratio = self.currentISP/self.myShipHull.maxISP\n self.currentPower = self.currentPower * ratio\n self.thrust = self.thrust * ratio\n self.rotation = self.rotation * ratio\n\n self.accel = self.myDesign.getAccel(self.thrust, self.mass)\n self.accel = self.accel\n\n self.rotation = self.myDesign.getRotation(self.rotation, self.mass)\n self.rotation = self.rotation\n self.setMyStrength()\n self.setWeaponStatus()\n self.setRange()\n self.setAssaultStrength(ratio)", "def upgrade(self):\n if self.level < len(self.tower_images):\n self.level_up_animation = True\n self.level += 1\n self.base_damage += 3\n self.damage = self.base_damage\n\n #Since level does not upgrade 
in menu we have to manually do it here\n self.menu.tower_level += 1", "def update(self):\r\n if self.opportunity or 'key' in inventory:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/greenPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))\r\n elif not self.opportunity:\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/redPortal.png\").convert_alpha(),\r\n (50, 75))\r\n self.image.set_colorkey((255, 255, 255))", "async def update(self):\n \n logging.info('updating state...')\n info = await self.send_command(\"$dat\", \"upd01-\")\n if not info:\n msg = \"Unable to get data about windows and scenes from Gateway\"\n return msg\n\n logging.debug('hub response is :')\n logging.debug(info)\n prefix = None\n lines = re.split(r'[\\n\\r]+', info)\n\n for line in lines:\n line = line.strip()\n if not prefix:\n prefix = line[:2]\n elif not line.startswith(prefix):\n continue\n else:\n line = line[2:]\n\n if line.startswith(\"$cr\"):\n # name of room\n room_id = line[3:5]\n room_name = line.split('-')[-1].strip()\n if(not room_name in self.rooms):\n logging.debug('creating room '+room_name)\n self.rooms[room_name] = HunterDouglasPlatinumRoom(hub=self, name=room_name, id=int(room_id))\n elif line.startswith(\"$cm\"):\n # name of scene\n scene_id = line[3:5]\n scene_name = line.split('-')[-1].strip()\n if(not scene_name in self.scenes):\n logging.debug('creating scene '+scene_name)\n self.scenes[scene_name] = HunterDouglasPlatinumScene(hub=self, name=scene_name, id=int(scene_id))\n elif line.startswith(\"$cs\"):\n # name of a shade\n parts = line.split('-')\n shade_id = line[3:5]\n shade_name = parts[-1].strip()\n room_id = parts[1]\n if(not shade_name in self.shades):\n logging.debug('creating shade '+shade_name)\n self.shades[shade_name] = HunterDouglasPlatinumShade(hub=self, name=shade_name, id=int(shade_id), room=int(room_id))\n elif line.startswith(\"$cp\"):\n # state of a shade\n shade_id = line[3:5]\n state = line[-4:-1]\n state = int(state)\n shade = self.get_shade(id=int(shade_id))\n logging.debug('updating shade state for shade '+shade_id+' to '+str(state)+' for shade '+str(shade))\n if shade:\n shade.set_state(state)\n return \"\"", "def update_stats(self):\n self.scoreText = pygame.font.Font(FONT, 20)\n\n #update score\n textsurface = self.scoreText.render((\"Score: \"+str(self.current_score)), False, BLUE)\n self.screen.blit(textsurface,(5,5))\n\n #update high score\n if self.highest_score <= self.current_score:\n self.highest_score = self.current_score\n #To write highest score to file\n filename = \"highscore.txt\"\n file = open(filename,\"w\")\n file.write(str(self.highest_score))\n file.close()\n\n #Display High Score\n textsurface = self.scoreText.render((\"Highest Score: \"+str(self.highest_score)), False, BLUE)\n self.screen.blit(textsurface,(230,5))\n\n #Display Life Text\n textsurface = self.scoreText.render(\"Lives: \", False, BLUE)\n self.screen.blit(textsurface,(570,5))\n\n #Shows lifes left\n for i in range(self.lives):\n self.live = pygame.image.load(\"./images/ship.png\").convert_alpha()\n self.live = pygame.transform.scale(self.live , (20, 20))\n self.screen.blit(self.live, (670+(i*25), 7))\n\n #Mute Button\n button=pygame.image.load(\"./images/mutebutton.png\")\n button=pygame.transform.scale(button,(30,30))\n self.screen.blit(button, (750,5))", "def ship_updates(ai, var, screen, ship, charges, shields, hub):\r\n\tship.update(ai)\r\n\tship.draw_ship()\r\n\tcharge_shield_graphics(ai, 
var, screen, ship, charges, shields, hub)", "def __call__(self, level):\r\n import time\r\n\r\n currentVolume = self.__findCurrentVolumeLevel()[0]\r\n\r\n assert isinstance(level, int), \"Given volume level is not integer (instead %s)\" % type(level)\r\n if not (level <= self.currentMaximumVolume and level >= 0):\r\n self.phone.fail(\"adjustVolume: given level is not valid. Valid ones for this specific volume bar are 0 - %s)\" % self.currentMaximumVolume)\r\n\r\n self.phone.comment(\"adjustVolume(%s)\" % level)\r\n\r\n if level < currentVolume:\r\n while level < currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_DOWN')\r\n currentVolume -= 1\r\n\r\n elif level > currentVolume:\r\n while level > currentVolume:\r\n self.phone.delay(200, False)\r\n self.phone.select('KBD_KEY_VOL_UP')\r\n currentVolume += 1\r\n\r\n else: # volume level is now ok, pass\r\n pass\r\n\r\n return True\r\n\r\n ## TODO: current volume level should be asked from yapas\r\n\r\n #doCheck = False\r\n\r\n #if doCheck:\r\n #\r\n # currentVolume = self.getCurrentVolumeLevel()\r\n # assert currentVolume == level, \"Adjusted volume, but the volume level is %s when it should be %s\" % (currentVolume, level)\r\n\r\n # debug.brf(\"Selected volume level %s and verified from UI\" % level)\r\n # return True\r\n #else:\r\n # debug.brf(\"Selected volume level %s\" % level)\r\n # return True\r", "def drawIsoSurfaces(self):\r\n # research\r\n profprint()\r\n\r\n slicer.modules.NeedleFinderWidget.hideContourButton.setEnabled(1)\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n\r\n v = vtk.vtkAppendPolyData()\r\n canContinue = 0\r\n for modelNode in modelNodes.values():\r\n print \"for\"\r\n if modelNode.GetAttribute(\"nth\") != None and modelNode.GetDisplayVisibility() == 1 :\r\n canContinue = 1\r\n v.AddInputData(modelNode.GetPolyData())\r\n\r\n if canContinue == 1:\r\n modeller = vtk.vtkImplicitModeller()\r\n modeller.SetInputConnection(v.GetOutputPort())\r\n modeller.SetSampleDimensions(60, 60, 60)\r\n modeller.SetCapping(0)\r\n modeller.AdjustBoundsOn()\r\n modeller.SetProcessModeToPerVoxel()\r\n modeller.SetAdjustDistance(1)\r\n modeller.SetMaximumDistance(1.0)\r\n modeller.Update()\r\n\r\n contourFilter = vtk.vtkContourFilter()\r\n contourFilter.SetNumberOfContours(1)\r\n contourFilter.SetInputConnection(modeller.GetOutputPort())\r\n contourFilter.ComputeNormalsOn()\r\n contourFilter.ComputeScalarsOn()\r\n contourFilter.UseScalarTreeOn()\r\n contourFilter.SetValue(1, 10)\r\n # contourFilter.SetValue(2,13)\r\n # contourFilter.SetValue(3,15)\r\n # contourFilter.SetValue(4,20)\r\n # contourFilter.SetValue(5,25)\r\n contourFilter.Update()\r\n isoSurface = contourFilter.GetOutputDataObject(0)\r\n\r\n self.AddContour(isoSurface)", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def on_update(self):\n \n # update physics engine\n \n \n # use code from pick up coins lab to pick up coins\n # you don't need all of the code from that lab(no gameover or reset)", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def __facilityChanged(self):\n self.removeAllItems()\n self._update()", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n 
self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def drawIsoSurfaces0( self ):\n #research\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n v= vtk.vtkAppendPolyData()\n \n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")!=None and modelNode.GetDisplayVisibility()==1 :\n v.AddInput(modelNode.GetPolyData())\n \n modeller = vtk.vtkImplicitModeller()\n modeller.SetInput(v.GetOutput())\n modeller.SetSampleDimensions(self.dim.value,self.dim.value,self.dim.value)\n modeller.SetCapping(0)\n modeller.SetAdjustBounds(self.abonds.value)\n modeller.SetProcessModeToPerVoxel() \n modeller.SetAdjustDistance(self.adist.value/100)\n modeller.SetMaximumDistance(self.maxdist.value/100) \n \n contourFilter = vtk.vtkContourFilter()\n contourFilter.SetNumberOfContours(self.nb.value)\n contourFilter.SetInputConnection(modeller.GetOutputPort()) \n contourFilter.ComputeNormalsOn()\n contourFilter.ComputeScalarsOn()\n contourFilter.UseScalarTreeOn()\n contourFilter.SetValue(self.contour.value,self.contourValue.value)\n contourFilter.SetValue(self.contour2.value,self.contourValue2.value)\n contourFilter.SetValue(self.contour3.value,self.contourValue3.value)\n contourFilter.SetValue(self.contour4.value,self.contourValue4.value)\n contourFilter.SetValue(self.contour5.value,self.contourValue5.value)\n\n isoSurface = contourFilter.GetOutput()\n self.AddContour(isoSurface)", "def update_fov(self) -> None:\n self.game_map.visible[:] = compute_fov(\n self.game_map.tiles[\"transparent\"],\n (self.player.x, self.player.y),\n radius=8,\n )\n # If a tile is \"visible\" it should be added to \"explored\".\n self.game_map.explored |= self.game_map.visible", "def update(self):\n self.data.update()\n for sensor in self.data.daikinskyport.get_sensors(self._index):\n if sensor[\"type\"] == self._type and self._sensor_name == sensor[\"name\"]:\n self._state = sensor[\"value\"]" ]
[ "0.7572625", "0.6483485", "0.62844723", "0.6087187", "0.60834914", "0.5838356", "0.58052474", "0.57790345", "0.57654625", "0.56409895", "0.5590038", "0.5531311", "0.552662", "0.55226326", "0.54919386", "0.5402099", "0.5401069", "0.53722477", "0.5337533", "0.5290507", "0.5276824", "0.5265918", "0.5265438", "0.52547926", "0.5244208", "0.5228894", "0.51997095", "0.5189326", "0.5178624", "0.5173066", "0.5173066", "0.5142397", "0.5136357", "0.5117855", "0.51167774", "0.51145333", "0.5100473", "0.50986207", "0.50922567", "0.5082943", "0.5078174", "0.50779516", "0.5077076", "0.5053719", "0.50515383", "0.5046248", "0.50451946", "0.5035389", "0.5035262", "0.5033131", "0.5011899", "0.50045496", "0.50023305", "0.49993572", "0.49905476", "0.49751237", "0.4974797", "0.49570826", "0.49555832", "0.4954962", "0.49464852", "0.49361464", "0.49326402", "0.4928378", "0.49254066", "0.49190786", "0.49169025", "0.49168983", "0.49164486", "0.49096414", "0.49034864", "0.49030882", "0.49013638", "0.48998985", "0.48969102", "0.4896891", "0.4896891", "0.4896891", "0.48884445", "0.48841408", "0.48821467", "0.48805138", "0.487956", "0.48746222", "0.4873073", "0.4867952", "0.4867304", "0.48658082", "0.48618448", "0.48574468", "0.48557812", "0.4855365", "0.4851869", "0.4844482", "0.48418114", "0.4839604", "0.48389167", "0.48363143", "0.48294938", "0.48276573" ]
0.6803121
1
Returns the list of items currently present in this item.
def getItems(self):
    return self.getCutPlanes() + self.getIsosurfaces()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_items(self):\n return self.item_list", "def get_items(self) -> list:\r\n return self._items", "def items(self) -> List[Item]:\n return self._items", "def get_all_items(self):\n return self.api.state['items']", "def items(self):\n return [x.item for x in self]", "def get_items(self):\n return self.item_ids", "def get_items(self):\n return self.items", "def available_items(self):\n return [item for item in self.all_items.values() if self.is_available(item)]", "def items(self):\n return self._items", "def getAllItemsList():\n return Gw2Spidy._request('all-items', 'all')['results']", "def get_items(self):\n return (item for item in self.items)", "def get_items(self):\r\n item_list = []\r\n for item in self._inventory:\r\n item_list.append(item._name)\r\n return item_list", "def get_items(self):\n return []", "def get_items(self) -> list:\n if self._cached_items is None:\n self._cached_items = list(self.items.all())\n return self._cached_items", "def items(self):\n return list(self.items_generator())", "def get_item_list(cls):\n if Item.__item_list is None:\n Item.__item_list = []\n return Item.__item_list", "def get_items(self):\r\n return self.items()", "def get_all(self):\n return self.__items", "def allItems(self):\n items = []\n for itemType in self.__inventory__:\n for item in self.__inventory__[itemType]:\n items.append(item)\n return items", "def get_items(self):\n return self.order_items", "def items(self):\n self._remove_expired()\n\n return self._d.items()", "def items(self):\n return self._get_storage().items()", "def get(self):\n return self._items", "def get(self):\n return self._items", "def get_all(self):\n\n return self._items[:]", "def items(self) -> [Room]:\n return self.__items", "def items(self):\n return self._as_dict().items()", "def items(self):\n return self._d.items()", "def get_items(self):\n return [item for item in self.items if item.quantity > 0]", "def items(self):\n return self.d.items()", "def items(self):\n return self.d.items()", "def items(self):\r\n return self._as_dict().items()", "def items(self) -> List[RadioStation]:\n return self._items", "def items(self):\r\n return self.elements.values()", "def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']", "def list(self):\n return self._get_list()", "def get_list_items(self):\n list_items = self.driver.find_element(*list_page_locators.LIST_EL)\n return list_items.find_elements(*list_page_locators.LIST_ITEM_EL)", "def getItems(self):\n return self._nsObject.itemTitles()", "def items(self):\n return iteritems(self.__inflated)", "def all_items(self) -> ItemGroup:\n return self.items + self.end_items", "def items(self):\n return self.__items(())", "def items(self):\n return self._ctx.items()", "def items(self):\n return self.root.items()", "def items(self):\n current = self.first\n output = []\n\n while current is not None:\n output.append(current.item)\n current = current.next_node\n\n return output", "def FilterItems(self):\r\n\t\treturn self._get_attribute('filterItems')", "def get_display_items(self):\r\n items = []\r\n for child in self.get_children():\r\n items.extend(child.displayable_items())\r\n\r\n return items", "def getItemList(self):\r\n raise AbstractError\r\n return []", "def get_player_items(self):\n return self.player.items", "def displayable_items(self):\r\n return [self]", "def get_list(self):\n obj_list = []\n for group in self.root_item.child_items:\n for item in group.child_items:\n obj_list.append(item.obj)\n\n return obj_list", "def 
get_children(self):\n return self.items", "def get_playlist_items(self):\n results = self.API.playlist(self.playlist_uri)\n return results[\"tracks\"][\"items\"]", "def items(self):\n return self._tagged.items()", "def fetch_items(self):\n if self.items is None:\n rows = self.sqldb.execute(TodoManager.select_sql).fetchall()\n self.items = [TodoItem._make(row) for row in rows]\n return self.items", "def items(self) -> List[InlineResponse200Items]:\n return self._items", "def items(self):\n items = []\n for item in self.contents:\n items.append((item, self.contents[item]))\n return items", "def get_menu_items(self) -> List[str]:\n return sorted(self._items()) # return a copy", "def items(self) -> List:\n pass", "def Items(self) -> list:\n return list(self.get(0, Tags.End.value))", "def get_all(self,empty=True):\n with self.lock:\n items = self.items\n if empty: self.items = []\n return items", "def get_items():\n return requester.perform_request(Uri.items)", "def items(self):\n items = []\n current = self.head\n while current != None:\n items.append(current.data)\n current = current.next\n return items", "def getAll(self):\n return self.__lst", "def items(self) -> Tuple[Item]:\n return tuple(self.__items)", "def list(self):\n return [self.inUse, self.type, self.previousBlock, self.amount,\n self.blocks, self.nextBlock, self.items]", "def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KeyToPathArgs']]]]:\n return pulumi.get(self, \"items\")", "def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['KeyToPathArgs']]]]:\n return pulumi.get(self, \"items\")", "def show(self):\n return self.items", "def items_in_order(self):\n items = []\n if not self.is_empty():\n # Traverse tree in-order from root, appending each node's item\n self._traverse_in_order_recursive(self.root, items.append)\n # self._traverse_in_order_iterative(self.root, items.append)\n # Return in-order list of all items in tree\n return items", "def getList(self):\n return self.list", "def getItemNames(self):\n\t\treturn self.items.keys()", "def getList(self):\n return self.list_", "def getitems(self):\n if self.onlydiag():\n return self.getdiag()\n else:\n return self.items()", "def __call__(self):\n return self.get_items()", "def allItemsByType(self, itemType):\n if itemType.value not in self.__inventory__:\n return []\n return self.__inventory__[itemType.value]", "def get_items_to_index(self):\n\t\treturn []", "def items(self):\n return self.docs.items()", "def get_list(self):\n return self.__repository.get_all()", "def all_items(self):\n return range(self.n_items)", "def getList(self):\n return self.position.exportToList()", "def items(self):\n return DiscoDBItemInquiry(lambda: ((k, self[k]) for k in self))", "def items(self):\n out = []\n for y,x in self.coords(False):\n out.append(self.retrieve(y, x))\n return out", "def get_orderItems(self):\n \n out_list = []\n for o in self.order_lst:\n out_list += o.get_items()\n \n return out_list", "def get_inventory(self):\n from noc.inv.models.object import Object\n\n return list(Object.objects.filter(data__management__managed_object=self.id))", "def list_items(self) -> List[Dict[str, Any]]:\n return [c.to_dict() for c in self._objects.values()]", "def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardVariableItemArgs']]]]:\n return pulumi.get(self, \"items\")", "def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data", "def items(self):\n with 
self.__plock:\n return map(lambda key: (key, self[key]), self._keys)", "def items(self):\n # Collect all pairs of key-value entries in each bucket\n all_items = []\n for bucket in self.buckets:\n all_items.extend(bucket.items())\n return all_items", "def getItems(self): \n items = []\n if self.itemCount > 0:\n \n site = getSite()\n \n \n # Make string path relative to the site root\n # E.g. string path \"news\" becomes \"/yoursiteid/news\"\n site_path = site.getPhysicalPath();\n \n path = \"/\".join(site_path) + \"/\" + self.path \n \n #if self.itemPortalType2 != None:\n # types.append(self.itemPortalType2) \n \n #print \"Querying by:\" + type + \" \" + path\n content_by_type = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n \n items += [ brain.getObject() for brain in content_by_type ]\n\n return items", "def get_items(self):\n\n if not self.ITEM_MODEL:\n raise NotImplementedError(f\"ITEM_MODEL attribute not defined for {__class__}\")\n\n ids = []\n\n # Construct a list of possible query parameter value options\n # e.g. if self.ITEM_KEY = 'order' -> ['order', 'order[]', 'orders', 'orders[]']\n for k in [self.ITEM_KEY + x for x in ['', '[]', 's', 's[]']]:\n if ids := self.request.query_params.getlist(k, []):\n # Return the first list of matches\n break\n\n # Next we must validated each provided object ID\n valid_ids = []\n\n for id in ids:\n try:\n valid_ids.append(int(id))\n except ValueError:\n pass\n\n # Filter queryset by matching ID values\n return self.ITEM_MODEL.objects.filter(pk__in=valid_ids)", "def get_ids(self):\n return [item.id for item in self.items]", "def getList(self):\n\treturn self.list", "def get_all_items(unit) -> list:\n items = []\n for item in unit.items:\n if item.multi_item:\n for subitem in item.subitems:\n items.append(subitem)\n else:\n items.append(item)\n return items", "def getList(self):\n return self.sorted_list.getList()", "def __getitem__(self, item):\n return self.getList()", "def get_out(self):\r\n\t\tlogger.debug(\"Getting items checked out.\")\r\n\t\t\r\n\t\treturn db.items_out()", "def getList(self):\r\n node = self.root\r\n list = []\r\n return self.inOrderTraverse(node, list)", "def list(self):\n return self._list(self._path())", "def return_items(self):\n cur = self.cursor\n cur.execute(f\"SELECT * FROM {self.product_name}\")\n products = cur.fetchall()\n return products", "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))" ]
[ "0.8302771", "0.82145596", "0.80660933", "0.7957701", "0.79011357", "0.7892576", "0.7880532", "0.7875717", "0.7802759", "0.7696309", "0.7644806", "0.762694", "0.75896615", "0.7539122", "0.735725", "0.7353106", "0.7332338", "0.732954", "0.7262226", "0.7146134", "0.7119999", "0.7106275", "0.71061665", "0.71061665", "0.71057206", "0.7063837", "0.700506", "0.6960505", "0.6960424", "0.69378185", "0.69378185", "0.69327366", "0.6923031", "0.68754315", "0.6871073", "0.6860397", "0.67960924", "0.6791937", "0.6784801", "0.6751456", "0.6750904", "0.6722622", "0.6707575", "0.6702648", "0.6676585", "0.6674565", "0.6673461", "0.66607493", "0.66190666", "0.6614566", "0.6603822", "0.66034013", "0.6602325", "0.6598608", "0.6597923", "0.65658575", "0.6565293", "0.65565675", "0.655246", "0.6551729", "0.65311444", "0.65264696", "0.6519053", "0.64922416", "0.64843535", "0.64783263", "0.64783263", "0.6466235", "0.6461723", "0.6457533", "0.64483887", "0.63884366", "0.6382414", "0.63671833", "0.636301", "0.63452303", "0.6344397", "0.6333941", "0.63290817", "0.63281447", "0.6319554", "0.6305699", "0.6303142", "0.62836105", "0.62738997", "0.62733346", "0.6266473", "0.626462", "0.62621593", "0.6260555", "0.6238665", "0.62247455", "0.62137884", "0.6211374", "0.6207654", "0.61964107", "0.61833715", "0.617442", "0.61705506", "0.61667275", "0.6157518" ]
0.0
-1
Synchronize this instance data with that of its parent
def _syncDataWithParent(self):
    parent = self.parent()
    if parent is None:
        data, range_ = None, None
    else:
        mode = self.getComplexMode()
        data = parent.getData(mode=mode, copy=False)
        range_ = parent.getDataRange(mode=mode)
    self._updateData(data, range_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()", "def sync(self):\n pass", "def sync(self):\n pass", "def sync(self):\n return", "def sync(self, other):\n pass # TODO", "def do_sync(self):\n raise NotImplementedError() # pragma: no cover", "def sync_local(self, other):\n pass # TODO", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def sync(self, **kwargs):\n pass", "def update_original_data(self):\n pass", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def sync(self):\n return self._sync", "def update(self, parent):\r\n pass", "def _post_sync(self):", "def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)", "def lock(self):\n raise NotImplementedError", "def sync() -> None:", "def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)", "def SyncRoot(self) -> object:", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self, sync):\n self._sync = sync", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def _pre_sync(self):", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid 
infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def freeze(self,):\n pass", "def sync(self):\n # TODO: write better documentation: when would user need this?\n wait(self.proto.sync())", "def sync(self, sync):\n\n self._sync = sync", "def after_sync(self):\n self.title = self.c[\"title\"]\n self.body = self.c[\"body\"]\n self.state = self.c[\"state\"]\n self.base = self.c[\"base\"][\"ref\"]\n self.head = self.c[\"head\"][\"ref\"]\n self.maintainer_can_modify = self.c[\"maintainer_can_modify\"]", "def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))", "def sync(self) -> None: #\n self.__target.load_state_dict(self.__policy.state_dict())", "def sync(self):\n resp = yield self.do_sync()\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)", "def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)", "def __post_init__(self):\n # ------------------------------------------------------------ 01\n # if path exists load data dict from it\n # that is sync with contents on disk\n if self.path.exists():\n _hashable_dict_from_disk = \\\n m.FrozenDict.from_yaml(self.path.read_text())\n # update internal dict from HashableDict loaded from disk\n self.__dict__.update(\n _hashable_dict_from_disk.get()\n )\n\n # ------------------------------------------------------------ 02\n # start syncing i.e. 
any updates via __setattr__ will be synced\n # to disc\n self.internal.start_syncing = True", "def _notify_parent_change(self):\n pass", "def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())", "def freeze(self):\n raise NotImplementedError()", "def i_am_locking(self):\r\n pass", "def update(self):\n\n pass", "def update(self):\n return self", "def update(self):\n raise NotImplementedError", "def _update(self):\n pass", "def update(self):\r\n pass", "def on_parent_changed(self):\n pass", "def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()", "def sync_remote(self, other):\n pass # TODO", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n raise NotImplementedError()", "def lock (self):\n self.locked = True\n self._changed = False", "def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")", "def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()", "def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. 
It won't pickle well.\n s[\"_parent\"] = None\n return s", "def update(self):\n with managed_session() as session:\n session.merge(self)", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)", "def update(self):\n # default implementation is to do nothing.", "def sync_widgets(self):\n self.data_changed.emit(self.value)", "def sync_to_ontology(self):\n self.ontology.sync_entity_to_graph(self)", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def copy(self):\n return super().copy()", "def lock(self):\n self.mtx.acquire()", "def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()", "def build(self):\n self.lock_built = True", "def after_sync(self):\n pass", "def update_data():\n pass", "def __init__(self):\n self.data = {}\n self.refresh()", "def __enter__(self):\n\n self.create()\n return super().__enter__()", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "async def async_update(self) -> None:\n await super().async_update()\n await self.async_get_state()", "def restore_object(self):\n self.co_worker_list = self.original_co_worker_list", "def _update_object(self, data_dict):\r\n pass", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def sync_info(self, sync_info):\n\n self._sync_info = sync_info", "def update(self):\n self._xfinity_data.update()", "def cambiar_parent(self):\r\n self.client.parent = self", "def cambiar_parent(self):\r\n self.client.parent = self", "def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()", "def sync_end(self):", "def __enter__(self):\n return self._get_storage().__enter__()", "def __enter__(self):\n\n return self" ]
[ "0.8105605", "0.80830806", "0.7733652", "0.71153784", "0.71153784", "0.70563495", "0.7043718", "0.674526", "0.65729344", "0.6530828", "0.64562", "0.6451494", "0.6362502", "0.6362502", "0.6325555", "0.63112843", "0.6255493", "0.6245364", "0.6242624", "0.619789", "0.61851496", "0.61773336", "0.61612016", "0.61592674", "0.615458", "0.61517084", "0.6139571", "0.61280423", "0.6117423", "0.6102235", "0.60881495", "0.6027518", "0.6023041", "0.6007091", "0.5970572", "0.5955212", "0.59466237", "0.5941309", "0.59173286", "0.5876683", "0.58658206", "0.5858481", "0.58379585", "0.583246", "0.5813005", "0.57983154", "0.5788369", "0.5780768", "0.5767515", "0.57577527", "0.57572305", "0.5747498", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747163", "0.5744666", "0.57408583", "0.5727137", "0.5714643", "0.5712627", "0.5683065", "0.5666505", "0.56472826", "0.5630778", "0.56155974", "0.56155485", "0.55971104", "0.5590951", "0.5575481", "0.5554041", "0.5548815", "0.55483866", "0.5545577", "0.55447865", "0.5542659", "0.554085", "0.5539797", "0.55267113", "0.5525976", "0.5525867", "0.5520475", "0.5518965", "0.5518965", "0.55176026", "0.5515576", "0.5513838", "0.5512805" ]
0.7977319
2
Handle update of the cut plane (and take care of mode change)
def _updated(self, event=None):
    if event == ItemChangedType.COMPLEX_MODE:
        self._syncDataWithParent()
    super(ComplexCutPlane, self)._updated(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plane_update(self):\n self.plane.update()", "def _update(self):\n self.cv.update()", "def onUpdateFactors(self, evt):\n\t\tif self.blockFactorUpdate:\n\t\t\tprint \"Blocking factor update\"\n\t\t\treturn\n\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\tfx = 1\n\t\tfy = 1\n\t\tfz = 1\n\t\ttry:\n\t\t\tfx = float(self.factorX.GetValue())\n\t\t\tfy = float(self.factorY.GetValue())\n\t\t\tfz = float(self.factorZ.GetValue())\n\t\texcept:\n\t\t\tpass\n\t\tx *= fx\n\t\ty *= fy\n\t\tz *= fz\n\t\tself.blockDimUpdate = 1\n\t\tself.newDimX.SetValue(\"%d\" % x)\n\t\tself.newDimY.SetValue(\"%d\" % y)\n\t\tself.newDimZ.SetValue(\"%d\" % z)\n\t\tself.currSize = (x, y, z)\n\t\tself.blockDimUpdate = 0", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")", "def update():\n # TODO: Park the car 30 cm away from the closest orange cone.\n # Use both color and depth information to handle cones of multiple sizes.\n # You may wish to copy some of your code from lab2b.py\n global speed\n global angle\n global curState\n # Search for contours in the current color image\n update_contour()\n\n imgX = rc.camera.get_width()\n\n depth_image = rc.camera.get_depth_image()\n depth_image_adjust = (depth_image - 0.01) % 9999\n depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11,11), 0)\n\n contour_x = contour_center[1]\n contour_y = contour_center[0]\n\n if contour_center is not None:\n angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)\n\n contour_distance = depth_image_adjust_blur[contour_y][contour_x]\n\n print(contour_distance)\n # TODO: Park the car 30 cm away from the closest orange cone\n if curState == State.search:\n rc.drive.set_speed_angle(0.5, 1)\n \n if contour_center is not None:\n curState = State.approach\n\n elif curState == State.approach:\n # rc.drive.set_speed_angle(0.5, angle)\n\n if contour_distance > 50:\n rc.drive.set_speed_angle(0.3,angle)\n elif contour_distance > 32:\n rc.drive.set_speed_angle(0.1,angle)\n elif contour_distance == 32:\n rc.drive.set_speed_angle(-0.1,angle)\n elif contour_distance < 32:\n curState = State.stop\n print(\"stop\")\n\n elif curState == State.stop:\n rc.drive.set_speed_angle(0,0)\n\n pass", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update(self):\n self.setVector(0.15, 0.0)", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")", "def update(self):\n\n obstVals = self.robot.getDepth(self.startCol, self.startRow,\n self.sampleWidth, self.sampleHeight)\n\n masked_obstVals = numpy.ma.masked_array(obstVals, obstVals == 
0)\n\n if numpy.ma.count(masked_obstVals) == 0:\n meanDistance = 500\n else:\n meanDistance = numpy.mean(masked_obstVals)\n if meanDistance < 500:\n meanDistance = 500\n\n if meanDistance < 1200: # Changing this value will change how sensitive robot is to walls\n self.setVector(self.speedMult / meanDistance, 180 - self.angle)\n else:\n self.setVector(0.0, 0.0)", "def update(self): # called to update this piece's position\r\n \r\n if self.name == \"white\" and self.y == 25: self.crowned()\r\n elif self.name == \"black\" and self.y >= 350: self.crowned()\r\n self.draw()", "def update_H(self):", "def update(i):\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)", "def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self,**kwargs):\n self._update_from_kwargs(**kwargs)\n #--- calc estimated chops from chop length\n self._calc_estimated_chops_from_timepoints()\n #--- adjust chops for stimulusresponse\n self._adjust_chops_for_stimulus_response()\n #--- update annotations\n self._update_annotations()\n #--- get info\n if self.verbose:\n self.GetInfo()\n #--- show plot\n if self.show:\n self.show_chops()", "def update(self):\n events = pygame.event.get()\n self.plane_update()\n self.bullet_update(events)\n self.background_update()\n self.enemy_update(events)", "def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41", "def redraw(self):\r\n self.c.update()", "def run(self):\n\n self._check_hardware_control()\n\n if self._is_stabilizing:\n #If we are locking the power, then need to update teh feedback loop and change the output label\n self._update_feedback()\n self._update_output_voltage_label()\n\n #We always need to update the plots as well and power label\n\n self._update_plots()\n self._update_power_label()\n\n self.gui.force_update()", "def mode_changed_callback(self, entity, attribute, old, new, kwargs):\n\n entity_dict = kwargs['entity_dict']\n self.log('{} mode changed to {}.'.format(entity_dict['friendly'], new))\n\n if new == 'Maximum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['max_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], 
entity_dict['max_brightness']))\n elif new == 'Minimum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['min_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], entity_dict['min_brightness']))\n elif new == 'Automatic':\n self.set_value(entity_dict['setpoint'], value=0)\n self.auto_brightness_callback(\n dict(entity_dict=entity_dict))", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def update(self, *args):\n ## If prediction is enabled then predict() handles rebounds.\n use_prediction = list(args).pop(0)\n if not use_prediction:\n self._rebound(0.0)\n ## Speed step needs to be adjusted by the value of interpolation\n ## at the time the ball collided with an edge (predictive_rebound_*).\n self.x += self.dx * self.speed/TICKS_PER_SECOND * (1-self.predictive_rebound_x)\n self.y += self.dy * self.speed/TICKS_PER_SECOND * (1-self.predictive_rebound_y)\n self.predictive_rebound_x,self.predictive_rebound_y = 0.0,0.0\n self.rect.center = round(self.x),round(self.y)", "def update(self):\n changes = {}\n for coord in INDICES: # the need for two for loops is necessary\n if self.chart[coord] == ALIVE and (\n self.number_of_neighbors(coord) < 2 or self.number_of_neighbors(coord) > 3):\n changes[coord] = KILL\n elif self.number_of_neighbors(coord) == 3:\n changes[coord] = REVIVE\n for coord in changes.keys(): # because the evolution is discrete\n if changes[coord] == KILL:\n self.kill(coord)\n elif changes[coord] == REVIVE:\n self.givebirth(coord)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. 
:\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def update(self):\n super().update()\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT\n\n if self.center_x < 250:\n self.change_x = (0.2) * OBJECTS_SPEED\n elif self.center_x > SCREEN_WIDTH - 250:\n self.change_x = (-0.2) * OBJECTS_SPEED", "def refresh(self, c):\n \n self.fs = []\n super(JuliaPlane,self).__init__(self.xmin,self.xmax,self.xlen,self.ymin,self.ymax,self.ylen)\n self.apply(julia(self.c))\n return self.plane", "def update(self):\n self.sensor.update()", "def computation(*args):\n contour, initial = segment_one_image(\n nodes=self.control_points,\n image=self.image_data,\n degree=degree,\n resolution=resolution,\n sigma=sigma,\n )\n self.initial = initial\n self.contour = contour\n Clock.schedule_once(reenable, 0)", "def changeClippingPlane(self):\n dir = gp_Dir(0., 0., 1.)\n checkedButton = self.ui.buttonGroup.checkedButton()\n if checkedButton == self.ui.xRadioButton:\n dir = gp_Dir(1., 0., 0.)\n elif checkedButton == self.ui.yRadioButton:\n dir = gp_Dir(0., 1., 0.)\n elif checkedButton == self.ui.zRadioButton:\n dir = gp_Dir(0., 0., 1.)\n self._surface.UpdateClippingPlane(dir)", "def update_plot():\n pass", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n 
self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def onCut(self):\n pass", "def update():", "def update():", "def cb_update(val):\n alpha_update = [sAlpha0.val, sAlpha1.val, sAlpha2.val]\n\n # update Dirichlet's parameters alpha\n dirichlet.set_param(alpha_update)\n draw_pdf_contours(axDirichlet, dirichlet, True) # Draw Dirichlet\n\n # MAP\n lambda_MAP = CatMAP.MAPinfer(x_cat, dirichlet)\n axMAP.cla()\n drawBarGraph( axMAP, \"MAP\", lambda_MAP, bar_y_max, col_MAP ) # Draw Bar graph\n\n # Bayes\n posteriorDirichlet.set_param(alpha_update)\n posteriorDirichlet.calcPosterior(x_cat)\n draw_pdf_contours(axPosteriorDirichlet, posteriorDirichlet) # Draw Posterior Dirichlet\n lambda_Bayes = np.zeros(3)\n for k in range(3):\n lambda_Bayes[k] = posteriorDirichlet.BayesInfer(k)\n\n axBayes.cla()\n drawBarGraph( axBayes, \"Bayes\", lambda_Bayes, bar_y_max, col_Bayes ) # Draw Bar graph\n\n print('Update')\n print('lambda_ML =', lambda_ML)\n print('lambda_MAP =', lambda_MAP)\n print('lambda_Bayes=', lambda_Bayes)\n draw_point(axDirichlet, lambda_ML, col_ML)\n draw_point(axDirichlet, lambda_MAP, col_MAP)\n draw_point(axDirichlet, lambda_Bayes, col_Bayes)\n draw_point(axPosteriorDirichlet, lambda_MAP, col_MAP)\n draw_point(axPosteriorDirichlet, lambda_Bayes, col_Bayes)\n\n fig.canvas.draw_idle()", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def _update_moved(self):\n self._RAS_textbox.setPlainText('{:.2f}, {:.2f}, {:.2f}'.format(\n *self._ras))\n self._VOX_textbox.setPlainText('{:3d}, {:3d}, {:3d}'.format(\n *self._current_slice))\n self._intensity_label.setText('intensity = {:.2f}'.format(\n self._base_data[tuple(self._current_slice)]))", "def update(self, k):\n for z in range(self.sweeps_per_update):\n\n\n u_update = self.dt* (np.multiply(self.D1,(self.lap2D(self.u_grid))))\\\n - self.dt * np.multiply(self.u_grid, np.square(self.v_grid))\\\n + self.dt * self.F * (1 - self.u_grid)\n\n\n v_update = self.dt*(np.multiply(self.D2,(self.lap2D(self.v_grid))))\\\n + self.dt * np.multiply(self.u_grid, np.square(self.v_grid))\\\n - self.dt * (self.F + self.k)*self.v_grid\n\n\n self.u_grid = np.add(self.u_grid, u_update)\n self.v_grid = np.add(self.u_grid, v_update)\n\n if self.animation:\n self.fig.clear()\n plt.xlabel(\"F: %.3f, dt:%.3f\" % (self.F, self.dt))\n plt.imshow(self.u_grid, interpolation='nearest',\n cmap='coolwarm', origin='lower')\n plt.colorbar()", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def update(self):\n\n bumperCode = self.robot.getBumperStatus()\n if bumperCode == 2: # Left side of bumper was hit\n self.setVector(0.4, 220)\n elif bumperCode == 1: # should be right\n self.setVector(0.4, 160)\n elif bumperCode == 3: # should be both\n self.setVector(0.4, 180)\n else:\n self.setVector(0.0, 0.0)", "def update(self, scene_info):\n if scene_info['status'] == \"GAME_1P_WIN\" or scene_info['status'] == \"GAME_2P_WIN\":\n print(scene_info['ball_speed'])\n \n if scene_info[\"status\"] != \"GAME_ALIVE\":\n return \"RESET\"\n\n if not self.ball_served:\n self.ball_served = True\n return \"SERVE_TO_LEFT\"\n\n if not len(self.his):\n self.his.append(scene_info)\n return 'NONE'\n\n self.his = [self.his[-1]]\n ball = list(scene_info['ball'])\n v = list(scene_info['ball_speed'])\n block = list(scene_info['blocker'])\n Vblock = scene_info['blocker'][0] - self.his[-1]['blocker'][0]\n predict = 100\n while True:\n if ball[1] <= block[1] and v[1] > 0:\n t 
= (block[1] - (ball[1] + 5)) / v[1]\n px = ball[0] + v[0] * t\n bx = block[0] + Vblock * t\n bounce = 0\n Bbounce = 0\n while px < 0 or px > 195:\n bounce += 1\n if px < 0:\n px = -px\n else:\n px = 390 - px\n while bx < 0 or bx > 170:\n Bbounce += 1\n if bx < 0:\n bx = -bx\n else:\n bx = 340 - bx\n if bx <= px + 5 and px <= bx + 30: # hit up\n ball[0] = bx \n ball[1] = block[1] + 5 \n v[1] *= -1\n if self.side == '1P':\n print('Hit Up')\n continue\n ball[0] = px\n ball[1] = ball[1] + v[1] * t\n block[0] = bx\n if bounce & 1:\n v[0] *= -1\n if Bbounce & 1:\n Vblock *= -1\n if ball[1] >= block[1] + 20 and v[1] < 0:\n t = (block[1] + 20 - (ball[1])) / v[1]\n px = ball[0] + v[0] * t\n bx = block[0] + Vblock * t\n bounce = 0\n Bbounce = 0\n while px < 0 or px > 195:\n bounce += 1\n if px < 0:\n px = -px\n else:\n px = 390 - px\n while bx < 0 or bx > 170:\n Bbounce += 1\n if bx < 0:\n bx = -bx\n else:\n bx = 340 - bx\n if bx <= px and px <= bx + 30: # hit down\n ball[0] = bx\n ball[1] = block[1] + 20\n v[1] *= -1\n if self.side == '1P':\n print('Hit Down')\n continue\n ball[0] = px\n ball[1] = ball[1] + v[1] * t\n block[0] = bx\n if bounce & 1:\n v[0] *= -1\n if Bbounce & 1:\n Vblock *= -1\n \n if (v[1] > 0 and ball[1] > block[1] + 20 and self.side == '2P') or (v[1] < 0 and ball[1] < block[1] and self.side == '1P'):\n predict = 97.5\n break \n if v[1] > 0:\n t = (420 - 5 - ball[1]) / v[1]\n px = ball[0] + v[0] * t\n else:\n t = (80 - ball[1]) / v[1]\n px = ball[0] + v[0] * t\n while px < 0 or px > 195:\n if px < 0:\n px = - px\n else:\n px = 390 - px\n predict = px + 2.5\n break\n # if scene_info['frame'] % 10 == 0:\n # print('predict = ', end = '')\n # print(predict)\n self.his.append(scene_info)\n plat = scene_info['platform_1P'] if self.side == '1P' else scene_info['platform_2P']\n if -3.0 <= plat[0] + 15 - predict and plat[0] + 15 - predict <= 3.0:\n return 'NONE'\n if plat[0] + 15 > predict:\n return 'MOVE_LEFT'\n else:\n return 'MOVE_RIGHT'\n return 'NONE'", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def update(self,\n args):\n super(WiderfaceDetMetaInfo, self).update(args)\n self.model_type = args.model_type\n if self.model_type == 1:\n self.receptive_field_center_starts = [3, 7, 15, 31, 63]\n self.receptive_field_strides = [4, 8, 16, 32, 64]\n self.bbox_factors = [10.0, 20.0, 40.0, 80.0, 160.0]\n else:\n self.receptive_field_center_starts = [3, 3, 7, 7, 15, 31, 31, 31]\n self.receptive_field_strides = [4, 4, 8, 8, 16, 32, 32, 32]\n self.bbox_factors = [7.5, 10.0, 20.0, 35.0, 55.0, 125.0, 200.0, 280.0]", "def control_change(self, channel, cc, value):\n knob, bank = self.decode_mpd218_cc(cc)\n log.debug(\"Winch control change %d on knob %d bank %d\", cc, knob, bank)\n\n if knob == 1: # Knob #1 on MPD218, use to control resonant frequency\n #self.frequency = 0.05 + 0.1 * value\n self.frequency = 5.00\n self.set_freq_damping()\n\n elif knob == 2: # Knob #2 on on MPD218, use to control damping ratio\n #self.damping_ratio = 0.05 + 0.01 * value\n self.damping_ratio = 1.32\n self.set_freq_damping()", "def swarm(self) -> None:\n self.state[:, :, Boids.Attr.ACC] *= 0\n self.update_acc_by_rules()\n self._update_vel()\n self._update_loc()", "def 
OnSwitchTab(self, event):\n canvas_HDV = self.dicom_navigation.parent.dicom_right_window.top_info.canvas_HDV\n canvas_dicom = self.dicom_navigation.parent.dicom_right_window.top_info.canvas_dicom\n \n if (self.dicom_navigation.display_settings['miniature'] == 1):\n canvas_HDV.get_tk_widget().pack_forget()\n canvas_dicom.get_tk_widget().pack(side=tk.RIGHT, fill=tk.Y, expand=False)\n # Tricky hack pour ne pas avoir le probleme de zoom lorsqu'on met les mignatures (on retrace les canvas initiaux)\n self.dicom_navigation.parent.dicom_right_window.dicom_hdv.canvas.get_tk_widget().update_idletasks()\n self.dicom_navigation.parent.dicom_right_window.dicom_view.canvas.get_tk_widget().update_idletasks()", "def prepare_vacuum_state(self, mode):\n self.circuit.loss(0.0, mode)", "def update(self, img, print_features=False):\n self.features_changed = True\n\n # extract new ball position\n try:\n bp, _ = get_ball_position(ID=self.client_id, img=img)\n bp = tuple(map(int, bp))\n self.ball_pos_stamps.append((bp, time.time()))\n except IndexError:\n print_debug(\"ID {} did not found it self..........\".format(self.client_id))\n return\n\n if len(self.ball_pos_stamps) > NUM_STAMPS_CALC_SPEED:\n self.ball_pos_stamps.remove(self.ball_pos_stamps[0])\n\n # extract mask for the opponents\n self.ball_mask = np.zeros(np.shape(self.track))\n for i in self.opponents_ids:\n try:\n _, m = get_ball_position(i, img)\n self.ball_mask += m\n except IndexError:\n # if an opponent was not found the number of clients is known\n self.max_clients = i - 1\n self.opponents_ids.remove(i)\n break\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~ extract new features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # speed\n self._calc_speed_features()\n\n # distance\n self._calc_distance_features()\n\n # sections\n bp_section = self.checkpoint_map[bp[0], bp[1]]\n\n # check if a new section is entered\n if not bp_section == self.last_seen_section:\n\n # check is a new best section is entered\n # allow skipping of NUM_SECTION_JUMP sections\n r1 = range(self.current_section_id+1, self.current_section_id+NUM_SECTION_JUMP)\n r1 = list(map(lambda x: x % self.num_cps, r1))\n\n r2 = range(self.last_seen_section + 1, self.last_seen_section + NUM_SECTION_JUMP)\n r2 = list(map(lambda x: x % self.num_cps, r2))\n if bp_section in r1 and bp_section in r2:\n self.section_counter[bp_section] += 1\n # ensure over jumped section also increase counter\n i = bp_section - 1\n while i >= 0 and self.section_counter[i] < self.section_counter[bp_section]:\n self.section_counter[i] += 1\n i -= 1\n self.current_section_id = bp_section\n\n self.last_seen_section = bp_section\n\n if print_features:\n self.print_features()", "def update_figure(self):\n\n self.draw()", "def update(self):\n print(\"sensorState Update\")", "def update(self):\n if self.black + self.white == self.SIZE*self.SIZE:\n if self.black > self.white:\n self.gc.black_wins = True\n elif self.white > self.black:\n self.gc.white_wins = True\n else:\n self.gc.tie = True\n self.gc.black_num = self.black\n self.gc.white_num = self.white", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 
0))", "def update(self):", "def update(self):", "def update(self):", "def refresh(self):\n\n # delete the existing plane first before recreating our plane by resetting the plane to an empty plane\n self.plane = []\n # calling our create_plane() to redraw our plane.\n self.create_plane()", "def update(self, *args):\n\n\t\t# Update Bullets\n\t\tif self.power == 'bulletup' and self.level >= 2:\n\t\t\tself.angle_bullets(self.level)\n\t\t\n\t\t# Update Lazer\n\t\tif self.power == 'lazerup' and self.level > 0:\n\n\t\t\tself.index += 1\n\t\t\tif self.index % 12:\n\t\t\t\tself.step += 1\n\t\t\t\n\t\t\tself.y -= self.speed\n\n\n\t\t\tself.rect.y = self.y\n\t\t\tself.rect.x = self.x\n\n\t\t\t# print(\"SLOPE??? \", self.slope)\n\t\t\tself.sheet.blitme(self.screen, self.step % self.sheet.totalCells, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.rect.x, self.rect.y)\n\n\t\t# Update Bombs\n\t\tif self.power == 'bombup' and self.level > 0:\n\t\t\tself.bomb_vector()\n\n\t\t# Update Default\n\t\telse:\n\t\t\tself.y -= self.speed\n\t\t\tself.rect.y = self.y\n\n\t\tpygame.display.flip()", "def game_control_updates(self):\r\n if self.game_control is not None:\r\n self.speed_step = self.game_control.get_prop_val(\"running.speed_step\", -1)", "def callback_time_cut(val):\n global plot_mode\n global idx_time\n last_plot_mode = plot_mode\n plot_mode = 'time_cut'\n idx_time = int(val)\n update_num_shadow(int(sld['neighbors'].val))\n # plot 121\n lcuttime.set_xdata( [val, val] )\n lcuttime.set_alpha( alpha_hm )\n lcutfreq.set_alpha( 0.0 )\n # plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_time ) # [True/False, True/False]\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True ] )\n replot_light()\n reform_axis()\n\n fig.canvas.draw_idle()", "def update(self):\n # get the new position of the snowman\n newpos = self.rect.move((self.move, 0))\n # handle getting to the edges\n if (self.rect.left < self.area.left or\n self.rect.right > self.area.right):\n # move in the opposite direction\n self.move = -self.move\n # get the new position\n newpos = self.rect.move((self.move, 0))\n # mirror the image (flip it)\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def update(self, frame):\n\n 
if(frame % 1 == 0): \n\n # Calling method to move people, and check and infect them and perform\n # other functions.\n self.putil.move(frame)\n \n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n currently_infected = len(infected_x)\n\n # Update healthcare status\n if currently_infected > self.putil.total_healthcare_capacity*3/2:\n self.healthcare_status = \"Extreme\"\n elif currently_infected > self.putil.total_healthcare_capacity:\n self.healthcare_status = \"Worse\"\n elif currently_infected > self.putil.total_healthcare_capacity*2/3:\n self.healthcare_status = \"Manageable\"\n else:\n self.healthcare_status = \"Normal\"\n\n # Update Graphs\n data1 = np.c_[healthy_x,healthy_y]\n data2 = np.c_[infected_x,infected_y]\n data3 = np.c_[immune_x,immune_y]\n data4 = np.c_[dead_x,dead_y]\n\n if frame == self.putil.enforce_mask_wearing_at:\n self.mask_wearing_info = \"Active\" \n \n if frame == self.putil.enforce_social_distance_at:\n self.social_distancing_info = \"Active\"\n\n self.text.set_text(\"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" % (frame,len(infected_x), str(len(healthy_x)) + \" or \" + str(round(len(healthy_x)*100/self.putil.size,1)) + \"%\",\n str(len(immune_x)) + \" or \" + str(round(len(immune_x)*100/self.putil.size,1)) + \"%\", str(len(dead_x)) + \" or \" + str(round(len(dead_x)*100/self.putil.size,1)) + \"%\",\n self.healthcare_status))\n self.text2.set_text(\"%s \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n self.scat.set_offsets(data1)\n self.scat2.set_offsets(data2)\n self.scat3.set_offsets(data3)\n self.scat4.set_offsets(data4)\n \n self.infected.append(len(infected_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.deaths.append(len(dead_x))\n self.frames.append(frame)\n self.immunes.append(len(immune_x))\n\n self.currently_infected.set_ydata(self.infected)\n self.currently_infected.set_xdata(self.frames)\n\n self.total_deaths.set_ydata(self.deaths)\n self.total_deaths.set_xdata(self.frames)\n\n self.total_immune.set_ydata(self.immunes)\n self.total_immune.set_xdata(self.frames)\n\n \n \n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected,", "def observe(self, obs):\n self.observation = obs\n self.selected = obs.selected\n \n #############################\n # Update of turn statistics #\n #############################\n if self.id == (obs.step % 6):\n # Store base locations\n if self.__class__.home_base is None:\n self.__class__.home_base = (obs.loc[0]+16, obs.loc[1]+8)\n self.__class__.enemy_base = \\\n self.getSymmetricOpposite(self.__class__.home_base)\n \n # Reset trendingSpot\n self.__class__.trendingSpot = {}\n \n # Update friendly CPs\n self.__class__.friendlyCPs = map(lambda x: x[0:2], \n filter(lambda x: 
x[2] == self.team, obs.cps))\n \n # Update enemy CPs\n self.__class__.enemyCPs = map(lambda x:x[0:2], \n filter(lambda x: x[2] != self.team, obs.cps))\n \n # Update ammo packs \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n\n # Update inFriendlyHands stat\n if SETTINGS_DOMINATION_ADDS_UP:\n inFriendlyHands = self.__class__.inFriendlyHands\n else:\n inFriendlyHands = {}\n for cp in self.__class__.friendlyCPs:\n if cp in self.__class__.inFriendlyHands:\n inFriendlyHands[cp] = self.__class__.inFriendlyHands[cp] + 1\n else:\n inFriendlyHands[cp] = 1\n self.__class__.inFriendlyHands = inFriendlyHands", "def update(self):\n self.syncSpriteCoordinates()\n self.moveBasedOnCurrentMomentum()\n #self.decelerate()\n self.checkCanvasBoundsAndWrap()", "def UpdateLayers(self):\n pass", "def update(self):\r\n super().update()\r\n if self.stopped and self.charge < CHARGE_MAX:\r\n self.charge += 1", "def update(self):\n self.grid.update()\n sleep(self.update_rate)", "def update_channels(imgs, msks, **settings):\n\n\tshp = imgs.shape\n\tnew_imgs = np.zeros((shp[0],shp[1],shp[2], settings['IN_CHANNEL_NO']))\n\tnew_msks = np.zeros((shp[0],shp[1],shp[2], settings['OUT_CHANNEL_NO']))\n\n\tif settings['MODE']==1:\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,2] # flair\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]\n\t\tprint('-'*10,' Whole tumor', '-'*10)\n\n\telif settings['MODE'] == 2:\n\t\t#core (non enhancing)\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,0] # t1 post\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,3]\n\t\tprint('-'*10,' Predicing enhancing tumor', '-'*10)\n\n\telif settings['MODE'] == 3:\n\t\t#core (non enhancing)\n\t\tnew_imgs[:,:,:,0] = imgs[:,:,:,1]# t2 post\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,2]+msks[:,:,:,3] # active core\n\t\tprint('-'*10,' Predicing active Core', '-'*10)\n\n\telse:\n\t\tnew_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]\n\n\treturn new_imgs.astype(np.float32), new_msks.astype(np.float32)", "def update(self, frame):\n self.__update_state(frame)\n\n if self.strategy == \"median\":\n ax = statistics.median(self.p1[:,:,0] - self.p0[:,:,0])\n ay = statistics.median(self.p1[:,:,1] - self.p0[:,:,1])\n else:\n ax = np.mean(self.p1[:,:,0] - self.p0[:,:,0])\n ay = np.mean(self.p1[:,:,1] - self.p0[:,:,1])\n\n P = 1\n new_x = int(self.bbox[0] + self.bbox[2] * ax * P)\n new_y = int(self.bbox[1] + self.bbox[3] * ay * P)\n return True, (new_x, new_y, self.bbox[2], self.bbox[3])", "def update(self):\n self.value = self.sensor.update()", "def Update(self, ticks=0):", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.context.open=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.context.open=False\n if event.key == K_SPACE:\n self.setMode((self.mode+1)%3)\n #if event.key == K_f:\n # pygame.display.toggle_fullscreen()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 4: self.context.draw.plane.zoom([1.1,1.1])\n if event.button == 5: self.context.draw.plane.zoom([0.9,0.9])", "def update_visualization(self) -> None:\n pass", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n if self.is_moving_up:\n self.dirty = 1\n if self.is_moving_down:\n self.dirty = 1\n if self.is_moving_right:\n self.dirty = 1\n if self.is_moving_left:\n self.dirty = 1\n \n self.rect.x += self.moveX\n self.logic.wall_hit_logic(self.moveX, 
\"x\", self.room.wall_list)\n self.room_change.change_room()\n \n self.rect.y += self.moveY\n self.logic.wall_hit_logic(self.moveY, \"y\", self.room.wall_list)\n self.room_change.change_room()", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def _update(self):\n self.parametrize_beam()\n self.update_ranks()\n self._points = tf.reshape(self._endpoint, (1, 2)) * tf.reshape(self._ranks, (-1, 1))", "def beforeUpdate(self):", "def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()", "def _update(self, event):\n if self.ignore(event) or event.button != 1:\n return\n\n if event.name == 'button_press_event' and event.inaxes == self.ax:\n self.drag_active = True\n event.canvas.grab_mouse(self.ax)\n\n if not self.drag_active:\n return\n\n elif ((event.name == 'button_release_event') or\n (event.name == 'button_press_event' and\n event.inaxes != self.ax)):\n self.drag_active = False\n event.canvas.release_mouse(self.ax)\n return\n if self.orientation == 'vertical':\n val = self._value_in_bounds(event.ydata)\n else:\n val = self._value_in_bounds(event.xdata)\n if val not in [None, self.val]:\n self.set_val(val)", "def prepare_coherent_state(self, alpha, mode):\n\n self.circuit.loss(0.0, mode)\n self.circuit.displace(alpha, mode)", "def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)", "def observe_Env(self, mode='all'):\r\n L_cnt, R_cnt, bump,DLightBump, AnalogBump,Infra_Omi, Infra_L, Infra_R = 
self.achieve_data(mode)\r\n old_state = self.real_state.copy()\r\n\r\n if mode != 'e':\r\n # Check if current state is terminal\r\n terminal,obs = self.check_terminal(bump,DLightBump, AnalogBump,(Infra_Omi, Infra_L, Infra_R))\r\n # update list of obstacles\r\n # maximum count for determining if the obstacle 100% exists\r\n max_cnt =5.0\r\n for o in obs:\r\n # if obstacle is not detected before\r\n if self.obs_ls[0].count(o)<1:\r\n self.obs_ls[0].append(o)\r\n self.obs_ls[1].append(1/max_cnt)\r\n else:\r\n # update probability of this obstacle observed\r\n self.obs_ls[1][self.obs_ls[0].index(o)] += 1.0/max_cnt\r\n\r\n # The reward is the reward obtained after transition (s,a,s')\r\n r = self.cal_reward(bump, DLightBump, AnalogBump,(Infra_Omi, Infra_L, Infra_R))\r\n else:\r\n # if encoder mode, return encoder info only, without calculate rewards and terminals\r\n r= 0\r\n terminal =False\r\n\r\n # obtain postion and heading angle\r\n self.real_state[0],self.real_state[1],self.real_state[2] = self.Motion.get_CurPos(L_cnt,R_cnt)\r\n\r\n return old_state, self.real_state,r, terminal, (L_cnt, R_cnt, bump,DLightBump, AnalogBump)", "def update(self):\r\n if self.change_x < 0 and self.left - self.speed <= Costanti.LEVEL_INFO['left']:\r\n # raggiunto limite sinistro\r\n self.left = Costanti.LEVEL_INFO['left'] + 1\r\n elif self.change_x > 0 and Costanti.LEVEL_INFO['right'] <= self.right + self.speed:\r\n # raggiunto limite destro\r\n self.right = Costanti.LEVEL_INFO[\"right\"] - 1\r\n elif self.left > Costanti.LEVEL_INFO[\"left\"] and self.right < Costanti.LEVEL_INFO[\"right\"]:\r\n # sposta il paddle\r\n self.center_x += self.change_x * self.speed\r\n\r\n # aggiorno l'immagine per l'animazione\r\n super().update_animation()\r\n\r\n # ristabilisco le giuste dimensioni del paddle e non quelle dell'immagine originale\r\n self.width = Costanti.Paddle_WIDTH\r\n self.height = Costanti.Paddle_HEIGHT", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def _update_board(self):\n if self.power_state == \"ON\":\n # Only update brightness if on. 
Will adjust from most recent brightness level.\n self._update_brightness()\n else:\n # Case where called to switch off\n self._update_color(OFF)\n logger.info(self.power_state)\n logger.info(self.brightness)", "def update():\n # TODO: Follow the wall to the right of the car without hitting anything.\n global DIST, RIGHT_TOP_WINDOW, LEFT_TOP_WINDOW, RIGHT_WINDOW, LEFT_WINDOW, FRONT_WINDOW, REAR_WINDOW\n global cur_state\n \n scan = rc.lidar.get_samples()\n scan = (scan - 0.01) % 100000\n\n speed = 1\n angle = 0\n\n _, right_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_WINDOW)\n _, left_dist = rc_utils.get_lidar_closest_point(scan, LEFT_WINDOW)\n _, right_top_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_TOP_WINDOW)\n _, left_top_dist = rc_utils.get_lidar_closest_point(scan, LEFT_TOP_WINDOW)\n _, front_dist = rc_utils.get_lidar_closest_point(scan, FRONT_WINDOW)\n _, rear_dist = rc_utils.get_lidar_closest_point(scan, REAR_WINDOW)\n\n \n if cur_state == State.drive:\n if right_top_dist > left_top_dist:\n angle = angle_controller(right_top_dist, 1) \n else:\n angle = angle_controller(left_top_dist, -1)\n\n if abs(angle) > 0.75:\n kP = 2\n speed = 1 / (abs(angle) * kP)\n speed = rc_utils.clamp(speed, -1, 1)\n\n rc.drive.set_speed_angle(speed, angle)", "def on_data_vars_change(self, change):\n if change['type'] == 'change' and change['name'] == 'value':\n self.left_ds = getattr(self.ts.data, change['new'])\n if self.mask is None:\n self.right_ds = self.left_ds.copy(deep=True)\n else:\n self.right_ds = self.left_ds * self.mask\n\n self.left_imshow.set_data(self.left_ds.data[0])\n self.right_imshow.set_data(self.right_ds.data[0])", "def update_newly_set_ref_V_ampl(self):\n self.qlin_ref_V_ampl_RMS.setText(\n \"%.3f\" % self.alia.config.ref_V_ampl_RMS\n )\n self.qlin_ref_V_ampl.setText(\"%.3f\" % self.alia.config.ref_V_ampl)\n self.qlbl_ref_is_clipping.setText(self.get_clipping_text())\n # QApplication.processEvents()\n\n self.alia_qdev.state.reset()\n self.clear_curves_stage_1_and_2()", "def _update(self):\n if self._need_display_update:\n self._need_display_update = False\n\n self._set_view_slice(self.viewer.dims.indices)\n\n if self._need_visual_update:\n self._need_visual_update = False\n self._node.update()", "def run(self):\n super(MovementControl,self).run()" ]
[ "0.6893299", "0.60525465", "0.58858526", "0.5834753", "0.5762848", "0.5702077", "0.5648497", "0.5648258", "0.56118804", "0.56109047", "0.55797815", "0.55794543", "0.55765516", "0.5570159", "0.5562381", "0.55067044", "0.550493", "0.54722816", "0.54584134", "0.5452447", "0.5450791", "0.54296863", "0.54067004", "0.5405082", "0.54015386", "0.53991103", "0.5395922", "0.5394981", "0.5363178", "0.533246", "0.5323486", "0.53207815", "0.5307988", "0.5307467", "0.5306328", "0.5303505", "0.5302158", "0.5294832", "0.52928805", "0.52908164", "0.52908164", "0.52907366", "0.52903837", "0.52839375", "0.5280674", "0.5276407", "0.527241", "0.52722335", "0.5265784", "0.5255957", "0.5250546", "0.52460265", "0.5239012", "0.5238761", "0.5237119", "0.5224338", "0.5216323", "0.52149796", "0.52063626", "0.52041274", "0.52041274", "0.52041274", "0.5197634", "0.51923466", "0.5183846", "0.51822853", "0.5175992", "0.51752687", "0.51745486", "0.5172248", "0.5171813", "0.51696885", "0.5166504", "0.5158351", "0.5152321", "0.51521707", "0.5134058", "0.51312685", "0.5129956", "0.51265806", "0.51258373", "0.5114751", "0.5114751", "0.5114567", "0.51107377", "0.51085645", "0.51015997", "0.51009995", "0.50995207", "0.509735", "0.5095521", "0.5095253", "0.5093375", "0.5093168", "0.5088305", "0.5087299", "0.5087011", "0.5085185", "0.5081891", "0.50785905" ]
0.6976842
0
Handle update of color
def _updateColor(self, color):
    primitive = self._getScenePrimitive()
    if (len(primitive.children) != 0 and
            isinstance(primitive.children[0], primitives.ColormapMesh3D)):
        primitive.children[0].alpha = self._color[3]
    else:
        super(ComplexIsosurface, self)._updateColor(color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_color(self, color):\n self.color = color", "def _update_color(self, *args):\n\n if self._variable and 'w' in self._mode and not self._dnd_started:\n self._internal_color_change = True\n self.color_var.set(self._variable)", "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def color(self, color):\n #self._color = color\n new_color = \"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))", "def _color_var_changed(self, *args):\n\n if not self._internal_color_change:\n self._variable = self.color_var.get()\n self._update()\n self._internal_color_change = False", "def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)", "def process_color(self, color):\n self.controller.game.receive_color(color)\n self.parent.parent.update_stat_frame()\n self.parent.parent.update_table_frame()\n self.parent.parent.end_turn()", "def update_r(color, new_r):\n\n color.update_r(new_r)", "def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts", "def _update_color(self, rgb_tuple):\n for color in rgb_tuple._fields:\n pin = getattr(PINS, color)\n value = getattr(rgb_tuple, color)\n # Ensure color between 0 and 255\n value = max(min(value, 255), 0)\n # print(pin, value)\n self.pi.set_PWM_dutycycle(pin, value)", "def update(self):\n super().update()\n time_since_start = self.time_since_start() \n curr_mod = time_since_start%self.game.time_cycle_secs\n grade = abs(curr_mod - self.game.time_cycle_secs/2) / (self.game.time_cycle_secs/2)\n color_value = grade*(255-self.game.max_darkness) + self.game.max_darkness\n for sprite in self.all_sprites:\n sprite.color = (color_value, color_value, color_value)", "def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)", "def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()", "def rgb_slider_moved(self, event):\n slider_red = int(self.slider_r.get_value())\n slider_green = int(self.slider_g.get_value())\n slider_blue = int(self.slider_b.get_value())\n\n self.change_color((slider_red, slider_green, slider_blue))", "def _color(self, args):", "def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', color)", "def update_g(color, new_g):\n\n color.update_g(new_g)", "def _update(self):\n\n if self.rgb:\n self._canvas['bg'] = 
tks.color_funcs.rgb_to_hex_string(self.rgb)\n self._text['text'] = self._color_info_text()\n else:\n self._canvas['bg'] = self._blank_label_color\n self._text['text'] = ''", "def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)", "def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0", "def set_color(self, new_color):\n self.color = new_color", "def change_color(self, rgb):\n\n rgba = Gdk.RGBA()\n rgba.parse(\"rgb({},{},{})\".format(*rgb))\n self.square.override_background_color(Gtk.StateType.NORMAL, rgba)\n\n GObject.signal_handler_block(self.spinbutton_r, self.red_sb_id)\n self.spinbutton_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.spinbutton_r, self.red_sb_id)\n GObject.signal_handler_block(self.slider_r, self.red_s_id)\n self.slider_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.slider_r, self.red_s_id)\n\n GObject.signal_handler_block(self.spinbutton_g, self.green_sb_id)\n self.spinbutton_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.spinbutton_g, self.green_sb_id)\n GObject.signal_handler_block(self.slider_g, self.green_s_id)\n self.slider_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.slider_g, self.green_s_id)\n\n GObject.signal_handler_block(self.spinbutton_b, self.blue_sb_id)\n self.spinbutton_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.spinbutton_b, self.blue_sb_id)\n GObject.signal_handler_block(self.slider_b, self.blue_s_id)\n self.slider_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.slider_b, self.blue_s_id)\n\n GObject.signal_handler_block(self.output, self.output_id)\n self.output.set_text(rgb_to_hex(rgb))\n GObject.signal_handler_unblock(self.output, self.output_id)\n\n self.rgb_color = rgb\n self.change_output()", "def test_update_r():\n\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_r(color, 202)\n\n assert color.get_r() == 202\n assert color.get_g() == 142\n assert color.get_b() == 438", "def test_update_g():\n color = 
Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_g(color, 239)\n\n assert color.get_r() == 100\n assert color.get_g() == 239\n assert color.get_b() == 438", "def update(self):\n try:\n if not self._light.connected:\n self._light.connect()\n # pylint: disable=invalid-name\n r, g, b, w = self._light.get_color()\n except pykulersky.PykulerskyException as exc:\n if self._available:\n _LOGGER.warning(\"Unable to connect to %s: %s\", self._light.address, exc)\n self._available = False\n return\n if not self._available:\n _LOGGER.info(\"Reconnected to %s\", self.entity_id)\n self._available = True\n\n hsv = color_util.color_RGB_to_hsv(r, g, b)\n self._hs_color = hsv[:2]\n self._brightness = int(round((hsv[2] / 100) * 255))\n self._white_value = w", "def on_rgb_slide(self,r,g,b):\n if not self.active:\n return\n red = int(round(r / 100.0))\n green = int(round(g / 100.0))\n blue = int(round(b / 100.0))\n self.rgb = colormodel.RGB(red, green, blue)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()", "def update_b(color, new_b):\n\n color.update_b(new_b)", "def changeColor(self):\n self.layer.new_colormap()", "def change_color(self, color):\n self.color = color", "async def update_image(new_value: str) -> None:\n img.apply(swatch, img.Handle.color(parse_color(new_value), size, size))", "def message_colour_tick():\n global colour_count\n colour_count += 1", "def updateColorFor(self, id, color):\n\n # find the good LED strip\n currentStrip = None\n index = 0\n for LEDStrip in self._LEDStrips:\n if LEDStrip._id == id:\n currentStrip = LEDStrip\n if currentStrip == None:\n index += 1\n \n if currentStrip == None:\n return\n\n self._colors[index] = color", "def set_color(self, color):\n\t\tpass", "def output_entry_changed(self, event):\n value = self.output.get_text().lstrip(\"#\")\n\n if len(value) == 6:\n rgb = hex_to_rgb(value)\n self.change_color(rgb)", "def shadechanged(self, shadenum, newshade):\n\n if self.performingupdate or shadenum >= self.numcols or type(newshade) != int:\n return\n\n diff = newshade - self.currentshades[shadenum]\n if diff == 0:\n return\n\n incr = 1\n if diff < 0:\n incr = -1\n\n while newshade in self.currentshades:\n newshade += incr\n\n # If we've run off either end, we'll have to go back to where we were\n\n if newshade < 0 or newshade > 255:\n self.colspins[shadenum].setValue(self.currentshades[shadenum])\n return\n\n self.performingupdate = True\n self.currentshades[shadenum] = newshade\n self.currentshades.sort(reverse=not self.gs.inverse)\n for n in range(0, self.numcols):\n self.colspins[n].setValue(self.currentshades[n])\n self.performingupdate = False\n self.plotmap()", "def slider_action(self, sender):\n self.r = self.rslider.value\n self.g = self.gslider.value\n self.b = self.bslider.value\n self.preview.background_color = self.rgb\n self.colorlabel.text = self.hexcode", "def color_callback(self, data):\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding=\"passthrough\")\n self.color_mutex.acquire()\n self.color_image = cv_image\n self.color_mutex.release()", "def test_update_b():\n color = Color(100, 142, 438)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 438\n\n update_b(color, 
47)\n\n assert color.get_r() == 100\n assert color.get_g() == 142\n assert color.get_b() == 47", "def updatePixelColor(self):\n height = self.frameGeometry().height()\n width = self.frameGeometry().width()\n pixel_pos = QtCore.QPoint(width/2, self.__selector_y*height)\n self.__color_selected = QtGui.QColor(self.__picker_image.pixel(pixel_pos))\n self.color_changed.emit(self.__color_selected)", "def updateColors(self):\n self.negativeColor = (int(self.negativeRedTextField.get(\"1.0\", tk.END)),\n int(self.negativeGreenTextField.get(\"1.0\", tk.END)),\n int(self.negativeBlueTextField.get(\"1.0\", tk.END)))\n self.positiveColor = (int(self.positiveRedTextField.get(\"1.0\", tk.END)),\n int(self.positiveGreenTextField.get(\"1.0\", tk.END)),\n int(self.positiveBlueTextField.get(\"1.0\", tk.END)))\n # Update the positive and negative labels\n self.negativeLabel.config(background=self.negativeColorHex())\n self.positiveLabel.config(background=self.positiveColorHex())\n\n print(f\"Negative: {self.negativeColor}\")\n print(f\"Positive: {self.positiveColor}\")", "def _on_edge_color_change(self, event=None):\n with self.layer.events.edge_color.blocker():\n index = self.edgeComboBox.findText(\n self.layer.edge_color, Qt.MatchFixedString\n )\n self.edgeComboBox.setCurrentIndex(index)\n color = Color(self.layer.edge_color).hex\n self.edgeColorSwatch.setStyleSheet(\"background-color: \" + color)", "def update(self, rgb, cmyk, hsv):\n compRGB = a3.complement_rgb(rgb)\n if (compRGB is None):\n compRGB = rgb\n \n rgb_str = rgb_to_str(rgb)\n cmyk_str = '' if cmyk is None else str5_cmyk(cmyk) \n hsv_str = '' if hsv is None else str5_hsv(hsv)\n \n self.main.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\")\n self.main.background = rgb.glColor()\n self.main.foreground = compRGB.glColor()\n self.comp.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\" )\n self.comp.background = compRGB.glColor()\n self.comp.foreground = rgb.glColor()\n \n # set the sliders\n self.rSlider.value = rgb.red*100\n self.gSlider.value = rgb.green*100\n self.bSlider.value = rgb.blue*100\n self.cSlider.value = 0 if cmyk is None else cmyk.cyan*100 \n self.mSlider.value = 0 if cmyk is None else cmyk.magenta*100\n self.ySlider.value = 0 if cmyk is None else cmyk.yellow*100\n self.kSlider.value = 0 if cmyk is None else cmyk.black*100\n self.hSlider.value = 0 if hsv is None else hsv.hue*100\n self.sSlider.value = 0 if hsv is None else hsv.saturation*100\n self.vSlider.value = 0 if hsv is None else hsv.value*100", "def on_material_color_btn_color_set(self,button,data=None):\n self.app.reload_job()", "def set_color(self, color):\n pass", "def rgb(self, value):\n\n self._variable = value\n self._update()", "def _setColor(self, index):\n\n self.colorLabel.setStyleSheet(\"border: 1px solid black; background-color:rgb(%s, %s, %s);\" % (\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.rgbColorDlg.setCurrentColor(QColor.fromRgb(\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.colorSlider.setValue(index)", "def onColorMenu(self, 
item):\n self.canvas.color = item.color\n return 1", "def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return", "def rgb_spin_changed(self, event):\n spin_red = self.spinbutton_r.get_value_as_int()\n spin_green = self.spinbutton_g.get_value_as_int()\n spin_blue = self.spinbutton_b.get_value_as_int()\n\n self.change_color((spin_red, spin_green, spin_blue))", "def updateFromHsl ( self ):\n rgb = Colz.hslToRgb( self.h, self.s, self.l )\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]\n self.rgb = rgb\n self.rgba = [ rgb[0], rgb[1], rgb[2], self.a ]\n # Updates Hex\n self.hex = Colz.rgbToHex( rgb[0], rgb[1], rgb[2] )", "def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color", "def colorFrame(self, _color):\n\t\tif self.frame:\n\t\t\tfor nr, i in enumerate(self.frame):\n\t\t\t\tself.frame[nr][1] = _color", "def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(-25,25)\n color_green = random.randint(-25,25)\n color_blue = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = (color[0]+color_red,color[1]+color_green,color[2]+color_blue)\n else: #color_mode == 'L':\n color_diff = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = color+color_diff\n mutated_genome[index][0] = newcolor", "def updateFromRgb ( self ):\n hsl = self.rgbToHsl( self.r, self.g, self.b )\n self.h = hsl[0]\n self.s = hsl[1]\n self.l = hsl[2]\n self.hsl = hsl\n self.hsla = [ hsl[0], hsl[1], hsl[2], self.a ]", "def on_rgb_press(self,r,g,b):\n self.rgb = colormodel.RGB(r, g, b)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()", "def set_color(color):\r\n global _current_color\r\n _current_color = color", "def red(self, new_value):\r\n if self.empty is True and self.yellow is False and self.red is False and new_value is True:\r\n self._red = new_value\r\n self.empty = False\r\n else:\r\n raise DomainError('Square already full! 
')", "def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down", "def update(self, rgb, cmyk, hsv):\n # RGB Fields\n self.rField.text = `rgb.red`\n self.gField.text = `rgb.green`\n self.bField.text = `rgb.blue`\n # CMYK fields\n self.cField.text = \"\" if cmyk is None else `round(cmyk.cyan,2)`\n self.mField.text = \"\" if cmyk is None else `round(cmyk.magenta,2)`\n self.yField.text = \"\" if cmyk is None else `round(cmyk.yellow,2)`\n self.kField.text = \"\" if cmyk is None else `round(cmyk.black,2)`\n # HSV fields\n self.hField.text = \"\" if hsv is None else `round(hsv.hue,1)`\n self.sField.text = \"\" if hsv is None else `round(hsv.saturation,3)`\n self.vField.text = \"\" if hsv is None else `round(hsv.value,3)`", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def _on_change(self, *_):\n colour = self.on_colour if self.value else self.off_colour\n self.configure(bg=colour)\n if self.label:\n self.label.configure(bg=colour)", "def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)", "def recolor(self, label_value: int, color: Tuple[float, float, float]) -> None:\n seginfo = self.infos[label_value]\n seginfo.color = color\n # propagate state changes\n self._update_state_from_infos()", "def fill(self, color):", "def change_lights_color(self, entity, attribute, oldUrl, newUrl, kwargs):\n if newUrl != oldUrl and newUrl is not None and self.can_change_colors():\n rgb_colors = self.get_colors(self.format_ha_url(newUrl))\n for i in range(len(self.lights)):\n threading.Thread(target=self.set_light_rgb, args=(self.lights[i], rgb_colors[i])).start()", "def test_color(self):\n self._calibration_test(\"color_full\")", "def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)", "def tween_rgb_at(self, progress, output):\n for cell_id in self.next.keys():\n next_color = self.next[cell_id]\n\n if cell_id in self.last:\n last_color = self.last[cell_id]\n else:\n last_color = color.BLACK\n\n r = tween.linear(last_color.r, next_color.r, progress)\n g = tween.linear(last_color.g, next_color.g, progress)\n b = tween.linear(last_color.b, next_color.b, progress)\n cell_color = color.RGB(r,g,b)\n output(cell_id, cell_color)", "def color(self, color):\n\n self.container['color'] = color", "def color(self, color_value):\n self.app.color = color_value", "def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)", "def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)", "def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val", "def 
update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def _update_brightness(self):\n while self.current_brightness != self.brightness:\n next_color = RGB(r=int(self.color.r * (self.current_brightness/100.0)),\n g=int(self.color.g * (self.current_brightness/100.0)),\n b=int(self.color.b * (self.current_brightness/100.0)))\n self._update_color(next_color)\n diff = self.brightness - self.current_brightness\n # adjust current brightness to +/- 1\n self.current_brightness = self.current_brightness + \\\n (diff) / abs(diff)\n time.sleep(.05)\n # Final update to exact brightness and default if no change in brightness setting\n final_color = RGB(r=int(self.color.r * (self.brightness/100.0)),\n g=int(self.color.g * (self.brightness/100.0)),\n b=int(self.color.b * (self.brightness/100.0)))\n self._update_color(final_color)", "def shell_fgcolor_changed(self, color):\n self.set_fgcolor(color)", "def update(self, grid, colRamp = ['white', 'blue']):\n \n # update the cell colors\n for y in range(len(grid)):\n yl = y + 1\n for x in range(len(grid[y])):\n xl = x + 1\n color = colRamp[int(grid[y][x])]\n self.displayWindow.update((xl, yl), color)\n\n # refresh the window\n self.displayWindow.tkupdate()", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value", "def register(self):\n active = True\n self.rgb = colormodel.RGB(0, 255, 0)\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()", "def _color(self,c):\n return self.colorlist[c%len(self.colorlist)]", "def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()", "def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value", "def update(self):\n self._brightness = self._lj.get_load_level(self._index) / 99 * 255", "def shell_bgcolor_changed(self, color):\n self.set_bgcolor(color)", "def set_color_rgb(r, g, b):\r\n global _current_color\r\n _current_color = (r, g, b)", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def changecolor (color):\n valid_colors = (\"red\", \"grey\", \"yellow\", \"green\")\n if color in valid_colors:\n if changecolor.times:\n print(\"The color was last changed at \", changecolor.times[-1])\n print (color)\n changecolor.times.append(time.asctime())\n else:\n n = valid_colors.__len__()\n not_last = valid_colors[:n-1]\n last = valid_colors[-1]\n\n message = ', '.join(not_last) + ' and ' + last\n print (\"sorry, a color can only be\", message)", "def updateColorItem(self, item, itemColor): \n self.avatarConfiguration[item] = itemColor\n self.paintAvatarItem(item)", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position 
= (self.rect.centerx, self.rect.centery)", "def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)", "def change_color(self, x, y, state):\n if state == 1:\n color = self.tile_color\n else:\n color = self.background_color\n self.canvas.itemconfig(self.board[(x, y)], fill=color)", "def _setRgbColor(self, color):\n item = self.item()\n if item is not None:\n color.setAlpha(item.getColor().alpha())\n item.setColor(color)", "def setColor(self, color):\n color = rgba(color)\n if color != self._color:\n self._color = color\n self._updateColor(self._color)\n self._updated(ItemChangedType.COLOR)", "def change_led_floor_color(update: 'Update', context: 'CallbackContext'):\n args = context.args\n message = \" \".join(args)\n\n try:\n publish.single(\"ledfloorupdates\", message, hostname=\"10.90.154.80\", port=1883, client_id=\"kolabbot\")\n update.message.reply_text('Changing LED floor color to \"{}\".'.format(message))\n except (ConnectionRefusedError, TimeoutError) as err:\n msg = \"Could not connect to LED-floor: {}\".format(err)\n print(msg)\n update.message.reply_text(msg)" ]
[ "0.792751", "0.7712366", "0.77086216", "0.7582702", "0.72797334", "0.7251339", "0.72495466", "0.7238376", "0.72019273", "0.7178436", "0.7135327", "0.69967073", "0.69614214", "0.69462603", "0.6907712", "0.68813443", "0.6823944", "0.6820552", "0.6733206", "0.67252445", "0.6687395", "0.6653723", "0.6634742", "0.6625806", "0.662149", "0.6616557", "0.6581856", "0.6546624", "0.65457785", "0.6538357", "0.6502497", "0.64979625", "0.6493508", "0.64900565", "0.64847165", "0.6460563", "0.6456034", "0.6430071", "0.6428601", "0.64193237", "0.6414078", "0.6398025", "0.63827074", "0.6357889", "0.6353343", "0.63296276", "0.6328359", "0.63124424", "0.6298331", "0.6284459", "0.6268865", "0.6255252", "0.6252684", "0.6252684", "0.62485236", "0.624581", "0.6235886", "0.62200403", "0.6213656", "0.6213357", "0.621217", "0.6204643", "0.620115", "0.6199151", "0.6186757", "0.6171659", "0.61689895", "0.61684155", "0.61650395", "0.61620325", "0.6161217", "0.61439455", "0.6138982", "0.61376494", "0.61267763", "0.61214495", "0.6093968", "0.607878", "0.60768545", "0.60765153", "0.60698134", "0.60603917", "0.60493994", "0.60486907", "0.6045861", "0.60455716", "0.6044695", "0.6036818", "0.6029281", "0.6029281", "0.6029281", "0.6029281", "0.6028772", "0.6017756", "0.6013994", "0.60078824", "0.59962827", "0.5993008", "0.5984273", "0.59812677" ]
0.659273
26
Synchronize this instance data with that of its parent
def _syncDataWithParent(self): parent = self.parent() if parent is None: self._data = None else: self._data = parent.getData( mode=parent.getComplexMode(), copy=False) if parent is None or self.getComplexMode() == self.ComplexMode.NONE: self._setColormappedData(None, copy=False) else: self._setColormappedData( parent.getData(mode=self.getComplexMode(), copy=False), copy=False) self._updateScenePrimitive()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)", "def sync(self):\n pass", "def sync(self):\n pass", "def sync(self):\n return", "def sync(self, other):\n pass # TODO", "def do_sync(self):\n raise NotImplementedError() # pragma: no cover", "def sync_local(self, other):\n pass # TODO", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def sync(self, **kwargs):\n pass", "def update_original_data(self):\n pass", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def sync(self):\n return self._sync", "def update(self, parent):\r\n pass", "def _post_sync(self):", "def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)", "def lock(self):\n raise NotImplementedError", "def sync() -> None:", "def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)", "def SyncRoot(self) -> object:", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self, sync):\n self._sync = sync", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def _pre_sync(self):", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n 
self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def freeze(self,):\n pass", "def sync(self):\n # TODO: write better documentation: when would user need this?\n wait(self.proto.sync())", "def sync(self, sync):\n\n self._sync = sync", "def after_sync(self):\n self.title = self.c[\"title\"]\n self.body = self.c[\"body\"]\n self.state = self.c[\"state\"]\n self.base = self.c[\"base\"][\"ref\"]\n self.head = self.c[\"head\"][\"ref\"]\n self.maintainer_can_modify = self.c[\"maintainer_can_modify\"]", "def sync(self) -> None:\n for parameter in self.data_to_sync:\n assert hasattr(self, parameter), \\\n \"Parameter: %s does not exist in: %s\" % (parameter, self)\n self.publish(self.key_gen(parameter), getattr(self, parameter))", "def sync(self) -> None: #\n self.__target.load_state_dict(self.__policy.state_dict())", "def sync(self):\n resp = yield self.do_sync()\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)", "def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)", "def __post_init__(self):\n # ------------------------------------------------------------ 01\n # if path exists load data dict from it\n # that is sync with contents on disk\n if self.path.exists():\n _hashable_dict_from_disk = \\\n m.FrozenDict.from_yaml(self.path.read_text())\n # update internal dict from HashableDict loaded from disk\n self.__dict__.update(\n _hashable_dict_from_disk.get()\n )\n\n # ------------------------------------------------------------ 02\n # start syncing i.e. 
any updates via __setattr__ will be synced\n # to disc\n self.internal.start_syncing = True", "def _notify_parent_change(self):\n pass", "def syncContents(self):\n self._contents.setState_TRY(self.temperature(),\n self.density(),\n self.massFractions())", "def freeze(self):\n raise NotImplementedError()", "def i_am_locking(self):\r\n pass", "def update(self):\n\n pass", "def update(self):\n return self", "def update(self):\n raise NotImplementedError", "def _update(self):\n pass", "def update(self):\r\n pass", "def on_parent_changed(self):\n pass", "def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()", "def sync_remote(self, other):\n pass # TODO", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n raise NotImplementedError()", "def lock (self):\n self.locked = True\n self._changed = False", "def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")", "def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()", "def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. 
It won't pickle well.\n s[\"_parent\"] = None\n return s", "def update(self):\n with managed_session() as session:\n session.merge(self)", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)", "def update(self):\n # default implementation is to do nothing.", "def sync_widgets(self):\n self.data_changed.emit(self.value)", "def sync_to_ontology(self):\n self.ontology.sync_entity_to_graph(self)", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def copy(self):\n return super().copy()", "def lock(self):\n self.mtx.acquire()", "def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()", "def build(self):\n self.lock_built = True", "def after_sync(self):\n pass", "def update_data():\n pass", "def __init__(self):\n self.data = {}\n self.refresh()", "def __enter__(self):\n\n self.create()\n return super().__enter__()", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "async def async_update(self) -> None:\n await super().async_update()\n await self.async_get_state()", "def restore_object(self):\n self.co_worker_list = self.original_co_worker_list", "def _update_object(self, data_dict):\r\n pass", "def update(self, other):\n b = self.hallucinate_merge(other)\n self.l_child = b.l_child\n self.r_child = b.r_child", "def sync_info(self, sync_info):\n\n self._sync_info = sync_info", "def update(self):\n self._xfinity_data.update()", "def cambiar_parent(self):\r\n self.client.parent = self", "def cambiar_parent(self):\r\n self.client.parent = self", "def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()", "def sync_end(self):", "def __enter__(self):\n return self._get_storage().__enter__()", "def __enter__(self):\n\n return self" ]
[ "0.8105605", "0.80830806", "0.7977319", "0.71153784", "0.71153784", "0.70563495", "0.7043718", "0.674526", "0.65729344", "0.6530828", "0.64562", "0.6451494", "0.6362502", "0.6362502", "0.6325555", "0.63112843", "0.6255493", "0.6245364", "0.6242624", "0.619789", "0.61851496", "0.61773336", "0.61612016", "0.61592674", "0.615458", "0.61517084", "0.6139571", "0.61280423", "0.6117423", "0.6102235", "0.60881495", "0.6027518", "0.6023041", "0.6007091", "0.5970572", "0.5955212", "0.59466237", "0.5941309", "0.59173286", "0.5876683", "0.58658206", "0.5858481", "0.58379585", "0.583246", "0.5813005", "0.57983154", "0.5788369", "0.5780768", "0.5767515", "0.57577527", "0.57572305", "0.5747498", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747469", "0.5747163", "0.5744666", "0.57408583", "0.5727137", "0.5714643", "0.5712627", "0.5683065", "0.5666505", "0.56472826", "0.5630778", "0.56155974", "0.56155485", "0.55971104", "0.5590951", "0.5575481", "0.5554041", "0.5548815", "0.55483866", "0.5545577", "0.55447865", "0.5542659", "0.554085", "0.5539797", "0.55267113", "0.5525976", "0.5525867", "0.5520475", "0.5518965", "0.5518965", "0.55176026", "0.5515576", "0.5513838", "0.5512805" ]
0.7733652
3
Handle data change in the parent this isosurface belongs to
def _parentChanged(self, event): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() super(ComplexIsosurface, self)._parentChanged(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def data_changed(self):\n return", "def XPLMDataChanged_f(inRefcon):", "def data_changed(self):\n self.data_changed_signal.emit(self)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)", "def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")", "def MyDataChangedCallback(self, inRefcon):\r\n pass", "def get_data(self, data):\n data = super().get_data(data)\n self.pid.update_layer1(data[self.pid_cols])\n return data", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def update_original_data(self):\n pass", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def on_data_vars_change(self, change):\n if change['type'] == 'change' and change['name'] == 'value':\n self.left_ds = getattr(self.ts.data, change['new'])\n if self.mask is None:\n self.right_ds = self.left_ds.copy(deep=True)\n else:\n self.right_ds = self.left_ds * self.mask\n\n self.left_imshow.set_data(self.left_ds.data[0])\n self.right_imshow.set_data(self.right_ds.data[0])", "def update_data():\n pass", "def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()", "def update(self, parent):\r\n pass", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. 
\")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")", "def dataGridView_CellValueChanged(self, sender, eventArgs):\r\n name = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[0].Value\r\n newVal = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[eventArgs.ColumnIndex].Value\r\n child = Application.ActiveSceneRoot.FindChild2( name, constants.siPolyMeshType, constants.siMeshFamily, True )\r\n if child:\r\n transform = child.Kinematics.Local.GetTransform2(None)\r\n translation = transform.Translation\r\n if eventArgs.ColumnIndex == 1:\r\n transform.Translation = XSIMath.CreateVector3( newVal, translation.Y, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 2:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, newVal, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 3:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, translation.Y, newVal )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n else:\r\n print \"DataGridView_CellValueChanged: \" + child + \" not found!\"", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def _notify_parent_change(self):\n pass", "def on_parent_changed(self):\n pass", "def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )", "def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)", "def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())", "def _numberOfPoints_changed(self):\n self.reinitialiseData()", "def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolygon, self)._update_proxy(change)", "def _metadata_changed(self, old, new):\n\n #self.cross_plot.value_range.low = self.minz\n #self.cross_plot.value_range.high = self.maxz\n #self.cross_plot2.value_range.low = self.minz\n #self.cross_plot2.value_range.high = self.maxz\n if self._imag_index.metadata.has_key(\"selections\"):\n x_ndx, y_ndx = self._imag_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n# xdata, ydata = self._image_index.get_data()\n# xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd_horiz.set_data(\"horiz\", self._image_value.data[y_ndx,:])\n self.pd_vert.set_data(\"vert\", self._image_value.data[:,x_ndx])", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def on_new_data(self, data):\n raise NotImplementedError()", "def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n 
self.proxy.update_points(change)\n else:\n super(MapPolyline, self)._update_proxy(change)", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def _update_data(self) -> None:\n data: SwitcherShutter = self.coordinator.data\n self._attr_current_cover_position = data.position\n self._attr_is_closed = data.position == 0\n self._attr_is_closing = data.direction == ShutterDirection.SHUTTER_DOWN\n self._attr_is_opening = data.direction == ShutterDirection.SHUTTER_UP", "def _modelUpdated(self, *args, **kwargs):\n topLeft = self.index(column=0)\n bottomRight = self.index(column=1)\n model = self.model()\n if model is not None:\n model.dataChanged.emit(topLeft, bottomRight)", "def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')", "def _data_updated_callback(self, attr, old, new):\n pass", "def update_E(self):", "def update_visualization(self) -> None:\n pass", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()", "def _update_ax(self):\n raise NotImplementedError(\"Implement _update_ax(self) in subclass\")", "def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]", "def setData(self,newData):\r\n pass", "def onChange(self, parent):\r\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def reload_data(self):\n super(UpdateMessage, self).reload_data()\n self._previous_avro_payload.reload_data()", "def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data", "def onMarketUpdate(self, data):\n pass", "def update(self, new_gameStateData):\r\n pass", "def onFlowUpdate(self, event):", "def on_edit(self, dataobj):", "def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass", "def update_all_data(self):\n 
self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())", "def update(self):", "def update(self):", "def update(self):", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. 
\n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()", "def process_data(self, windowed_data):\n return", "def on_dataobj_create(self, dataobj):", "def project_changed(self, day_idx):\n self.is_modified = True\n self.fire_project_changed(ChartProject.CHANGED)", "def onFrameUpdated(self):\n pass", "def _resolution_changed(self):\n self.reinitialiseData()", "def process_IN_MODIFY(self, event):", "def _measurement_update(self):\n pass", "def updateData(self, fp, prop):\n return", "def setData(self,newdata):\n self.record(inspect.currentframe())\n if np.shape(newdata) == np.shape(self.data):\n self.data = np.copy(newdata)", "def onUpdated(self):", "def slot_depth(self, dummy_sender, data):\r\n (typ, price, _voldiff, total_vol) = data\r\n if self._update_book(typ, price, total_vol):\r\n self.signal_changed(self, None)", "def _update_proxy(self, change):\n # The superclass handler implementation is sufficient.\n super(AbstractItemView, self)._update_proxy(change)", "def update_data(self, newData):\r\n self.AllData = newData", "def onPropertiesChange(self, data):\n pass", "def _itemChanged(self, event):\n if event == items.ItemChangedType.COLORMAP:\n self._sigColormapChanged.emit()\n if self._colormap is not None:\n self._colormap.sigChanged.disconnect(self._colormapChanged)\n\n item = self.item()\n if item is not None:\n self._colormap = item.getColormap()\n self._colormap.sigChanged.connect(self._colormapChanged)\n else:\n self._colormap = None\n\n elif event == items.ItemChangedType.DATA:\n self._sigColormapChanged.emit()", "def _itemChanged(self, event):\n if event in self._EVENTS:\n model = self.model()\n if model is not None:\n index = 
self.index(column=0)\n model.dataChanged.emit(index, index)", "def process_IN_ATTRIB(self, event):", "def data_input_changed(self):\n self.message.data = self.dataInput.toPlainText()\n self.validate_data_input(self.message.dlc)", "def update(self):\n self.data.update()\n\n sensor_type = self.entity_description.key\n if sensor_type == \"light\":\n self._attr_native_value = self.data.light\n elif sensor_type == \"light_red\":\n self._attr_native_value = self.data.light_red\n elif sensor_type == \"light_green\":\n self._attr_native_value = self.data.light_green\n elif sensor_type == \"light_blue\":\n self._attr_native_value = self.data.light_blue\n elif sensor_type == \"accelerometer_x\":\n self._attr_native_value = self.data.accelerometer_x\n elif sensor_type == \"accelerometer_y\":\n self._attr_native_value = self.data.accelerometer_y\n elif sensor_type == \"accelerometer_z\":\n self._attr_native_value = self.data.accelerometer_z\n elif sensor_type == \"magnetometer_x\":\n self._attr_native_value = self.data.magnetometer_x\n elif sensor_type == \"magnetometer_y\":\n self._attr_native_value = self.data.magnetometer_y\n elif sensor_type == \"magnetometer_z\":\n self._attr_native_value = self.data.magnetometer_z\n elif sensor_type == \"temperature\":\n self._attr_native_value = self.data.temperature\n elif sensor_type == \"pressure\":\n self._attr_native_value = self.data.pressure\n elif sensor_type == \"voltage_0\":\n self._attr_native_value = self.data.voltage_0\n elif sensor_type == \"voltage_1\":\n self._attr_native_value = self.data.voltage_1\n elif sensor_type == \"voltage_2\":\n self._attr_native_value = self.data.voltage_2\n elif sensor_type == \"voltage_3\":\n self._attr_native_value = self.data.voltage_3", "def draw_data(self):\n\n return NotImplementedError", "def _update_proxy(self, change):\n # The superclass implementation is sufficient.\n super(DockArea, self)._update_proxy(change)", "def OnData(self, data):\n\n for k in self.assets_keys:\n cond1 = (self.Time > self.stop_time_dict[k])\n cond2 = self.Portfolio[k].Invested\n # self.Debug(f\"cond1 {cond1}, cond2 {cond2}\")\n if cond1 and cond2:\n self.Debug(f\"{self.Time}, {k} position {self.Portfolio[k].Quantity}\")\n self.Liquidate(k)\n self.Debug(f\"{k} position liquidated: {self.Portfolio[k].Quantity}\")\n\n for k in self.assets_keys:\n if not data.ContainsKey(k):\n continue\n\n dat = data[k]\n time = dat.Time\n\n try:\n # self.features.loc[time] = [data[\"GAZP\"].Fastmavg, data[\"GAZP\"].Slowmavg, data[\"GAZP\"].Close]\n # self.features.loc[time]\n self.features_dict[k].loc[time] = [dat.Logret, dat.Momone, dat.Momtwo, dat.Momthree, dat.Momfour,\n dat.Momfive, dat.Volatilityfifty, dat.Volatilitythirtyone,\n dat.Volatilityfifteen,\n dat.Autocorrone, dat.Autocorrtwo, dat.Autocorrthree,\n dat.Autocorrfour, dat.Autocorrfive,\n dat.Logtone, dat.Logttwo, dat.Logtthree, dat.Logtfour, dat.Logtfive,\n dat.Bin, dat.Side]\n # self.Debug(\"1\")\n except AttributeError as e:\n continue\n\n if self.clf_dict[k] is not None:\n X = self.features_dict[k].drop([\"Bin\"], axis=1).loc[time].values.reshape(1, -1)\n y_pred = self.clf_dict[k].predict(X)\n\n if y_pred > .8:\n\n if dat.Side == 1:\n if not self.Portfolio[k].IsLong:\n self.stop_time_dict[k] = self.Time + self.lifetime\n if self.Portfolio[k].Invested:\n self.Liquidate(k)\n self.SetHoldings(k, .5)\n # self.Debug(f\" long {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")\n\n else:\n 
self.stop_time_dict[k] = self.Time + self.lifetime\n # self.Debug(f\" long_ {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n\n elif dat.Side == -1:\n if self.Portfolio[k].IsLong:\n self.stop_time_dict[k] = self.Time + self.lifetime\n self.Liquidate(k)\n self.SetHoldings(k, -0.5)\n # self.Debug(f\" short {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")\n else:\n self.stop_time_dict[k] = self.Time + self.lifetime\n # self.Liquidate(k)\n self.SetHoldings(k, -0.5)\n # self.Debug(f\" short_ {k}, {self.Portfolio[k].Quantity}, till {self.stop_time_dict[k]}\")\n # self.Debug(f\" hol {self.Portfolio.TotalHoldingsValue}, cash {self.Portfolio.Cash}\")", "def _itemChanged(self, event):\n if event == items.ItemChangedType.VISUALIZATION_MODE:\n item = self.sender()\n if item is not None: # This occurs with PySide/python2.7\n self.__isEnabled = item.isPropertyEnabled(self.__propertyName)\n self.__updateFlags()\n\n # Notify model\n model = self.model()\n if model is not None:\n begin = self.index(column=0)\n end = self.index(column=1)\n model.dataChanged.emit(begin, end)", "def changeFridge(self,*args):\n self.selectedADR = self.adrSelect.get()\n # clear temps plot\n self.stage60K.set_xdata([])\n self.stage60K.set_ydata([])\n self.stage03K.set_xdata([])\n self.stage03K.set_ydata([])\n self.stageGGG.set_xdata([])\n self.stageGGG.set_ydata([])\n self.stageFAA.set_xdata([])\n self.stageFAA.set_ydata([])\n # load saved temp data\n # We have to sleep for 0.5s here because it seems like it takes\n # a moment for the connected server to register in self.cxn, even\n # though all this starts because a message is received saying it\n # is connected :\\\n time.sleep(0.5)\n startDateTime = yield self.cxn[self.selectedADR].get_start_datetime()\n try:\n reg = self.cxn.registry\n yield reg.cd(ADR_SETTINGS_BASE_PATH + [self.selectedADR])\n logPath = yield reg.get('Log Path')\n tempDataChest = dataChest(logPath)\n ds = dateStamp()\n dset = '%s_temperatures'%ds.dateStamp(startDateTime.isoformat())\n tempDataChest.openDataset(dset)\n\n n = tempDataChest.getNumRows()\n # load approximately the last 6 hours of data\n pastTempData = tempDataChest.getData(max(0,n-6*60*60),None )\n for newRow in pastTempData:\n # change utc time to local\n utc = newRow[0] # (float)\n utc = datetime.datetime.utcfromtimestamp(utc)\n utc = utc.replace(tzinfo=tz.tzutc())\n newRow[0] = mpl.dates.date2num(utc)\n # add old data from file into plot\n self.stage60K.set_xdata(numpy.append(self.stage60K.get_xdata(),newRow[0]))\n self.stage60K.set_ydata(numpy.append(self.stage60K.get_ydata(),newRow[1]))\n self.stage03K.set_xdata(numpy.append(self.stage03K.get_xdata(),newRow[0]))\n self.stage03K.set_ydata(numpy.append(self.stage03K.get_ydata(),newRow[2]))\n self.stageGGG.set_xdata(numpy.append(self.stageGGG.get_xdata(),newRow[0]))\n self.stageGGG.set_ydata(numpy.append(self.stageGGG.get_ydata(),newRow[3]))\n self.stageFAA.set_xdata(numpy.append(self.stageFAA.get_xdata(),newRow[0]))\n self.stageFAA.set_ydata(numpy.append(self.stageFAA.get_ydata(),newRow[4]))\n except IOError:\n # file not created yet if adr server just opened\n print( 'temp file not created yet?' 
)\n self.updatePlot()\n # clear and reload last 20 messages of log\n self.log.clear()\n logMessages = yield self.cxn[self.selectedADR].get_log(20)\n for (t,m,a) in logMessages:\n self.updateLog(t,m,a)\n # update instrument status stuff: delete old, create new\n for widget in self.instrumentStatusFrame.winfo_children():\n widget.destroy()\n returnStatus = yield self.cxn[self.selectedADR].get_instrument_state()\n self.instrumentStatuses = {}\n for name,status in returnStatus:\n self.instrumentStatuses[name] = Tkinter.Label(self.instrumentStatusFrame,\n text=name,\n relief=Tkinter.RIDGE,\n bg='gray70')\n self.instrumentStatuses[name].pack(side=Tkinter.LEFT,\n expand=True,\n fill=Tkinter.X)\n # update field limits and button statuses\n self.setFieldLimits()\n self.magUpButton.configure(state=Tkinter.NORMAL)\n self.regulateButton.configure(state=Tkinter.NORMAL)\n self.compressorButton.configure(state=Tkinter.DISABLED)\n mUp = yield self.cxn[self.selectedADR].get_state_var('maggingUp')\n reg = yield self.cxn[self.selectedADR].get_state_var('regulating')\n if mUp:\n self.magUpButton.configure(text='Stop Magging Up',\n command=self.cancelMagUp)\n self.regulateButton.configure(state=Tkinter.DISABLED)\n if reg:\n self.regulateButton.configure(text='Stop Regulating',\n command=self.cancelRegulate)\n self.magUpButton.configure(state=Tkinter.DISABLED)\n # update heat switch buttons\n HSAvailable = yield self.cxn[self.selectedADR].get_instrument_state(['Heat Switch'])\n if HSAvailable[0][1][0]:\n self.HSCloseButton.configure(state=Tkinter.NORMAL)\n self.HSOpenButton.configure(state=Tkinter.NORMAL)\n else:\n self.HSCloseButton.configure(state=Tkinter.DISABLED)\n self.HSOpenButton.configure(state=Tkinter.DISABLED)\n # refresh interface\n self.updateInterface()", "def __flight_data_handler(self, event, sender, data):\n self.battery = data.battery_percentage\n self.fly_mode = data.fly_mode\n self.throw_fly_timer = data.throw_fly_timer\n self.throw_ongoing = data.throw_fly_timer > 0\n\n if self.prev_flight_data != str(data):\n print(data)\n self.prev_flight_data = str(data)\n self.flight_data = data\n\n if self.is_flying != data.em_sky:\n self.is_flying = data.em_sky\n log.debug(f\"FLYING : {self.is_flying}\")\n if not self.is_flying:\n self.reset()\n else:\n if self.tracking_after_takeoff:\n log.info(\"Tracking on after takeoff\")\n self.toggle_tracking(True)\n\n # if self.write_header_log:\n # self.write_header_log = False\n # self.log_file_log.write(f\"{data.format_cvs_header()}\\n\")\n # self.log_file_log.write(f\"{data.format_cvs(0)}\\n\")", "def updateData( Tables, Graph, LayersInfo, WarningMessage ):\n\n # clean the warning message\n LayersInfo.clean()\n WarningMessage.clean()\n\n LayerThicknessBuffer = Tables[ \"GeometryProperties\" ].getValue( 0, 2 )\n try:\n\n\n Layers = getLayersFromString( Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) )\n\n LayersInfo.printMessage( str( len( Layers ) ) )\n\n # Homogenize the input data\n if len(Layers) != 1:\n\n makeMultiLayerMask( Tables )\n\n HomogenizedData = homogenize( Tables[ \"ElasticModulus\" ].getData( )[ 0 ],\n Tables[ \"ShearModulus\" ].getData( )[ 0 ],\n Tables[ \"PoissonRatios\" ].getData( ),\n Layers )\n\n #cangeMode( Tables, WarningMessage, Graph.getMode( ) )\n\n Tables[ \"ElasticModulus\" ].assignValuesSet( HomogenizedData[ \"ElasticModulus\" ] )\n Tables[ \"ShearModulus\" ].assignValuesSet( HomogenizedData[ \"ShearModulus\" ] )\n Tables[ \"PoissonRatios\" ].assignValuesSet( HomogenizedData[ \"PoissonRatios\" ] )\n Tables[ 
\"GeometryProperties\" ].assignValue( 0, 2, HomogenizedData[ \"TotalThickness\" ] )\n\n\n # Part of error handling.Function \"isInputNegative\" throws an error\n # if there is an element with its negetive value.\n isInputNegative( Tables [ \"ElasticModulus\" ].getData( ) )\n isInputNegative( Tables [ \"ShearModulus\" ].getData( ) )\n isInputNegative( Tables [ \"PoissonRatios\" ].getData( ) )\n isInputNegative( Tables [ \"MaterialProperties\" ].getData( ) )\n isInputNegative( Tables [ \"GeometryProperties\" ].getData( ) )\n\n # update the tables buffers\n makeMask( Tables, Graph.getMode() )\n\n # before calling user-define functions check the current mode\n cangeMode( Tables, WarningMessage, Graph.getMode() )\n\n precomputePoissonRatios( Tables )\n\n # get data from the corresponding tables\n ElasticModulusData = Tables [ \"ElasticModulus\" ].getData( )\n ShearModulusData = Tables [ \"ShearModulus\" ].getData( )\n PoissonRatiosData = Tables [ \"PoissonRatios\" ].getData( )\n MaterialPropertiesData = Tables [ \"MaterialProperties\" ].getData( )\n GeometryPropertiesData = Tables [ \"GeometryProperties\" ].getData( )\n\n\n #################### CALL USER-SPECIFIC FUNCTION ##########################\n\n testInputData( Graph.getMode(), PoissonRatiosData )\n\n Graph.Containers[ \"WaveVelocity\" ] = wave_speeds(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n\n Graph.Containers[ \"ModesInBand\" ] = ModesInBand(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalDensity\" ] = ModaleDichte(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_L\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_S\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_g_eff\" ],\n GeometryPropertiesData,\n bool( Graph.getMode( ) ),\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"ModalOverlapFactor\" ] = ModalOverlapFactor(\n MaterialPropertiesData,\n Graph.Containers[ \"ModalDensity\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"MaxElementSize\" ] = MaximumElementSize(\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B\" ],\n Graph.Containers[ \"WaveVelocity\" ][ \"c_B_eff\" ],\n Graph.getRange( ) )\n\n\n Graph.Containers[ \"EigenFrequency\" ] = EigenfrequenciesPlate(\n ElasticModulusData,\n ShearModulusData,\n PoissonRatiosData,\n MaterialPropertiesData,\n GeometryPropertiesData,\n bool( Graph.getMode() ),\n Graph.getRange() )\n\n # Update the current graph with new data\n updateGraph( Graph, Graph.getCurrentGraphNumber( ) )\n\n WarningMessage.clean()\n\n\n except VibroP_DataCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n Tables[ \"GeometryProperties\" ].setValue( 0, 2, LayerThicknessBuffer, \"\" )\n\n\n except VibroP_WrongLayersThikness as Error:\n WarningMessage.printMessage( str(Error) )\n\n\n except VibroP_TableCorrupted as Error:\n WarningMessage.printMessage( str(Error) )\n\n #'''\n except:\n Message = \"Error: Unexpected error. 
Please, refer to the code\"\n WarningMessage.printMessage( Message )\n #'''", "def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])", "def _update_object(self, data_dict):\r\n pass", "def __init__(self, parent): \n \n self.parent = parent\n \n self.custom_channel_name = _qstring(parent.rhd)\n self.native_channel_name = _qstring(parent.rhd)\n self.native_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.custom_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.signal_type = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.channel_enabled = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.chip_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.board_stream = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_trigger_mode= np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_threshold = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_trigger_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_edge_polarity = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.electrode_impedance_magnitude = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n self.electrode_impedance_phase = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n\n if self.signal_type == 0 and self.channel_enabled:#Add name to the amplifier channel list\n parent._AMPLIFIER_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 1 and self.channel_enabled:#Add name to the aux channel list\n parent._AUX_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 2 and self.channel_enabled:#Supply voltage\n parent._SUPPLY_VOLTAGE_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 3 and self.channel_enabled:#usb board adc input channel\n parent._ADC_INPUT_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 4 and self.channel_enabled:#usb board digital input channel\n parent._DIGITAL_INPUT_CHANNELS.append(self.native_channel_name)", "def updateVisualization(self):\n\t\tif self.visualization:\n\t\t\tif self.fixedVisualization:\n\t\t\t\tself.visualization.setFixedVisualization(self.fixedVisualization)\n\t\t\tif self.movingVisualization:\n\t\t\t\tself.visualization.setMovingVisualization(self.movingVisualization)\n\t\tself.multiRenderWidget.setVolumeVisualization(self.visualization)\n\t\tself.visualizationUpdated.emit(self.visualization)" ]
[ "0.7281809", "0.6798219", "0.663362", "0.6617056", "0.6495038", "0.63887423", "0.63887423", "0.63203007", "0.6309291", "0.62671393", "0.6239306", "0.6219209", "0.6177352", "0.6040155", "0.59989554", "0.5994137", "0.59767103", "0.5967239", "0.5934066", "0.5895432", "0.58403164", "0.5837722", "0.58095384", "0.5787082", "0.5745996", "0.57370967", "0.57274175", "0.57120687", "0.5686296", "0.5668755", "0.56500715", "0.56143516", "0.56101364", "0.5600463", "0.55783623", "0.5572413", "0.5542561", "0.54469305", "0.5445184", "0.54451364", "0.5436735", "0.5423925", "0.5405189", "0.5404468", "0.540398", "0.53881997", "0.5385559", "0.53612375", "0.53593886", "0.53440094", "0.5343246", "0.5343246", "0.5343246", "0.53135335", "0.53007156", "0.5298004", "0.5285347", "0.5275803", "0.52378005", "0.521567", "0.52154994", "0.5215113", "0.5215113", "0.5215113", "0.52099544", "0.52099544", "0.52099544", "0.52099544", "0.5205525", "0.5201411", "0.5200751", "0.51920694", "0.5163546", "0.5162113", "0.51564157", "0.5156308", "0.5154777", "0.51547617", "0.515394", "0.5153451", "0.51531476", "0.5135554", "0.5131347", "0.5127962", "0.51279086", "0.5106883", "0.5100003", "0.50936526", "0.5084411", "0.50794625", "0.5078345", "0.5075342", "0.5073403", "0.5068132", "0.5062168", "0.5055551", "0.50551134", "0.5054878", "0.50545305", "0.5052645" ]
0.78250813
0
Handle update of the isosurface (and take care of mode change)
def _updated(self, event=None): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() elif event in (ItemChangedType.COLORMAP, Item3DChangedType.INTERPOLATION): self._updateScenePrimitive() super(ComplexIsosurface, self)._updated(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def update():", "def update():", "def plane_update(self):\n self.plane.update()", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def update_figure(self):\n\n self.draw()", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def _update(self):\n self.cv.update()", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def update( ):\r\n pass", "def update_visualization(self) -> None:\n pass", "def __call__(self, info, *fargs):\n frame = info[0] # Frame number\n update = info[1] # Update value\n grid_data = info[2] # Data to draw our grids\n mask = info[3] # Mask of data\n self._setup['update'].set_text(f'Update {update}')\n for ndx,data in enumerate(grid_data):\n self._setup['plots'][ndx].set_array(check_mask(data,mask[ndx]))\n for pp in self._setup['post_plot']:\n pp.blit_update(frame, update, ax_ndx=ndx)\n if self._setup._pbar:\n self._setup._pbar.update(frame)\n if frame == self._setup._num_frames - 1:\n self._setup._pbar.finish()\n return self._setup.get_drawables()", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self):\n\n self.pta_time[0] = 1 + Globals.clock.get_frame_time() * 
self.options.time_scale\n\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_update,\n Globals.base.win.get_gsg())\n\n self.fftX.execute()\n self.fftY.execute()\n self.fftZ.execute()\n\n # Execute the shader which combines the 3 displacement maps into\n # 1 displacement texture and 1 normal texture. We could use dFdx in\n # the fragment shader, however that gives no accurate results as\n # dFdx returns the same value for a 2x2 pixel block\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_combine,\n Globals.base.win.get_gsg())", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def update(self) -> pygame.Surface:\n return self.surface", "def update(self):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.update(speed=self.speed)\n self.IA.O_ATUAL.update()\n self._desintegrator.update()", "def update_plot():\n pass", "def update_visualizer(self):\n if self.visualizer:\n if self.frame_count == 2:\n self.visualizer.add_geometry(self.vis_points)\n self.visualizer.update_geometry(self.vis_points)\n self.visualizer.poll_events()\n self.visualizer.update_renderer()\n time.sleep(0.001)\n self.frame_count += 1", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update_image(self, surface):\n self.ui_widget.update_image(surface=surface)", "def UpdateLayers(self):\n pass", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def isosurface(self):\n 
return self._isosurface()", "def update(self):\n events = pygame.event.get()\n self.plane_update()\n self.bullet_update(events)\n self.background_update()\n self.enemy_update(events)", "def update(self):\r\n pygame.display.update()\r\n return", "def update(self):\n self.data.update()\n for sensor in self.data.daikinskyport.get_sensors(self._index):\n if sensor[\"type\"] == self._type and self._sensor_name == sensor[\"name\"]:\n self._state = sensor[\"value\"]", "def UpdateState( self, **kwargs ):\n if bool( self ):\n if 'scale_mode' in kwargs:\n kwargs[ 'replot' ] = True\n\n kwargs = self._UpdateStateValues( **kwargs )\n redraw = kwargs.get( 'redraw', False )\n replot = kwargs.get( 'replot', False )\n\n if self.logger.isEnabledFor( logging.DEBUG ):\n self.logger.debug(\n '%s: redraw=%s, replot=%s',\n\t self.GetTitle(), str( redraw ), str( replot )\n\t )\n\n if replot:\n self._UpdateDataSetValues()\n self._UpdatePlot()\n\n elif redraw:\n self._DoUpdateRedraw()\n self.canvas.draw()", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def update(self):", "def update(self):", "def update(self):", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. 
\n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def update(self, *args):\n return _osgAnimation.Channel_update(self, *args)", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.t = time()\n self.frame += 1\n self.loop(self)\n self.draw_bg()\n self.draw_C()\n if self.cursor:\n self.draw_rect(*self.pos, RED, 2)\n self.draw_grid()\n self.draw_T()\n self.show_info()\n for (surf, rect) in self.surf_list:\n self.screen.blit(surf, rect)\n pygame.display.update()\n self.clock.tick(self.fps)", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)", "def update(self):\n self.sensor.update()", "def redraw(self):\r\n self.c.update()", "def update(self):\n #update position\n trans = self.buffer.lookup_transform(\"map\", \"base_footprint\", rospy.Time(),rospy.Duration(1))\n self.position = (trans.transform.translation.x,trans.transform.translation.y)\n #update map\n \n #update map\n self.map_callback(self.get_map().map)\n\n #update forntiers\n frontier_map = frontier(self.map,self.map_info,self.position)\n pos = frontier_map.frontier_world\n #set goal\n self.set_goal(pos)\n\n #check if there are any frontiers left\n return frontier_map.counter", "def update_screen(ai_settings, screen, ship):", "def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)", "def update(self):\n print(\"sensorState Update\")", "def update(self):\n while not 
rospy.is_shutdown():\n self.calculate_frame()\n for callback in self.callbacks:\n callback(self.keypoints, self.image)", "def update(self):\n self.device.update()", "def update(self):\n self.device.update()", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.context.open = False\n if event.type == KEYDOWN:\n\n if event.key == K_ESCAPE:\n self.context.open = False\n if event.key == K_SPACE or event.key == K_MENU or event.key == K_q:\n self.setMode((self.mode + 1) % 3)\n if event.key == K_0:\n self.show_polynomial = not(self.show_polynomial)\n if event.key == K_1:\n self.show_image = not(self.show_image)\n if event.key == K_2:\n self.show_drawing = not(self.show_drawing)\n if event.key == K_3:\n self.show_display = not(self.show_display)\n if event.key == K_4:\n self.show_vectors = not(self.show_vectors)\n if event.key == K_5:\n self.show_circles = not(self.show_circles)\n if event.key == K_6:\n self.show_sample = not(self.show_sample)\n if event.key == K_r:\n self.reset()\n if event.key == K_z:\n self.drawing = self.drawing[:-1]\n self.updateSample()\n if event.key == K_s:\n self.save() # Save the coefficients and the graphs\n if event.key == K_d:\n self.saveCoefficients()\n if event.key == K_a:\n # Save a picture the screen\n self.screenshot(self.directory)\n if event.key == K_p:\n self.pause = not(self.pause)\n if event.key == K_f:\n self.context.switch()\n if event.key == K_c:\n self.show_camera = not(self.show_camera)\n if self.show_camera:\n self.context.camera.buildCapture()\n else:\n self.context.camera.destroy()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if (event.button == 1) and (self.mode == 0):\n self.place()\n self.updateSample()\n if event.button == 4:\n self.context.draw.plane.zoom([1.1, 1.1])\n if event.button == 5:\n self.context.draw.plane.zoom([0.9, 0.9])\n\n if event.type == VIDEORESIZE:\n self.context.screen = pygame.display.set_mode(\n (event.w, event.h), RESIZABLE)", "def update_screen(ml_settings,screen, cartesian_plane):\n \t#Redraw the dcreen during each pass through the loop\n \tscreen.fill(ml_settings.bg_color)\n\n\t#Draw the cartesian plane\n\tcartesian_plane.draw(ml_settings,screen)\n\n \n \t#Make the most recetly drawn screen visible\n \tpygame.display.flip()", "def updateVisualization(self):\n\t\tif self.visualization:\n\t\t\tif self.fixedVisualization:\n\t\t\t\tself.visualization.setFixedVisualization(self.fixedVisualization)\n\t\t\tif self.movingVisualization:\n\t\t\t\tself.visualization.setMovingVisualization(self.movingVisualization)\n\t\tself.multiRenderWidget.setVolumeVisualization(self.visualization)\n\t\tself.visualizationUpdated.emit(self.visualization)", "def ship_updates(ai, var, screen, ship, charges, shields, hub):\r\n\tship.update(ai)\r\n\tship.draw_ship()\r\n\tcharge_shield_graphics(ai, var, screen, ship, charges, shields, hub)", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. 
:\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass", "def update_H(self):", "def update(self,update_flags):\n pass", "def update_display(self):\n \n # check availability of display queue of the wide camera\n# if not hasattr(self,'wide_disp_queue'):\n# pass\n# elif self.wide_disp_queue.empty():\n# pass\n# else:\n# try:\n# wide_disp_image = self.wide_disp_queue.get()\n# \n# self.wide_disp_counter += 1\n# self.wide_disp_counter %= 2\n# if self.wide_disp_counter == 0:\n# if type(wide_disp_image) == np.ndarray:\n# if wide_disp_image.shape == (self.wide_cam.settings.height.value(),self.wide_cam.settings.width.value()):\n# try:\n# self.wide_cam_image.setImage(wide_disp_image)\n# except Exception as ex:\n# print('Error: %s' % ex)\n# except Exception as ex:\n# print(\"Error: %s\" % ex)\n \n # check availability of display queue of the track camera \n if not hasattr(self,'track_disp_queue'):\n pass\n elif self.track_disp_queue.empty():\n pass\n else:\n try:\n track_disp_image = self.track_disp_queue.get()\n self.track_disp_counter += 1\n self.track_disp_counter %= 4\n if self.track_disp_counter == 0:\n if type(track_disp_image) == np.ndarray:\n if track_disp_image.shape == (self.track_cam.settings.height.value(),self.track_cam.settings.width.value()):\n try:\n self.track_cam_image.setImage(track_disp_image)\n except Exception as ex:\n print('Error: %s' % ex)\n \n x = int(self.settings.x.value())\n y = int(self.settings.y.value())\n self.tracker_data[:] = 0\n self.tracker_data[x,y] = 1\n self.tracker_image.setImage(np.copy(self.tracker_data))\n except Exception as ex:\n print(\"Error: %s\" % ex)", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def Update(self, mode = UPDATE_MODE.all):\r\n aux_versions = dstore.Get(\"versions\")\r\n \r\n if(aux_versions['hw'] != None): \r\n Ui().lineHwVersion.setText(str(aux_versions['hw'])) \r\n else:\r\n Ui().lineHwVersion.setText(\"- -\")\r\n \r\n if(aux_versions['fw'] != None): \r\n Ui().lineFwVersion.setText(str(aux_versions['fw'])) \r\n else:\r\n Ui().lineFwVersion.setText(\"- -\") \r\n \r\n \r\n \r\n \"\"\" TERMINAL INFO \"\"\"\r\n aux_terminal_info = dstore.Get(\"terminal_info\", \"GET\")\r\n \r\n \"\"\" number of cells \"\"\"\r\n if(aux_terminal_info['number_of_cells'] != None):\r\n Ui().lineCells.setText(str(aux_terminal_info['number_of_cells'])) \r\n else:\r\n Ui().lineCells.setText(\"-\") \r\n \r\n \r\n \"\"\" battery \"\"\"\r\n if(aux_terminal_info['battery'] != None):\r\n Ui().lineBattery.setText(str(aux_terminal_info['battery'])+\" %\") \r\n else:\r\n Ui().lineBattery.setText(\"-- %\") \r\n \r\n \"\"\" speaker \"\"\" \r\n if(aux_terminal_info['speaker']['keys'] == True):\r\n Ui().lineSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['keys'] == False):\r\n Ui().lineSpeakerKeys.setText(\"OFF\")\r\n Ui().pushSpeakerKeys.setText(\"ON\")\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n 
Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerKeys.setText(\"- -\")\r\n Ui().pushSpeakerKeys.setText(\"- -\")\r\n \r\n if(aux_terminal_info['speaker']['system'] == True):\r\n Ui().lineSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['system'] == False):\r\n Ui().lineSpeakerSystem.setText(\"OFF\")\r\n Ui().pushSpeakerSystem.setText(\"ON\")\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n else:\r\n Ui().lineSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setText(\"- -\")\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['timing'] == True):\r\n Ui().lineSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n elif(aux_terminal_info['speaker']['timing'] == False):\r\n Ui().lineSpeakerTiming.setText(\"OFF\")\r\n Ui().pushSpeakerTiming.setText(\"ON\")\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n else: \r\n Ui().lineSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setText(\"- -\")\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n \r\n if(aux_terminal_info['speaker']['keys'] == None or aux_terminal_info['speaker']['timing']==None or aux_terminal_info['speaker']['system']==None): \r\n Ui().pushSpeakerKeys.setEnabled(False)\r\n Ui().pushSpeakerSystem.setEnabled(False)\r\n Ui().pushSpeakerTiming.setEnabled(False)\r\n else:\r\n Ui().pushSpeakerKeys.setEnabled(True)\r\n Ui().pushSpeakerSystem.setEnabled(True)\r\n Ui().pushSpeakerTiming.setEnabled(True)\r\n \r\n \r\n return True", "def _update(self):\n pass", "def update(self,\n args):\n super(WiderfaceDetMetaInfo, self).update(args)\n self.model_type = args.model_type\n if self.model_type == 1:\n self.receptive_field_center_starts = [3, 7, 15, 31, 63]\n self.receptive_field_strides = [4, 8, 16, 32, 64]\n self.bbox_factors = [10.0, 20.0, 40.0, 80.0, 160.0]\n else:\n self.receptive_field_center_starts = [3, 3, 7, 7, 15, 31, 31, 31]\n self.receptive_field_strides = [4, 4, 8, 8, 16, 32, 32, 32]\n self.bbox_factors = [7.5, 10.0, 20.0, 35.0, 55.0, 125.0, 200.0, 280.0]", "def update(self, new_gameStateData):\r\n pass", "def update(self):\n # GPS data\n self.model.GPS_latitude.set(self._kernel.data.lat)\n self.model.GPS_longitude.set(self._kernel.data.lon)\n \n self.model.GPS_heading.set(self._kernel.data.gps_heading)\n self.model.GPS_speed.set(self._kernel.data.speed)\n self.model.GPS_altitude.set(self._kernel.data.altitude)\n \n self.model.GPS_fix.set(self._kernel.data.fix)\n self.model.GPS_satellite_count.set(self._kernel.data.num_sat)\n \n # compass data\n self.model.compass_heading.set(self._kernel.data.compass_heading)\n \n # time data\n self.model.time.set(self._kernel.data.timestamp.isoformat())\n self.model.date.set(self._kernel.data.datestamp.isoformat())\n \n # other data\n self.model.temperature.set(self._kernel.data.temperature)", "def update(self):\n Enemy.update(self)\n self.update_movement()\n self.update_firing()\n self.surf = self.animation.next_animation()", "def updateDisplay(self):\n if self._displayPjt:\n self._displayPjt.updateim()\n if self._displayUsr:\n self._displayUsr.updateim()\n if self._displayVtk:\n self._displayVtk.updateim()", "def update(self):\n changes = {}\n for coord in INDICES: # the need for two for loops is necessary\n if self.chart[coord] == ALIVE and (\n self.number_of_neighbors(coord) < 2 or self.number_of_neighbors(coord) > 
3):\n changes[coord] = KILL\n elif self.number_of_neighbors(coord) == 3:\n changes[coord] = REVIVE\n for coord in changes.keys(): # because the evolution is discrete\n if changes[coord] == KILL:\n self.kill(coord)\n elif changes[coord] == REVIVE:\n self.givebirth(coord)", "def update(self):\n if self.api is None:\n return\n self.api.update()\n\n if self.var_type == 'Time':\n self.var_state = self.api.result['timeObservation']\n return\n result = self.api.result[self.var_type.lower()]\n if self.var_type == 'Sky':\n self.var_state = result['name']\n self.var_icon = get_sky_icon(result['code'])\n elif self.var_type == 'Temperature':\n self.var_state = round(float(result['tc']), 1)\n elif self.var_type == 'Humidity':\n self.var_state = result\n elif self.var_type == 'Wind':\n if self.var_detail == 'Direction':\n self.var_state = round(float(result['wdir']), 1)\n else:\n self.var_state = round(float(result['wspd']), 1)\n elif self.var_type == 'Precipitation':\n self.var_state = round(float(result['sinceOntime']), 1)\n p_type = result['type']\n if p_type == 0:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-sunny'\n elif p_type == 1:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-rainy'\n elif p_type == 2:\n self.var_units = 'mm'\n self.var_icon = 'mdi:weather-snowy'\n elif p_type == 3:\n self.var_units = 'cm'\n self.var_icon = 'mdi:weather-snowy-rainy'\n elif self.var_type == 'Pressure':\n if self.var_detail == 'Surface':\n self.var_state = round(float(result['surface']), 1)\n else:\n self.var_state = round(float(result['seaLevel']), 1)\n elif self.var_type == 'Lightning':\n if result == '1':\n self.var_state = 'Exist'\n else:\n self.var_state = 'None'", "def update(self, update_data):\n logger.info(update_data)\n self.x, self.y = update_data['coords']\n self.color = update_data['color']\n self.is_visible = update_data['is_visible']\n\n # todo: direction, state", "def updateWorld(self):\n\t self.screen.clear()\n self.update()\n self.screen.refresh()", "def update(self):\r\n pass", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def _update_objects(self):\n\t\tself.clouds.update()\n\t\tif self.is_play:\n\t\t\tself.floor.update()\n\t\t\tself.bolan.update()\n\t\t\tself.obstacles.update()\n\t\t\tself.scoreboard.update()", "def update(self):\n if self.__first:\n self.__first = False\n self.__map_data = self.__gui_handler.get_map_data()\n self.__next_data = self.__gui_handler.get_entities()\n labels = []\n\n # Découverte du terrain\n for terrain in SimUtils.get_terrains():\n 
self.__terrain.append(terrain.color)\n labels.append(StatItem(terrain.name, \"\", terrain.color))\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n # Ajout des labels de terrain\n for label in labels:\n self.__gui_handler.add_stat(label)\n\n # Remplissage de la carte avec les terrains.\n for i in range(0, self.__width):\n for j in range(0, self.__height):\n # Affichage du point.\n color = QColor(self.__terrain[self.__map_data.get_terrain_type(i,j)])\n self.__image.setPixel(i,j,color.rgb())\n\n # Permet de faire le tri entre les entités déjà rencontrées et les\n # autres.\n entity_types = {}\n\n # Liste des futurs labels\n labels = []\n\n # Découverte des entités - affectation des couleurs\n for entity in self.__next_data:\n # Ajout des labels de couleur pour les entités\n if not entity_types.has_key(entity.__name__):\n entity_types[entity.__name__] = True\n\n for label, color in entity._labels.iteritems():\n labels.append(StatItem(label, \"\", color))\n\n # Affichage de l'entité.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # Tri lexicographique des labels.\n labels.sort(key=lambda stat: stat._name)\n\n for label in labels:\n self.__gui_handler.add_stat(label)\n else:\n # Mise à jour du rendu\n for entity in self.__next_data:\n # Cas d'une entité désactivée (morte)\n remove_entity = not entity._is_active()\n if id(entity) not in self.positions:\n # Ajout de l'entité en cours de simulation\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())\n self.positions[id(entity)] = [entity._x,entity._y]\n\n # Le simulateur demande de repeindre l'entité\n old_points = self.positions[id(entity)]\n\n if not remove_entity:\n self.positions[id(entity)] = [entity._x, entity._y]\n\n # On remet la couleur du terrain.\n color = QColor(self.__terrain[self.__map_data.get_terrain_type(old_points[0], old_points[1])])\n self.__image.setPixel(old_points[0], old_points[1], color.rgb())\n\n if not remove_entity:\n # Ajout des paramètres de setPixel dans une liste pour être ploté après.\n self.__image.setPixel(entity._x, entity._y, QColor(entity._color).rgb())", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n 
y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def updateInterface(self):\n p = self.cxn[self.selectedADR].packet()\n p.magnetv().pscurrent().psvoltage()\n p.time()\n p.temperatures()\n p.get_state_var('CompressorStatus')\n p.get_instrument_state()\n state = yield p.send()\n # change instrument statuses\n for name,status in state['get_instrument_state']:\n if status[0] == False: color = 'red3'\n elif status[1] == False: color = 'orange3'\n elif status[1] == True: color = 'green3'\n else: color = 'gray70'\n self.instrumentStatuses[name].config(bg=color)\n # change compressor button\n if state['get_state_var'] == True:\n self.compressorButton.configure(text='Stop Compressor',\n command=self.stopCompressor,\n state=Tkinter.NORMAL)\n elif state['get_state_var'] == False:\n self.compressorButton.configure(text='Start Compressor',\n command=self.startCompressor,\n state=Tkinter.NORMAL)\n else: self.compressorButton.configure(state=Tkinter.DISABLED)\n # update current, voltage fields\n temps = {}\n stages = ('T_60K','T_3K','T_GGG','T_FAA')\n for i in range(len(stages)):\n temps[stages[i]] = state['temperatures'][i]\n #if temps[stages[i]] == 'nan': temps[stages[i]] = numpy.nan\n if numpy.isnan(state['magnetv']['V']):\n emf = 'ERR'\n else:\n emf = \"{0:.3f}\".format(state['magnetv']['V'])\n if numpy.isnan(state['pscurrent']['A']):\n psI = 'PS OFF'\n else:\n psI = \"{0:.3f}\".format(state['pscurrent']['A'])\n if numpy.isnan(state['psvoltage']['V']):\n psV = 'PS OFF'\n else:\n psV = \"{0:.3f}\".format(state['psvoltage']['V'])\n self.currentBackEMF.set( emf )\n self.currentI.set( psI )\n self.currentV.set( psV )\n # update plot:\n # change data to plot\n self.stage60K.set_xdata(numpy.append(self.stage60K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage60K.set_ydata(numpy.append(self.stage60K.get_ydata(),temps['T_60K']['K']))\n self.stage03K.set_xdata(numpy.append(self.stage03K.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stage03K.set_ydata(numpy.append(self.stage03K.get_ydata(),temps['T_3K']['K']))\n self.stageGGG.set_xdata(numpy.append(self.stageGGG.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageGGG.set_ydata(numpy.append(self.stageGGG.get_ydata(),temps['T_GGG']['K']))\n self.stageFAA.set_xdata(numpy.append(self.stageFAA.get_xdata(),mpl.dates.date2num(state['time'])))\n self.stageFAA.set_ydata(numpy.append(self.stageFAA.get_ydata(),temps['T_FAA']['K']))\n #update plot\n self.updatePlot()\n # update legend\n labelOrder = ['T_60K','T_3K','T_GGG','T_FAA']\n lines = [self.stage60K,self.stage03K,self.stageGGG,self.stageFAA]\n labels = [l.strip('T_')+' ['+\"{0:.3f}\".format(temps[l]['K'])+'K]' for l in labelOrder]\n labels = [s.replace('1.#QOK','OoR') for s in labels]\n # legend on top (if not using this, delete \\n in title)\n self.ax.legend(lines,labels,bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=4, 
mode=\"expand\", borderaxespad=0.)", "def _update_(self,update_background=True):\n # -- Make sure the fundamental update (if any) are made\n super(Image,self)._update_()\n # - Data\n self._update_data_(update_background=update_background)", "def update(self):\n self.wc.update()", "def updateWorld(self):\n pass", "def update_E(self):", "def updatefunction(self):\n self.arcdisplay.ws.order=int(self.orderValueEdit.text())\n self.arcdisplay.ws.function=self.funcComboBox.currentText()\n self.arcdisplay.ws.set_func()\n self.arcdisplay.findfit()", "def update(self):\n cv2.imshow(self.window_name, self.map.get_crop())", "def update(self):\r\n if self._block.info_values is not None:\r\n self._state = self._block.info_values.get(self._sensor_name, None)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def onFrameUpdated(self):\n pass" ]
[ "0.72574514", "0.6297322", "0.62271756", "0.6138882", "0.6094875", "0.6022466", "0.6002216", "0.6002216", "0.5986893", "0.5943191", "0.59352255", "0.59275275", "0.59257054", "0.5873895", "0.58552265", "0.5850752", "0.5833824", "0.57953125", "0.5777924", "0.5773839", "0.5770586", "0.57686067", "0.57650787", "0.57644564", "0.5761846", "0.57578784", "0.5749106", "0.5747303", "0.57137436", "0.5701846", "0.5700851", "0.56993496", "0.5694285", "0.5693565", "0.5688838", "0.5679965", "0.56783277", "0.56679744", "0.56679136", "0.56574106", "0.56537", "0.56519055", "0.562398", "0.5621353", "0.5614511", "0.5614511", "0.5614511", "0.56115043", "0.5603097", "0.56018704", "0.56018704", "0.55976063", "0.55932534", "0.55818367", "0.55560565", "0.5550177", "0.55435497", "0.55388725", "0.5516407", "0.55126435", "0.5512263", "0.5512263", "0.55025476", "0.549999", "0.54895437", "0.54755664", "0.5474662", "0.5473986", "0.5466632", "0.5462498", "0.5462476", "0.5457757", "0.5450635", "0.544242", "0.54409444", "0.5437421", "0.5432024", "0.5427013", "0.5426842", "0.5421666", "0.5410846", "0.54099137", "0.5407371", "0.5406371", "0.54026145", "0.5397886", "0.53938544", "0.53888863", "0.53719693", "0.5363924", "0.53582156", "0.5352996", "0.535235", "0.5350562", "0.5341282", "0.53379995", "0.5335932", "0.5334712", "0.53328407", "0.5332316" ]
0.750728
0
Set the 3D complex data represented by this item. Dataset order is zyx (i.e., first dimension is z).
def setData(self, data, copy=True): if data is None: self._data = None self._dataRangeCache = None self._boundedGroup.shape = None else: data = numpy.array(data, copy=copy, dtype=numpy.complex64, order='C') assert data.ndim == 3 assert min(data.shape) >= 2 self._data = data self._dataRangeCache = {} self._boundedGroup.shape = self._data.shape self._updated(ItemChangedType.DATA)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setitem3d(self, index, value):\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iy, slice): sss[1:1] = [1]\n if not isinstance(iz, slice): sss[2:2] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value", "def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def is3_d(self, is3_d):\n\n self.container['is3_d'] = is3_d", "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def MakeCoordinates3D(self):\n\n self.points = np.concatenate((self.points, np.zeros((self.points.shape[0],1)) ), axis=1)\n self.points = np.ascontiguousarray(self.points)", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. 
Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def f3z1(self, f3z1):\n\n self._f3z1 = f3z1", "def __setitem__(self, i, value):\n if i < X:\n raise IndexError(\"point3d::__setitem__: negative index {0}\".format(i))\n if i == X:\n self._x = value\n return\n if i == Y:\n self._y = value\n return\n if i == Z:\n self._z = value\n return\n # beyond Z\n raise IndexError(\"point3d::__setitem__: index too large {0}\".format(i))", "def plot3d(self):\n plot_rupture_wire3d(self)", "def SetDataSlice(vDataSet,arr,aIndexZ,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n s = s.swapaxes(0,1)\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSliceBytes\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataSliceShorts\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataSliceFloat32\r\n\r\n SetData(s,aIndexZ,aIndexC,aIndexT)\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r", "def three_dimensional(self, z): # 
Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)", "def setView3D(x,y,z, viewtype='absolute'):\n vdict = {'absolute':'ABS','user':'USER','angle':'ANGLE'}\n dislin.view3d(x,y,z,vdict[viewtype])", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data", "def transform(self, data):\n self.cube = self.trf.transform(data)", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def test_3d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/data/test%03d.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback_3D(dic,data)", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def setAxisLengths3D(x=2.,y=2.,z=2.):\n dislin.axis3d(x,y,z)", "def __init__(self, xx: float or Vec3 or 'Mat33' = 0.0, xy: float or Vec3 = 0.0, xz: float or Vec3 = 0.0,\n yx: float = 0.0, yy: float = 0.0, yz: float = 0.0, zx: float = 0.0, zy: float = 0.0, zz: float = 0.0):\n\n if isinstance(xx, Mat33):\n self.data = xx.data.copy()\n else:\n self.data = [xx, xy, xz, yx, yy, yz, zx, zy, zz]", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n 
# This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def set_voxel(self, x, y, z, value, ignore=True):\n try:\n if isinstance(y, list):\n y_trans = [self._y_shift - item for item in y]\n # check coordinate validation\n coord_list = [(x[i], y_trans[i], z[i]) for i in range(len(x))]\n coord_list = [c for c in coord_list if c[0]>=0 and \n c[0]<self.get_data_shape()[0] and\n c[1]>=0 and\n c[1]<self.get_data_shape()[1] and\n c[2]>=0 and\n c[2]<self.get_data_shape()[2]]\n x = [c[0] for c in coord_list]\n y_trans = [c[1] for c in coord_list]\n z = [c[2] for c in coord_list]\n if self.is_4d():\n orig_data = self._data[y_trans, x, z, self._time_point]\n else:\n orig_data = self._data[y_trans, x, z]\n if np.any(orig_data != 0) and not ignore:\n force = QMessageBox.question(None, \"Replace?\",\n \"Would you like to replace the original values?\",\n QMessageBox.Yes,\n QMessageBox.No)\n if force == QMessageBox.No:\n return\n if self.is_4d():\n self.undo_stack.push((x, y, z, self._data[y_trans, x, z,\n self._time_point]))\n self._data[y_trans, x, z, self._time_point] = value\n else:\n self.undo_stack.push((x, y, z, self._data[y_trans, x, z]))\n self._data[y_trans, x, z] = value\n try:\n for z_ in range(min(z), max(z)+1):\n self.update_rgba(z_)\n except TypeError:\n self.update_rgba(z)\n if self._cross_pos:\n self.update_orth_rgba()\n except:\n raise\n print \"Input coordinates are invalid.\"", "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_D3(self, *args)", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def __init__(self, x, y, z, dx, dy, dz, *args, **kwargs):\n super().__init__((0, 0), (0, 0), *args, **kwargs)\n self.set_data(x, y, z, dx, dy, dz)", "def axis3D(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep):\n dislin.graf3d(xlow,xhigh,xfirst,xstep,ylow,yhigh,yfirst,ystep,\\\n zlow,zhigh,zfirst,zstep)", "def __setitem__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___setitem__(self, *args)", "def _set_data(self, value):\n if len(value.shape) == 1:\n if self.index_dimension == 0:\n value = value[:,newaxis]\n else:\n value = value[newaxis,:]\n\n if len(value.shape) != 2:\n msg = 'Input is %d dimensional, but it must be 1 or 2' \\\n 'dimensional.' 
% len(value.shape)\n raise ValueError, msg\n\n self._data = value", "def _set_data(self, polyhedron, data):\n assert polyhedron.parent() is self._polyhedron_parent\n if len(data) != self._vector.degree():\n raise ValueError('V-representation data requires a list of length ambient_dim')\n\n self._vector[:] = data\n\n self._index = len(polyhedron._Vrepresentation)\n polyhedron._Vrepresentation.append(self)\n self._polyhedron = polyhedron", "def getData(self, copy=True, mode=None):\n if mode is None:\n return super(ComplexField3D, self).getData(copy=copy)\n else:\n return self._convertComplexData(self._data, mode)", "def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)", "def _setLayer(items, layer):\n for i in items:\n i.setZValue(layer)", "def EncodeMorton3D(x, y, z):\r\n return Expand3D(x) + (Expand3D(y) << 1) + (Expand3D(z) << 2)", "def test_simple_3d(self):\r\n a = tt.dtensor3()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n sl3 = 2\r\n\r\n for do_set in [True, False]:\r\n print \"Set\", do_set\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl3, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl3, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 3, 4))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n expected_result = numpy.copy(val_a)\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n if do_set:\r\n expected_result[:, sl3, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, sl3, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def _set_data(self, polyhedron, data):\n assert polyhedron.parent() is self._polyhedron_parent\n if len(data) != self._vector.degree():\n raise ValueError('H-representation data requires a list of length ambient_dim+1')\n\n self._vector[:] = data\n self._A[:] = data[1:]\n self._b = self._base_ring(data[0])\n\n self._index = len(polyhedron._Hrepresentation)\n polyhedron._Hrepresentation.append(self)\n self._polyhedron = polyhedron", "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def is3D(data):\n return data.find(\"x3\") != -1 and data.find(\"y3\") != -1 and data.find(\"z3\") != -1", "def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = 
(int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )", "def __init__(self, _x, _y, _z):\n self.position = Position3d(int(_x), int(_y), int(_z))\n self.velocity = Velocity3d(0, 0, 0)", "def setZ(self, z):\n self.position.setZ(z)", "def __init__(self, xyz, header=None):\n # Coerce None to empty array\n if xyz is None:\n xyz = [[], [], []]\n \n # Store points as 3*n array\n x, y, z = xyz # ensure only 3 coordinates\n self._arr = np.stack([x, y, z])\n\n if header is not None:\n self._header = header", "def __init__(self, channels):\n super(PositionalEncodingPermute3D, self).__init__()\n self.penc = PositionalEncoding3D(channels)", "def cubify(\n arr: xr.DataArray,\n *spatial_dims: str,\n pixel_dim: Hashable = 'pixel'\n ):\n if not spatial_dims:\n spatial_dims = ('x', 'y')\n cube = arr.set_index({pixel_dim: spatial_dims}).unstack(pixel_dim) # type: ignore[union-attr]\n for d in spatial_dims:\n cube.coords[d].attrs = arr.coords[d].attrs\n return cube", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def setBorder3D():\n dislin.box3d()", "def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z", "def appendlistdata_f3xyzf3rgb(self, x, y, z, r, g, b):\n pass", "def test_3d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n db.close()\n\n date = pd.to_datetime('2015-04-01')\n self.full_iv.get_data()\n df_date0 = self.full_iv.df_all.query('date == %r' % date)\n df_date1 = df_iv.query('date == %r' % date)\n df_date = pd.concat([df_date0, df_date1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n x = df_date['dte']\n y = df_date['strike']\n z = df_date['impl_vol']\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n # noinspection PyUnresolvedReferences\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n # ax.plot_wireframe(x, y, z, rstride=1, cstride=1)\n plt.show()", "def is3_d(self):\n return 
self.container['is3_d']", "def SetData(self, data_):\n return _hypre.HypreParVector_SetData(self, data_)", "def xyz(self) -> np.ndarray:\n return np.vstack((self.x, self.y, self.z)).transpose()", "def xyz(self):\n xyz = np.zeros((len(self), 3))\n\n xyz[:len(self.qc_mol), ...] = self.qc_mol.xyz\n xyz[len(self.qc_mol):len(self.qc_mol) + len(self.br_mol), ...] = self.br_mol.xyz\n xyz[-len(self.pc_mol):, ...] = self.pc_mol.xyz\n\n return xyz", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def visualise_data_pca_3d_movie(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d_movie(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d_movie(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)", "def scale_on_3d(x3d, scaler):\n (n_segs, n_concat, n_freq) = x3d.shape\n x2d = x3d.reshape((n_segs * n_concat, n_freq))\n x2d = scaler.transform(x2d)\n x3d = x2d.reshape((n_segs, n_concat, n_freq))\n return x3d", "def visualise_data_pca_3d(self, component1, component2, component3, input_data=False):\n if input_data:\n self.__generate_input_data()\n pca_3d(array(self.input_data), component1, component2, component3, self.class_indices, self.path,\n 'high_dimension_data', self.legend)\n else:\n self.__generate_output_data()\n pca_3d(array(self.output_data), component1, component2, component3, self.class_indices, self.path,\n 'low_dimension_data', self.legend)", "def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = data[0][0],data[0][1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])", "def SetRange3d(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_SetRange3d(self, *args)", "def z(self):\n return self[:, 2]", "def z(self, value=None):\n if isinstance(value, (int, float)):\n self[2] = value\n else:\n if value is not None:\n raise 
TypeError(\"Cannot be set to {}\".format(type(value)))\n return self[2]", "def setFocus3D(x,y,z, focustype='absolute'):\n fdict = {'absolute':'ABS','user':'USER'}\n dislin.vfoc3d(x,y,z,fdict[focustype])", "def set_data(self, data):\n self.__data = np.asarray(data, dtype=np.float32)\n if data is not None:\n self.account(data)\n return self", "def putdata(self, dat, scale=1.0, offset=0.0):\r\n data = np.array(dat)\r\n data = data * scale + offset\r\n channels, depth = self._get_channels_and_depth(self._mode)\r\n siz = self.size\r\n _im = np.ravel(self._instance)\r\n data = data[:len(_im)]\r\n _im = _im[:len(data)] = data\r\n self._instance = _im.reshape((siz[1], siz[0], channels))\r\n self._instance = self._instance.astype(depth)", "def test_3d_steam_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n write_readback(dic,data)", "def xyz(self: Q) -> np.array:\n\n return np.array([self.x, self.y, self.z])", "def ipset_y_3d():\n return IPSet(x=np.linspace(0, 10, 11), y=np.random.randn(11, 2, 5), x_new=np.linspace(1, 4, 3))", "def on_plot_3d(self, event):\n data = self._get_data_selection(event)\n from sas.sasgui.guiframe.local_perspectives.plotting.masking \\\n import FloatPanel as Float3dDialog\n\n panel = Float3dDialog(base=self, data=data,\n dimension=3, id=wx.NewId())\n panel.ShowModal()", "def generate_data_mayavi(self):\n from enthought.mayavi.sources.api import ParametricSurface\n from enthought.mayavi.modules.api import Outline, Surface \n from enthought.mayavi.filters.api import WarpVector\n from enthought.mayavi.sources.vtk_data_source import VTKDataSource\n from enthought.tvtk.api import tvtk\n from numpy import array\n e = self.scene.engine\n# s = ParametricSurface()\n# e.add_source(s)\n# e.add_module(Outline())\n# e.add_module(Surface())\n # The numpy array data.\n #points = array([[0,0,0], [1,0,0], [0,1,0], [0,0,1]], 'f')\n points = array([[0,0,0], [1,0,0], [1,1,0], [0,1,0]], 'f')\n warp = array([[0,0,0], [100,0,0], [1,1,0], [0,1,0]])\n deformation = tvtk.DoubleArray()\n deformation.number_of_components = 3\n deformation.number_of_tuples = 4\n deformation.set_tuple3(0,0.,0.,0)\n deformation.set_tuple3(1,20.,-5.,0.)\n deformation.set_tuple3(2,15.,3.,0.)\n deformation.set_tuple3(3,-4.,2.,0)\n #triangles = array([[0,1,3], [0,3,2], [1,2,3], [0,2,1]])\n triangles = array([[0,1,2,3]])\n temperature = array([10., 20., -20., 10.])\n # The TVTK dataset.\n mesh = tvtk.PolyData(points=points, polys=triangles)\n #mesh = tvtk.UnstructuredGrid(points=points)\n #cel_type = 7\n #mesh.set_cells(cel_type, triangles)\n #mesh.point_data.scalars = temperature\n #mesh.point_data.scalars.name = 'Temperature'\n mesh.point_data.vectors = warp\n src = VTKDataSource(data = mesh)\n e.add_source(src)\n e.add_filter(WarpVector())\n e.add_module(Outline())\n e.add_module(Surface())", "def __setitem__(self, index, value):\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._setitem2d(index, value)\n elif self.dim == 3:\n return self._setitem3d(index, value)", "def __init__(self, x = np.float32(0.0), y = 
np.float32(0.0), z = np.float32(0.0)):\n\n self._x = np.float32( x )\n self._y = np.float32( y )\n self._z = np.float32( z )", "def place(self, x: _vector_like = _null_vector, y: _vector_like = _null_vector,\n z: _vector_like = _null_vector):\n transform = Matrix3D.create()\n transform.translation = Vector3D.create(x.x, y.y, z.z)\n self._local_transform.transformBy(transform)\n self._reset_cache()\n return self", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def set_image_data(self, data_file):\n # TODO: support other file formats, like hd5 and maybe raw binary?\n import scipy.io\n self.image_data = 
np.atleast_3d(scipy.io.loadmat(data_file).values()[0])\n if self.image_data.ndim == 3:\n self.image_data = self.image_data.reshape(self.image_data.shape + (1,))\n # TODO: confirm that this voxel reordering is necessary. Maybe lean on the recon\n # folks to standardize thier voxle order? Might also look at\n self.image_data = self.image_data.transpose((1,0,2,3))[::-1,:,::-1,:]\n\n if self.image_data.shape[0] != self.size_x or self.image_data.shape[1] != self.size_y:\n msg = 'Image matrix discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.size_x = self.image_data.shape[0]\n self.size_y = self.image_data.shape[1]\n self.mm_per_vox[0] = float(self.fov[0] / self.size_x)\n self.mm_per_vox[1] = float(self.fov[1] / self.size_y)\n if self.image_data.shape[2] != self.num_slices:\n msg = 'Image slice count discrepancy. Fixing the header, assuming image_data is correct...'\n self.log and self.log.warning(msg) or print(msg)\n self.num_slices = self.image_data.shape[2]\n if self.image_data.shape[3] != self.num_timepoints:\n msg = 'Image time frame discrepancy (header=%d, array=%d). Fixing the header, assuming image_data is correct...' % (self.num_timepoints, self.image_data.shape[3])\n self.log and self.log.warning(msg) or print(msg)\n self.num_timepoints = self.image_data.shape[3]", "def plot_3d(self, ax_3d: Axes3D, lims_x: array_like = (-1, 1), lims_y: array_like = (-1, 1), **kwargs) -> None:\n X, Y, Z = self.to_mesh(lims_x, lims_y)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)", "def __init__(self, path: str = None, buffer: io.BytesIO = None):\r\n if not path and not buffer:\r\n raise ValueError(\"Must supply path or buffer\")\r\n if buffer:\r\n memory = buffer.getvalue()\r\n buffer.close()\r\n path = tempfile.TemporaryFile()\r\n else:\r\n memory = None\r\n\r\n with Dataset(path, memory=memory) as nc:\r\n keys = list(nc.variables.keys())\r\n self.x = nc.variables[keys[0]][:].filled()\r\n self.y = nc.variables[keys[1]][:].filled()\r\n self.layers = nc.variables[keys[2]][:].filled()\r\n self.values = nc.variables[keys[3]][:].filled()\r\n self.crs = nc.crs\r\n\r\n if np.sign(np.diff(self.y).mean()) > 0:\r\n self.values = np.flip(self.values, axis=1)\r\n self.y = np.flip(self.y)\r\n\r\n if self.layers.dtype == np.float64 and self.layers.mean() > 1e9:\r\n self.layers = np.datetime64(\"1970-01-01\") + self.layers.astype(\r\n \"timedelta64[s]\"\r\n )\r\n\r\n self.dimensions = {\r\n \"x\": len(self.x),\r\n \"y\": len(self.y),\r\n \"layers\": len(self.layers),\r\n }\r\n self.resolution = {\r\n \"x\": round(np.abs(np.diff(self.x).mean()), 8),\r\n \"y\": round(np.abs(np.diff(self.y).mean()), 8),\r\n }\r\n self.extent = {\r\n \"xmin\": round(self.x.min() - self.resolution[\"x\"] / 2, 8),\r\n \"xmax\": round(self.x.max() + self.resolution[\"x\"] / 2, 8),\r\n \"ymin\": round(self.y.min() - self.resolution[\"y\"] / 2, 8),\r\n \"ymax\": round(self.y.max() + self.resolution[\"y\"] / 2, 8),\r\n }", "def __init__(self, unit_vector_3d):\n \n self.unit_vector = unit_vector_3d\n transposed_uv = np.transpose(self.unit_vector)\n self.x = transposed_uv[0] \n self.y = transposed_uv[1] \n self.z = transposed_uv[2]\n self.d = SkyCoord(self.x, self.y, self.z, \n unit = 'mpc', \n representation_type = 'cartesian', \n frame = 'icrs')\n self.d.representation_type = 'spherical'\n self.lons = self.d.galactic.l.wrap_at(360 * u.deg).deg\n self.lats = self.d.galactic.b.wrap_at(180 * u.deg).deg", "def __init__(self,x=0,y=0,z=0):\n self.x = x\n self.y = y\n self.z = z", 
"def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def SetDataVolume(vDataSet,arr,aIndexC,aIndexT):\r\n nx = vDataSet.GetSizeX()\r\n ny = vDataSet.GetSizeY()\r\n nz = vDataSet.GetSizeZ()\r\n dtype = GetType(vDataSet)\r\n\r\n if DEBUG:\r\n print(\"SetDataVolume\")\r\n print(\"vDataSet:\",(nz,ny,nx),GetType(vDataSet))\r\n print(arr.shape)\r\n print(arr.dtype)\r\n print(aIndexC)\r\n print(aIndexT)\r\n\r\n #Make sure the data is in range and convert the array\r\n s = arr\r\n if dtype != arr.dtype:\r\n miset,maset = GetTotalRange(vDataSet)\r\n arr[arr<miset]=miset\r\n arr[arr>maset]=maset\r\n s = arr.astype(dtype)\r\n\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayBytes\r\n s = s.tostring()\r\n elif dtype == np.uint16:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayShorts\r\n s = np.ravel(s)\r\n elif dtype == np.float32:\r\n SetData = vDataSet.SetDataVolumeAs1DArrayFloats\r\n s = np.ravel(s)\r\n SetData(s,aIndexC,aIndexT)\r\n\r\n if 0:\r\n #Old method slice by slice\r\n if dtype == np.uint8:\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayBytes\r\n elif dtype == np.uint16:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayShorts\r\n elif dtype == np.float32:\r\n s = np.ravel(s)\r\n SetData = vDataSet.SetDataSubVolumeAs1DArrayFloats\r\n\r\n for z in range(nz):\r\n t = time.time()\r\n l = arr[z,...].swapaxes(0,1).tostring()\r\n SetData(l,0,0,z,aIndexC,aIndexT,nx,ny,1)\r\n print z,time.time()-t\r\n\r\n #vDataSet.SetChannelRange(aIndexC,miset,maset)\r", "def I3_u3(self) -> complex:\n return self.I3_u1() * cmath.rect(1, 120 / 180 * cmath.pi)", "def Has3d(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Has3d(self, *args)", "def setDataset(self,dataset):\n self.__dataSet = dataset", "def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])", "def create3D( self , ecl_kw , default = 0):\n if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():\n array = numpy.ones( [ self.getGlobalSize() ] , dtype = ecl_kw.dtype) * default\n kwa = ecl_kw.array\n if len(ecl_kw) == self.size:\n for i in range(kwa.size):\n array[i] = kwa[i]\n else:\n data_index = 0\n for global_index in range(self.getGlobalSize()):\n if self.active( global_index = global_index ):\n array[global_index] = kwa[data_index]\n data_index += 1\n \n array = array.reshape( [self.getNX() , self.getNY() , self.getNZ()] , order = 'F')\n return array\n else:\n raise ValueError(\"Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d\" % (ecl_kw.name , ecl_kw.size , self.nactive ,self.size))", "def set_element_dimensions(self, size_x, size_y, size_z):\n size_x = 1.0 * size_x\n size_y = 1.0 * size_y\n size_z = 1.0 * size_z\n x = np.repeat(size_x, self.numelements)\n y = np.repeat(size_y, self.numelements)\n z = np.repeat(size_z, self.numelements)\n self.dimensions = g.Points.from_xyz(x, y, z)\n return self", "def render_vertices_3d(self, **kwds):\n return point3d(self.coordinates_of(self.points), **kwds)", "def SetPyData(self, item, data):\r\n\r\n item.SetData(data)", "def _setitem2d(self, index, value):\n ix = index[0]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(fields[0].shape) > self.dim:\n 
ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iz, slice): sss[1:1] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def set_equal_3d_axis(ax, x_lims, y_lims, z_lims):\n x_lims = np.asarray(x_lims)\n y_lims = np.asarray(y_lims)\n z_lims = np.asarray(z_lims)\n # compute max required range\n max_range = np.array([x_lims.max() - x_lims.min(),\n y_lims.max() - y_lims.min(),\n z_lims.max() - z_lims.min()]).max() / 2.0\n # compute mid-point along each axis\n mid_x = (x_lims.max() + x_lims.min()) * 0.5\n mid_y = (y_lims.max() + y_lims.min()) * 0.5\n mid_z = (z_lims.max() + z_lims.min()) * 0.5\n\n # set limits to axis\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)", "def test_3d_steam_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback(dic,data)", "def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))", "def zoomData(self, factor, neariso=False):\n \n new_data = np.zeros( (self.data.shape[0], self.data.shape[1], self.data.shape[2]*(2**factor), self.data.shape[3]*(2**factor)), dtype=self.data.dtype)\n for time_index in range(self.data.shape[0]):\n for z_index in range(self.data.shape[1]):\n new_data[time_index, z_index, :, :] = np.asarray(self.frombuffer(self.data[time_index, z_index, :, :]).resize([new_data.shape[3], new_data.shape[2]]))\n self.data = new_data", "def 
setGrid3D(nlinesx, nlinesy, planetype='all'):\n pdict = {'all':'ALL','back':'BACK','bottom':'BOTTOM'}\n dislin.grid3d(nlinesx, nlinesy, pdict[planetype])" ]
[ "0.6401612", "0.6063972", "0.6011041", "0.5975089", "0.5953853", "0.59492946", "0.5900088", "0.58977884", "0.58162755", "0.5780997", "0.57658124", "0.5763134", "0.5757174", "0.57559294", "0.5752819", "0.5680372", "0.56681836", "0.56602615", "0.5607443", "0.5574102", "0.55590284", "0.5548264", "0.5526642", "0.5514295", "0.5508868", "0.5496145", "0.5486998", "0.547999", "0.5465853", "0.5464128", "0.5453121", "0.5446278", "0.5436904", "0.54041785", "0.5391128", "0.5384943", "0.53727657", "0.5351012", "0.53304315", "0.53226256", "0.5320594", "0.5319011", "0.5315851", "0.5302566", "0.5286373", "0.52828574", "0.5282089", "0.52769256", "0.5253151", "0.52474856", "0.52465475", "0.52440876", "0.5238226", "0.52295923", "0.52123296", "0.5211759", "0.52111137", "0.5204815", "0.5191586", "0.51898986", "0.518735", "0.5178719", "0.5178358", "0.5173105", "0.51666325", "0.5166339", "0.51565343", "0.5151198", "0.5146602", "0.5142056", "0.5141742", "0.5136296", "0.51294404", "0.5127827", "0.51232845", "0.5118988", "0.5117121", "0.51167417", "0.51165664", "0.50875425", "0.5086885", "0.5080687", "0.50734884", "0.50711554", "0.5069764", "0.5069016", "0.5049141", "0.50434774", "0.50424725", "0.50363606", "0.5028498", "0.5026802", "0.5023133", "0.501618", "0.50041944", "0.5003259", "0.4997569", "0.49919322", "0.49869204", "0.49861625" ]
0.5670554
16
Return the 3D dataset. This method does not cache data converted to a specific mode; it computes it for each request.
def getData(self, copy=True, mode=None):
        if mode is None:
            return super(ComplexField3D, self).getData(copy=copy)
        else:
            return self._convertComplexData(self._data, mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def get_dataset(self, cid, type=\"train\"):\n dataset = torch.load(\n os.path.join(self.path, type, \"data{}.pkl\".format(cid)))\n return dataset", "def dataset(self):\n return self.predictor_data_manager.dataset(\n self.data_name, self.trait_name, data=self._data, trait=self.trait,\n categorical_trait=self.categorical_trait)", "def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def dataset(self) -> np.ndarray:\n if self._cache_dataset_list:\n # Concatenates the `self._dataset` and the datasets in\n # `self._cache_dataset_list`.\n if self._dataset.size > 0:\n dataset_list = [self._dataset] + self._cache_dataset_list\n else:\n dataset_list = self._cache_dataset_list\n\n self._dataset = np.vstack(dataset_list)\n self._cache_dataset_list = []\n return self._dataset", "def get_dataset(self, therm_frac=0., make_plots=False) -> (xr.Dataset):\n data_vars = {}\n for key, val in self.data.items():\n arr = np.array(val)\n steps = np.arange(len(arr))\n if therm_frac > 0:\n arr, steps = therm_arr(arr, therm_frac=therm_frac)\n if len(arr.shape) == 1:\n data_vars[key] = xr.DataArray(arr, dims=['draw'],\n coords=[steps])\n elif len(arr.shape) == 3:\n arr = arr.T\n num_chains, num_lf, _ = arr.shape\n dims = ['chain', 'leapfrog', 'draw']\n coords = [np.arange(num_chains), np.arange(num_lf), steps]\n data_vars[key] = xr.DataArray(arr, dims=dims, coords=coords)\n else:\n chains = np.arange(arr.shape[1])\n data_vars[key] = xr.DataArray(arr.T, dims=['chain', 'draw'],\n coords=[chains, steps])\n\n return xr.Dataset(data_vars)", "def get_dataset(self):\n return", "def convert_1d_to_3d(data_X, data_Y):\n\n data_X = data_X.tocsr()\n \n data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))\n data_dim_x_label = [] # contains (total_trials * dim_x) labels\n data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))\n data_dim_y_label = [] # contains (total_trials * dim_y) labels\n data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))\n data_dim_z_label = [] # contains (total_trials * dim_z) labels\n\n for num_trial in range(data_X.shape[0]):\n label = data_Y[num_trial]\n data_1d = data_X[num_trial]\n data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))\n for x in range(dim_x):\n x_slice = data_3d[:,:,x]\n # append only if the slice is not empty \n if x_slice.sum() != 0:\n data_dim_x.append(data_3d[:, :, x])\n data_dim_x_label.append(label)\n for y in range(dim_y):\n y_slice = data_3d[:, y, :]\n if y_slice.sum() != 0:\n data_dim_y.append(data_3d[:, y, :])\n data_dim_y_label.append(label)\n for z in range(dim_z):\n z_slice = data_3d[:, :, z]\n if z_slice.sum() != 0:\n data_dim_z.append(data_3d[z, :, :])\n data_dim_z_label.append(label)\n\n return np.array(data_dim_x), np.array(data_dim_x_label), \\\n np.array(data_dim_y), np.array(data_dim_y_label), \\\n np.array(data_dim_z), np.array(data_dim_z_label)", "def _get_dataset(self):\n if self.mode == 
'train':\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .shuffle(buffer_size=self.num_samples, reshuffle_each_iteration=True)\n .map(map_func=self.import_waveforms_fn_train, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )\n else:\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .map(map_func=self.import_waveforms_fn_val, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )", "def __get_dataset(self):\n # Disable RasterIO logging, just show ERRORS\n log = rio_logging.getLogger()\n log.setLevel(rio_logging.ERROR)\n\n try:\n # Get dataset\n tmp_ds = xr.open_rasterio(self.fname)\n tmp_ds = None ; del tmp_ds\n except rio.errors.RasterioIOError as e:\n raise e\n\n chunks = get_chunk_size(self.fname)\n data_array = xr.open_rasterio(self.fname, chunks=chunks)\n\n data_array = data_array.rename(\n {'x': 'longitude',\n 'y': 'latitude',\n 'band': 'time'})\n\n # Check if file is a VRT\n name, extension = os.path.splitext(self.fname)\n if extension.lower() == '.vrt':\n times = get_times(self.fname)\n else:\n times = get_times_from_file_band(self.fname)\n data_array['time'] = times\n\n # Check that _FillValue is not NaN\n if data_array.nodatavals[0] is np.NaN:\n # Use _FillValue from band metadata\n _fill_value = get_fill_value_band_metadata(self.fname)\n\n data_array.attrs['nodatavals'] = \\\n tuple(np.full((len(data_array.nodatavals))\n ,_fill_value))\n\n # Create new dataset\n self.dataset_name = self.__get_dataset_name()\n dataset = data_array.to_dataset(name=self.dataset_name)\n\n # Back to default logging settings\n logging.basicConfig(level=logging.INFO)\n\n # Set self.data\n self.data = dataset", "def get_dataset(cfg,\n augmentor,\n mode='train',\n rank=None,\n dataset_class=VolumeDataset,\n dataset_options={},\n dir_name_init: Optional[list] = None,\n img_name_init: Optional[list] = None):\n assert mode in ['train', 'val', 'test']\n\n sample_label_size = cfg.MODEL.OUTPUT_SIZE\n topt, wopt = ['0'], [['0']]\n if mode == 'train':\n sample_volume_size = augmentor.sample_size if augmentor is not None else cfg.MODEL.INPUT_SIZE\n sample_label_size = sample_volume_size\n sample_stride = (1, 1, 1)\n topt, wopt = cfg.MODEL.TARGET_OPT, cfg.MODEL.WEIGHT_OPT\n iter_num = cfg.SOLVER.ITERATION_TOTAL * cfg.SOLVER.SAMPLES_PER_BATCH\n if cfg.SOLVER.SWA.ENABLED:\n iter_num += cfg.SOLVER.SWA.BN_UPDATE_ITER\n\n elif mode == 'val':\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n sample_label_size = sample_volume_size\n sample_stride = [x//2 for x in sample_volume_size]\n topt, wopt = cfg.MODEL.TARGET_OPT, cfg.MODEL.WEIGHT_OPT\n iter_num = -1\n\n elif mode == 'test':\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n sample_stride = cfg.INFERENCE.STRIDE\n iter_num = -1\n\n shared_kwargs = {\n \"sample_volume_size\": sample_volume_size,\n \"sample_label_size\": sample_label_size,\n \"sample_stride\": sample_stride,\n \"augmentor\": augmentor,\n \"target_opt\": topt,\n \"weight_opt\": wopt,\n \"mode\": mode,\n \"do_2d\": cfg.DATASET.DO_2D,\n \"reject_size_thres\": cfg.DATASET.REJECT_SAMPLING.SIZE_THRES,\n \"reject_diversity\": cfg.DATASET.REJECT_SAMPLING.DIVERSITY,\n \"reject_p\": cfg.DATASET.REJECT_SAMPLING.P,\n 
\"data_mean\": cfg.DATASET.MEAN,\n \"data_std\": cfg.DATASET.STD,\n \"data_match_act\": cfg.DATASET.MATCH_ACT,\n \"erosion_rates\": cfg.MODEL.LABEL_EROSION,\n \"dilation_rates\": cfg.MODEL.LABEL_DILATION,\n \"do_relabel\": cfg.DATASET.REDUCE_LABEL,\n \"valid_ratio\": cfg.DATASET.VALID_RATIO,\n }\n\n if cfg.DATASET.DO_CHUNK_TITLE == 1: # build TileDataset\n def _make_json_path(path, name):\n if isinstance(name, str):\n return [os.path.join(path, name)]\n\n assert isinstance(name, (list, tuple))\n json_list = [os.path.join(path, name[i]) for i in range(len(name))]\n return json_list\n\n input_path = cfg.DATASET.INPUT_PATH\n volume_json = _make_json_path(input_path, cfg.DATASET.IMAGE_NAME)\n\n label_json, valid_mask_json = None, None\n if mode == 'train':\n if cfg.DATASET.LABEL_NAME is not None:\n label_json = _make_json_path(input_path, cfg.DATASET.LABEL_NAME)\n if cfg.DATASET.VALID_MASK_NAME is not None:\n valid_mask_json = _make_json_path(input_path, cfg.DATASET.VALID_MASK_NAME)\n\n dataset = TileDataset(chunk_num=cfg.DATASET.DATA_CHUNK_NUM,\n chunk_ind=cfg.DATASET.DATA_CHUNK_IND,\n chunk_ind_split=cfg.DATASET.CHUNK_IND_SPLIT,\n chunk_iter=cfg.DATASET.DATA_CHUNK_ITER,\n chunk_stride=cfg.DATASET.DATA_CHUNK_STRIDE,\n volume_json=volume_json,\n label_json=label_json,\n valid_mask_json=valid_mask_json,\n pad_size=cfg.DATASET.PAD_SIZE,\n data_scale=cfg.DATASET.DATA_SCALE,\n coord_range=cfg.DATASET.DATA_COORD_RANGE,\n **shared_kwargs)\n\n else: # build VolumeDataset or VolumeDatasetMultiSeg\n volume, label, valid_mask = _get_input(\n cfg, mode, rank, dir_name_init, img_name_init, min_size=sample_volume_size)\n\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n shared_kwargs['multiseg_split'] = cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT\n dataset = dataset_class(volume=volume, label=label, valid_mask=valid_mask,\n iter_num=iter_num, **shared_kwargs, **dataset_options)\n\n return dataset", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset", "def get_dataset(params, run_mode=\"train\"):\n tokenizer = get_tokenizer(params)\n # Use run_mode to decide input_folder, MR cols, MR max lens.\n msg_col, rsp_col = params.msg_col, params.rsp_col\n max_msg_len, max_rsp_len = params.max_msg_len, params.max_rsp_len\n if run_mode == \"train\":\n input_folder = params.train_input_dir\n elif run_mode == \"valid\":\n input_folder = params.valid_input_dir\n elif run_mode == \"gmr\":\n input_folder = params.gmr_input_dir\n if params.truncate is False:\n max_msg_len, max_rsp_len = np.inf, np.inf\n elif run_mode == \"rsp_set\":\n # TODO: What's the purpose of this mode?\n input_folder = params.rsp_input_dir\n msg_col, rsp_col = 0, params.rsp_text_col\n # TODO: These values should be global parameters instead of being hard coded like this\n # TODO: Why not just set these values to np.inf like above?\n if params.truncate is False:\n max_msg_len, max_rsp_len = 1000, 1000\n elif run_mode == \"eval\":\n input_folder = 
params.eval_input_dir\n elif run_mode == \"export\":\n # TODO: We should remove this mode from this function since it does nothing anyways\n return None, tokenizer\n else:\n raise ValueError(\"SystemLog: Invalid run mode %s.\" % run_mode)\n\n # We consider each file to be in a separate pytorch dataset. We then use ConcatDataset to combine individual datasets\n datasets = []\n total_file_processed = 0\n # This sorting of file is done to make sure that we get the same file order each time\n for file_idx, filename in enumerate(sorted(os.listdir(input_folder))):\n filepath = os.path.join(input_folder, filename)\n datasets.append(MRDataset(filepath, tokenizer, msg_col=msg_col,\n rsp_col=rsp_col, max_msg_len=max_msg_len,\n max_rsp_len=max_rsp_len, run_mode=run_mode, architecture=params.architecture, truncate=params.truncate))\n total_file_processed += 1\n if file_idx % 1000 == 0:\n print(\"SystemLog: %d files processed \" % file_idx)\n print(\"SystemLog: %d files processed in total.\" % total_file_processed)\n mr_dataset = ConcatDataset(datasets)\n\n return mr_dataset, tokenizer", "def load_dataset(\n self,\n ):\n with xr.open_dataset(self._filepath) as fdata:\n out = fdata.assign_coords({\n 'nCells': np.arange(fdata.dims['nCells']),\n })\n if self.time is not None:\n out = out.assign_coords({\n 'Time': self.time,\n })\n if 'nVertLevels' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevels': np.arange(fdata.dims['nVertLevels']),\n })\n if 'nVertLevelsP1' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsP1': np.arange(fdata.dims['nVertLevelsP1']),\n })\n if 'nEdges' in fdata.dims:\n out = out.assign_coords({\n 'nEdges': np.arange(fdata.dims['nEdges']),\n })\n if 'nVertices' in fdata.dims:\n out = out.assign_coords({\n 'nVertices': np.arange(fdata.dims['nVertices']),\n })\n if 'nVertLevelsLES' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsLES': np.arange(fdata.dims['nVertLevelsLES']),\n })\n return out", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, 
G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def generate_training_data_3D():\n c11 = np.random.uniform(0.05, 1.50, 20)\n c12 = np.random.uniform(-1.50, 1.50, 20)\n c13 = np.random.uniform(-2.50, -0.05, 20)\n c21 = np.random.uniform(-1.50, -0.05, 20)\n c22 = np.random.uniform(-1.50, 1.50, 20)\n c23 = np.random.uniform(0.05, 2.50, 20)\n c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])\n c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])\n\n points = plt.figure()\n ax = points.add_subplot(111, projection='3d')\n ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')\n ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')\n plt.show()\n plt.close()\n\n return c1, c2", "def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)", "def getDataSet(self, i, raw = 0):\n\t\tdata = self.getTimepoint(i)\n\t\tif self.isRGB and self.numberOfComponents == 4:\n\t\t\textract = vtk.vtkImageExtractComponents()\n\t\t\textract.SetComponents(0, 1, 2)\n\t\t\textract.SetInput(data)\n\t\t\tdata = extract.GetOutput()\n\n\t\tif self.flipVertically:\n\t\t\tflip = vtk.vtkImageFlip()\n\t\t\tflip.SetFilteredAxis(1)\n\t\t\tflip.SetInput(data)\n\t\t\tdata = flip.GetOutput()\n\t\tif self.flipHorizontally:\n\t\t\tflip = vtk.vtkImageFlip()\n\t\t\tflip.SetFilteredAxis(0)\n\t\t\tflip.SetInput(data)\n\t\t\tdata = flip.GetOutput()\n\t\t\t\n\t\treturn data", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def data(dataname = None, package = None, cache = False):\n\t#if dataname == None and data == None:\n\t# from rpy2.robjects import r\n\t# print(r.data())\n\treturn sm.datasets.get_rdataset(dataname = dataname, package = package, cache = cache).data", "def make_dataset(self,\n path,\n mode,\n height=None,\n width=None):\n # Split up the possibly comma seperated directories.\n if ',' in path:\n l = path.split(',')\n d = '/'.join(l[0].split('/')[:-1])\n l[0] = l[0].split('/')[-1]\n paths = [os.path.join(d, x) for x in l]\n else:\n paths = [path]\n\n # Generate list of filenames.\n # pylint:disable=g-complex-comprehension\n files = [os.path.join(d, f) for d in paths for f in tf.io.gfile.listdir(d)]\n num_files = len(files)\n ds = tf.data.Dataset.from_tensor_slices(files)\n if mode == 'multiframe':\n # Create a nested dataset.\n ds = ds.map(tf.data.TFRecordDataset)\n # pylint:disable=g-long-lambda\n ds = ds.interleave(\n lambda x: x.map(\n lambda y: self.parse_train(y, height, width),\n num_parallel_calls=tf.data.experimental.AUTOTUNE),\n cycle_length=min(10, num_files),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Prefetch a number of 
batches because reading new ones can take much\n # longer when they are from new files.\n ds = ds.prefetch(10)\n\n return ds", "def Pdata3(fidName=\"T\", N=0):\n\t\tData = Helper.Pdata(fidName, 3)\n\t\tNumzhuiti = Data.shape[0]\n\t\t# Data3 = []\n\t\tfor i in range(0, Numzhuiti):\n\t\t\tPa = Data[i, 0]\n\t\t\tPl = Data[i, 1]\n\t\t\tPr = Data[i, 2]\n\t\t\tPlo = Pa[1] - Pl[1]\n\t\t\tPro = Pa[1] - Pr[1]\n\t\t\tPal = Pa + [0, Plo * N, 0]\n\t\t\tPar = Pa + [0, Pro * N, 0]\n\n\t\t\tif i == 0:\n\t\t\t\tData3 = np.array([Pal])\n\t\t\t\tData3 = np.append(Data3, Pl)\n\t\t\t\tData3 = np.append(Data3, Par)\n\t\t\t\tData3 = np.append(Data3, Pr)\n\t\t\telse:\n\t\t\t\tData3 = np.append(Data3, Pal)\n\t\t\t\tData3 = np.append(Data3, Pl)\n\t\t\t\tData3 = np.append(Data3, Par)\n\t\t\t\tData3 = np.append(Data3, Pr)\n\t\tData3 = np.array(Data3.reshape(Data.shape[0] * 2, 2, 3))\n\t\treturn Data3", "def _create_dataset(self, *data):\n # Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def get_dataset(self):\n return self._X, self._y", "def get_dataset(self, data_path, n_workers=4, dataset_args={}):\n self.logging.info('loading dataset...')\n dataset = pd.read_csv(data_path)\n\n self.logging.info('preprocessing data...')\n\n results = [None] * n_workers\n with Pool(processes=n_workers) as pool:\n for i in range(n_workers):\n batch_start = (len(dataset) // n_workers) * i\n if i == n_workers - 1:\n batch_end = len(dataset)\n else:\n batch_end = (len(dataset) // n_workers) * (i + 1)\n\n batch = dataset[batch_start: batch_end]\n results[i] = pool.apply_async(self.preprocess_samples, [batch])\n\n # When debugging, you'd better not use multi-thread.\n # results[i] = self.preprocess_dataset(batch, preprocess_args)\n\n pool.close()\n pool.join()\n\n processed = []\n for result in results:\n processed += result.get()\n\n #padding = self.embedding.to_index('[PAD]')\n return DialogDataset(processed, **dataset_args)", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def _dataset(filename, filter, img_count=1000000):\n try:\n # Attempt to load the dataset.\n with np.load(filename) as data:\n X = data['arr_0']\n y = data['arr_1']\n except:\n # The dataset does not exist, so we regenerate.\n\n # Set up a sample of random images:\n sample_size = (img_count, 3, 3, 3) # 3x3 windows, each containing 3 channels\n images = np.random.random(sample_size)\n\n # The correct label for each \"image\" is the color at its center\n y = images[:, 1, 1, :]\n\n # Now we apply the filter to each of our images and store the filtered image\n print(\"Generating dataset:\")\n\n X = np.zeros(images.shape)\n\n for i in range(images.shape[0]):\n thisImg = images[i]\n filtered = filter.apply(thisImg)\n X[i] = filtered\n\n if (i + 1) % (img_count / 100) == 0:\n print(\"%s: %d%% done\" % (filename, 100 * (i + 1) / img_count))\n\n print(\"Dataset generation complete.\")\n\n np.savez(filename, X, y)\n\n return X[:img_count], y[:img_count]", "def get3dView():\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n return bpy.types.SpaceView3D(area.spaces[0])", "def __nc_dataset(self, name, 
data_sel, fill_as_nan):\n if name not in self.fid['/target_product'].variables.keys():\n raise ValueError('dataset {} for found'.format(name))\n\n dset = self.fid['/target_product/{}'.format(name)]\n res = dset[:].reshape(self.scanline, self.ground_pixel)\n if data_sel is not None:\n res = res[data_sel]\n\n if fill_as_nan:\n return res.filled(np.nan)\n\n return res.data", "def build_dataset(self):\n self.dataset = KITTIBEVDataset(self.dataset_config, self.transform)\n return self.dataset", "def get_dataset(self, split):\r\n def generator():\r\n while True:\r\n idx = self.get_batch_idx(split)\r\n yield self.idx_to_data(idx)\r\n return tf.data.Dataset.from_generator(\r\n generator,\r\n output_types=(dict(self.dtypes_input), self.dtype_target),\r\n output_shapes=(self.shapes_input, self.shape_target))", "def get_dataset(self, name, data_sel=None, fill_as_nan=True):\n if self.science_product:\n return self.__nc_dataset(name, data_sel, fill_as_nan)\n\n return self.__h5_dataset(name, data_sel, fill_as_nan)", "def get_dataset(self, patch_size):\n\n x_train, y_train = self.__get_audiomnist(self.__path, kind='train')\n x_val, y_val = self.__get_audiomnist(self.__path, kind='test')\n\n # up and down-sampling\n x_train = torch.nn.functional.interpolate(x_train, size=patch_size, mode='bilinear')\n x_val = torch.nn.functional.interpolate(x_val, size=patch_size, mode='bilinear')\n\n x_train = x_train.repeat(1, 3, 1, 1)\n x_val = x_val.repeat(1, 3, 1, 1)\n\n trainset = torch.utils.data.TensorDataset(x_train, y_train)\n valset = torch.utils.data.TensorDataset(x_val, y_val)\n\n return trainset, valset", "def test_3d():\n dic, data = ng.bruker.read(os.path.join(DATA_DIR, \"bruker_3d\"))\n assert dic['FILE_SIZE'] == 91226112\n assert data.shape == (116, 128, 768)\n assert round(data[0, 0, 40].real, 2) == 18.0\n assert round(data[0, 0, 40].imag, 2) == -66.0\n assert round(data[5, 13, 91].real, 2) == 1138.0\n assert round(data[5, 13, 91].imag, 2) == 3482.0\n write_readback(dic, data)", "def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset", "def make_dataset(condition, root, base_path, files_json_path):\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n dataset = ZapposDataset(root, base_path, files_json_path, condition,\n transform=transforms.Compose([\n transforms.Scale(112),\n transforms.CenterCrop(112),\n transforms.ToTensor(),\n normalize,\n ]))\n return dataset", "def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset", "def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)", "def _preprocess_data_3d(\n self, data_batch: Dict[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:\n world2grid = data_batch['world2grid']\n uniform_samples = data_batch['uniform_samples']\n near_surface_samples = data_batch['near_surface_samples']\n if 'uniform_samples_per_camera' in data_batch:\n uniform_samples_per_camera = data_batch['uniform_samples_per_camera']\n if 'near_surface_samples_per_camera' in data_batch:\n near_surface_samples_per_camera = data_batch[\n 'near_surface_samples_per_camera']\n if 'depth_xyzn_per_camera' in data_batch:\n depth_xyzn_per_camera = data_batch['depth_xyzn_per_camera']\n\n batch_size = data_batch['near_surface_samples'].shape[0]\n spatial_dims = data_batch['grid_samples'].shape[1:4]\n\n # Assume grid size is the same in all dimensions.\n grid_size = spatial_dims[0]\n\n # 
Generate normalized [-1, 1] coordinates for grid samples.\n _, pixels_grid = point_sampling.sample3d_all_pixels(\n spatial_dims, normalize=True)\n pixels_grid = tf.tile(pixels_grid[None, ...], [batch_size, 1, 1, 1, 1])\n data_batch['grid_samples'] = tf.concat(\n [pixels_grid, data_batch['grid_samples']], axis=-1)\n # Tensor with shape [batch_size, dim_d, dim_h, dim_w, 4], (z, y, x).\n\n # Transform to grid space [0, 127] and map grid space [-0.5, 127.5] to\n # [-1, 1].\n uniform_samples = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n uniform_samples[..., :3],\n tf.ones(\n [batch_size, tf.shape(uniform_samples)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n # Tensor with shape [batch_size, 3, num_point].\n\n uniform_samples = (uniform_samples + 0.5) * 2.0 / grid_size - 1\n uniform_samples = uniform_samples[:, ::-1, :] # Convert to (z, y, x).\n uniform_samples = tf.concat([\n tf.transpose(uniform_samples, [0, 2, 1]),\n data_batch['uniform_samples'][..., 3:]\n ],\n axis=-1)\n data_batch['uniform_samples'] = uniform_samples\n\n near_surface_samples = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n near_surface_samples[..., :3],\n tf.ones([batch_size,\n tf.shape(near_surface_samples)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n near_surface_samples = (near_surface_samples + 0.5) * 2.0 / grid_size - 1\n near_surface_samples = near_surface_samples[:, ::-1, :]\n near_surface_samples = tf.concat([\n tf.transpose(near_surface_samples, [0, 2, 1]),\n data_batch['near_surface_samples'][..., 3:]\n ],\n axis=-1)\n data_batch['near_surface_samples'] = near_surface_samples\n\n if 'uniform_samples_per_camera' in data_batch:\n num_view = tf.shape(uniform_samples_per_camera)[1]\n num_point_per_view = tf.shape(uniform_samples_per_camera)[2]\n num_channel = tf.shape(uniform_samples_per_camera)[3]\n uniform_samples_per_camera = tf.reshape(uniform_samples_per_camera,\n [batch_size, -1, num_channel])\n uniform_samples_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n uniform_samples_per_camera[..., :3],\n tf.ones(\n [batch_size,\n tf.shape(uniform_samples_per_camera)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n uniform_samples_per_camera = (uniform_samples_per_camera +\n 0.5) * 2.0 / grid_size - 1\n uniform_samples_per_camera = uniform_samples_per_camera[:, ::-1, :]\n uniform_samples_per_camera = tf.reshape(\n tf.transpose(uniform_samples_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n uniform_samples_per_camera = tf.concat([\n uniform_samples_per_camera,\n data_batch['uniform_samples_per_camera'][..., 3:]\n ],\n axis=-1)\n data_batch['uniform_samples_per_camera'] = uniform_samples_per_camera\n\n if 'near_surface_samples_per_camera' in data_batch:\n num_view = tf.shape(near_surface_samples_per_camera)[1]\n num_point_per_view = tf.shape(near_surface_samples_per_camera)[2]\n num_channel = tf.shape(near_surface_samples_per_camera)[3]\n near_surface_samples_per_camera = tf.reshape(\n near_surface_samples_per_camera, [batch_size, -1, num_channel])\n near_surface_samples_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n near_surface_samples_per_camera[..., :3],\n tf.ones([\n batch_size,\n tf.shape(near_surface_samples_per_camera)[1], 1\n ],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n near_surface_samples_per_camera = (near_surface_samples_per_camera 
+\n 0.5) * 2.0 / grid_size - 1\n near_surface_samples_per_camera = near_surface_samples_per_camera[:, ::\n -1, :]\n near_surface_samples_per_camera = tf.reshape(\n tf.transpose(near_surface_samples_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n near_surface_samples_per_camera = tf.concat([\n near_surface_samples_per_camera,\n data_batch['near_surface_samples_per_camera'][..., 3:]\n ],\n axis=-1)\n data_batch[\n 'near_surface_samples_per_camera'] = near_surface_samples_per_camera\n\n if 'depth_xyzn_per_camera' in data_batch:\n num_view = tf.shape(depth_xyzn_per_camera)[1]\n num_point_per_view = tf.shape(depth_xyzn_per_camera)[2]\n num_channel = tf.shape(depth_xyzn_per_camera)[3]\n depth_xyzn_per_camera = tf.reshape(depth_xyzn_per_camera,\n [batch_size, -1, num_channel])\n depth_xyzn_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n depth_xyzn_per_camera[..., :3],\n tf.ones([batch_size,\n tf.shape(depth_xyzn_per_camera)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n depth_xyzn_per_camera = (depth_xyzn_per_camera +\n 0.5) * 2.0 / grid_size - 1\n depth_xyzn_per_camera = depth_xyzn_per_camera[:, ::-1, :]\n depth_xyzn_per_camera = tf.reshape(\n tf.transpose(depth_xyzn_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n depth_xyzn_per_camera = tf.concat(\n [depth_xyzn_per_camera, data_batch['depth_xyzn_per_camera'][..., 3:]],\n axis=-1)\n data_batch['depth_xyzn_per_camera'] = depth_xyzn_per_camera\n\n # Scale SDF\n data_batch['grid_samples'] = data_batch['grid_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, None, :]\n data_batch['uniform_samples'] = data_batch['uniform_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, :]\n data_batch['near_surface_samples'] = data_batch['near_surface_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, :]\n if 'uniform_samples_per_camera' in data_batch:\n data_batch['uniform_samples_per_camera'] = data_batch['uniform_samples_per_camera'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, :]\n if 'near_surface_samples_per_camera' in data_batch:\n data_batch['near_surface_samples_per_camera'] = data_batch['near_surface_samples_per_camera'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, :]\n\n input_data = data_batch['grid_samples'][..., 3:4]\n # Tensor with shape [batch_size, dim_d, dim_h, dim_w, 1].\n\n gt_data = input_data\n\n return input_data, gt_data", "def get_dataset(dataset_type = \"training\"):\n X, Y = None, None\n #1. Pobieramy obrazki i etykiety\n images, labels = get_MNIST_dataset(range(10), dataset_type)\n #2. Konwersja do X,Y (czyli wiersz to jeden przyklad, X - dwuwymiarowa macierz), Y po prostu przepisujemy\n # hint : reshape, np.zeros, przez macierz mozna latwo iterowac w petli (for row in matrix)\n #TODO: fill in\n Y = labels\n X = images.reshape((images.shape[0], images.shape[1]*images.shape[2]))\n\n #for id, digit in enumerate(images):\n # pl.imshow(digit, cmap=pl.cm.gray)\n # X[id, :] = digit.reshape((digit.shape[0]*digit.shape[1],))\n # if id % 1000 == 0: print \"Converted \",id\n\n # Czy da sie szybciej?\n\n #3. 
Return X,Y\n return X, Y", "def create_tf_data(self) -> tf.data.Dataset:\n dataset = tf.data.Dataset.from_generator(\n generator=lambda: self.pipeline,\n output_types=self.model.output_types(),\n output_shapes=self.model.output_shapes())\n return dataset.repeat().batch(self.batch_size)", "def get_dataset(self):\n\n trainset = datasets.ImageNet('datasets/ImageNet/train/', split='train', transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.ImageNet('datasets/ImageNet/val/', split='val', transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def get_dataset(self):\n\n trainset = datasets.STL10('datasets/STL10/train/', split='train', transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.STL10('datasets/STL10/test/', split='test', transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, 
num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def get_dataset(self) -> datasets.OpenMLDataset:\n return datasets.get_dataset(self.dataset_id)", "def create_dataset():\n ds_train_raw, ds_test_raw = load_raw_datasets()\n vectorize_layer = create_vectorizer(ds_train_raw)\n ds_train = ds_train_raw.map(\n lambda text, label: (vectorize_layer(text), label)\n ).prefetch(tf.data.experimental.AUTOTUNE)\n ds_test = ds_test_raw.map(\n lambda text, label: (vectorize_layer(text), label)\n ).prefetch(tf.data.experimental.AUTOTUNE)\n return ds_train, ds_test, vectorize_layer", "def get_dataset(dataset_name, use_cached=True, dataset_folder=DATASET_FOLDER, cache_path=CACHE_PATH, cache_file=None):\n\n dataset_npy = cache_file if cache_file else os.path.join(cache_path, 'dataset_{}.npy'.format(dataset_name))\n\n if use_cached and os.path.exists(dataset_npy):\n X, Y = get_dataset_cached(dataset_npy)\n else:\n X, Y = get_dataset_module(dataset_folder, dataset_name).fetch()\n\n # Test dataset validity before saving it\n test_dataset_validity(X, Y)\n\n with open(dataset_npy, 'wb') as f:\n pickle.dump((X, Y), f)\n\n test_dataset_validity(X, Y)\n return X, Y", "def get_mapnik_ds(self,**kwargs):\n if not self.geometry_field:\n raise ValueError('Geometry field not found')\n\n import itertools\n ids = itertools.count(0)\n assert hasattr(mapnik,'MemoryDatasource'), \"mapnik.MemoryDatasource requires >= mapnik 2.1\"\n ds = mapnik.MemoryDatasource()\n # todo - how to get subset of columns requested from the queryset?\n field_names = self.qs.query.get_meta().get_all_field_names()\n field_names.remove(self.geometry_field.name)\n if hasattr(mapnik,'Context'):\n context = mapnik.Context()\n for fld in field_names:\n context.push(fld)\n for i in self.qs.iterator():\n feature = None\n if hasattr(mapnik,'Context'):\n feature = mapnik.Feature(context,ids.next())\n else:\n feature = mapnik.Feature(ids.next())\n feature.add_geometries_from_wkb(str(getattr(i,self.geometry_field.name).wkb))\n for fld in field_names:\n feature[fld] = getattr(i,fld)\n ds.add_feature(feature)\n return ds", "def get_data():\n transform = Compose([paddle.vision.Resize(32),\n Normalize(mean=[127.5], std=[127.5], data_format='CHW'),\n paddle.vision.transforms.Transpose()])\n train_data = paddle.vision.datasets.Cifar10(mode='train', transform=transform)\n l = len(train_data)\n return paddle.io.random_split(train_data, [l // 2, l - l // 2])", "def r3d(**kwargs):\n\n return _video_resnet('r3d',\n block=BasicBlock,\n conv_makers=[Conv3DSimple] * 4,\n layers=[NUM_LAYER, NUM_LAYER, NUM_LAYER, NUM_LAYER],\n stem=BasicStem, **kwargs)", "def get_dataset(self, dataset_path=None, normalize=True, return_original=False):\n if dataset_path is None:\n dataset_path = self.dir\n \n if \"mocap\" in dataset_path.lower():\n print(\"Loading Mocap dataset.\")\n df = get_mocap()\n df_orig = df\n elif \"profi\" in dataset_path.lower():\n print(\"Loading Profiset dataset.\")\n df = get_profiset()\n df_orig = df\n else:\n print(\"Loading CoPhIR dataset.\")\n df_orig, attr_lengths = get_objects_with_indexes(self.labels, f'{dataset_path}/level-{str(self.n_levels)}.txt', f'{dataset_path}/objects.txt')\n if normalize:\n df = scale_per_descriptor(df_orig, self.labels, attr_lengths)\n else:\n df = df_orig\n \n assert df.shape[1] == self.descriptor_values + self.n_levels + len([\"object_id\"])\n logging.info(f\"Loaded dataset of shape: {df.shape}\")\n if return_original:\n return df, df_orig\n else:\n return df", "def create3D( self , ecl_kw , default = 0):\n if 
len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():\n array = numpy.ones( [ self.getGlobalSize() ] , dtype = ecl_kw.dtype) * default\n kwa = ecl_kw.array\n if len(ecl_kw) == self.size:\n for i in range(kwa.size):\n array[i] = kwa[i]\n else:\n data_index = 0\n for global_index in range(self.getGlobalSize()):\n if self.active( global_index = global_index ):\n array[global_index] = kwa[data_index]\n data_index += 1\n \n array = array.reshape( [self.getNX() , self.getNY() , self.getNZ()] , order = 'F')\n return array\n else:\n raise ValueError(\"Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d\" % (ecl_kw.name , ecl_kw.size , self.nactive ,self.size))", "def get_dataset(dataset: str, split: str) -> Dataset:\n if dataset == \"imagenet\":\n return _imagenet(split)\n elif dataset == \"imagenet32\":\n return _imagenet32(split)\n elif dataset == \"cifar10\":\n return _cifar10(split)", "def loadData( path, strDataset, strName, nSamples ):\r\n # Size of the image:\r\n xSize = 176\r\n ySize = 208\r\n zSize = 176\r\n\r\n # Limits of the regions of interest of the data:\r\n xLimMin = 14\r\n xLimMax = 18\r\n yLimMin = 12\r\n yLimMax = 15\r\n zLimMin = 3\r\n zLimMax = 20\r\n\r\n # Creation of the dictionary which will contain our dataset:\r\n datasetDic = {}\r\n\r\n for i in range(nSamples):\r\n # Complete path of the i-th file of the dataset:\r\n imageName = strName + str(i + 1)\r\n imagePath = path + \"/\" + strDataset + \"/\" + imageName + \".nii\"\r\n \r\n # Loading of the 3D images using a function from the nibabel library\r\n imageRaw = nib.load(imagePath)\r\n \r\n # Tranforming the images into data (3d np.array):\r\n datasetDic[i] = imageRaw.get_data()[xLimMin:xSize-xLimMax, \\\r\n yLimMin:ySize-yLimMax, zLimMin:zSize-zLimMax, 0]\r\n \r\n return datasetDic", "def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def dataset(request):\n X, y = make_classification(\n n_samples=700, n_features=10, n_informative=8, n_redundant=2,\n n_classes=2, n_clusters_per_class=2, random_state=6483\n )\n\n request.cls.dataset = Dataset(X, y)", "def make_data(opts):\n qmax, nq, res = opts['qmax'], opts['nq'], opts['res']\n if opts['is2d']:\n data = empty_data2D(np.linspace(-qmax, qmax, nq), resolution=res)\n data.accuracy = opts['accuracy']\n set_beam_stop(data, 0.0004)\n index = ~data.mask\n else:\n if opts['view'] == 'log' and not opts['zero']:\n qmax = math.log10(qmax)\n q = np.logspace(qmax-3, qmax, nq)\n else:\n q = np.linspace(0.001*qmax, qmax, nq)\n if opts['zero']:\n q = np.hstack((0, q))\n data = empty_data1D(q, resolution=res)\n index = slice(None, None)\n return data, index", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def _get_data_on_3d_points(self, varname, record, points):\n if 
self.get_mesh_dimension() != 3:\n raise TelemacException(\"Action possible only on 3d mesh\")\n\n res = float('nan')*np.ones((len(points)), dtype=np.float64)\n for i, point in enumerate(points):\n elev = self.get_data_on_vertical_segment(\\\n 'ELEVATION Z', record, point[:-1])\n values = self.get_data_on_vertical_segment(\\\n varname, record, point[:-1])\n for plan in range(self.nplan-1):\n if elev[plan] <= point[-1] and point[-1] <= elev[plan+1]:\n shz = (point[-1]-elev[plan])/max((elev[plan+1]\\\n -elev[plan]), 1.e-6)\n res[i] = (1.0-shz)*values[plan]+shz*values[plan+1]\n return res", "def CreateSurface2DMeshfrom3DMesh(self):\n\n self.__do_memebers_exist__()\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"hex\":\n mm.element_type = \"quad\"\n elif self.element_type == \"tet\":\n mm.element_type = \"tri\"\n else:\n raise ValueError(\"Cannot make a 2D mesh from the 3D mesh of type {}\".format(self.element_type))\n\n unique_faces, inv_faces = np.unique(self.faces,return_inverse=True)\n mm.points = self.points[unique_faces,:]\n mm.nnode = mm.points.shape[0]\n aranger = np.arange(mm.nnode)\n mm.elements = aranger[inv_faces].reshape(self.faces.shape)\n mm.nelem = mm.elements.shape[0]\n mm.GetBoundaryEdges()\n\n return mm", "def getDataset(self, train=True):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n \n if self.dataset == \"ELLIPSE\":\n a = np.array([[0,1.0],[1.0,2.0]]) \n b = a*0.5 \n myE = el.ellipse(device, 500, 100, a, b) \n if train == True:\n return myE.create_dataset(myE.examples)\n return myE.create_dataset(myE.valid) \n \n if self.dataset == \"SWISS\": \n myS = sw.SwissRoll(device, 500, 0.2) \n if train == True:\n return myS.create_dataset(myS.examples)\n return myS.create_dataset(myS.valid)\n \n \n #open file\n myFile = h5py.File(self.dataString, 'r', self.driver)\n \n if train == True: \n inputString = \"train_inputs\"\n labelsString = \"train_labels\"\n \n else:\n inputString = \"test_inputs\"\n labelsString = \"test_labels\"\n \n #get hdf5 datsets\n features = myFile.get(inputString)\n labels = myFile.get(labelsString)\n \n #convert to tensors\n features = torch.from_numpy(np.array(features))\n labels = torch.from_numpy(np.array(labels))\n \n #close file to ensure dataset is in memory\n myFile.close()\n \n #conver to correct datatypes\n features = features.float()\n \n if self.conv_sg == False:\n labels = labels.long() \n \n dataset = torch.utils.data.TensorDataset(features, labels)\n \n return dataset", "def __init__(self, path: str = None, buffer: io.BytesIO = None):\r\n if not path and not buffer:\r\n raise ValueError(\"Must supply path or buffer\")\r\n if buffer:\r\n memory = buffer.getvalue()\r\n buffer.close()\r\n path = tempfile.TemporaryFile()\r\n else:\r\n memory = None\r\n\r\n with Dataset(path, memory=memory) as nc:\r\n keys = list(nc.variables.keys())\r\n self.x = nc.variables[keys[0]][:].filled()\r\n self.y = nc.variables[keys[1]][:].filled()\r\n self.layers = nc.variables[keys[2]][:].filled()\r\n self.values = nc.variables[keys[3]][:].filled()\r\n self.crs = nc.crs\r\n\r\n if np.sign(np.diff(self.y).mean()) > 0:\r\n self.values = np.flip(self.values, axis=1)\r\n self.y = np.flip(self.y)\r\n\r\n if self.layers.dtype == np.float64 and self.layers.mean() > 1e9:\r\n self.layers = np.datetime64(\"1970-01-01\") + self.layers.astype(\r\n \"timedelta64[s]\"\r\n )\r\n\r\n self.dimensions = {\r\n \"x\": len(self.x),\r\n \"y\": len(self.y),\r\n \"layers\": len(self.layers),\r\n }\r\n self.resolution = {\r\n 
\"x\": round(np.abs(np.diff(self.x).mean()), 8),\r\n \"y\": round(np.abs(np.diff(self.y).mean()), 8),\r\n }\r\n self.extent = {\r\n \"xmin\": round(self.x.min() - self.resolution[\"x\"] / 2, 8),\r\n \"xmax\": round(self.x.max() + self.resolution[\"x\"] / 2, 8),\r\n \"ymin\": round(self.y.min() - self.resolution[\"y\"] / 2, 8),\r\n \"ymax\": round(self.y.max() + self.resolution[\"y\"] / 2, 8),\r\n }", "def array_input_fn(array: np.ndarray, mode: str, batch_size: int):\n if len(array.shape) != 3:\n raise ValueError(\"`array` must have shape [n_samples, height, width].\")\n _assert_valid_mode(mode)\n\n dataset = tf.data.Dataset.from_tensor_slices(array)\n\n if mode == _TRAIN:\n dataset = dataset.shuffle(1000).repeat()\n\n dataset = dataset.batch(batch_size)\n\n return dataset", "def CreateDataset(all_arrays):\n dataset = Dataset()\n\n dataset._addData(all_arrays[0])\n dataset._addData(all_arrays[1])\n dataset._addData(all_arrays[3])\n dataset._addData(all_arrays[5])\n dataset._addData(all_arrays[6])\n dataset._addData(all_arrays[9])\n dataset._addData(all_arrays[8])\n dataset._addData(all_arrays[4])\n\n return dataset", "def create_ds(self, data, is_train=True):\n ds = tf.data.Dataset.from_tensor_slices(data)\n map_fn = lambda x, y: (cifar_process(x, is_train), y)\n ds = ds.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return ds", "def get_dataset(opts):\n dataset_type = opts.dataset_params.dataset_type\n if dataset_type in 'synth':\n return synthgraph.SynthGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthnoise':\n return synthgraph.SynthNoiseGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'synthoutlier':\n return synthgraph.SynthOutlierGraphDataset(opts, opts.dataset_params)\n elif dataset_type in 'rome16kgeom':\n return spreal.GeomKNNRome16KDataset(opts, opts.dataset_params)\n elif dataset_type in 'graffiti':\n return graffiti.GraffitiDataset(opts, opts.dataset_params)\n else:\n print(\"ERROR: Dataset type {} not implemented yet\".format(dataset_type))\n sys.exit(1)", "def compute(cls, dataset):\n return dataset", "def get_dataloader(self, mode, label_mode, batch_size):\n cached_features_file = self._feature_file(mode, label_mode)\n logger.info('Loading features from cached file %s', cached_features_file)\n features = torch.load(cached_features_file)\n\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n all_emph_probs = torch.tensor([f.emph_probs for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_emph_probs)\n\n if mode == 'train':\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n\n dataloader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)\n return dataloader", "def fz3d_2_ndhwc_compute(self):\n tik_instance = self.set_tik_instance()\n branch = self.check_branch()\n\n if branch == \"c_align_small\":\n tik_instance = self.c_align_small(tik_instance)\n elif branch == \"c_align_split_n\":\n tik_instance = self.c_align_split_n(tik_instance)\n elif branch == \"c_not_align_small_fp16\":\n tik_instance = self.c_not_align_small_fp16(tik_instance)\n elif branch == \"c_not_align_split_n_fp32\":\n tik_instance = 
self.c_not_align_split_n_fp32(tik_instance)\n\n return tik_instance", "def get_dataset(self):\n\n trainset = datasets.FashionMNIST('datasets/FashionMNIST/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.FashionMNIST('datasets/FashionMNIST/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def get_dataset(self):\n\n trainset = datasets.CIFAR10('datasets/CIFAR10/train/', train=True, transform=self.train_transforms,\n target_transform=None, download=True)\n valset = datasets.CIFAR10('datasets/CIFAR10/test/', train=False, transform=self.val_transforms,\n target_transform=None, download=True)\n\n return trainset, valset", "def get_dataset(data_path, mean, std, target_transform=None):\n # can add more data augmentation\n preprocess = transforms.Compose([\n transforms.Resize((299, 299)),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n transforms.RandomHorizontalFlip()\n ])\n\n return datasets.ImageFolder(root=data_path, transform=preprocess, target_transform=target_transform)", "def read_dataset(train_batch_size: int = 32, eval_batch_size: int = 128,\n train_mode: str = 'pretrain', strategy: tf.distribute.Strategy = None, topology=None,\n dataset: str = 'cifar10', train_split: str = 'train', eval_split: str = 'test',\n data_dir: str = None, image_size: int = 32, cache_dataset: bool = True,\n color_jitter_strength: float = 1.0) -> Tuple[tf.data.Dataset, tf.data.Dataset, int, int, int]:\n builder: DatasetBuilder = tfds.builder(dataset, data_dir=data_dir)\n builder.download_and_prepare()\n num_train_examples = builder.info.splits[train_split].num_examples\n num_eval_examples = builder.info.splits[eval_split].num_examples\n num_classes = builder.info.features['label'].num_classes\n train_dataset = build_distributed_dataset(builder=builder, batch_size=train_batch_size, is_training=True,\n strategy=strategy, topology=topology, train_mode=train_mode,\n train_split=train_split, eval_split=eval_split,\n cache_dataset=cache_dataset,\n image_size=image_size, color_jitter_strength=color_jitter_strength)\n test_dataset = build_distributed_dataset(builder=builder, batch_size=eval_batch_size, is_training=False,\n strategy=strategy, topology=topology, train_mode=train_mode,\n train_split=train_split, eval_split=eval_split,\n cache_dataset=cache_dataset,\n image_size=image_size, color_jitter_strength=color_jitter_strength)\n return train_dataset, test_dataset, num_train_examples, num_eval_examples, num_classes", "def make_dataset(self) -> torch.utils.data.Dataset:\n transform = cnn_utils.ToTensor()\n return cnn_utils.ArtifactDataset(self.stamps, transform)", "def dataset(self):\n if self._dataset is None:\n X, y = make_regression(n_samples=1000, n_features=2, noise=10.)\n self._dataset = {'X': X, 'y': y}\n\n return self._dataset", "def GetDataSlice(vDataSet,z,c,t):\r\n dtype = GetType(vDataSet)\r\n if dtype == np.uint8 or dtype == np.uint16:\r\n arr = np.array(vDataSet.GetDataSliceShorts(z,c,t),dtype)\r\n else:\r\n arr = np.array(vDataSet.GetDataSliceFloats(z,c,t),dtype)\r\n return arr.swapaxes(0,1)", "def _dataset(self, split, directory, data_file):\n del split\n\n filenames = generate_filenames(self.name, directory, data_file)\n\n def parse_function(record):\n image_dim = self.IMAGE_SIZE_PX\n features = tf.parse_single_example(\n record,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 
'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64)\n })\n\n # Convert from a scalar string tensor (whose single string has\n # length image_pixel*image_pixel) to a uint8 tensor with shape\n # [image_pixel, image_pixel, 1].\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n image = tf.reshape(image, [image_dim, image_dim, 1])\n image.set_shape([image_dim, image_dim, 1])\n\n # Convert from [0, 255] -> [-0.5, 0.5] floats.\n image = tf.cast(image, tf.float32) * (1. / 255)\n\n label = tf.cast(features['label'], tf.int32)\n\n return image, label\n\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(parse_function, num_parallel_calls=4)\n\n return dataset", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def reproduce(self) -> LocalDataset:\n return LocalDataset(self.path)", "def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]", "def get_data(\n data_type,\n train_fraction=1,\n added_edge_fraction=0,\n feature_noise_ratio=0,\n **kwargs):\n def to_mask(idx, size):\n mask = torch.zeros(size).bool()\n mask[idx] = True\n return mask\n path = osp.join(osp.dirname(osp.realpath(\"__file__\")), '..', 'data', data_type)\n # Obtain the mode if given:\n data_type_split = data_type.split(\"-\")\n \n data_type_full = data_type\n data_type = data_type_split[0]\n mode = \"lcc\" if \"lcc\" in data_type_split else None\n boolean = True if \"bool\" in data_type_split else False\n split = \"rand\" if \"rand\" in data_type_split else None\n \n # Load data:\n info = {}\n if data_type in [\"Cora\", \"Pubmed\", \"citeseer\"]:\n dataset = Planetoid(path, data_type, transform=T.NormalizeFeatures())\n data = dataset[0]\n info[\"num_features\"] = dataset.num_features\n info[\"num_classes\"] = dataset.num_classes\n info['loss'] = 'softmax'\n else:\n raise Exception(\"data_type {} is not valid!\".format(data_type))\n\n # Process the dataset according to the mode given:\n if mode is not None:\n if mode == \"lcc\":\n data = get_data_lcc(dataset.data)\n else:\n raise\n\n if boolean:\n data.x = data.x.bool().float()\n \n if split == \"rand\":\n unlabeled_share = 0.8\n val_share = 0.1\n train_share = 1 - unlabeled_share - val_share\n\n split_train, split_val, split_unlabeled = train_val_test_split_tabular(np.arange(data.x.shape[0]),\n train_size=train_share,\n val_size=val_share,\n test_size=unlabeled_share,\n stratify=to_np_array(data.y),\n random_state=kwargs[\"seed\"] if \"seed\" in kwargs else None,\n )\n data.train_mask = to_mask(split_train, data.x.shape[0])\n data.val_mask = to_mask(split_val, data.x.shape[0])\n data.test_mask = to_mask(split_unlabeled, data.x.shape[0])\n\n # Reduce the number of training examples by randomly choosing some of the original training examples:\n if train_fraction != 1:\n try:\n train_mask_file = \"../attack_data/{}/train_mask_tr_{}_seed_{}.p\".format(data_type_full, train_fraction, kwargs[\"seed\"] % 10)\n new_train_mask = pickle.load(open(train_mask_file, \"rb\"))\n data.train_mask = torch.BoolTensor(new_train_mask).to(data.y.device)\n print(\"Load train_mask at {}\".format(train_mask_file))\n except:\n raise\n ids_chosen = []\n n_per_class = int(to_np_array(data.train_mask.sum()) * train_fraction / info[\"num_classes\"])\n train_ids = torch.where(data.train_mask)[0]\n for i in range(info[\"num_classes\"]):\n class_id_train = to_np_array(torch.where(((data.y == i) & data.train_mask))[0])\n ids_chosen = ids_chosen + 
np.random.choice(class_id_train, size=n_per_class, replace=False).tolist()\n new_train_mask = torch.zeros(data.train_mask.shape[0]).bool().to(data.y.device)\n new_train_mask[ids_chosen] = True\n data.train_mask = new_train_mask\n make_dir(\"../attack_data/{}/\".format(data_type_full))\n pickle.dump(to_np_array(new_train_mask), open(\"../attack_data/{}/train_mask_tr_{}_seed_{}.p\".format(data_type_full, train_fraction, kwargs[\"seed\"] % 10), \"wb\"))\n\n # Add random edges for untargeted attacks:\n if added_edge_fraction > 0:\n data = add_random_edge(data, added_edge_fraction=added_edge_fraction)\n elif added_edge_fraction < 0:\n data = remove_edge_random(data, remove_edge_fraction=-added_edge_fraction)\n\n # Perturb features for untargeted attacks:\n if feature_noise_ratio > 0:\n x_max_mean = data.x.max(1)[0].mean()\n data.x = data.x + torch.randn(data.x.shape) * x_max_mean * feature_noise_ratio\n\n # For adversarial attacks:\n data.data_type = data_type\n if \"attacked_nodes\" in kwargs:\n attack_path = osp.join(osp.dirname(osp.realpath(\"__file__\")), '..', 'attack_data', data_type_full) \n if not os.path.exists(attack_path):\n os.makedirs(attack_path)\n try:\n with open(os.path.join(attack_path, \"test-node.pkl\"), 'rb') as f:\n node_ids = pickle.load(f)\n info['node_ids'] = node_ids\n print(\"Load previous attacked node_ids saved in {}.\".format(attack_path))\n except:\n test_ids = to_np_array(torch.where(data.test_mask)[0])\n node_ids = get_list_elements(test_ids, kwargs['attacked_nodes'])\n with open(os.path.join(attack_path, \"test-node.pkl\"), 'wb') as f:\n pickle.dump(node_ids, f)\n info['node_ids'] = node_ids\n print(\"Save attacked node_ids into {}.\".format(attack_path))\n return data, info", "def read_data(self, hdu_index, plane):\n with pyfits.open(self.url) as hdulist:\n hdu = hdulist[hdu_index]\n data = numpy.float64(hdu.data.squeeze())\n if plane is not None and len(data.shape) > 2:\n data = data[plane].squeeze()\n n_dim = len(data.shape)\n if n_dim != 2:\n logger.warning(\"Loaded datacube with %s dimensions, assuming Stokes I and taking plane 0\" % n_dim)\n data = data[0, :, :]\n data = data.transpose()\n return data", "def __getitem__(self, key: Tuple) -> np.array:\n # If the user has requested XYZ mode, the first thing to do is reverse\n # the array indices. Then you can continue this fn without any\n # additional changes.\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Next, we need to get the shape of the dataset. We do this currently\n # by getting the coordinate frame, which means that we need the\n # coordframe data and experiment data if we don't have it already. In\n # the future, we may also want to allow the user to specify general\n # shape information so that we can avoid calling the API.\n\n # Populate the experiment metadata if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Populate the coordinate frame metadata if not yet set:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n # Now we can begin. There is a wide variety of indexing options\n # available, including single-integer indexing, tuple-of-slices\n # indexing, tuple-of-int indexing...\n\n # First we'll address if the user presents a single integer.\n # ```\n # my_array[500]\n # ```\n # In this case, the user is asking for a single Z slice (or single X\n # slice if in XYZ order... 
But that's a far less common use case.)\n # We will get the full XY extents and download a single 2D array:\n if isinstance(key, int):\n # Get the full Z slice:\n xs = (0, self.shape[2])\n ys = (0, self.shape[1])\n zs = (key, key + 1)\n else:\n # We also support indexing with units. For example, you can ask for\n # ```\n # my_array[0:10, 0:10, 0:10, \"nanometers\"]\n # ```\n # which will download as many pixels as are required in order to\n # download 10nm in each dimension. We do this by storing a\n # \"normalized units\" measure which is a rescale factor for each\n # dimension (in the same order, e.g. ZYX, as the array).\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n # We will now do the following codeblock three times, for X,Y,Z:\n # First, we check to see if this index is a single integer. If so,\n # the user is requesting a 2D array with zero depth along this\n # dimension. For example, if the user asks for\n # ```\n # my_data[0:120, 0:120, 150]\n # ```\n # Then \"150\" suggests that the user just wants one single X slice.\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n # If the key is a Slice, then it has .start and .stop attrs.\n # (The user is requesting an array with more than one slice\n # in this dimension.)\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = int(start / _normalize_units[0])\n stop = int(stop / _normalize_units[0])\n\n # Cast the coords to integers (since Boss needs int coords)\n xs = (int(start), int(stop))\n\n # Do the same thing again for the next dimension: Either a single\n # integer, or a slice...\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n # Do the same thing again for the last dimension: Either a single\n # integer, or a slice...\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n # Finally, we can perform the cutout itself, using the x, y, and z\n # coordinates that we computed in the previous step.\n cutout = self.volume_provider.get_cutout(\n self._channel, self.resolution, xs, ys, zs\n )\n\n # Data are returned in ZYX order:\n if self.axis_order == AxisOrder.XYZ:\n data = np.rollaxis(np.rollaxis(cutout, 1), 2)\n elif self.axis_order == AxisOrder.ZYX:\n data = cutout\n\n # If any of the dimensions are of length 1, it's because the user\n # requested a single slice in their key; flatten the array in that\n # dimension. 
For example, if you request `[10, 0:10, 0:10]` then the\n # result should be 2D (no Z component).\n _shape = data.shape\n if _shape[0] == 1:\n data = data[0, :, :]\n if _shape[1] == 1:\n data = data[:, 0, :]\n if _shape[2] == 1:\n data = data[:, :, 0]\n return data", "def _get_dataset(\n self,\n dataset_path: str,\n data_folder: str = \"data/\",\n ):\n if not os.path.isdir(dataset_path):\n click.secho(f\"{dataset_path} not found!\", fg=\"red\")\n\n dataset_hash = (\n int(hashlib.sha256(dataset_path.encode(\"utf-8\")).hexdigest(), 16) % 10 ** 8\n )\n\n # To avoid using cache for different models\n # split(/) for google/electra-base-discriminator\n pretrained_model = (\n self.hparams.pretrained_model.split(\"/\")[1]\n if \"/\" in self.hparams.pretrained_model\n else self.hparams.pretrained_model\n )\n dataset_cache = data_folder + \".dataset_\" + str(dataset_hash) + pretrained_model\n\n if os.path.isfile(dataset_cache):\n click.secho(f\"Loading tokenized dataset from cache: {dataset_cache}.\")\n return torch.load(dataset_cache)\n\n dataset_path += \"\" if dataset_path.endswith(\"/\") else \"/\"\n dataset = {\n \"train\": pd.read_csv(dataset_path + \"train.tsv\", sep=\"\\t\").to_dict(\n \"records\"\n ),\n \"valid\": pd.read_csv(dataset_path + \"valid.tsv\", sep=\"\\t\").to_dict(\n \"records\"\n ),\n \"test\": pd.read_csv(dataset_path + \"test.tsv\", sep=\"\\t\").to_dict(\"records\"),\n }\n # Read Labels\n with open(dataset_path + \"labels.txt\", \"r\") as fp:\n labels = [line.strip() for line in fp.readlines()]\n label_encoder = {labels[i]: i for i in range(len(labels))}\n\n dataset[\"label_encoder\"] = label_encoder\n # Tokenize\n dataset[\"train\"] = self._tokenize(dataset[\"train\"])\n dataset[\"valid\"] = self._tokenize(dataset[\"valid\"])\n dataset[\"test\"] = self._tokenize(dataset[\"test\"])\n torch.save(dataset, dataset_cache)\n return dataset", "def three_dimensional(self, z): # Maybe I misunderstood the task. 
My method looks weird\n return (self.x, self.y, z)", "def get_data(self, path):\n\n if path == self.original_path:\n cache = self._2to3_cache_path(path)\n data = self._load_cached_2to3(path, cache)\n if data is None:\n output, encoding = self._refactor_2to3(path)\n data = bytearray(output, encoding or sys.getdefaultencoding())\n self.set_data(cache, data)\n return data\n\n else:\n return super().get_data(path)", "def generate_dataset(self):\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()", "def get_datasets(self):\n d = {\n \"X_train\": self._X_train,\n \"Y_train\": self._Y_train,\n \"X_test\": self._X_test,\n \"Y_test\": self._Y_test\n }\n \n return d", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def get_data(\n dataset: Union[str, tfds.core.DatasetBuilder],\n split: str,\n rng: Union[None, jnp.ndarray, tf.Tensor],\n host_batch_size: int,\n preprocess_fn: Optional[Callable[[deterministic_data.Features],\n deterministic_data.Features]],\n cache: bool = False,\n num_epochs: Optional[int] = None,\n repeat_after_batching: bool = False,\n shuffle: bool = True,\n shuffle_buffer_size: int = 10_000,\n prefetch_size: int = 4,\n drop_remainder: bool = True,\n data_dir: Optional[str] = None,\n) -> tf.data.Dataset:\n assert cache in (\"loaded\", \"batched\", False, None)\n\n dataset_builder = _get_dataset_builder(dataset, data_dir)\n\n if rng is not None:\n rng = jax.random.fold_in(rng,\n jax.process_index()) # Derive RNG for this host.\n\n if drop_remainder:\n remainder_options = deterministic_data.RemainderOptions.DROP\n else:\n remainder_options = deterministic_data.RemainderOptions.BALANCE_ON_PROCESSES\n host_split = deterministic_data.get_read_instruction_for_host(\n split,\n dataset_info=dataset_builder.info,\n remainder_options=remainder_options)\n\n dataset = deterministic_data.create_dataset(\n dataset_builder,\n split=host_split,\n batch_dims=(),\n rng=rng,\n filter_fn=None,\n preprocess_fn=preprocess_fn,\n decoders={\"image\": tfds.decode.SkipDecoding()},\n cache=cache == \"loaded\",\n num_epochs=num_epochs if not repeat_after_batching else 1,\n shuffle=shuffle,\n shuffle_buffer_size=shuffle_buffer_size,\n prefetch_size=0,\n pad_up_to_batches=None,\n drop_remainder=drop_remainder,\n )\n\n num_devices = jax.local_device_count()\n if drop_remainder:\n batch_dims = [num_devices, host_batch_size // num_devices]\n for batch_size in reversed(batch_dims):\n dataset = dataset.batch(batch_size, drop_remainder=True)\n flat_batch_size = batch_dims[0] * batch_dims[1]\n num_batch_dims = len(batch_dims)\n else:\n batch_size_per_device = math.ceil(host_batch_size / num_devices)\n flat_batch_size = batch_size_per_device * num_devices\n dataset = dataset.batch(flat_batch_size, drop_remainder=False)\n num_batch_dims = 1\n\n def f(xs):\n return _pad_reshape_mask_batch(xs, flat_batch_size, num_devices,\n num_batch_dims)\n\n dataset = 
dataset.map(f, num_parallel_calls=tf.data.AUTOTUNE)\n\n if cache == \"batched\":\n dataset = dataset.cache()\n\n if repeat_after_batching:\n dataset = dataset.repeat(num_epochs)\n\n return dataset.prefetch(prefetch_size)", "def datasets(self):\n return [Dataset.ENSEMBL]", "def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)", "def get_3d_valid(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_valid['3d'][:, to_select, :][:, to_sort, :]", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def get_data(folder: str, dimensions: int):\n preprocess = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(dimensions),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n ]\n )\n return datasets.ImageFolder(folder, transform=preprocess)" ]
[ "0.60187584", "0.59878784", "0.58777374", "0.58265644", "0.58206046", "0.5816656", "0.5816164", "0.57970816", "0.57919204", "0.5772738", "0.57650393", "0.57250017", "0.5724938", "0.5707195", "0.5686548", "0.5647479", "0.5564669", "0.55568475", "0.5537837", "0.5516145", "0.55019325", "0.5496016", "0.5495369", "0.5489609", "0.54872686", "0.5481195", "0.54801494", "0.54741186", "0.54517627", "0.54507184", "0.5441061", "0.54328847", "0.5430035", "0.54248184", "0.5423665", "0.54182965", "0.5407521", "0.5406412", "0.5382554", "0.53769004", "0.5367128", "0.53620183", "0.53573465", "0.53463984", "0.53372866", "0.532077", "0.53055584", "0.53052336", "0.5299371", "0.52868915", "0.5282488", "0.52792704", "0.5261149", "0.52544564", "0.52480114", "0.52452046", "0.5233047", "0.52310497", "0.5230062", "0.5229742", "0.52227783", "0.5209626", "0.5205186", "0.5202576", "0.51946163", "0.51913637", "0.518495", "0.5183643", "0.5183542", "0.5179192", "0.5176684", "0.5173097", "0.51690257", "0.5167992", "0.51642144", "0.5161973", "0.5160224", "0.5157257", "0.5156679", "0.51508975", "0.51416034", "0.51335055", "0.5129831", "0.5129831", "0.5125321", "0.5122798", "0.5122345", "0.5119482", "0.5113968", "0.5108896", "0.51085913", "0.5104449", "0.5103382", "0.51009935", "0.51000965", "0.50980616", "0.5094995", "0.5092748", "0.5090275", "0.5090179" ]
0.6331602
0
Return the range of the requested data as a 3-tuple of values. Positive min is NaN if no data is positive.
def getDataRange(self, mode=None):
    if self._dataRangeCache is None:
        return None
    if mode is None:
        mode = self.getComplexMode()
    if mode not in self._dataRangeCache:
        # Compute it and store it in cache
        data = self.getData(copy=False, mode=mode)
        self._dataRangeCache[mode] = self._computeRangeFromData(data)
    return self._dataRangeCache[mode]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum", "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def data_range(x):\n return max(x)-min(x)", "def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range", "def getDataRange(self):\n return None if self._dataRange is None else tuple(self._dataRange)", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def range(series):\n return min(series), max(series)", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin", "def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This 
gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def get_bounds(self, value = None, index = None):\n\n if self._data is None or 0 in self._data.shape:\n return (0.0, 0.0)\n\n if type(value) == types.IntType:\n if self.value_dimension == 0:\n maxi = nanmax(self._data[value, ::])\n mini = nanmin(self._data[value, ::])\n else:\n # value_dimension == 1\n maxi = nanmax(self._data[::, value])\n mini = nanmin(self._data[::, value])\n elif type(index) == types.IntType:\n if self.index_dimension == 0:\n maxi = nanmax(self._data[index, ::])\n mini = nanmin(self._data[index, ::])\n else:\n # index_dimension == 1\n maxi = nanmax(self._data[::, index])\n mini = nanmin(self._data[::, index])\n else:\n # value is None and index is None:\n maxi = nanmax(self._data)\n mini = nanmin(self._data)\n\n return (mini, maxi)", "def bounds(self):\n\n if self.size == 0:\n lo, hi = np.nan, np.nan\n elif self.is_monotonic:\n lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])\n elif self.dtype is np.datetime64:\n lo, hi = np.min(self.coordinates), np.max(self.coordinates)\n else:\n lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)\n\n return lo, hi", "def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range", "def scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes", "def _get_time_range(self, data):\n time = data.coords[self.time_field]\n if time.size == 0:\n raise ProviderNoDataError()\n else:\n start = _to_datetime_string(data[self.time_field].values.min())\n end = _to_datetime_string(data[self.time_field].values.max())\n return [start, end]", "def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)", "def min_range(self):\n return self._min_range", "def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng", 
"def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges", "def get_index_range_inclusive(self):\n nx, ny, nz = self.get_mesh_size()\n return (1, nx, 1, ny, 1, nz)", "def get_range(df, col):\n return df[col].min(), df[col].max()", "def min_values(self, lower, upper): \n if not self.lower_bounds is None:\n return self.lower_bounds\n\n minus = np.clip(self.coeffs,-math.inf,0)\n plus = np.clip(self.coeffs,0,math.inf)\n self.lower_bounds = plus.dot(lower) + minus.dot(upper) + self.const\n \n return self.lower_bounds", "def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end", "def get_bounds(self):\n x_max = self.data['x'].max()\n y_max = self.data['y'].max()\n z_max = self.data['z'].max()\n print(\"x={}; y={}; z={}\".format(x_max, y_max, z_max))\n return (x_max, y_max, z_max)", "def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)", "def range_to_m(self, data):\n return data * self._total_range + self._min_range_m", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n 
mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum", "def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax", "def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def minimum(self) -> Union[int, float]:\n return self.range[0]", "def get_dyn_range(scale, zero_point, dtype):\n if dtype == torch.quint8:\n min_val, max_val = 0, 255\n elif dtype == torch.qint8:\n min_val, max_val = -128, 127\n else:\n raise RuntimeError(f\"Unsupported quantized dtype {dtype}\")\n\n return (min_val - zero_point) * scale, (max_val - zero_point) * scale", "def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def Min(data):\n return data.min()", "def _rangeQueryFloatFeature(self):\n\n # create args\n minToGet = c_double()\n maxToGet = c_double()\n\n errorCode = VimbaDLL.featureFloatRangeQuery(self._handle,\n self._name,\n byref(minToGet),\n byref(maxToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return (minToGet.value, maxToGet.value)", "def get_bounds():\n return [0.00], [1.00]", "def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)", "def get_xrange(self):\n return self.xvec[0], self.xvec[-1]", "def GetScalarRange(self):\n ...", "def xmin(self):\n return asarray([b[0] for b in self.bounds])", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is 
None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return xmin, xmax", "def range(self):\n return (self._start, self._end)", "def get_range(self):\n return time_to_range(self.get_time())", "def interval(self):\n return (self.start, S.Infinity)", "def possible_vals(pp):\n\n if pp[\"type\"] == \"w\":\n vals = [0, pp[\"pmax\"]]\n\n elif pp[\"type\"] == \"windturbine\":\n vals = [0, pp[\"pmin\"]]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"] + 1):\n vals.append(pp[\"pmin\"] + i)\n\n else: # Turbojet\n vals = [0]\n for i in range(pp[\"pmin\"], pp[\"pmax\"] - pp[\"pmin\"]):\n vals.append(pp[\"pmin\"] + i)\n return vals", "def _rangeQueryIntFeature(self):\n\n # create args\n minToGet = c_int64()\n maxToGet = c_int64()\n\n errorCode = VimbaDLL.featureIntRangeQuery(self._handle,\n self._name,\n byref(minToGet),\n byref(maxToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return (int(str(minToGet.value)), int(str(maxToGet.value)))", "def mins(self) -> Tensor:\n return self._ranges[:, 0]", "def test_inclusive_intervals(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5.5)\n assert dim.interval() == (-3, 3)", "def get_range(self, field, deep=False, axis=None):\n variables = list(self.vars(deep, with_name=field))\n\n if not variables:\n raise KeyError(\"No variable named '%s' was found!\" % field)\n\n start = [np.nanmin(self[var], axis).item(0) for var in variables]\n end = [np.nanmax(self[var], axis).item(0) for var in variables]\n return min(start), max(end)", "def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range", "def _get_shear_vals(lower_bound: float,\n upper_bound: float,\n step: float) -> Tuple[float]:\n return tuple(np.arange(lower_bound, upper_bound + step, step))", "def range(self):\n return self.range_array", "def bounds(self, start=None, finish=None):\n lower = start if start is not None else self.limits[0]\n upper = finish if finish is not None else self.limits[1]\n\n lower = lower + self.offsets[0]\n upper = upper + self.offsets[1]\n\n return (lower, upper)", "def regression_range(self):\n regression_range = detect_regression_range.DetectRegressionRange(\n self.historical_metadata)\n if regression_range is None: # pragma: no cover\n logging.warning('Got ``None`` for the regression range.')\n else:\n regression_range = tuple(regression_range)\n\n return regression_range", "def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def range(self):\n lower, upper = sorted((self.y1, self.y2))\n return FloatRange(lower=lower, upper=upper)", "def minmax(xs):\n min_val = 
None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)", "def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def compute_closest_coordinate(value, range_min, range_max):\r\n \r\n v = None\r\n \r\n if range_min < value < range_max:\r\n v = value\r\n \r\n elif value <= range_min:\r\n v = range_min\r\n \r\n elif value >= range_max:\r\n v = range_max\r\n \r\n return v", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)", "def min_max(self, data, era):\n return 0, np.max(data)", "def values(self):\n lower = float(self.lowerSpnbx.value())\n upper = float(self.upperSpnbx.value())\n return lower, upper", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg", "def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] 
* len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx", "def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def getValidRatingInputs(self):\n min = self.minRatingInput.get()\n max = self.maxRatingInput.get()\n\n try:\n min = int(min)\n except ValueError:\n min = 0\n\n try:\n max = int(max)\n except ValueError:\n max = 100\n\n return min, max", "def get_refrange(self):\n if np.all(np.isnan(self.par)):\n print( 'Run params() before')\n return\n if hasattr(self,'refranges'):\n return self.refranges\n ice_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[1,r,10,0])]\n liq_r = [r for r in xrange(len(self.ref)) if ~ np.isnan(self.par[0,r,10,0])]\n return (liq_r,ice_r)", "def _read_range(range: str) -> Tuple[str, List[Tuple[Union[int, None], Union[int, None]]]]:\n format, split_on_pairs = range.split('=', 1)\n split_on_pairs = split_on_pairs.split(',')\n pairs = []\n for pair_str in split_on_pairs:\n split_on_range = pair_str.split('-', 1)\n start = int(split_on_range[0]) if len(split_on_range[0]) > 0 else None\n stop = int(split_on_range[1]) if len(split_on_range[1]) > 0 else None\n pairs.append((start, stop))\n return format, pairs", "def get_data_range(self, start_position, length):\n pass", "def getDataRange(self):\n return self._dataRange", "def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def data_range(self, n=-1):\n if self.rotate and len(self.results['velocities']) > 1:\n # Then we can use the last two velocities and time to extrapolate the line.\n prevVel = self.results['velocities'][-2:]\n prevTime = self.results['times'][-2:]\n\n coefficents = imageRot.computeFit(prevTime, prevVel)\n nextCenter = imageRot.extrapolate(coefficents, self.time[self.time_index])\n\n velocity_index = self.spectrogram._velocity_to_index(nextCenter)\n start_index = max(0, velocity_index - 2*self.span)\n end_index = min(velocity_index + 2*self.span,\n len(self.spectrogram.velocity))\n \n return start_index, end_index, coefficents\n # Compute the angle.\n if len(self.results['velocities']) > 0:\n last_v = self.results['velocities'][n]\n else:\n last_v = self.v_start\n velocity_index = self.spectrogram._velocity_to_index(last_v)\n start_index = max(0, velocity_index - self.span)\n end_index = min(velocity_index + self.span,\n len(self.spectrogram.velocity))\n return (start_index, end_index)", "def get_min(fun):\n\tglobal 
__dataset\n\n\tmin_val = sys.maxint\n\tmin_index = 0\n\tfor i, vec in enumerate(__dataset):\n\t\tret = fun(vec)\n\t\tif ret < min_val:\n\t\t\tmin_val = ret\n\t\t\tmin_index = i\n\treturn min_index, min_val", "def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)" ]
[ "0.8155307", "0.7806243", "0.73593587", "0.7330732", "0.7230256", "0.72106385", "0.7186712", "0.7147313", "0.7062318", "0.69120955", "0.6874664", "0.6780946", "0.67772317", "0.6763518", "0.6762277", "0.6691655", "0.6686971", "0.66690457", "0.659863", "0.65986276", "0.6585736", "0.6567719", "0.64628863", "0.6440813", "0.64302355", "0.64066815", "0.6391271", "0.6387084", "0.63792217", "0.6349265", "0.6308885", "0.6302284", "0.6290853", "0.6263157", "0.6242405", "0.623424", "0.62313974", "0.622926", "0.6224458", "0.62115425", "0.61739254", "0.6171147", "0.61666733", "0.61529577", "0.6143827", "0.61308795", "0.6112873", "0.6112447", "0.6107202", "0.6106386", "0.6105429", "0.60998416", "0.60925376", "0.6089217", "0.60869867", "0.60849357", "0.60684144", "0.6062076", "0.6054637", "0.6053692", "0.60379833", "0.6019752", "0.60154706", "0.60087943", "0.60053384", "0.6004215", "0.60031897", "0.599845", "0.5993013", "0.59906983", "0.59877676", "0.59857905", "0.59849274", "0.59780544", "0.5976398", "0.597138", "0.59695", "0.5955734", "0.5945443", "0.59441984", "0.59363055", "0.5935864", "0.59253913", "0.59204394", "0.591622", "0.5914675", "0.5910279", "0.58882815", "0.5886813", "0.5874519", "0.58711755", "0.5870848", "0.5870261", "0.58641547", "0.58479553", "0.5824974", "0.5818481", "0.5816743", "0.5808908", "0.5798831", "0.57912266" ]
0.0
-1
Forward implementation, going through the complete flow $f_{\phi}$.
def forward(self, x: Tensor, covariates: Tensor) -> Tuple[Tensor, Tensor, Tensor]: return self.real_nvp(x, covariates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step_forward(self):", "def forward(self, x):\n pass", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def backtrack(f, x0, J, g, alpha, rho, mu, p):\r\n phi_prime = np.matmul(np.transpose(g), p)\r\n phi0 = J\r\n (phi_alpha, g_alpha) = f(x0+alpha*p)\r\n while phi_alpha > phi0+mu*alpha*phi_prime:\r\n alpha = rho*alpha\r\n (phi_alpha, g_alpha) = f(x0+alpha*p)\r\n x = x0+alpha*p\r\n return x, phi_alpha, g_alpha, alpha", "def forward(self, x):\n raise NotImplementedError", "def forward(self, x):\n raise NotImplementedError", "def transf(self,f):\r\n raise NotImplementedError", "def forward_graph(self):\n raise NotImplementedError", "def forward(self,\n *args,\n inputs: torch.nn.Module,\n step_fn: torch.nn.Module, ) -> torch.Tensor:\n timestep = 0\n hidden = inputs\n # halting_prob_cumulation: (batch, seq_len)\n halting_prob_cumulation = hidden.new_zeros(hidden.size()[:-1]).float()\n\n while timestep < self._max_computing_time and \"TODO: exit if all place exhausted\":\n # current all alive tokens, which need further computation\n # alive_mask: (batch, seq_len)\n alive_mask: torch.Tensor = halting_prob_cumulation < 1.\n alive_mask = alive_mask.float()\n\n # halting_prob: (batch, seq_len) <- (batch, seq_len, 1)\n halting_prob = self._halting_fn(hidden).squeeze(-1)\n\n # temp_cumulation: (batch, seq_len)\n temp_cumulation = halting_prob * alive_mask + halting_prob_cumulation\n\n # mask to the newly halted tokens, which is exhausted at the current timestep of computation\n # new_halted: (batch, seq_len)\n new_halted = (temp_cumulation > self._threshold).float()\n remainder = 1. 
- halting_prob_cumulation + 1.e-10\n\n # all tokens that survives from the current timestep's computation\n # alive_mask: (batch, seq_len)\n alive_mask = (1 - new_halted) * alive_mask\n\n halting_prob_cumulation += halting_prob * alive_mask\n # cumulations for newly halted positions will reach 1.0 after adding up remainder at the current timestep\n halting_prob_cumulation += remainder * new_halted\n\n step_out = step_fn(hidden, *args, timestep)\n timestep += 1\n state_update_weight = alive_mask.unsqueeze(-1)\n hidden = state_update_weight * step_out + (1 - state_update_weight) * hidden\n\n return hidden", "def forward(self, state):#forward pass\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def forward(self, input):\n raise NotImplementedError()", "def forward(self, state):\n x = self.fc(state)\n return x", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self)->None:", "def forward(self, Q_, p_, G_, h_, A_, b_, F_):\n # TODO Write detailed documentation.\n\n nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)\n Q, _ = expandParam(Q_, nBatch, 3)\n p, _ = expandParam(p_, nBatch, 2)\n G, _ = expandParam(G_, nBatch, 3)\n h, _ = expandParam(h_, nBatch, 2)\n A, _ = expandParam(A_, nBatch, 3)\n b, _ = expandParam(b_, nBatch, 2)\n F, _ = expandParam(F_, nBatch, 3)\n\n _, nineq, nz = G.size()\n neq = A.size(1) if A.ndimension() > 0 else 0\n assert(neq > 0 or nineq > 0)\n self.neq, self.nineq, self.nz = neq, nineq, nz\n\n if self.solver == LCPSolvers.PDIPM_BATCHED:\n self.Q_LU, self.S_LU, self.R = pdipm_b.pre_factor_kkt(Q, G, F, A)\n zhats, self.nus, self.lams, self.slacks = pdipm_b.forward(\n Q, p, G, h, A, b, F, self.Q_LU, self.S_LU, self.R,\n self.eps, self.verbose, self.notImprovedLim,\n self.maxIter, solver=pdipm_b.KKTSolvers.LU_PARTIAL)\n else:\n assert False\n\n # self.verify_lcp(zhats, Q, G, A, F, p, h)\n self.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_, F_)\n return zhats", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, x):\n theta = self.L(x[:, :6])\n theta = torch.cat((theta, self.U(torch.cat((x[:, :9], theta), 1))), 1)\n theta = torch.cat((theta, self.R(torch.cat((x[:, 3:12], theta), 1))), 1)\n theta = torch.cat((theta, self.B(torch.cat((x[:, 6:15], theta), 1))), 1)\n # Prepends T to the angle vector theta to maintain order S,L,U,R,B\n theta = torch.cat((self.S(torch.cat((x[:, 12:], x[:, :4], theta[:, :3]), 1)), theta), 1)\n return self.FC(theta)", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def forward(self, *args):\n raise NotImplementedError", "def forward(self, *args):\n raise NotImplementedError", "def base_forward(self, x):\r\n pass", "def forward(self, *inputs):\n raise NotImplementedError", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forward(cls, linear_out):\n raise Exception(\"Unimplemented\")", "def forward(self, x, **kwargs):\n pass", "def forward(self, state):\n x = f.relu(self.fc1(state))\n x = f.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def move_forward():\n pass", "def forward(self, state):\n\n _, _, theta, dtheta = (\n state[:, 0], state[:, 1], state[:, 2], state[:, 3])\n\n # predict a change in 
force\n # we only use relevant information to simplify the problem\n controller_input = torch.stack([\n torch.cos(theta),\n torch.sin(theta),\n dtheta\n ]).T.to(device)\n force = self.f(controller_input)[:, 0]\n\n # observe change in system\n du = self.cartpole(state, force)\n\n return du", "def forward(self, x_in):\r\n # x_out = torch.zeros_like(x_in)\r\n\r\n for layer in self.layers: #Call forward function of each layer in order\r\n x_out = layer.forward(x_in)\r\n # print(\"Forward pass Seq: \", layer, x_in, x_out)\r\n x_in = x_out # output of the layer is passed as input to the next layer\r\n self.temp = x_in\r\n return x_out", "def forward(self, states):\n raise NotImplementedError()", "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self):\n self.iteration_number += 1\n x = self.x\n self.x = self.alpha * self.x + self.betta\n t = x - self.x\n\n return (t * t).sum()", "def forward(self, state):\n x = self.nonlin(self.fc1(self.in_fn(state)))\n x = self.drop_layer(x)\n x = self.nonlin(self.fc2(x))\n x = self.drop_layer(x)\n return self.fc3(x)", "def forward(self, a: torch.FloatTensor, b: torch.FloatTensor) -> torch.FloatTensor:", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return F.tanh(self.fc3(x))", "def forward_tensor(self, x):\n pass", "def forward(self, s):", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)", "def forward(self, state):\n\t\tx = F.relu(self.fc1(state))\n\t\tx = F.relu(self.fc2(x))\n\t\tx = F.tanh(self.fc3(x)) # outputs are in the range [-1, 1]\n\n\t\treturn x", "def test_forward_backward(self):\n f = forward(self.obs, self.S, self.A, self.E)\n b = backward(self.obs, self.S, self.A, self.E)\n fp = logsumexp(f[:, -1])\n emission = precompute_emission(np.log(self.E))[tuple(self.obs[0])]\n bp = logsumexp(np.log(self.S) + emission + b[:, 0])\n assert_allclose(fp, bp)", "def forward(self, output, target):\n raise NotImplementedError", "def flowingFrom(self, fount):", "def forward(self, state):\n \n x = F.relu(self.fc1(state)) \n x = F.relu(self.fc2(x)) \n x = F.tanh(self.fc3(x)) \n \n \n ####x = F.relu(self.bn1(self.fc1(state)))\n ####x = F.relu(self.bn2(self.fc2(x)))\n ####x = torch.tanh(self.bn3(self.fc3(x)))\n ##x = torch.tanh(self.fc3(x))\n\n return x", "def forward(self, *args, **kwargs):\n raise NotImplementedError", "def forward(alphaIn, phi_x, y):\n alphaPhi_X = robot.Distribution()\n for x, alphaX in alphaIn.items():\n yProb = phi_x[x]\n tmpProd = yProb * alphaX\n if tmpProd > 0:\n alphaPhi_X[x] = tmpProd\n\n # compute alpha out\n alphaOut = robot.Distribution()\n for x, alphaPhi in alphaPhi_X.items():\n x2Poss = transition_model(x)\n # multiply and add x2Poss to o/p\n for x2Key, x2pVal in x2Poss.items():\n alphaOut[x2Key] += x2pVal*alphaPhi\n #print(alphaOut)\n return alphaOut", "def _forward_kinematics_step(self, t_step):\n (s_1, s_12, s_123) = self._sines\n (c_1, c_12, c_123) = self._cosines\n self._x_1[t_step] = self._jnt_lengths[0] * s_1\n self._y_1[t_step] = self._jnt_lengths[0] * c_1\n self._x_2[t_step] = self._x_1[t_step] + self._jnt_lengths[1] * s_12\n self._y_2[t_step] = self._y_1[t_step] + self._jnt_lengths[1] * c_12\n self._x_e[t_step] = self._x_2[t_step] + self._jnt_lengths[2] * s_123\n self._y_e[t_step] = self._y_2[t_step] + self._jnt_lengths[2] * c_123", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) 
+ x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out", "def forward(self, inputs, **kwargs):\n x = self.ff_module1(inputs)\n x = self.mha_module(x, **kwargs)\n x = self.conv_module(x)\n x = self.ff_module2(x)\n\n return x", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def forward(self, state, action):\n q_in = torch.cat([state, action], 1)\n return self.ffn(q_in).view(-1)", "def forward(self):\n self.position += 1", "def forward(self, input, context, state):\n raise NotImplementedError", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def forward(self, query_ids, context_ids, graph, fb_passes):\n\n # forward through encoder\n q_emb = self.encoder(context_ids, query_ids)\n c_emb = self.encoder(query_ids, context_ids)\n\n # forward through fb\n Ct = self.fusionblock(c_emb, q_emb, graph, passes=fb_passes)\n\n # forward through predictor: sup, start, end, type\n outputs = self.predictor(Ct) # ( (M), (M), (M), (1, 3) )\n\n return outputs", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward_kinematics(self):\n temp_T = Matrix.eye(3)\n for i in range(len(self.lengths)):\n angle_mat = self.T_a.subs(self.q,self.angles[i]).evalf()\n len_mat = self.T_x.subs(self.l,self.lengths[i]).evalf()\n temp_T = temp_T * angle_mat * len_mat\n \n self.final_T = np.array(temp_T,dtype=float)\n \n return self.final_T", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if 
self.num_1d:\n return cur, output1d\n else:\n return cur", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n\n return F.tanh(self.fc5(x))", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def forward(self, transition):\n def length(word):\n return len(tuple(letter for letter in word if letter is not None))\n\n if self.is_multitape:\n increments = tuple(length(word) for word in\n zip(*transition.word_in))\n else:\n increments = (length(transition.word_in),)\n\n for track_number, (track_cache, inc) in \\\n enumerate(zip(self.cache, increments)):\n for _ in range(inc):\n if not track_cache:\n if not self.read(track_number)[0]:\n raise ValueError('forwarding tape is not possible')\n track_cache.popleft()\n position = [(p + increments[t], t)\n for p, t in self.position]\n self.position = tuple(sorted(position))", "def forward(self, x: torch.Tensor, dim: int = 0, p: int = 1):\n raise NotImplementedError", "def forward(self, shape, *args):\n #TODO\n return None", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def _forward(self, X, **kwargs):\n raise NotImplementedError()", "def forward(self, obs):\n raise NotImplementedError", "def forward_phi(self):\n for f in self.component_fields:\n f.require_layout(self._layout1)", "def _step(self, t, y, h):\n # We must use solvers / implicit form\n f_pn1 = lambda a_n1: (y + h*self.v + (h**2 / 2.0) * \\\n ((1.0 - 2.*self.beta)*self.a + 2.*self.beta*a_n1))\n f_vn1 = lambda a_n1: (self.v + h*((1.0-self.gamma)*self.a + self.gamma*a_n1))\n def f_an1(a_n1):\n f_n1 = self.f(t+h,f_pn1(a_n1),f_vn1(a_n1))\n f_n = self.f(t,y,self.v,)\n return a_n1 - ((1.0+self.alpha)*f_n1 - self.alpha*f_n)\n\n a = self.solver(f_an1, self.a)\n y = f_pn1(a) # Calculate and store new variables. 
\n self.v = f_vn1(a)\n self.a = a\n return t+h, y", "def forward(self, inp):\n return inp.dot(self.W) + self.b", "def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan\n\n # Convert vector-tensor form into matrix-tensor form\n x_t = tf.reshape(x_t, shape=[1, -1])\n h_tm1 = tf.reshape(h_tm1, shape=[1, -1])\n\n # Definitions of z_t and r_t\n z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)\n r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)\n\n # Definition of h~_t\n h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)\n\n # Compute the next hidden state\n h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)\n\n return tf.squeeze(h_t)", "def forward(self, labels: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError", "def ForwardEuler(f, U0, T, n):\n t = np.zeros(n+1)\n u = np.zeros(n+1) # u[k] is the solution at time t[k]\n u[0] = U0\n dt = T/float(n)\n for k in range(n):\n t[k+1] = t[k] + dt\n u[k+1] = u[k] + dt*f(u[k], t[k])\n return u, t", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n return x", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def _make_phi(self, F):\n scaled_bins_left = tf.concat(\n [self.bin_edges / self.sigma,\n np.array([np.inf])], 0)\n scaled_bins_right = tf.concat(\n [np.array([-np.inf]), self.bin_edges / self.sigma], 0)\n return inv_probit(scaled_bins_left - tf.reshape(F, (-1, 1)) / self.sigma) \\\n - inv_probit(scaled_bins_right - tf.reshape(F, (-1, 1)) / self.sigma)", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def calculate_flow(self):\r\n self._func()", "def forward(self, state):\n x = self.fc1(state)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def forward(self, x):\n x = 
F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def f(self):\n return self.g() + self.h()", "def forward_pass(self):", "def forward(self, state, action):\n xs = f.relu(self.fcs1(state))\n x = torch.cat((xs, action), dim=1)\n x = f.relu(self.fc2(x))\n return self.fc3(x)", "def forward(self):\n R = self.LP.cost.R\n A = self.LP.dyn.A\n B = self.LP.dyn.B\n\n x = self.LP.x0\n self.x[0] = x\n for i in range(self.LP.N):\n u = - np.linalg.inv(R+B.T.dot(self.V[i+1]).dot(B)).dot(.5*B.T.dot(self.W[i+1]) \\\n + B.T.dot(self.V[i+1]).dot(A).dot(x))\n if self.LP.dyn.u_dim == 1:\n self.u[i] = float(u)\n else:\n self.u[i] = u\n self.J_star[i] = float(x.T.dot(self.V[i]).dot(x) + self.W[i].T.dot(x)) #up to constant\n\n if i == 0:\n self.J[i] = self.LP.cost.loss(x, u, i)\n else:\n self.J[i] = self.J[i-1] + self.LP.cost.loss(x, u, i)\n x = self.LP.dyn.next_state(x, u)\n self.x[i+1] = x\n\n self.J[self.LP.N] = self.J[self.LP.N-1] + self.LP.cost.loss(x, 0, self.LP.N)\n\n self.J_star[self.LP.N] = float(x.T.dot(self.V[self.LP.N]).dot(x) \\\n + self.W[self.LP.N].T.dot(x)) #up to constant", "def _walk_forward(self, step_fn, x, **kwargs):\n for bij in reversed(self._bijectors):\n x = step_fn(bij, x, **kwargs.get(bij.name, {}))\n return x # Now `y`", "def ForwardEuler(f, U0, T, n):\n import numpy as np\n t = np.zeros(n+1)\n u = np.zeros(n+1) # u[k] is the solution at time t[k]\n u[0] = U0\n t[0] = 0\n dt = T/float(n)\n for k in range(n):\n t[k+1] = t[k] + dt\n u[k+1] = u[k] + dt*f(u[k], t[k])\n return u, t", "def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n 
Fout._IsGauss=False\n return Fout", "def _form_fr(self, fl):\n\n if not iterable(fl):\n raise TypeError('Force pairs must be supplied in an iterable.')\n\n N = self._inertial\n # pull out relevant velocities for constructing partial velocities\n vel_list, f_list = _f_list_parser(fl, N)\n vel_list = [msubs(i, self._qdot_u_map) for i in vel_list]\n\n # Fill Fr with dot product of partial velocities and forces\n o = len(self.u)\n b = len(f_list)\n FR = zeros(o, 1)\n partials = partial_velocity(vel_list, self.u, N)\n for i in range(o):\n FR[i] = sum(partials[j][i] & f_list[j] for j in range(b))\n\n # In case there are dependent speeds\n if self._udep:\n p = o - len(self._udep)\n FRtilde = FR[:p, 0]\n FRold = FR[p:o, 0]\n FRtilde += self._Ars.T * FRold\n FR = FRtilde\n\n self._forcelist = fl\n self._fr = FR\n return FR", "def applyForce(self, F, dT):", "def forward(self, target, ref, gain=1):\n flow_c = self.coarse_flow(torch.cat((ref, target), 1))\n wc = self.warp_c(ref, flow_c[:, 0], flow_c[:, 1])\n flow_f = self.fine_flow(torch.cat((ref, target, flow_c, wc), 1)) + flow_c\n flow_f *= gain\n return flow_f" ]
[ "0.6307234", "0.63024616", "0.6222227", "0.6175904", "0.6175904", "0.6155502", "0.61270106", "0.61270106", "0.6118309", "0.60926515", "0.60638916", "0.60609066", "0.605443", "0.6050017", "0.6004105", "0.59371006", "0.5925121", "0.5920974", "0.5920974", "0.5920974", "0.5911123", "0.5909327", "0.5899061", "0.5857724", "0.5857724", "0.58235645", "0.5817435", "0.58156234", "0.58121234", "0.58044654", "0.57983655", "0.5786375", "0.57844603", "0.5774386", "0.57731307", "0.5772648", "0.5772648", "0.575778", "0.57467705", "0.5735985", "0.5733345", "0.5728749", "0.5727127", "0.5719577", "0.57187885", "0.57033443", "0.56942904", "0.56740725", "0.56722474", "0.56635606", "0.56511635", "0.56412876", "0.56349164", "0.56187445", "0.56115925", "0.56115925", "0.56109375", "0.5602323", "0.5598328", "0.5595095", "0.5594977", "0.5573503", "0.55728805", "0.55678207", "0.5566422", "0.556274", "0.556274", "0.556274", "0.55617946", "0.55603206", "0.5552285", "0.55459833", "0.55459535", "0.55434084", "0.5540597", "0.55376804", "0.5537387", "0.5536606", "0.5532536", "0.5532408", "0.5531156", "0.55261403", "0.55197585", "0.55194676", "0.55194676", "0.55162525", "0.5511672", "0.5510333", "0.5509235", "0.55090606", "0.55082256", "0.5505364", "0.5489172", "0.54891586", "0.5487488", "0.5474211", "0.54645514", "0.54641545", "0.5463079", "0.54564476", "0.54546726" ]
0.0
-1
Go through the flow in reverse direction, i.e. $f_{\phi}^{-1}$.
def inverse(self, u: Tensor, covariates: Tensor) -> Tensor: return self.real_nvp.inverse(u, covariates)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def back(self, step):\r\n self.forward(-step)", "def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def right_backward(self):\n self.right_motor.run_forever(speed_sp=-self.MAX_SPEED)", "def backward(self, y):\n pass", "def backward(self):\n raise NotImplementedError", "def backward_step():\n #print 'a step backward'\n maze.turn_left()\n maze.turn_left()\n if maze.found():\n return maze.found()\n maze.go()\n maze.turn_left()\n maze.turn_left()", "def drive_backward(self):\n\n print(f\"{self.make.title()} driving backward.\")", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def reverse(self):\n self.left_motor.reverse()\n self.right_motor.reverse()", "def move_backward():\n pass", "def reverse(self): # real signature unknown; restored from __doc__\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\r\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def forward_backward(self, x):\n raise NotImplementedError()", "def backward_pass(self, grad):\n pass", "def backward(self, speed):\n self.controller.reverse(speed)", "def reversed(self):\n ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}\n a, b = self.args\n return Relational.__new__(ops.get(self.func, self.func), b, a)", "def back(self, steps=1):\n raise NotImplementedError", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def backward(self, amount):\n newX = self._x - round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y + round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)", "def backward(ctx, dy):\n y = ctx.y\n if ctx.eagerly_discard_variables:\n del ctx.y\n for i in range(len(ctx.reversible_blocks) - 1, -1, -1):\n y, dy = ctx.reversible_blocks[i].backward_pass(y, dy, not ctx.eagerly_discard_variables)\n if ctx.eagerly_discard_variables:\n del ctx.reversible_blocks\n return dy, None, None", "def backward(self, z):\n return self.forward(z) * (1 - self.forward(z))", "def __reversed__(self): \n yield from self._traverse_backward(self.root)", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def left_backward(self):\n self.left_motor.run_forever(speed_sp=-self.MAX_SPEED)", "def backward(self,distance):\n assert (type(distance) in [int, float]), \"parameter distance:%s is not a valid number\" % `distance`\n self._turtle.backward(distance)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def right_backward(self, state, speed):\n if state:\n 
self.right_motor.run_forever(speed_sp=-speed)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.RED)\n else:\n self.right_motor.stop()\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.BLACK)", "def __reversed__(self):\n return reverse(self)", "def backward(self, *output_grads):\n raise NotImplementedError", "def backward_character():\r\n set_point(point().offset(-1))", "def backward_tensor(self, x):\n pass", "def backwards(self):\n pass", "def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return", "def go_backward(self):\n command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def backward(self, left_speed, right_speed):\n self.left_motor.run_forever(speed_sp=-left_speed)\n self.right_motor.run_forever(speed_sp=-right_speed)", "def step_forward(self):", "def flip(self, p):\n return -p", "def reverse_turn(world_state, ros_util):\n\n while world_state.warning_flag == 3:\n ros_util.publish_actions(\"reverse\", 0, 0, 0, 0)\n ros_util.rate.sleep()\n\n new_heading = (world_state.heading + 60) % 360\n\n while (new_heading - 1) < world_state.heading < (new_heading + 1):\n ros_util.publish_actions(\"left\", 0, 0, 0, 0)", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)", "def opposite(direction):\n return (direction+2)%4", "def backward(self):\n self.units = self._units_history.pop()\n self._backward()\n # We must set the utop to previous state immediately, because the utop could be other gate's input unit\n # And other gate's backward could be called before this gate's backward\n self._utop_history.pop()\n if self._utop_history:\n self.utop = self._utop_history[-1]", "def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp", "def backward(self, grad, index):\n pass", "def reverse_path(self, crossings=[]):\r\n v = self\r\n while True:\r\n e = v.in_arrow\r\n v.reverse()\r\n if not e:\r\n break\r\n e.reverse(crossings)\r\n v = e.end\r\n if v == self:\r\n return\r\n self.reverse()\r\n v = self\r\n while True:\r\n e = v.out_arrow\r\n v.reverse()\r\n if not e:\r\n break\r\n e.reverse(crossings)\r\n v = e.start\r\n if v == self:\r\n return", "def right_forward(self):\n self.right_motor.run_forever(speed_sp=self.MAX_SPEED)", "def move_reverse(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.clockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.anticlockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.clockwise_rotate(speed + 
LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.anticlockwise_rotate(speed)", "def get_direction_backwards(self, direction):\r\n return direction_backwards[direction]", "def reverse(seq):\n return seq[::-1]", "def reverse(seq):\n return seq[::-1]", "def reverse(self):\n self._sequence.reverse()", "def back(cargo):\n # Go backwards\n line_follower.turn()\n\n # return\n new_state = \"follow\"\n txt = \"follow line..\"\n\n return (new_state, txt)", "def back(self):\n self.position -= 1", "def test_reverse(self):\n t = Linearize()\n assert t.reverse(1) == numpy.e", "def turn_right(self):\n pass", "def print_backward(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n tail.print_backward() # recursively print remainder of the list backwards\r\n print(head, end=\" \") # print my head\r", "def reverse_args(self, /, *args, **kwargs):\n return self._func(*args[::-1], **kwargs)", "def reverse(self):\n return self[::-1]", "def __reversed__(self): # real signature unknown; restored from __doc__\n pass", "def reverse_pose(pose):\n\n\tassert(issubclass(pose.__class__, Pose))\n\n\treversed_pose_twist = get_zero_twist()\n\n\t# Code adapted from turtlesim > mimic.cpp\n\treversed_pose_twist.angular.z = -pose.angular_velocity\n\treversed_pose_twist.linear.x = -pose.linear_velocity\n\n\treturn reversed_pose_twist", "def print_backward(self):\n head = self\n tail = self.__next # go to my next node\n if tail is not None: # as long as the end of the list has not been reached\n tail.print_backward() # recursively print remainder of the list backwards\n print(head, end=\" \") # print my head", "def backward_transform(self):\n try:\n backward = self.forward_transform.inverse\n except NotImplementedError as err:\n raise NotImplementedError(\"Could not construct backward transform. 
\\n{0}\".format(err))\n return backward", "def print_backward(self):\r\n print(\"[\", end=\" \")\r\n if self.first() is not None:\r\n self.first().print_backward()\r\n print(\"]\")", "def flip(self):", "def backward_theta(self):\n SW = self.simplesphere.sphere_wrapper\n self.set_layout(self._layout1)\n for dm, m in enumerate(self.simplesphere.local_m):\n m_data = SW.backward(m, self.rank, self.coeffs[dm])\n if self.rank == 0:\n m_data = [m_data]\n for i, f in enumerate(self.component_fields):\n f.data[dm] = m_data[i]", "def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)", "def move_backward(self, distance):\r\n return self.move('back', distance)", "def backward(self,input,grads):\n\t\traise RuntimeError(\"All subclasses of Module must implement a forward method\")", "def compute_rev(p1, p2):\n p1 = list(reversed(p1))\n p2 = list(reversed(p2))\n return(compute_fwd(p1, p2))", "def reverse(self,v):\n return np.tensordot(self._inverseTransform,\n v-self._translation,axes=([1],[0]))", "def move_down(self):\n self.pitch_motor.step_forward()", "def reverse_this(seq):\n r_seq = seq[::-1]\n return r_seq", "def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)", "def reverse(self, *args, **kwargs):\n return reverse(*args, **kwargs)", "def backward(self) -> np.ndarray:\n # TODO\n return None", "def backward(ctx: Any, grad_output: Any) -> Any:\n return grad_output, None", "def move_forward():\n pass", "def rightFootGroundPositionAtTorsoStep(self, n: int) -> Transformation:\n\n torsostep = self.getTorsoStepPose(n)\n\n bodypos = torsostep.position\n transformToLeftFoot = Transformation([0, -self.foot_separation, -bodypos[2] + self.foot_center_to_floor])\n return torsostep @ transformToLeftFoot", "def backward(self, gradient):\n #TODO\n pass", "def backward(self, gradient):\n #TODO\n pass", "def backward(self):\n gradient = blah\n return gradient" ]
[ "0.6866659", "0.6588461", "0.65301454", "0.6521583", "0.65055853", "0.6387518", "0.6372426", "0.62705827", "0.62705827", "0.62705827", "0.62369365", "0.6212663", "0.6203503", "0.6184255", "0.6184255", "0.6184255", "0.61603993", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.61086434", "0.60820335", "0.60678357", "0.6067287", "0.60670173", "0.6041928", "0.60159856", "0.6013", "0.6006086", "0.59897363", "0.5978176", "0.5977512", "0.595232", "0.594854", "0.5934745", "0.5925943", "0.5920515", "0.5917706", "0.59100795", "0.59094703", "0.590361", "0.5899979", "0.5881525", "0.5881442", "0.5862376", "0.5860563", "0.58602893", "0.58480114", "0.58441955", "0.5841762", "0.58325505", "0.5813719", "0.58120817", "0.581192", "0.5766069", "0.57529825", "0.5730944", "0.57118505", "0.5707406", "0.5705717", "0.5705717", "0.5692436", "0.56920916", "0.56663096", "0.5652801", "0.56511104", "0.56497276", "0.5640919", "0.56387633", "0.563269", "0.5631775", "0.56303006", "0.5628558", "0.5600319", "0.5592568", "0.5592459", "0.55875015", "0.55873764", "0.55867493", "0.55806637", "0.55700445", "0.5569128", "0.5564166", "0.55542445", "0.5539003", "0.5534787", "0.55347073", "0.55339724", "0.5532832", "0.55250007", "0.55250007", "0.5520361" ]
0.0
-1
Population prior, i.e. $Categorical(\pi)$.
def prior_z(self) -> distributions.Distribution: return distributions.Categorical(self.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prior_name(self):\n dim = Dimension(\"yolo\", \"reciprocal\", 1e-10, 1)\n assert dim.prior_name == \"reciprocal\"\n\n dim = Dimension(\"yolo\", \"norm\", 0.9)\n assert dim.prior_name == \"norm\"\n\n dim = Real(\"yolo\", \"uniform\", 1, 2)\n assert dim.prior_name == \"uniform\"\n\n dim = Integer(\"yolo1\", \"uniform\", -3, 6)\n assert dim.prior_name == \"int_uniform\"\n\n dim = Integer(\"yolo1\", \"norm\", -3, 6)\n assert dim.prior_name == \"int_norm\"\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n assert dim.prior_name == \"choices\"", "def prior(self, c, labeled):\n return log(len(labeled[c])/self.N_features)", "def get_prior(self):\n assert self._prior in self._priors, 'Unsupported prior! Check the _priors attribute for a list of priors.'\n if self._prior == 'Gaussian':\n prior = 0.5 * torch.sum(self.parameters ** 2)/self.prior_var\n elif self._prior == 'Cauchy':\n dimconst = (self.parameters.shape[0] + 1)/2.\n prior = dimconst*torch.log(self.prior_var + torch.sum(self.parameters ** 2))\n elif self._prior == 'Sparse':\n n = self.dataset.shape[1]\n gauss_prior = 0.5 * torch.sum(torch.exp(self.parameters[-1] * torch.exp(self.parameters[n:2*n]) * self.parameters[:n] ** 2))\n gamma_density = torch.distributions.Gamma(1.5,0.5)\n# gamma_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n# lambda_density = torch.distributions.Gamma(1.5,0.5)\n lambda_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n prior = gauss_prior + lambda_prior\n return prior", "def lnprior(self):\n \n return", "def prior_sample(self):\n pass", "def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p", "def P_prior(self):\n return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))", "def prior(store):\n mu = zeros(store['beta'].shape[0])\n Prec = diag(0.005 * ones(store['beta'].shape[0]))\n return -0.5 * dot(store['beta'].transpose(), dot(Prec, store['beta']))", "def test_get_prior_string_dict(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices({'asdfa': 0.10, 2: 0.20, 3: 0.30, 'lalala': 0.40}, \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def analysis(self) -> \"PriorFactor\":\n return self", "def _compute_mix_prior(self):\n if np.all(self.mix_prior == 1):\n return 0\n return np.dot(np.log(self.mix_weight).T, (self.mix_prior - 1))", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])", "def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"", "def bias_prior(self):", "def prior(cube, ndim, nparams):\n # construct prior from recovery file\n counter = 0\n if params2 is None:\n return\n for key in params2.keys():\n nparams_tmp = int(params2[key]['nparams'])\n for ii in range(nparams_tmp):\n # sp = [name, prior type, x1, x2]\n sp =\\\n params2[key]['param'+str(ii+1)].split(',')\n if sp[1][0] == 'U' and 
sp[2][:5]=='param' and sp[3][:5]=='param':\n subtract1 = int(key[-1]) - int(sp[2][-1])\n subtract2 = int(key[-1]) - int(sp[3][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract1], cube[counter-subtract2])\n elif sp[1][0] == 'U' and sp[2][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract], float(sp[3]))\n elif sp[1][0] == 'U' and sp[3][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n float(sp[2]), cube[counter - subtract])\n else:\n cube[counter] = GeneralPrior(cube[counter], sp[1], float(sp[2]),\n float(sp[3]))\n counter += 1", "def set_prior_priorunc_synthetic(self):\n\n lai_coeff_absunc = None\n statevec_absunc = None\n\n #-- \n if self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n elif self.use_generic_prior:\n self._setprior_generic_agriculture()\n statevec_absunc = self.generic_prior_unc\n else:\n #-- overall number of time-points in schedule\n npts = self.get_npts()\n\n #-- default prior file\n prior_file = os.path.join(ipt_dir_path, 'mni_stat_jules_2017.csv')\n\n #-- get signature simulator default state\n msg = \"START reading state variables from file ***{}***...\".format(prior_file)\n FileLogger.info(msg)\n state_inst = sv.get_state_csv(fname=prior_file, fmt='%Y-%m-%d %H:%M:%S' )\n msg = \"...reading DONE\"\n FileLogger.info(msg)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n # print \"MVMV::nearest={} idx={} timedelt={}\".format(\n # state_inst.date_utc[idx], idx, timedelt)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]\n\n #-- set uncertainty values\n self._set_priorunc(statevec_absunc=statevec_absunc, lai_coeff_absunc=lai_coeff_absunc)", "def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()", "def _setprior_generic_agriculture(self):\n\n #-- number of time-points\n npts = self.get_npts()\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n #-- LAI\n self.prstate[0,:] = self.generic_prior[0]\n #-- canopy-height\n self.prstate[1,:] = self.generic_prior[1]\n #-- soil moisture (volumetric)\n self.prstate[2,:] = self.generic_prior[2]", "def _call(self, x):\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)", "def prior_sample_parameter(self, parameter):\n pass", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def prior_model(self) -> Collection:\n return Collection(self.prior)", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return 
self.__prior", "def log_prior(self):\n raise NotImplementedError(\"the log_prior property should \"\n \"be defined in the Estimator sub-class\")", "def prior_vars(self):\n priors = []\n for i in self.active_ssms(0):\n ssm = self.ssms[i]\n prior = ssm.prior_vars()\n\n if self.ssm_starts[i] < 0:\n P = np.diag(prior)\n P2 = P.copy()\n for k in range(-self.ssm_starts[i]):\n ssm.transition_covariance(P2, k+1, P)\n ssm.transition_noise_diag(k+1, prior)\n np.fill_diagonal(P, np.diag(P) + prior)\n P2 = P\n\n # since the interface only supports independent\n # priors, return a diagonal approximation of the true\n # prior\n prior = np.diag(P)\n priors.append(prior)\n return np.concatenate(priors)", "def set_prior_priorunc_general(self):\n\n #-- some configurations apply absolute uncertainties\n lai_coeff_absunc = None\n statevec_absunc = None\n is_generic_prior = False\n\n #--\n if self.prior_states_file!=None:\n states_file = self.prior_states_file\n basename = os.path.basename(states_file)\n if os.path.splitext(basename)[1]=='.nc':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_jules(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n elif os.path.splitext(basename)[1]=='.csv':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_csv(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n else:\n msg = \"Unrecognised format of states file ***{}***. Cannot continue!\".format(\n states_file)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n return\n elif self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n else:\n self._setprior_generic_agriculture()\n is_generic_prior = True\n statevec_absunc = self.generic_prior_unc\n\n #-- set uncertainty values\n self._set_priorunc( lai_coeff_absunc=lai_coeff_absunc,\n statevec_absunc=statevec_absunc,\n is_generic_prior=is_generic_prior )", "def psci(self, x_prior, P_prior, c_bar, Pcc):\n # Full state estimates\n x = self.filter.x_hat\n P = self.filter.P\n\n D_inv = np.linalg.inv(Pcc) - np.linalg.inv(P_prior)\n D_inv_d = np.dot( np.linalg.inv(Pcc), c_bar) - np.dot( np.linalg.inv(P_prior), x_prior)\n \n my_id = self.asset2id[self.my_name]\n begin_ind = my_id*self.num_ownship_states\n end_ind = (my_id+1)*self.num_ownship_states\n\n info_vector = np.zeros( x.shape )\n info_vector[begin_ind:end_ind] = D_inv_d\n\n info_matrix = np.zeros( P.shape )\n info_matrix[begin_ind:end_ind, begin_ind:end_ind] = D_inv\n\n posterior_cov = np.linalg.inv( np.linalg.inv( P ) + info_matrix )\n tmp = np.dot(np.linalg.inv( P ), x) + info_vector\n posterior_state = np.dot( posterior_cov, tmp )\n\n self.filter.x_hat = posterior_state\n self.filter.P = posterior_cov", "def ppf(self,x):\n return self.categoricalDist.ppf(x)", "def get_prior(self):\n return self.prior", "def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())", "def prior_predictive(self):\n cfg = self.config\n n = cfg['batch_size'] * cfg['q/n_samples']\n n_samples = cfg['q/n_samples']\n with util.get_or_create_scope('model', reuse=True):\n h_prior = tf.cast(self.p_h_L.sample(n), cfg['dtype'])\n h_prior = tf.reshape(\n h_prior, [cfg['q/n_samples'], cfg['batch_size'], -1])\n h = [None] * cfg['p/n_layers']\n h[cfg['p/n_layers'] - 1] = h_prior\n for n in range(cfg['p/n_layers'] - 1, 0, -1):\n p_h_n = 
self.build_stochastic_layer(n, h_above=h[n])\n h[n - 1] = tf.cast(p_h_n.sample(), cfg['dtype'])\n return self.likelihood(h[0])", "def log_prior(self, i):\n mu = self.prior.m_0\n var = (self.prior.k_0 + 1.) / (self.prior.k_0*self.prior.v_0) * self.prior.S_0\n log_prod_var = np.log(var).sum()\n inv_var = 1./var\n v = self.prior.v_0\n return self._log_prod_students_t(i, mu, log_prod_var, inv_var, v)", "def prior(\n self,\n next_state: np.ndarray,\n state: np.ndarray,\n control_x: Optional[np.ndarray] = None\n ) -> np.ndarray:\n pass", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def log_prior(self, params):", "def sample_from_prior(self):\n raise NotImplementedError", "def posterior(store):\n return logl(store) + prior(store)", "def sample_from_prior(self, *args, **kwargs):\n pass", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def conditionalize(prior, conditional, observed):\n\n # construct joint probability table (Step 1 of Master Method)\n joint = PGM2(prior, conditional)\n #print(joint.get_cell(('POX', 'NOSPOTS')))\n\n # update joint probability table after observing value of N1 (Steps 2 and 3 of Master Method)\n joint.update(observed, 1)\n\n # marginalize to get probability distribution for N0 (Step 4 of Master Method)\n posterior = joint.marginalize(0)\n\n return posterior", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def test_3_prior(self):\n print(\"test 3: prior probabilities\")\n\n for i, x in enumerate(self.X):\n print(i+1, prior_probability(\n x, self.means, self.dispersions, self.cluster_probabilities\n ), sep=' : ')", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def _call(self, x):\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)", "def add_prior(self, prior):\n if self.rate_variation:\n # Gamma prior with mean 1 over all mutation rates\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRatePrior.s:%s\" % self.name, \"name\":\"distribution\"})\n compound = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRateCompound:%s\" % self.name, \"spec\":\"beast.core.parameter.CompoundValuable\", \"name\":\"x\"})\n plate = ET.SubElement(compound, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"var\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n gamma = ET.SubElement(sub_prior, \"input\", {\"id\":\"featureClockRatePriorGamma:%s\" % self.name, \"spec\":\"beast.math.distributions.Gamma\", \"name\":\"distr\", \"alpha\":\"@featureClockRateGammaShape:%s\" % self.name, \"beta\":\"@featureClockRateGammaScale:%s\" % self.name})\n # Exponential hyperprior on scale of Gamma prior\n # Exponential prior favours small scales over large scales, i.e. 
less rate variation\n # Mean scale 0.23 chosen for general sensibility, e.g.:\n # - Prior distribution is roughly 50/50 that ratio of fastest\n # to slowest feature rate in a dataset of size 200 is below\n # or above 10.\n # - Prior probability of roughly 0.90 that this ratio is below\n # 100.\n sub_prior = ET.SubElement(prior, \"prior\", {\"id\":\"featureClockRateGammaScalePrior.s:%s\" % self.name, \"name\":\"distribution\", \"x\":\"@featureClockRateGammaScale:%s\" % self.name})\n ET.SubElement(sub_prior, \"Exponential\", {\"id\":\"featureClockRateGammaShapePriorExponential.s:%s\" % self.name, \"mean\":\"0.23\", \"name\":\"distr\"})", "def _call(self, x):\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1", "def test_get_prior_string_discrete(self):\n dim = Integer(\"yolo\", \"uniform\", 1, 2)\n assert dim.get_prior_string() == \"uniform(1, 3, discrete=True)\"", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def eval_prior(self, state, action):\n\n return np.dot(state, self.a.T) + np.dot(action, self.b.T)", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. 
arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self", "def _call(self, x):\n if self.prior is None:\n tmp = (np.exp(x) - 1).inner(self.domain.one())\n else:\n tmp = (self.prior * (np.exp(x) - 1)).inner(self.domain.one())\n return tmp", "def categorical(pvals: np.ndarray) -> int:\n\n return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def buildZPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.z_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_pzgxw + T.log(self.hyper['num_clust'])), axis=3), axis=[1,2])\r\n\r\n self.z_prior_modif = - T.maximum(self.hyper['treshold_z_prior'], - self.z_prior)", "def posterior_sample(self):\n pass", "def cp(temp,pres):\n g_tt = liq_g(2,0,temp,pres)\n cp = -temp * g_tt\n return cp", "def evalGraphPrior(a, prior, undirected=True):\n probs = []\n priordict = prior[0]\n items = prior[1]\n nullprob = priordict['DEFAULTPRIOR']\n\n for inum, i in enumerate(a):\n for jnum, j in enumerate(i):\n if (inum > jnum) or ((undirected==False) and (inum != jnum)):\n if undirected:\n pair = np.sort((items[inum], items[jnum]))\n else:\n pair = (items[inum], items[jnum])\n try:\n priorprob = priordict[pair[0]][pair[1]]\n if j==1:\n prob = priorprob\n elif j==0:\n prob = (1-priorprob)\n except:\n prob = nullprob # no information about edge\n probs.append(prob)\n \n probs = [np.log(prob) for prob in probs] # multiplication probably results in underflow...\n probs = sum(probs)\n return probs", "def get_prior(self):\n return self._prior", "def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x", "def p1_pits(self):\n return self.state[:self.M]", "def add_prior(self, layer_number, prior_type, low_bound, hi_bound, units='mil'):\n prior = {'layer_number':layer_number, 'prior_type':prior_type, \\\n 'low_bound':low_bound, 'hi_bound':hi_bound, 'units':units}\n self.priors.append(prior)\n return", "def add_prior(self, beastxml):\n coalescent = xml.distribution(\n beastxml.prior, id=\"Coalescent.t:beastlingTree\", spec=\"Coalescent\")\n popmod = xml.populationModel(\n coalescent, id=\"ConstantPopulation:beastlingTree\", 
spec=\"ConstantPopulation\")\n xml.parameter(popmod, idref=\"popSize.t:beastlingTree\", name=\"popSize\")\n xml.treeIntervals(\n coalescent, id=\"TreeIntervals\", spec=\"TreeIntervals\", tree=\"@Tree.t:beastlingTree\")", "def p_prior(self):\n sampler = self.__sampler\n nwalkers = self.nwalkers\n pRanges = self.pRanges\n if sampler == \"EnsembleSampler\":\n p = [posRange(pRanges) for i in range(nwalkers)]\n elif sampler == \"PTSampler\":\n ntemps = self.ntemps\n p = np.zeros((ntemps, nwalkers, self.ndim))\n for loop_t in range(ntemps):\n for loop_w in range(nwalkers):\n p[loop_t, loop_w, :] = posRange(pRanges)\n return p", "def gen_categ(low=0, up=0):\n share_final = raw.copy()\n if low == 0:\n time = pd.Categorical(share_final.time)\n share_final = share_final.set_index([\"mergeid\", \"time\"])\n share_final[\"time\"] = time\n\n country = pd.Categorical(share_final.country)\n share_final[\"country\"] = country\n return share_final\n else:\n a = raw.loc[(raw[\"yrbirth\"] >= low) & (raw[\"yrbirth\"] <= up)]\n time = pd.Categorical(a.time)\n a = a.set_index([\"mergeid\", \"time\"])\n a[\"time\"] = time\n\n country = pd.Categorical(a.country)\n a[\"country\"] = country\n\n subsample = a.copy()\n\n return subsample", "def __init__(self, categories, prior=None, transform=None, name=None):\n if transform == \"identity\":\n self.categories = tuple([str(c) for c in categories])\n else:\n self.categories = tuple(categories)\n\n self.name = name\n\n if transform is None:\n transform = \"onehot\"\n self.transform_ = transform\n if transform not in [\"identity\", \"onehot\"]:\n raise ValueError(\n \"Expected transform to be 'identity' or 'onehot' \"\n \"got {}\".format(transform)\n )\n if transform == \"onehot\":\n self.transformer = CategoricalEncoder()\n self.transformer.fit(self.categories)\n else:\n self.transformer = Identity(dtype=type(categories[0]))\n\n self.prior = prior\n\n if prior is None:\n self.prior_ = np.tile(1.0 / len(self.categories), len(self.categories))\n else:\n self.prior_ = prior", "def get_prior(self, x):\n\n K_xx = self.kernel.eval(x, x)\n prior_mean = self.mean.eval(x)\n return prior_mean, K_xx", "def setupMixedPrior(self):\n\n if self.namePrior.find('mixed') < 0:\n return\n\n # we set up the default parameters for bounded flat prior,\n # then update them with non-flat examples\n if np.size(self.hyper) < 7:\n self.setupDefaultPars()\n\n # Adjust the hyperparameters for defaults.\n self.hyper[0][2] = 0.45\n self.hyper[1][2] = 0.05\n self.hyper[0][3] = 16.3\n self.hyper[1][3] = 0.1\n\n nMeths = np.shape(self.hyper)[-1]\n self.mixedNames = ['binaryBoundedOne' for i in range(nMeths)]\n\n ### Let's try some gaussians. 
Eccentricity and period\n self.mixedNames[2] = 'gaussianOne'\n self.mixedNames[3] = 'gaussianOne'\n\n self.findMixedMethods()", "def test_get_prior_string(self):\n dim = Dimension(\"yolo\", \"alpha\", 1, 2, 3, some=\"args\", plus=\"fluff\", n=4)\n assert (\n dim.get_prior_string() == \"alpha(1, 2, 3, some='args', plus='fluff', n=4)\"\n )", "def classical_preprocessing(*args, **kwargs):\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters())", "def init_to_prior(site, skip_param=False):\n return init_to_median(site, num_samples=1, skip_param=skip_param)", "def _initial_population(draws, model, variables):\n\n population = []\n var_info = {}\n start = model.test_point\n init_rnd = pm.sample_prior_predictive(draws, model=model)\n for v in variables:\n var_info[v.name] = (start[v.name].shape, start[v.name].size)\n\n for i in range(draws):\n point = pm.Point({v.name: init_rnd[v.name][i] for v in variables}, model=model)\n population.append(model.dict_to_array(point))\n\n return np.array(floatX(population)), var_info", "def proximal(self):\n return proximal_cconj_kl_cross_entropy(space=self.domain, g=self.prior)", "def p(self):\n return hlp.parms(self.y(0))", "def PLP(self, *_):\n self.reg.P = self.pop()", "def propose(self):\n\n p = type(self)(self.n, alpha=self.alpha)\n\n return p, p.compute_prior() - self.compute_prior()", "def calculate_uniform_prior(grids_dict):\n shape = list(grids_dict.values())[0].shape\n prior = np.ones(shape, dtype=\"float\")\n # The prior will be normalised later\n return prior", "def prob_category(new_music, fit):\n\tr = robjects.r\n\t#Be careful not to include the word 'data' in the function call below, although data is a keyword\n\tpredictions = r.predict(fit,new_music,type=\"prob\")\n\treturn predictions", "def test_no_prior(self):\n dim = Dimension(\"yolo\", None)\n print(dim._prior_name)\n assert dim.prior is None\n assert dim._prior_name == \"None\"", "def any_preprocessing(name):\n return hp.choice('%s' % name, [\n [pca(name + '.pca')],\n [standard_scaler(name + '.standard_scaler')],\n [min_max_scaler(name + '.min_max_scaler')],\n [normalizer(name + '.normalizer')],\n # -- not putting in one-hot because it can make vectors huge\n #[one_hot_encoder(name + '.one_hot_encoder')],\n []\n ])", "def lnprior(self, params):\n self.debug.start_function('lnprior')\n lower_bounds = self.mcmc_version.prior_bounds[:, 0]\n upper_bounds = self.mcmc_version.prior_bounds[:, 1]\n inside_bounds = np.logical_and(params > lower_bounds,\n params < upper_bounds)\n\n if False in inside_bounds:\n self.debug.end_function()\n return self.zero_lhood\n\n if self.has_logz:\n z_input = params[self.param_idxs['logz']]\n else:\n z = params[self.param_idxs['z']]\n z_input = np.log10(z / z_sun)\n\n prior_lhood = np.log(self.z_prior(z_input))\n\n # ===== anisotropy/inclination priors =====\n if self.has_two_f:\n xi_ratio = params[self.param_idxs['f_p']] / params[self.param_idxs['f_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n elif self.has_xi_ratio:\n xi_ratio = params[self.param_idxs['xi_ratio']]\n d_b = params[self.param_idxs['d_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n prior_lhood += np.log(self.d_b_prior(d_b))\n\n self.debug.variable('prior_lhood', prior_lhood, formatter='f')\n self.debug.end_function()\n return prior_lhood", "def __init__(self, p1_proba=0.5):\n self.p1_proba = p1_proba", "def ppf(self,x):\n if x > 1.0 or x < 0:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate ppf for', str(x), '! 
Valid value should within [0,1]!')\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == 1.0:\n return float(sortedMapping[-1][0]) if self.isFloat else sortedMapping[-1][0]\n else:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if cumulative >= x:\n return float(element[0]) if self.isFloat else element[0]", "def __init__(self, zone, value):\n from datamodel import Pressure\n CommonInitialCondition.__init__(self,zone, value,[Pressure])", "def getPopulation(self):\n\n return self.p", "def Poincare_pol( self, var = 'x', uterm = 0):\n n = self.index().rank()\n s = self.special_weight()\n v = PolynomialRing( IntegerRing(), var).gens()[0]\n top = Rational(n/2+12-s).ceil()\n a = dict([ (s+l,self.dimension(s+l)) for l in range(-10,top,2) if l != 0])\n a[s] = uterm\n Poincare_pol = s,\\\n sum( (a[s+l]-a[s+l-4]-a[s+l-6]+a[s+l-10])* v**l for l in range(0,top,2))\n\n # L = self.index()\n # h = self.character()\n # s = self.special_weight()\n # N = 50 #TODO: does 13 suffice?\n # P = uterm + sum( self.dimension( s + 2*k) * x**(2*k) for k in range( 1, N)) + O(x**N)\n # P *= (1-x**4)*(1-x**6)\n # Poincare_pol = s, P.truncate()\n\n return Poincare_pol", "def set_initial_condition(self):\n X0 = np.array([0.5, 0.5])\n XB = self.bary\n q0 = 1 + np.exp(-0.5*(np.sum((XB-X0[np.newaxis])**2., axis=1))/0.1**2)\n q1 = np.zeros(q0.shape)\n #import pdb; pdb.set_trace()\n return np.array([q0, q1, q1]).T", "def lower_cb(self,p):\n avg_elo=self.elo0+self.elo1\n delta=self.elo1-self.elo0\n N=30\n# Various error conditions must be handled better here!\n while True:\n Elo0=avg_elo-N*delta\n Elo1=avg_elo+N*delta\n sol=brentq(lambda elo:self.outcome_prob(elo)-(1-p),Elo0,Elo1)\n if sol['msg']=='no bracket':\n N*=2\n continue\n break\n return sol['x0']", "def _set_priorunc(self, lai_coeff_absunc=None, statevec_absunc=None, is_generic_prior=False):\n if not self.__dict__.has_key('prstate') or self.prstate is None:\n msg = \"internal error, prior state does not yet exist!\"\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n if lai_coeff_absunc!=None:\n self.lai_coeff_unc = np.maximum( lai_coeff_absunc, self.lai_coeff_uncfloor )\n msg = \"applied absolute uncertainty on lai coefficient, \"\n msg += \"lai_coeff_absunc={}\".format(lai_coeff_absunc)\n FileLogger.info(msg)\n else:\n self.lai_coeff_unc = np.maximum( self.lai_coeff*self.lai_coeff_relunc,\n self.lai_coeff_uncfloor )\n msg = \"applied relative uncertainty {} on lai coefficient.\".format(\n self.lai_coeff_relunc)\n FileLogger.info(msg)\n\n #-- allocate space for uncertainty array\n self.prstate_unc = np.empty(self.prstate.shape, dtype=np.float64)\n\n #-- generic prior, user-supplied relative uncertainties\n if is_generic_prior and self.relunc_by_user:\n self.prstate_unc[0,:] = np.maximum( self.prstate[0,:]*self.lai_relunc,\n self.lai_uncfloor )\n self.prstate_unc[1,:] = np.maximum( self.prstate[1,:]*self.canht_relunc,\n self.canht_uncfloor )\n self.prstate_unc[2,:] = np.maximum( self.prstate[2,:]*self.sm_relunc,\n self.sm_uncfloor )\n msg = \"applied relative uncertainty on state vector components, \"\n msg += \"lai_relunc={} canht_relunc={} sm_relunc={}\".format(\n self.lai_relunc, self.canht_relunc, self.sm_relunc)\n FileLogger.info(msg)\n #-- either: prior information from ini-file or generic prior\n elif statevec_absunc!=None:\n lai_absunc,canht_absunc,sm_absunc = statevec_absunc\n self.prstate_unc[0,:] = np.maximum( lai_absunc, self.lai_uncfloor )\n self.prstate_unc[1,:] = np.maximum( canht_absunc, 
self.canht_uncfloor )\n self.prstate_unc[2,:] = np.maximum( sm_absunc, self.sm_uncfloor )\n msg = \"applied absolute uncertainty on state vector components, \"\n msg += \"lai_absunc={} canht_absunc={} sm_absunc={}\".format(\n lai_absunc, canht_absunc, sm_absunc)\n FileLogger.info(msg)\n #-- relative uncertainty (default or user-supplied)\n else:\n self.prstate_unc[0,:] = np.maximum( self.prstate[0,:]*self.lai_relunc,\n self.lai_uncfloor )\n self.prstate_unc[1,:] = np.maximum( self.prstate[1,:]*self.canht_relunc,\n self.canht_uncfloor )\n self.prstate_unc[2,:] = np.maximum( self.prstate[2,:]*self.sm_relunc,\n self.sm_uncfloor )\n msg = \"applied relative uncertainty on state vector components, \"\n msg += \"lai_relunc={} canht_relunc={} sm_relunc={}\".format(\n self.lai_relunc, self.canht_relunc, self.sm_relunc)\n FileLogger.info(msg)\n #-- logging\n msg = \"uncertainty floor values were applied as follows: \"\n msg += \"lai_uncfloor={} canht_uncfloor={} sm_uncfloor={}\".format(\n self.lai_uncfloor, self.canht_uncfloor, self.sm_uncfloor)\n FileLogger.info(msg)", "def _sample(preds, temperature=1.0):\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)", "def decision_function(self, X):\n ...", "def prior(old_params,params):\n \n for s in range(len(params)):\n if params[s] < 0.0 or params[s] > 2:\n return 0\n return 1", "def __init__(self, prior: float, beta: float = 0., gamma: float = 1.0):\n super(NonNegativePULoss, self).__init__()\n\n self.beta = beta\n self.prior = prior\n self.gamma = gamma\n self.loss_fn = torch.nn.CrossEntropyLoss(reduction='none')", "def proximal(self):\n return proximal_cconj_kl(space=self.domain, g=self.prior)", "def initial_state(self, parameters = None):\n if parameters is None:\n parameters = self._get_static_parameters_or_die()\n return Value(\n state=ed.Categorical(logits=parameters.get('initial_dist_logits')))" ]
[ "0.6308094", "0.6022625", "0.5991592", "0.5984886", "0.5965109", "0.5834765", "0.58280075", "0.57996655", "0.57636964", "0.57478815", "0.57455534", "0.56727767", "0.56464076", "0.559691", "0.5589314", "0.55594623", "0.5548906", "0.55311286", "0.55226177", "0.5502985", "0.55029565", "0.54959536", "0.5439871", "0.5436019", "0.5436019", "0.5436019", "0.5436019", "0.539356", "0.53821176", "0.5377098", "0.53693557", "0.536474", "0.53237545", "0.53091085", "0.5301612", "0.52554184", "0.5233248", "0.5228837", "0.52247405", "0.52246016", "0.52221376", "0.5212335", "0.5211667", "0.52094454", "0.5205328", "0.52033174", "0.51617795", "0.51360536", "0.513474", "0.5121069", "0.5118465", "0.5111627", "0.5091997", "0.509117", "0.5088737", "0.50887144", "0.5055117", "0.505289", "0.50496936", "0.50348014", "0.5033493", "0.5029479", "0.5023981", "0.50212705", "0.5019829", "0.5013308", "0.5011374", "0.500651", "0.50002277", "0.4993474", "0.49847063", "0.49705672", "0.4961182", "0.49607715", "0.4957997", "0.49564055", "0.49546066", "0.4947397", "0.4944695", "0.49319875", "0.49286905", "0.49273798", "0.49207783", "0.49154046", "0.4912758", "0.49108392", "0.4909929", "0.4894188", "0.4893653", "0.48884737", "0.48855412", "0.48831168", "0.48809043", "0.48771307", "0.48758757", "0.48696363", "0.48652762", "0.4851623", "0.4850767", "0.48463" ]
0.6626557
0
Log population weights $log \; \pi$.
def log_pi(self) -> Tensor: return torch.log_softmax(self.pi_logit_ratio * self.pi_logit, dim=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight_log(val):\n return val * math.log(val)", "def log_prob(self):", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def loglike(store):\n nobs = store['yvec'].shape[0]\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].loglike(store['sigma'], store['beta'])", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions)))", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)", "def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())", "def logp(self, x):\n pass", "def log_probability(self, samples):\n pass", "def logrels(rets):\n return np.log(rets + 1)", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def log_poisson(k, l):\n return k*np.log(l) -l - gammaln(k+1)", "def log_prob(self, samples):\n return -0.5 * sum_except_batch(\n np.log(2 * np.pi) + self.logstd + \\\n tf.exp(-2 * self.logstd) * tf.square(samples - self.mean))", "def logP(self):\n raise NotImplementedError", "def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)", "def logpowerlaw(x, p=default()):\n xtr, ytr, gradtr = logcontinuity(p)\n power = p[3]\n x0 = xtr - power/gradtr\n b = ytr - power*np.log(xtr-x0)\n return b + power*np.log(x-x0)", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def log1p(x):\n return 0.0", "def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def log_probability(theta):\n global priors\n global 
logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def log_probability(theta):\n global priors\n global logp\n lp = np.sum([priors[p].logpdf(x) for p, x in zip(logp.parnames, theta)])\n if not np.isfinite(lp) or np.isnan(lp):\n return -np.inf\n ll = logp(theta)\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll", "def weighted_log_flops(self):\n return self.mu*math.log(self.flops)", "def log_prob(self):\n res = -self.L_h/2*np.log(2*np.pi*self.la)\n res = res + self.L_h*(self.L_h-1)/2*self.a\n\n\n res = res - 1/(2*self.la)*np.square(np.linalg.norm(self.e*self.pie))\n\n res = res - 1/(2*self.la)*np.sum(self.e2*self.pie_var)\n\n res = res - self.L_h/2*np.log(2*np.pi*self.sigma2)\n res = res - 1/(2*self.sigma2)*(np.square(np.linalg.norm(self.w))+np.trace(self.R))\n\n print(\"Log-probability difference = {}\".format(res - self.LP), file=self.logfile)\n self.LP = res\n return res", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def theil(x, weights):\n assert numpy.all(x >= 0), \"negative numbers can't be used in Theil\"\n x_mean = numpy.average(x, weights=weights) + 1e-100\n normed = x / x_mean\n normed[normed < 1e-10] = 1e-10\n return numpy.average(normed * numpy.log(normed), weights=weights)", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def _logprob(self, sample):\n return 0, 0", "def log_joint(self):\n return sum([\n self.log_marg_like(self.gamma, self.gamma0, self.lamb, self.nu),\n self._gamma0_distribution.logpdf(self.gamma0),\n self._nu_distribution.logpdf(self.nu),\n self._lambda_distribution.logpdf(self.lamb),\n self.probit_distribution(self.xi).logpdf(self.gamma),\n self._xi_distribution.logpdf(self.xi) if self.sample_xi else 0.0\n ])", "def _logprob_X(self, X, **kwargs):\n pass", "def log_prob(theta):\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior", "def logpow(x, m):\n # return m * log(x)\n return pt.switch(pt.eq(x, 0), pt.switch(pt.eq(m, 0), 0.0, -np.inf), m * pt.log(x))", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], 
candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def log_ess(log_weight):\n dim = 1 if log_weight.ndimension() == 2 else 0\n\n return 2 * torch.logsumexp(log_weight, dim=dim) - \\\n torch.logsumexp(2 * log_weight, dim=dim)", "def _estimate_weighted_log_prob(self, X, precision_cholesky):\n return self._estimate_log_prob(X, precision_cholesky) + self._estimate_log_weights(X.location)", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def weighted_log_density(self):\n return self.rho*math.log(self.density)", "def _logprob(\n op: Op,\n values: Sequence[TensorVariable],\n *inputs: TensorVariable,\n **kwargs,\n):\n raise NotImplementedError(f\"Logprob method not implemented for {op}\")", "def log_weights(self):\n m = self.kernel.feature_log_prob_[self._match_class_pos()]\n u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]\n\n return self._prob_inverse_transform(m - u)", "def logp(self, F, Y):\n raise NotImplementedError(\"implement the logp function\\\n for this likelihood\")", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def logpow(x, m):\n # return m * log(x)\n return tt.switch(tt.eq(x, 0), -np.inf, m * tt.log(x))", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def logSumExp(ns):\n mx = np.max(ns)\n ds = ns - mx\n sumOfExp = np.exp(ds).sum()\n return mx + np.log(sumOfExp)", "def log_marg(self):\n log_prob_X_given_z = 0.\n for k in range(self.K):\n log_prob_X_given_z += self.log_marg_k(k)\n return log_prob_X_given_z", "def __call__(self,logits):\n \n #sample from Gumbel(0, 1)\n uniform = self._srng.uniform(logits.shape,low=0,high=1)\n gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)\n \n #draw a sample from the Gumbel-Softmax distribution\n return T.nnet.softmax((logits + gumbel) / self.temperature)", "def logp(self, args):\n mean, stddev, action = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def nlogp(n, p):\n return 0 if n == 0 else n * _np.log(max(p, 1e-8))", "def sum_log(*args):\n # if all(a == LOG_ZERO for a in args):\n # return LOG_ZERO\n a_max = np.max(args, 0)\n lsp = np.log(np.sum([np.exp(a - a_max) for a in args], 0))\n return a_max + lsp", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -1.5", "def 
log_marg_like(self, gamma, gamma0, lamb, nu):\n return self.ppi_distribution(gamma, gamma0, lamb).logpdf(self.Y, precision_multiplier=nu)", "def logp(X, nu, V):\n\n p = V.shape[0]\n\n IVI = det(V)\n IXI = det(X)\n\n return check_parameters(\n (\n (nu - p - 1) * pt.log(IXI)\n - trace(matrix_inverse(V).dot(X))\n - nu * p * pt.log(2)\n - nu * pt.log(IVI)\n - 2 * multigammaln(nu / 2.0, p)\n )\n / 2,\n matrix_pos_def(X),\n pt.eq(X, X.T),\n nu > (p - 1),\n )", "def Log(num):\n return math.log(float(num))", "def log_p(self,z): \n return np.array([self.log_p_blanket(i) for i in z])", "def gmmloglik(log_emlik, weights):\n gmm_loglik = np.mean(log_emlik)\n\n\n return gmm_loglik", "def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik", "def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -3.0 * self.placeholder", "def logsum_pair(logx, logy):\n if logx == logzero():\n return logy\n elif logx > logy:\n return logx + np.log1p(np.exp(logy-logx))\n else:\n return logy + np.log1p(np.exp(logx-logy))", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # get regularizer and original logistic return values\n regularizer = hyperparameters['weight_regularization']\n\n E, df, y = logistic(weights, data, targets, hyperparameters)\n\n # sum of all weights squared multiplied by lambda/2. 
Add on top of logistic\n pen_1 = regularizer * 0.5 * (reduce(lambda x,y: x + y * y, weights))\n f = E + pen_1\n\n # calculat pen for dL/dwi - dE/dwi, add the difference to df\n df = df + regularizer * weights\n\n return f, df, y", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def log_gaussian_likelihood(x, mu, log_std):\n log_gaussian_prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 - log_std - 0.5 * np.log(2 * np.pi))\n return tf.reduce_sum(log_gaussian_prob, axis=1)", "def take_log_weights(self, data):\n\n n_row = data[:, 0].size\n log_data = np.zeros(data.shape)\n for i in xrange(data.shape[0]):\n idx_nonzero = (data[i, :] > 0).nonzero()[0]\n log_data[i, idx_nonzero] = np.log(data[i, idx_nonzero])\n return log_data", "def log_probability(self, X):\n\n\t\treturn self.__log_probability(X)", "def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keep_dims=True))", "def ln(x):\n return log(x, const.e)", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def _log_linear_interpolation(predictions):\n log_probs = utils.average_arrays([mx.nd.log(p) for p in predictions])\n return -mx.nd.log(mx.nd.softmax(log_probs))", "def logistic_pen(weights, data, targets, hyperparameters):\n\n wr = hyperparameters['weight_regularization']\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n\n f += np.dot(weights[:-1].transpose()[0], weights[:-1].transpose()[0]) * wr / 2\n df = np.reshape(df, ((len(df), 1)))\n df += np.reshape(np.append(weights[:-1] * wr, 0), (len(weights), 1))\n\n f += (weights[-1, 0] ** 2) * wr / 2\n df[-1] += weights[-1,0] * wr \n\n return f, df, np.reshape(y, (len(y), 1))", "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))", "def brownian_motion_log_returns(param):\n sqrt_delta_sigma = math.sqrt(param.time_rate) * param.vol\n return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.time)", "def log_check(w_in: np.ndarray, w_log: np.ndarray) -> None:\n w_log[:] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n if np.any(w_in <= 0):\n return\n\n w_log[:] = np.log(w_in[:])", "def log_prob(sentence, LM, 
smoothing=False, delta=0, vocabSize=0):\n word_list = sentence.split()\n log_prob = 0\n for i in range(len(word_list)-1):\n print(word_list[i], word_list[i+1])\n bi_count = LM['bi'][word_list[i]][word_list[i+1]]\n uni_count = LM['uni'][word_list[i]]\n if uni_count == 0 and smoothing:\n return float('-inf')\n log_prob += log(((bi_count + delta)/(uni_count + delta * vocabSize)))\n return log_prob", "def sumLogProb(a, b):\n if a > b:\n return a + log1p(exp(b - a))\n else:\n return b + log1p(exp(a - b))", "def log_likelihood(self, data, reward_model, bias_params):", "def _update_logprobs(self):\n #self._logp_src = self._log_lim(self.p_source)\n self._logp_I0 = self._log_lim(self.p_source)\n self._logp_R0 = self._log_lim(self.p_source/(self.p_rec_div))\n self._logp_S_fin = self._log_lim(self.p_S_fin)\n self._logp_inf_fin = self._log_lim(self.p_infect_fin)", "def logp(value, nu, mu, scale):\n quaddist, logdet, ok = quaddist_parse(value, mu, scale)\n k = floatX(value.shape[-1])\n\n norm = gammaln((nu + k) / 2.0) - gammaln(nu / 2.0) - 0.5 * k * pt.log(nu * np.pi)\n inner = -(nu + k) / 2.0 * pt.log1p(quaddist / nu)\n res = norm + inner - logdet\n\n return check_parameters(res, ok, nu > 0, msg=\"posdef, nu > 0\")", "def get_weights_from_log(log, plot = False):\n with open(log, 'r') as f:\n log_file = f.readlines()\n\n i = 0\n time = []\n weights_info = []\n weights_0 = []\n while i < len(log_file):\n if 'init-lambda-weights[' in log_file[i]:\n weights_0.append(float(log_file[i].split('=')[-1]))\n\n if 'MC-lambda information' in log_file[i]:\n # Finding the time\n for j in range(i,0,-1):\n if log_file[j].startswith(' Step Time'):\n j += 1\n time.append(float(log_file[j].split()[-1]))\n break\n # Finding the weight\n weights_info_tmp = []\n i += 3\n while log_file[i] != '\\n':\n split = log_file[i].split()\n count = int(split[2])\n weight = float(split[3])\n weights_info_tmp.append((count, weight))\n i += 1\n weights_info.append(weights_info_tmp)\n i += 1\n # Add weights at t = 0, because the counts are all 0 and I delate the entrances with total count 0 in next lines,\n # What i could do is put 1 in the initial temperature\n time.insert(0,0)\n weights_info.insert(0,list(zip([1] + (len(weights_0) - 1)*[0], weights_0)))\n\n #Converting to array\n time = np.array(time)\n weights_info = np.array(weights_info)\n # Some times (I don't know why) GROMACS reset all the weights and all the counts are 0. 
We need to eliminate those points\n sum_of_weights = weights_info[:,:,0].sum(axis = 1)\n time = time[sum_of_weights != 0]\n weights_info = weights_info[sum_of_weights != 0]\n sum_of_weights = sum_of_weights[sum_of_weights != 0]\n\n\n if plot:\n dir = os.path.dirname(log)\n fig, axes = plt.subplots(2, figsize = (16,9), sharex=True)\n NUM_COLORS = weights_info.shape[1]\n cm = plt.get_cmap('viridis')#gist_rainbow viridis\n for axe in axes:\n axe.set_prop_cycle('color', [cm(1.*j/NUM_COLORS) for j in range(NUM_COLORS)])\n\n probability = weights_info[:,:,0] / sum_of_weights[:,np.newaxis]\n for j in range(weights_info.shape[1]):\n #axes[0].plot(time, weights_info[:,j,0], label = str(j))\n axes[0].plot(time, probability[:,j], label = str(j))\n axes[1].plot(time, weights_info[:,j,1])\n\n fig.legend(loc = 'lower center', ncol = int(weights_info.shape[1] / 2))\n axes[0].set(\n xlim = (time.min(), time.max()),\n ylim = (0,1),\n ylabel = 'Probability',\n )\n axes[1].set(\n xlabel = 'Time [ps]',\n ylabel = 'Weight values'\n )\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_progression.svg'), bbox_inches=\"tight\")\n\n # Plotting the violin plot of the weights\n df = pd.DataFrame()\n for j in range(weights_info.shape[1]):\n #df[temperatures[j]] = weights_info[:,j,1]\n df[j] = weights_info[:,j,1]\n # Set up the matplotlib figure\n sns.set_theme(style=\"whitegrid\")\n fig, ax = plt.subplots(figsize=(25, 25))\n\n # Draw a violinplot with a narrower bandwidth than the default\n sns.violinplot(data=df, palette=\"Set3\", bw=.2, cut=1, linewidth=1)\n # The plot is not over the actual temperatures, the temperatures ara only labels\n ax.plot(range(len(weights_info[0,:,1])), weights_info[0,:,1], '-o', label = 'Initial weights')\n ax.set(\n title = 'Weights per state over the entire simulation',\n xlabel = 'Sate',\n ylabel = 'Weight',\n )\n plt.legend()\n sns.despine(left=True, bottom=True)\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_per_state.svg'), bbox_inches=\"tight\")\n sns.reset_defaults()\n\n return time, weights_info", "def log(x, base=math.e):\n return 0.0", "def log_prob_parameters(self, parameters):\n lp = 0.0\n parameters_model = self.get_parameters_model\n index = 0\n\n for parameter in parameters_model:\n dimension = parameter.dimension\n lp += parameter.log_prior(parameters[index: index + dimension])\n\n index += dimension\n\n if not np.isinf(lp):\n lp += self.log_likelihood(parameters[0], parameters[1], parameters[2:])\n\n return lp", "def logTF(self, tf):\n return math.log(tf)", "def get_log_prob(self, pi: Normal, actions: Tensor):\n return pi.log_prob(actions).sum(axis=-1)", "def logpdf(self, X, pool=None):\n logpdfs = []\n for logweight, space, kde in zip(self._logweights,\n self._spaces,\n self._kdes):\n # Calculate the probability for each parameter space individually\n if np.all(space == ~X.mask) and np.isfinite(logweight):\n logpdfs.append(logweight + kde(X[space], pool=pool))\n\n return logsumexp(logpdfs, axis=0)", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))", "def get_loglikelis(\n self, points: numpy.ndarray | Sequence[numpy.ndarray]\n ) -> numpy.ndarray:\n return numpy.log(numpy.asarray(self.weights)[points])" ]
[ "0.75484306", "0.74073654", "0.6920241", "0.69167686", "0.6878915", "0.6841056", "0.6841056", "0.68350965", "0.6828722", "0.68049484", "0.6777457", "0.6776188", "0.6734402", "0.6714555", "0.6667137", "0.66558814", "0.6647851", "0.6641693", "0.6629896", "0.6627645", "0.6611287", "0.66056955", "0.66056955", "0.6578992", "0.6543087", "0.65403366", "0.6531704", "0.6525632", "0.6525329", "0.6525329", "0.6523754", "0.65206826", "0.6509764", "0.6508481", "0.64949787", "0.6488792", "0.64732146", "0.64455295", "0.64455295", "0.6438719", "0.6429467", "0.6422457", "0.640478", "0.64010096", "0.63988817", "0.63870704", "0.6381759", "0.6378246", "0.63744533", "0.63739645", "0.6370567", "0.63669574", "0.6360664", "0.63472086", "0.63440514", "0.63236773", "0.63226014", "0.6321292", "0.6318302", "0.6303114", "0.6300089", "0.6297582", "0.62934864", "0.62849694", "0.6274859", "0.6261691", "0.6258104", "0.62485355", "0.62478423", "0.6247748", "0.6241876", "0.62241584", "0.6217897", "0.6212548", "0.6211622", "0.62105036", "0.62100875", "0.6207076", "0.620571", "0.6204396", "0.62042135", "0.62012845", "0.62009406", "0.61972", "0.6194119", "0.6194034", "0.6191115", "0.6190447", "0.61857986", "0.6179896", "0.61678386", "0.6164184", "0.6161883", "0.61609775", "0.6159372", "0.6149305", "0.6148048", "0.6132315", "0.6128939", "0.6123467" ]
0.62821656
64
Compute the log weights with temperature $log \; \pi^{(T)}$
def log_pi_temperature(self, T: float) -> Tensor:\n    return torch.log_softmax(self.pi_logit_ratio * self.pi_logit / T, dim=0).detach()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight_log(val):\n return val * math.log(val)", "def logTF(self, tf):\n return math.log(tf)", "def theil(x, weights):\n assert numpy.all(x >= 0), \"negative numbers can't be used in Theil\"\n x_mean = numpy.average(x, weights=weights) + 1e-100\n normed = x / x_mean\n normed[normed < 1e-10] = 1e-10\n return numpy.average(normed * numpy.log(normed), weights=weights)", "def compute_log(tx, index_log, mean=[], std=[]):\n tx_new = np.log10(3+abs(tx[:,index_log]))\n return standardize(tx_new,mean,std)", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def logistic_pen(weights, data, targets, hyperparameters):\n\n wr = hyperparameters['weight_regularization']\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n\n f += np.dot(weights[:-1].transpose()[0], weights[:-1].transpose()[0]) * wr / 2\n df = np.reshape(df, ((len(df), 1)))\n df += np.reshape(np.append(weights[:-1] * wr, 0), (len(weights), 1))\n\n f += (weights[-1, 0] ** 2) * wr / 2\n df[-1] += weights[-1,0] * wr \n\n return f, df, np.reshape(y, (len(y), 1))", "def log_boltzmann_dist(Q, temperature):\n return nn.LogSoftmax(dim=0)(Q/temperature)", "def weighted_log_density(self):\n return self.rho*math.log(self.density)", "def get_logits(self, hidden_states: torch.FloatTensor,\n temperature: float = 1.0):\n return self.logits(hidden_states) / temperature", "def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))", "def loglike(store):\n nobs = store['yvec'].shape[0]\n calcweighted(store)\n store['regsampler'].update_yvec(store['yvectil'])\n store['regsampler'].update_xmat(store['xmattil'])\n return store['regsampler'].loglike(store['sigma'], store['beta'])", "def logloss(self,tple):\n feats = self.dataset.input_features\n res = 0\n cc = self.class_counts\n fc = self.feature_counts\n for c in range(self.num_classes):\n res += prod(fc[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))/(cc[c]**(len(feats)-1))\n if res>0:\n return -math.log2(res/len(self.dataset.train))\n else:\n return float(\"inf\") #infinity", "def log_ess(log_weight):\n dim = 1 if log_weight.ndimension() == 2 else 0\n\n return 2 * torch.logsumexp(log_weight, dim=dim) - \\\n torch.logsumexp(2 * log_weight, dim=dim)", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, 
np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def get_weights_from_log(log, plot = False):\n with open(log, 'r') as f:\n log_file = f.readlines()\n\n i = 0\n time = []\n weights_info = []\n weights_0 = []\n while i < len(log_file):\n if 'init-lambda-weights[' in log_file[i]:\n weights_0.append(float(log_file[i].split('=')[-1]))\n\n if 'MC-lambda information' in log_file[i]:\n # Finding the time\n for j in range(i,0,-1):\n if log_file[j].startswith(' Step Time'):\n j += 1\n time.append(float(log_file[j].split()[-1]))\n break\n # Finding the weight\n weights_info_tmp = []\n i += 3\n while log_file[i] != '\\n':\n split = log_file[i].split()\n count = int(split[2])\n weight = float(split[3])\n weights_info_tmp.append((count, weight))\n i += 1\n weights_info.append(weights_info_tmp)\n i += 1\n # Add weights at t = 0, because the counts are all 0 and I delate the entrances with total count 0 in next lines,\n # What i could do is put 1 in the initial temperature\n time.insert(0,0)\n weights_info.insert(0,list(zip([1] + (len(weights_0) - 1)*[0], weights_0)))\n\n #Converting to array\n time = np.array(time)\n weights_info = np.array(weights_info)\n # Some times (I don't know why) GROMACS reset all the weights and all the counts are 0. We need to eliminate those points\n sum_of_weights = weights_info[:,:,0].sum(axis = 1)\n time = time[sum_of_weights != 0]\n weights_info = weights_info[sum_of_weights != 0]\n sum_of_weights = sum_of_weights[sum_of_weights != 0]\n\n\n if plot:\n dir = os.path.dirname(log)\n fig, axes = plt.subplots(2, figsize = (16,9), sharex=True)\n NUM_COLORS = weights_info.shape[1]\n cm = plt.get_cmap('viridis')#gist_rainbow viridis\n for axe in axes:\n axe.set_prop_cycle('color', [cm(1.*j/NUM_COLORS) for j in range(NUM_COLORS)])\n\n probability = weights_info[:,:,0] / sum_of_weights[:,np.newaxis]\n for j in range(weights_info.shape[1]):\n #axes[0].plot(time, weights_info[:,j,0], label = str(j))\n axes[0].plot(time, probability[:,j], label = str(j))\n axes[1].plot(time, weights_info[:,j,1])\n\n fig.legend(loc = 'lower center', ncol = int(weights_info.shape[1] / 2))\n axes[0].set(\n xlim = (time.min(), time.max()),\n ylim = (0,1),\n ylabel = 'Probability',\n )\n axes[1].set(\n xlabel = 'Time [ps]',\n ylabel = 'Weight values'\n )\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_progression.svg'), bbox_inches=\"tight\")\n\n # Plotting the violin plot of the weights\n df = pd.DataFrame()\n for j in range(weights_info.shape[1]):\n #df[temperatures[j]] = weights_info[:,j,1]\n df[j] = weights_info[:,j,1]\n # Set up the matplotlib figure\n sns.set_theme(style=\"whitegrid\")\n fig, ax = plt.subplots(figsize=(25, 25))\n\n # Draw a violinplot with a narrower bandwidth than the default\n sns.violinplot(data=df, palette=\"Set3\", bw=.2, cut=1, linewidth=1)\n # The plot is not over the actual temperatures, the temperatures ara only labels\n ax.plot(range(len(weights_info[0,:,1])), weights_info[0,:,1], '-o', label = 'Initial weights')\n ax.set(\n title = 'Weights per state over the entire simulation',\n xlabel = 'Sate',\n ylabel = 'Weight',\n )\n plt.legend()\n sns.despine(left=True, bottom=True)\n #plt.show()\n fig.savefig(os.path.join(dir,'weights_per_state.svg'), bbox_inches=\"tight\")\n sns.reset_defaults()\n\n return time, weights_info", "def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return 
np.exp(a*ts)*sigmoid(-a)", "def calc_tf_log(doc):\r\n tf = calc_tf(doc)\r\n max_tf = tf[max(tf, key=tf.get)]\r\n tf_log = {}\r\n for key, val in tf.items():\r\n tf_log[key] = (1 + math.log(val)) / (1 + math.log(max_tf))\r\n return tf_log", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def forward(self, logits, temperature):\n flat = logits.view(logits.shape[:-2] + (-1,))\n weights = F.softmax(flat / temperature, dim=-1).view_as(logits)\n\n x = (weights.sum(-2) * torch.linspace(-1, 1, logits.shape[-1]).type_as(logits)).sum(-1)\n y = (weights.sum(-1) * torch.linspace(-1, 1, logits.shape[-2]).type_as(logits)).sum(-1)\n\n return torch.stack((x, y), -1), weights", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logpowerlaw(x, p=default()):\n xtr, ytr, gradtr = logcontinuity(p)\n power = p[3]\n x0 = xtr - power/gradtr\n b = ytr - power*np.log(xtr-x0)\n return b + power*np.log(x-x0)", "def my_loglike(theta, x, data, sigma):\n\n model = my_model(theta, x)\n\n return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)", "def __call__(self,logits):\n \n #sample from Gumbel(0, 1)\n uniform = self._srng.uniform(logits.shape,low=0,high=1)\n gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)\n \n #draw a sample from the Gumbel-Softmax distribution\n return T.nnet.softmax((logits + gumbel) / self.temperature)", "def get_logits(self, logits):\n if not tf.is_tensor(logits):\n logits = tf.convert_to_tensor(value=logits)\n return logits / self._temperature", "def loglik(self, theta, t=None):\n if t is None:\n t = self.T - 1\n l = np.zeros(shape=theta.shape[0])\n for s in range(t + 1):\n l += self.logpyt(theta, s)\n return l", "def compute_importance_weights(behavior_logits, target_logits, actions):\n logrho = compute_unclipped_logrho(behavior_logits, target_logits, actions)\n print(\"logrho:\", logrho) if debug else None\n print(\"logrho.shape:\", logrho.shape) if debug else None\n\n # change to pytorch version\n return torch.clamp(torch.exp(logrho), max=1.)", "def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y", "def logpow(x, m):\n # return m * log(x)\n return tt.switch(tt.eq(x, 0), -np.inf, m * tt.log(x))", "def log(tensor, base=np.e):\n if base == np.e:\n return _elementary_op(tensor, np.log, lambda x: 1 / x)\n return log(tensor) / log(base)", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def weighted_log_flops(self):\n return self.mu*math.log(self.flops)", "def logZ_brute(self, T, pos, psi, phi):\n ms = self.get_all_tag_seq(len(T))\n log_scores = tr.zeros(len(ms), 
dtype=tr.float64)\n for i in range(len(ms)):\n log_scores[i] = self.log_score(T, pos, ms[i], psi, phi)\n log_z = logsumexp(log_scores)\n return log_z", "def log_likelihood_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-y)", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def logZ(self, T, pos, psi, phi):\n msgs = belief_propagation(T, pos, psi, phi, True)\n log_z = calculate_belief_sum(msgs, True)\n return log_z", "def compute_hessian_logreg(tx, w):\n t = tx.dot(w)\n s = np.diag(sigmoid(t)*(1 - sigmoid(t)))\n\n return tx.T.dot(s).dot(tx)", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def tent(x: torch.Tensor) -> torch.Tensor:\n return -(x.softmax(1) * x.log_softmax(1)).sum(1).mean(0)", "def log_weights(self):\n m = self.kernel.feature_log_prob_[self._match_class_pos()]\n u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]\n\n return self._prob_inverse_transform(m - u)", "def gmmloglik(log_emlik, weights):\n N,_ = log_emlik.shape;\n ll = 0;\n for i in range(N):\n ll += logsumexp(log_emlik[i, :] + np.log(weights));\n return ll", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -3.0 * self.placeholder", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def loglike(self, params, *args, **kwargs):\n return np.sum(self.loglikeobs(params, *args, **kwargs))", "def weighted_log_loss(yt, yp) -> Any:\n from keras import backend as K\n\n pos_loss = -(0 + yt) * K.log(0 + yp + K.epsilon())\n neg_loss = -(1 - yt) * K.log(1 - yp + K.epsilon())\n\n return LOSS_BIAS * K.mean(neg_loss) + (1. - LOSS_BIAS) * K.mean(pos_loss)", "def log_weights_statistics(self):\n for weight_name, weight_parameter in self._weights.items():\n for statistic_function in self._statistics_functions:\n self._weights_statistics[statistic_function.__name__][\n weight_name\n ].append(float(statistic_function(weight_parameter)))", "def log_likelihood_loss(y, tx, w):\n p_1 = sigmoid(tx.dot(w))\n p_0 = np.log(1-p_1)\n p_1 = np.log(p_1)\n return -np.sum((y == 1)*p_1+(y == 0)*p_0)", "def log_error(X, y, w):\r\n N = X.shape[0]\r\n J = np.sum(np.log(1.0 + np.exp(-y * (X @ w)))) / N\r\n return J", "def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())", "def logpow(x, m):\n # return m * log(x)\n return pt.switch(pt.eq(x, 0), pt.switch(pt.eq(m, 0), 0.0, -np.inf), m * pt.log(x))", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * 
np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def logit_cost(self, theta, X, y):\n\n cost = 0.0\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n \n for i in range(0, X.shape[0]):\n cost += (y[i]-1)*theta[i] + np.log(sig[i])\n ### END YOUR CODE\n cost = cost #+ 0.01 * self.regularizer[0](self.weights)\n return cost", "def logistic_pen(weights, data, targets, hyperparameters):\n\n # get regularizer and original logistic return values\n regularizer = hyperparameters['weight_regularization']\n\n E, df, y = logistic(weights, data, targets, hyperparameters)\n\n # sum of all weights squared multiplied by lambda/2. Add on top of logistic\n pen_1 = regularizer * 0.5 * (reduce(lambda x,y: x + y * y, weights))\n f = E + pen_1\n\n # calculat pen for dL/dwi - dE/dwi, add the difference to df\n df = df + regularizer * weights\n\n return f, df, y", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def take_log_weights(self, data):\n\n n_row = data[:, 0].size\n log_data = np.zeros(data.shape)\n for i in xrange(data.shape[0]):\n idx_nonzero = (data[i, :] > 0).nonzero()[0]\n log_data[i, idx_nonzero] = np.log(data[i, idx_nonzero])\n return log_data", "def log_prob(self):", "def logrels(rets):\n return np.log(rets + 1)", "def compute_loglike(self, tools: ModelingTools) -> float:\n return -1.5", "def brownian_motion_log_returns(param):\n sqrt_delta_sigma = math.sqrt(param.time_rate) * param.vol\n return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.time)", "def log_t(u, t):\n\n def _internal_log_t(u, t):\n return (u ** (1.0 - t) - 1.0) / (1.0 - t)\n\n return tf.cond(\n tf.math.equal(t, 1.0), lambda: tf.math.log(u),\n functools.partial(_internal_log_t, u, t))", "def compute_loss_logreg(y, tx, w):\n assert len(set(y).difference({0., 1.})) == 0, \"Class labels must be encoded as {0, 1}\"\n\n z = tx.dot(w)\n\n return np.sum(np.log(1 + np.exp(z)) - y * z)", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)", "def log_target(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_parameters(x) + self.log_prior_wilson_coeffs(x)", "def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) 
- np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik", "def log10(tensor):\n return log(tensor, base=10)", "def compute_log_prior(self,params: ndarray) -> float:\n ln_tE = params[0]\n ln_A0 = params[1]\n ln_deltaT = params[2]\n fbl = params[3]\n mb = params[4]\n\n # Equation (16,15,17) (note that Albrow uses \"log\" for log10)\n log10e = np.log10(np.exp(1))\n ln_pr_ln_tE = np.log(0.476) - ((log10e*ln_tE - 1.333)**2 / 0.330) + np.log(log10e)\n ln_pr_ln_A0 = np.log(0.660) - (1.289*log10e*ln_A0) + np.log(log10e)\n ln_pr_ln_deltaT = np.log(0.156) - ((log10e*ln_deltaT - 1.432)**2 / 0.458) +\\\n np.log(log10e)\n \n # Paper doesnt mention the prior used, but I assume it to be uniform\n ln_pr_fbl = uniform.logpdf(fbl,0.0,1.0)\n\n # Paper doesnr mention the prior used but I will asuumed it to be uniform\n ln_pr_mb = uniform.logpdf(mb,self.mag_min - 1.0, self.mag_max + 1.0)\n \n \n return ln_pr_fbl + ln_pr_ln_A0 + ln_pr_ln_deltaT + ln_pr_ln_tE + ln_pr_mb", "def compute_logistic_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w)) - y) / len(y)", "def log_w(self, high_energy, max_energy, temperature_factors=1.0):\n from deep_boltzmann.util import linlogcut\n z = self.input_z\n x = self.output_x\n # compute z energy\n Ez = self.dim * tf.log(tf.sqrt(temperature_factors)) + tf.reduce_sum(z**2, axis=1) / (2.0 * temperature_factors)\n # compute x energy and regularize\n Ex = self.energy_model.energy_tf(x) / temperature_factors\n Exreg = linlogcut(Ex, high_energy, max_energy, tf=True)\n # log weight\n log_w = -Exreg + Ez + self.log_det_Jzx[:, 0]\n return log_w", "def evaluate(observations, model, states=None, log=False):\r\n N = model.N\r\n T = observations.shape[0]\r\n A = numpy.log(model.A)\r\n B = numpy.log(model.B)\r\n\r\n if states is None:\r\n alphas = forward_path(observations, numpy.log(model.pi), A, B, T, N)\r\n\r\n \"\"\" Termination \"\"\"\r\n result = add_logs(alphas[T-1, :])\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)\r\n\r\n else:\r\n result = 0\r\n for i in range(T):\r\n result += B[states[i], observations[i]]\r\n\r\n if log:\r\n return result\r\n else:\r\n return math.exp(result)", "def logistic(self, X, w):\n g = 1 / (1 + np.exp(-X.dot(w)))\n return g", "def _estimate_weighted_log_prob(self, X, precision_cholesky):\n return self._estimate_log_prob(X, precision_cholesky) + self._estimate_log_weights(X.location)", "def logistic(scale, shift, stretch, t):\r\n return scale / (1 + np.power(np.e, -1.0*(t - shift )/ stretch))", "def reweight_distribution(original_distribution, temperature=0.5):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n distribution = np.log(original_distribution) / temperature\n distribution = np.exp(distribution)\n return distribution / np.sum(distribution)", "def weights(self):\n return self.mul(self.P, self.mul(\n self.L * self.tril_mask + self.I,\n #self.U * self.triu_mask + self.s.diag()\n self.U * self.triu_mask + (self.sign_s * self.log_abs_s.exp()).diag()\n ))", "def logpyt(self, theta, t):\n raise NotImplementedError('StaticModel: logpyt not implemented')", "def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def apply_temperature(prob, temperature):\r\n # Apply temperature\r\n if temperature != 1:\r\n # Inverse sigmoid\r\n x = -np.log(1 / prob - 1)\r\n # Apply temperature to sigmoid function\r\n prob = 1 / (1 + np.exp(-x / temperature))\r\n return prob", 
"def log(self):\n return F.Log.apply(self)", "def We(self):\n We = trapz_loglog(self._gam * self._nelec, self._gam * mec2)\n return We", "def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize", "def log_prior_grad(self, inputs):", "def get_weight(self, count: int) -> float:\n log_count = np.log(count) / np.log(self.log_base)\n log_base_count = np.log(self.idf_base_count) / np.log(self.log_base)\n weight = max(1.0 / (1.0 + log_count - log_base_count), self.min_idf_weight)\n return weight", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def log2(tensor):\n return log(tensor, base=2)", "def log(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.log())", "def logpost(self, theta, t=None):\n return self.prior.logpdf(theta) + self.loglik(theta, t)", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def electron_temp(I4363, I4959, I5007):\n return 33000 / (np.log(0.14) - np.log(I4363/(I4959+I5007)))", "def log(x, eps=1e-7, name=None):\n return tf.log(x + eps, name=name)", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def logIP(self): # just use base?\n np.log(self.t, out=self.t)\n return self", "def log_distr_fdmw(self, dnu, logflux, dme, logw, alpha, logls, logl0, mu, sigma, gtype=None):\n #stepdms = 100/1000.\n #vdms = np.arange(0, 100, stepdm)\n stepz = (np.log(self.Zmax) - np.log(self.Zmin)) / 1000\n vz = np.exp(np.arange(np.log(self.Zmin), np.log(self.Zmax), stepz))\n lik = 0\n for z in vz:\n likv = np.exp(self.log_distr_fdmwz(dnu, logflux, dme, logw, z, alpha, logls, logl0, mu, sigma, gtype=gtype))\n lik += z * stepz * likv\n ind = lik > 0\n ind2 = lik <= 0\n loglik = lik.copy()\n loglik[ind] = np.log(lik[ind])\n loglik[ind2] = np.ones(loglik[ind2].shape) * -1e99\n return loglik", "def wlogmorlet_fft(f0, sd, sampling_rate, ns=5, nt=None):\r\n if nt == None:\r\n st = 1. / (2. * np.pi * sd)\r\n nt = 2 * int(ns * st * sampling_rate) + 1\r\n f = fftpack.fftfreq(nt, 1. / sampling_rate)\r\n\r\n sfl = np.log(1 + 1. 
* sd / f0)\r\n wf = (2 * np.exp(-(np.log(f) - np.log(f0)) ** 2 / (2 * sfl ** 2)) *\r\n np.sqrt(sampling_rate / (np.sqrt(np.pi) * sd)))\r\n wf[f < 0] = 0\r\n wf[f == 0] /= 2\r\n return wf", "def Log(num):\n return math.log(float(num))", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def gmmloglik(log_emlik, weights):\n gmm_loglik = np.mean(log_emlik)\n\n\n return gmm_loglik", "def ln(x):\n return log(x, const.e)", "def _de_exp_const_w(z,w):\n return np.log((z+1.)**(3.*(1.+w)))/3." ]
[ "0.7621643", "0.69197744", "0.6796218", "0.6738715", "0.6605438", "0.6519413", "0.65133", "0.65053755", "0.64697593", "0.6465963", "0.64617324", "0.6432319", "0.64254224", "0.6399813", "0.6398325", "0.63717324", "0.6365299", "0.63402146", "0.6339453", "0.6321659", "0.6321659", "0.6311897", "0.6282256", "0.62816244", "0.6263995", "0.6238767", "0.62192106", "0.6203947", "0.61688864", "0.6166921", "0.6160686", "0.6155968", "0.6151198", "0.6133893", "0.6133066", "0.6118834", "0.6112425", "0.61096084", "0.61096084", "0.6103033", "0.60981214", "0.60940415", "0.6092483", "0.60847026", "0.6084571", "0.6084493", "0.605794", "0.6057103", "0.6056345", "0.60530883", "0.604708", "0.6033715", "0.60314494", "0.6024258", "0.6019556", "0.60188526", "0.6008158", "0.5996278", "0.5992277", "0.59919477", "0.59890425", "0.5984917", "0.5968284", "0.59636694", "0.5947977", "0.5931346", "0.5919424", "0.5919147", "0.590944", "0.59071", "0.5903567", "0.5891186", "0.5890912", "0.5890441", "0.58803827", "0.58791995", "0.5876325", "0.5873584", "0.58724284", "0.5868802", "0.5867899", "0.5866087", "0.5865515", "0.5859114", "0.58580494", "0.58545595", "0.5853134", "0.5848388", "0.58440125", "0.58318913", "0.58262163", "0.58164847", "0.5815912", "0.5801371", "0.57978535", "0.57752544", "0.576834", "0.57661855", "0.57602316", "0.5757131" ]
0.676322
3
Compute probabilities used in the loss function.
def compute_probabilities(\n    self, x: Tensor, covariates: Tensor, use_temp: bool = False\n) -> Tuple[Tensor, Tensor, Tensor]:\n    u, _, ldj_sum = self(x, covariates)\n    log_pi = (\n        self.log_pi_temperature(-self.hparams.temperature)\n        if use_temp\n        else self.log_pi\n    )\n    log_probs = self.prior.log_prob(u) + log_pi  # size N x P\n    return log_probs, ldj_sum, u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def probabilities(self):\n raise NotImplementedError", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs", "def _compute_model_prob(self, per_list_logodds):\n with tf.compat.v1.name_scope(name='compute_model_prob'):\n return tf.stop_gradient(\n tf.exp(-self._alpha *\n (per_list_logodds -\n tf.reduce_min(per_list_logodds, axis=2, keepdims=True))))", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def prob(self, w):\n return self.counts[w] / self.total_count", "def probability(self, samples):\n pass", "def p(self) -> Probability:\n ...", "def compute_perplexity(self,loss: float):\n return math.exp(loss)", "def compute_em_probabilities(self, predictions, data, epsilon=1e-6):\n\t\tloss = data * predictions + (1 - data) * (1 - predictions)\n\n\t\t# sum loss over channels\n\t\tloss = torch.sum(loss, 4, keepdim=True)\n\n\t\tif epsilon > 0:\n\t\t\tloss += epsilon\n\t\treturn loss", "def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. 
\n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)", "def calculate_probability(self):\n return 0", "def probabilities(self, x, y):\n return self.feed_and_return(x, y, self.network.a)", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs", "def _calculate_probs_and_entropies(self):\n self._calculate_probs_and_entropy_y()\n self._calculate_probs_and_entropy_x(self.cat_cols)", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def loss_probability(reliability, k, chunk_count):\n return (factorial(chunk_count)/(factorial(chunk_count-k)*factorial(k))\n * pow(1 - reliability,k)*pow(reliability,chunk_count-k))", "def get_training_probs(losses0, losses1):\r\n\r\n diffs = movingAverage(losses0, lossSmoothingBoxcarSize) - movingAverage(losses1, lossSmoothingBoxcarSize)\r\n diffs[diffs < 0] = 0\r\n max_diff = np.max(diffs)\r\n \r\n if max_diff == 0:\r\n max_diff = 1\r\n\r\n diffs += 0.05*max_diff\r\n cumDiffs = np.cumsum(diffs)\r\n cumProbs = cumDiffs / np.max(cumDiffs, axis=None)\r\n\r\n return cumProbs.astype(np.float32)", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # add bias variable 1\n prob = np.zeros(X.shape[0], self.num_classes)\n ### YOUR CODE HERE\n z = X.dot(self.w)\n prob = soft_reg.softmax(z)\n ### END CODE\n return prob", "def calculate_probabilities(self):\n all_probabilities = [self.calculate_insolation_probabilities(),\n self.calculate_soil_demand_probabilities(),\n self.calculate_soil_depth_probabilities(),\n self.calculate_water_demand_probabilities()]\n final_probabilities = Image(size=self.controller.image_height_map.size, dtype=np.float)\n reasons_for_not_growing = [0, 0, 0, 0]\n for y in range(self.controller.image_height_map.size):\n for x in range(self.controller.image_height_map.size):\n probability = 1.0\n for i in range(len(all_probabilities)):\n if all_probabilities[i][y][x] < probability:\n probability = all_probabilities[i][y][x]\n if probability == 0.0:\n reasons_for_not_growing[i] += 1\n final_probabilities.image[y][x] = probability\n location_factor_with_max_reasons_for_not_growing = 0\n for j in range(len(reasons_for_not_growing)):\n if j >= 2: # soil demand should be skipped because it is a obvious reason\n if reasons_for_not_growing[j] > reasons_for_not_growing[location_factor_with_max_reasons_for_not_growing]:\n location_factor_with_max_reasons_for_not_growing = j\n location_factors = [\"insolation\", \"soil demand\", \"soil depth\", \"water demand\"]\n print(\"Main reason for not growing (except soil demand): \" + location_factors[location_factor_with_max_reasons_for_not_growing])\n return final_probabilities", "def 
calculate_perplexity(loss):\n return math.exp(float(loss)) if loss < 300 else float(\"inf\")", "def compute_loss(self):", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def log_prob(self):", "def perplexity(self, sents):\n return 2 ** self.cross_entropy(sents)", "def _precompute_probabilities(self):\n\n d_graph = self.d_graph\n first_travel_done = set()\n\n nodes_generator = self.graph.nodes() if self.quiet \\\n else tqdm(self.graph.nodes(), desc='Computing transition probabilities')\n\n for source in nodes_generator:\n\n # Init probabilities dict for first travel\n if self.PROBABILITIES_KEY not in d_graph[source]:\n d_graph[source][self.PROBABILITIES_KEY] = dict()\n\n for current_node in self.graph.neighbors(source):\n\n # Init probabilities dict\n if self.PROBABILITIES_KEY not in d_graph[current_node]:\n d_graph[current_node][self.PROBABILITIES_KEY] = dict()\n\n unnormalized_weights = list()\n first_travel_weights = list()\n d_neighbors = list()\n\n # Calculate unnormalized weights\n for destination in self.graph.neighbors(current_node):\n\n p = self.sampling_strategy[current_node].get(self.P_KEY,\n self.p) if current_node in self.sampling_strategy else self.p\n q = self.sampling_strategy[current_node].get(self.Q_KEY,\n self.q) if current_node in self.sampling_strategy else self.q\n\n if destination == source: # Backwards probability\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / p\n elif destination in self.graph[source]: # If the neighbor is connected to the source\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1)\n else:\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / q\n\n # Assign the unnormalized sampling strategy weight, normalize during random walk\n unnormalized_weights.append(ss_weight)\n if current_node not in first_travel_done:\n first_travel_weights.append(self.graph[current_node][destination].get(self.weight_key, 1))\n d_neighbors.append(destination)\n\n # Normalize\n unnormalized_weights = np.array(unnormalized_weights)\n d_graph[current_node][self.PROBABILITIES_KEY][\n source] = unnormalized_weights / unnormalized_weights.sum()\n\n if current_node not in first_travel_done:\n unnormalized_weights = np.array(first_travel_weights)\n d_graph[current_node][self.FIRST_TRAVEL_KEY] = unnormalized_weights / unnormalized_weights.sum()\n first_travel_done.add(current_node)\n\n # Save neighbors\n d_graph[current_node][self.NEIGHBORS_KEY] = d_neighbors", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def prob_distr(self, x):\n return 1.0/x", "def probs(self) -> List:\n return self._probs", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. 
Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def CalculateProbabilities(self, beta_0, beta_1):\n denom = self.zero_zero + self.zero_one + self.one_zero + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero = min( max( (self.zero_zero + self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one = min( max( (self.one_zero + self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_zero + self.one_zero + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_zero = min( max( (self.zero_zero + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_zero = min( max( 
(self.one_zero + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_one + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_one = min( max( (self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_one = min( max( (self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )", "def calc_probs(log_p):\n\n N = log_p.shape[0]\n\n log_Z_per_N = np.zeros(shape=(N, 1))\n\n for i in range(N):\n\n log_Z_per_N[i] = log_norm(log_p[i])\n\n log_p_new = log_p - log_Z_per_N\n\n p = np.exp(log_p_new)\n\n # log_Z = log_norm(log_p)\n\n # p = np.exp(log_p - log_Z)\n\n return p", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def prob(self, tple, class_counts, feature_counts):\n feats = self.dataset.input_features\n unnorm = [prod(feature_counts[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))\n /(class_counts[c]**(len(feats)-1))\n for c in range(self.num_classes)]\n thesum = sum(unnorm)\n return [un/thesum for un in unnorm]", "def perplexity(self, sents):\n # total words seen\n M = 0\n for sent in sents:\n M += len(sent)\n # cross-entropy\n l = 0\n print('Computing Perplexity on {} sents...\\n'.format(len(sents)))\n for sent in sents:\n l += self.sent_log_prob(sent) / M\n return pow(2, -l)", "def perplexity(y_true, y_pred):\n cross_entropy = K.categorical_crossentropy(y_true, y_pred)\n perplexity = K.pow(2.0, cross_entropy)\n return perplexity", "def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def prob_update(self):\n pass", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def prob(self, e):\n\t\treturn self.enumerate_all(self.variables, e)", "def compute_loss(self, obs, returns):", "def predict_proba(states):\r\n # convert states, compute logits, use softmax to get probability\r\n predicted = agent(torch.Tensor(states))\r\n probs = F.softmax(predicted).data.numpy()\r\n return probs", "def calculate_actor_loss(self, state_batch):\n action, (action_probabilities, log_action_probabilities), _ = self.produce_action_and_action_info(state_batch)\n qf1_pi = self.critic_local(state_batch)\n qf2_pi = self.critic_local_2(state_batch)\n min_qf_pi = torch.min(qf1_pi, qf2_pi)\n inside_term = self.alpha * log_action_probabilities - min_qf_pi\n policy_loss = (action_probabilities * inside_term).sum(dim=1).mean()\n log_action_probabilities = torch.sum(log_action_probabilities * action_probabilities, dim=1)\n return policy_loss, log_action_probabilities", "def prob(x, w):\n y = tf.constant(np.array([0., 1.]), dtype=tf.float32)\n prob_ = tf.exp(tf.matmul(x, w) * y) / (1 + tf.exp(tf.matmul(x, w)))\n return prob_", "def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: 
x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def predict_proba(self, x):\n e = self.predict_evidence(x)\n a = e + self.prior\n return a / torch.sum(a, dim=-1, keepdim=True)", "def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)", "def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs", "def calc_prob_local(self, *args):\n return 0", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def lnprobability(self):\n return", "def perplexity(model, data):\n probs = [model.get_prob(word) for word in data] # get word's probability\n probs_log = [\n log2(word_prob) if word_prob > 0 else log2(float_info.epsilon)\n for word_prob in probs\n ] # log the probabilities. using epsilon when the probability is 0\n sum_probs = reduce(lambda a, b: a + b, probs_log) # sum all\n power_val = (-1 * sum_probs) / len(probs_log) # divide by n and neg all\n return 2 ** power_val", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def reconstructed_probability(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n pred = self.predict(x)\n recon_dist = Normal(pred['recon_mu'], pred['recon_sigma'])\n x = x.unsqueeze(0)\n p = recon_dist.log_prob(x).exp().mean(dim=0).mean(dim=-1) # vector of shape [batch_size]\n return p", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def policy_loss(sal_box_prob, oracle_action, sample_weights):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=sal_box_prob, labels=oracle_action)\n \n return tf.reduce_mean(loss) * (1.0 - sample_weights / 10.0)", "def probability_array(self):\n q = self.apply_weights()\n return np.exp(q)/(1 + np.exp(q))", "def normalize_to_prob(inp):\n return (inp + 1)/2", "def u_probs(self):\n log_u = self.kernel.feature_log_prob_[self._nonmatch_class_pos()]\n\n return self._prob_inverse_transform(numpy.exp(log_u))", 
"def get_raw_probability(self):\n\t\tproba = RunOrder.BASE_SUCCESS_PROBABILITY\n\t\tproba += (self.additional_percents + self.hidden_percents) * 10\n\t\treturn proba", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def pred_prob(self, t, x, **kwargs):\n raise NotImplementedError", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def calc_bpr_loss(\n self, user_emd, item_emd, user_list, pos_item_list, neg_item_list\n ):\n u_e = user_emd[user_list]\n pi_e = item_emd[pos_item_list]\n ni_e = item_emd[neg_item_list]\n p_scores = torch.mul(u_e, pi_e).sum(dim=1)\n n_scores = torch.mul(u_e, ni_e).sum(dim=1)\n\n l1 = torch.sum(-F.logsigmoid(p_scores - n_scores))\n\n u_e_p = self.user_embedding(user_list)\n pi_e_p = self.item_embedding(pos_item_list)\n ni_e_p = self.item_embedding(neg_item_list)\n\n l2 = self.reg_loss(u_e_p, pi_e_p, ni_e_p)\n\n return l1 + l2 * self.reg_weight", "def _get_selection_probabilities(self):\r\n probabilities = np.arange(1, self.population_size+1, dtype=float)[::-1]\r\n probabilities /= probabilities.sum()\r\n return probabilities", "def _compute_loss(self, predictions, targets, **params):\n pass", "def prediction_prob(self, x, weights):\n _, probs = self.predict_probability(x, weights)\n preds = []\n for p in probs:\n if p>0.5: preds.append(1)\n else: preds.append(-1)\n return preds", "def predict_proba(self, X):\n linear_model = X.dot(self.W) + self.b\n prob = 1 / (1 + np.exp(-linear_model))\n return prob", "def predict_probability(self, x, weights):\n # Take dot product of feature_matrix and coefficients \n scores = np.dot(x, weights)\n \n # Compute P(y_i = +1 | x_i, w) using the link function\n probs = 1./(1. 
+ np.exp(-scores))\n \n # return probs predictions\n return scores, probs", "def cumprob(self):\r\n return self.probabilities.cumsum(-1)", "def compute_policy_log_probs(available_actions, policy, actions):\n def compute_log_probs(probs, labels):\n # Select arbitrary element for unused arguments (log probs will be masked)\n labels = tf.maximum(labels, 0)\n indices = tf.stack([tf.range(tf.shape(labels)[0]), labels], axis=1)\n # TODO tf.log should suffice\n return safe_log(tf.gather_nd(probs, indices))\n\n\n fn_id, arg_ids = actions\n fn_pi, arg_pis = policy\n # TODO: this should be unneccessary\n fn_pi = mask_unavailable_actions(available_actions, fn_pi)\n fn_log_prob = compute_log_probs(fn_pi, fn_id)\n tf.summary.scalar('log_prob/fn', tf.reduce_mean(fn_log_prob))\n\n log_prob = fn_log_prob\n for arg_type in arg_ids.keys():\n arg_id = arg_ids[arg_type]\n arg_pi = arg_pis[arg_type]\n arg_log_prob = compute_log_probs(arg_pi, arg_id)\n arg_log_prob *= tf.to_float(tf.not_equal(arg_id, -1))\n log_prob += arg_log_prob\n tf.summary.scalar('log_prob/arg/%s' % arg_type.name,\n tf.reduce_mean(arg_log_prob))\n\n return log_prob", "def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]", "def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))", "def predict_proba(self, states):\n states = Variable(torch.FloatTensor(states))\n probas = F.softmax(self.network.forward(states))\n return probas.data.numpy()", "def perplexity(self, corpus):\n M = 0\n prob = 0\n\n for line in corpus:\n M += len(line)\n M += 1 # consider \"STOP\"\n prob += self.sentence_logprob(line)\n result = 2**(-(prob/M))\n\n return result", "def log_loss(self):\n probabilities = self.probability_array().copy()\n # need to flip the probabilities for p < 0.5 with this binary case.\n # 1 - old_val is same as oldval*-1 + 1. Do in 2 steps:\n probabilities[np.equal(0, self.y)] *= -1\n probabilities[np.equal(0, self.y)] += 1\n # when multiclass: np.amax(probabilities, 1)\n return np.log(probabilities).sum()", "def evaluate_ppl(self, dev_data, batch_size: int=32):\n\n cum_loss = 0.\n cum_tgt_words = 0.\n\n # you may want to wrap the following code using a context manager provided\n # by the NN library to signal the backend to not to keep gradient information\n # e.g., `torch.no_grad()`\n\n with torch.no_grad():\n for sents in batch_iter(dev_data, batch_size):\n loss = self.__call__(sents)\n\n cum_loss += loss\n # should I include 0? 
\n tgt_word_num_to_predict = sum(len(s[1:]) for s in sents) # omitting the leading `<s>`\n cum_tgt_words += tgt_word_num_to_predict\n\n ppl = torch.exp(cum_loss / cum_tgt_words)\n\n return ppl", "def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n a = self.config['alpha']\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n weights = self.weights[state]\n # print(weights)\n\n\n # obtains the probability vector from Hedge: p_i(t) = (1+alpha)^s_i(t) / sum_{j \\in K} (1+alpha)^s_j(t)\n sum_weights_exponentials = sum([(1 + a) ** w for w in weights])\n pre_prob = [(((1 + a) ** w) / sum_weights_exponentials) for w in weights]\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * p) + (g / n) for p in pre_prob]\n\n return pi_s", "def calc_probabilities(applications):\n sum_advantage = sum(app.get_advantage() for app in applications)\n return [app.get_advantage() / sum_advantage for app in applications]", "def loss(self, log_prob, C):\n W = self.W\n T = self.T\n average_log_loss = -C * log_prob\n W_norm = torch.sum(torch.tensor([(torch.norm(Wy.double())) ** 2 for Wy in W])) / 2\n T_norm = torch.sum(torch.tensor([torch.sum(torch.tensor([Tij ** 2 for Tij in row])) for row in T])) / 2\n loss = average_log_loss + W_norm + T_norm\n return loss", "def perplexity(self, corpus):\n l = 0\n total_word_count = 0\n for sentence in corpus :\n l += self.sentence_logprob(sentence)\n # 2 extra START tokens and 1 extra STOP token\n total_word_count += len(sentence)\n l /= total_word_count\n return math.pow(2, -l)", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def prob(self, x, y):\n p = self.tag_prob(y)\n for i in range(len(y)):\n p *= self.out_prob(x[i], y[i])\n\n return p", "def loss(posterior, pars_to_penalize, c_rim):\n marginal = posterior.mean(axis=0)\n cond_entropy = misc.cat_entropy(posterior).mean()\n entropy = misc.cat_entropy(marginal.dimshuffle('x', 0)).sum()\n\n nmi = cond_entropy - entropy\n\n n_samples = posterior.shape[0]\n penalties = [(i ** 2).sum() / n_samples for i in pars_to_penalize]\n penalty = sum(penalties)\n\n loss = nmi + c_rim * penalty\n\n return get_named_variables(locals())", "def calculate_policy(self, state):\n # short aliases\n s = state # s stands for state\n g = self.config['gamma'] # g stands for gamma\n n = self.action_space.n # n stands for the number of actions\n pi_s = self.policy[state] # pi_s stands for the policy in state s\n\n sum_weights = sum(self.weights[s])\n\n # the policy is a probability vector, giving the probability of each action\n pi_s = [((1 - g) * w / sum_weights) + (g / n) for w in self.weights[s]]\n # print(state, pi_s)\n return pi_s", "def postProb(self, alpha, beta):\n gamma = None\n\n # -------------------------------------------->\n\n # Your Code goes here\n gamma = np.zeros(alpha.shape)\n for i in range(len(gamma)):\n s = 0\n for j in range(len(gamma[i])):\n gamma[i,j] = alpha[i,j]*beta[i,j]\n s += gamma[i,j]\n for j in range(len(gamma[i])):\n gamma[i,j] = gamma[i,j]/s\n # 
<---------------------------------------------\n\n return gamma" ]
[ "0.7476701", "0.7470944", "0.7260153", "0.72360164", "0.7207925", "0.71791935", "0.69240594", "0.69167626", "0.6853515", "0.68420863", "0.68305", "0.6796231", "0.67783356", "0.67678964", "0.6767037", "0.67276955", "0.6719253", "0.66585714", "0.66527164", "0.66472435", "0.66368335", "0.6636556", "0.6624642", "0.66233623", "0.65988827", "0.65544194", "0.6551755", "0.6513214", "0.6485143", "0.64548814", "0.64487576", "0.6447441", "0.64231056", "0.64212555", "0.64176863", "0.6414326", "0.6394991", "0.6389878", "0.6386701", "0.63844913", "0.6384227", "0.6379162", "0.63777435", "0.63733757", "0.6367245", "0.63663524", "0.6363102", "0.6362874", "0.63589513", "0.63544315", "0.63427496", "0.6342576", "0.6330642", "0.63242906", "0.63183403", "0.6309711", "0.630957", "0.63089174", "0.6299468", "0.62964344", "0.62956", "0.6295346", "0.6289053", "0.6287727", "0.6280816", "0.62790424", "0.62744904", "0.62601966", "0.6258479", "0.62567985", "0.625356", "0.62506723", "0.62504923", "0.62504923", "0.62504506", "0.6250198", "0.62489736", "0.6244579", "0.6235604", "0.62338346", "0.62330997", "0.6228931", "0.62273705", "0.62217194", "0.6220248", "0.6213219", "0.62126225", "0.62116355", "0.6209638", "0.62017953", "0.6194985", "0.6191565", "0.6183022", "0.6177294", "0.6169938", "0.61638534", "0.6157528", "0.6157431", "0.6156194", "0.61493593", "0.6145208" ]
0.0
-1
Compute the module loss for one minibatch.
def kl( self, x: Tensor, covariates: Tensor, use_temp: bool, ) -> Tuple[Tensor, Tensor]: log_probs, ldj_sum, _ = self.compute_probabilities(x, covariates, use_temp) return -(torch.logsumexp(log_probs, dim=1) + ldj_sum).mean()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss(self):", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss_(self, batch):\n raise NotImplementedError", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def loss(self) -> KernelLoss:\n return self._loss", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def loss_op(self):\n return self.loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def loss(self):\n return la.norm(self.resids) / self.normX", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def compute_loss(self, obs, returns):", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def loss_fn(self, recons, inputs, mu, log_var, **kwargs):\n# kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss = F.mse_loss(recons, inputs)\n# recons_loss = F.binary_cross_entropy(recons, inputs)\n KLD = torch.mean(-0.5 * torch.sum(1 + log_var - mu**2 - log_var.exp(), dim=1), dim=0)\n loss = recons_loss - KLD\n return loss, recons_loss, KLD", "def _compute_loss(self, predictions, targets, **params):\n pass", "def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n 
past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def get_loss(self):\n return self.loss / self.cnt", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def loss(self):\n return self._loss", "def compute_loss(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute loss by MSE / MAE\n # ***************************************************\n \n # vector e\n e = compute_e(y, tx, w)\n N = compute_N(e)\n L_MSE = np.dot(np.matrix.transpose(e), e)\n L_MSE = L_MSE / (2 * N)\n \n return L_MSE", "def calculate_loss(self, output, target, redmode = 'mean'):\n\n loss = F.cross_entropy(output, target, reduction = redmode)\n return loss", "def loss(self, X_batch, y_batch, reg):\n pass", "def reduce_loss(self, all_loss):\n if self._gpu_num == 1:\n total_loss = all_loss[0]\n else:\n layer_loss = [all_loss[j] for j in range(self._gpu_num)]\n total_loss = tf.reduce_mean(layer_loss)\n\n return total_loss", "def loss(self, batch: base.Batch, key: base.RngKey) -> base.Array:", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def compute_batch_loss(self, batch_data):\n loss = 0\n for data in batch_data:\n x, y = data\n x = x.view(-1,x.shape[0],x.shape[1])\n y = y.view(-1,y.shape[0], y.shape[1])\n loss += self.compute_loss(x.to(self.device), y.to(self.device))\n \n return loss", "def loss_fn(self, targets, outputs, model):", "def _create_loss(self):\n with tf.device('/cpu:0'):\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=self.labels_placeholder, \n logits=self.logits, name='loss'))", "def compute_loss(self, *args, **kwargs):\n raise NotImplementedError", "def get_loss(self):\n raise NotImplementedError", "def kl_loss_batch(self):\n return sum([e for m in self.modules for e in m._kl_losses])", "def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, 
training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def pseudo_loss(self, params, batches):\n loss = 0\n for batch in batches:\n states = batch[\"states\"]\n actions = batch[\"actions\"]\n returns = batch[\"returns\"]\n\n preds = self.predict_jax(params, states)\n\n baseline = jnp.mean(returns, axis=0)\n preds_select = jnp.take_along_axis(preds, jnp.expand_dims(actions, axis=2), axis=2).squeeze()\n loss += (-jnp.mean(jnp.sum(preds_select * (returns - baseline))))\n\n return loss + self.l2_regularizer(params, 0.001) # try to divide by len(batches)?", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def _get_loss(self):\n raise NotImplementedError", "def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def apply_loss_mod(num):\n tmp = num - 
LOSS\n tmp = tmp if tmp > 0 else 0\n return apply_mod(tmp)", "def compute_loss(self, X, y):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n\r\n # Computing the loss using the below formula\r\n # Loss = -(1/m)*sum( (y_i)*log(σ(wTx_i)) + (1-y_i)*log(1 - σ(wTx_i)))\r\n # m = number of examples and i for ith example\r\n\r\n loss = 0\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n # for idx,example in enumerate(X):\r\n # loss = loss + y[idx] * np.log(self.sigmoid(np.dot(example, self.w))) + (1 - y[idx]) * np.log(1 - self.sigmoid(np.dot(example, self.w)))\r\n # loss = -loss/ X.shape[0]\r\n\r\n loss = -np.mean(y * np.log(self.sigmoid(np.dot(X, self.w))) + (1 - y) * np.log(1 - self.sigmoid(np.dot(X, self.w))))\r\n return loss", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def calculate_loss(self, output, batch, training_context, last_activation=None):\n if self._model_loss_key is None:\n return output\n else:\n return output[self._model_loss_key]", "def ss_loss_(self, batch):\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + 
pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def get_loss(self, Loss, results, inputs, device):\n return", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. 
* self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n 
grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def _compute_single_loss(logits, positions):\n loss = fluid.layers.softmax_with_cross_entropy(\n logits=logits, label=positions)\n loss = fluid.layers.mean(x=loss)\n return loss", "def compute_loss(self, x, label):\n # Forward propagation\n y_hat = self.forward_pass(x)\n return -np.log(y_hat[label])", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def ridge_loss(w: FloatTensor, x: FloatTensor, y: FloatTensor, lmb: float) -> float:\n return ols_loss(w, x, y, 0.0) + lmb * w.pow(2).sum()", "def loss(self, y: torch.Tensor, state: AlgorithmState) -> torch.Tensor:\n\n raise NotImplementedError()", "def lfads_training_loss(params, lfads_hps, key, x_bxt, kl_scale, keep_rate):\n losses = lfads_losses(params, lfads_hps, key, x_bxt, kl_scale, keep_rate)\n return losses['total']", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def setup_loss(self):\n with vs.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label_placeholder, logits=self.label_predictions))", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE ***\"\n predictedY = self.run(xs)\n return nn.SoftmaxLoss(predictedY, y)\n # return nn.SquareLoss(predictedY, y)", "def ranknet_loss(y, m_):\n conf = 1.0\n ones_ = tf.ones_like(m_, dtype=tf.float32)\n y_m_ = tf.mul(y, ones_)\n y_diff_ = tf.sub(y_m_, tf.transpose(y_m_))\n t_1_ = -tf.mul(conf*ones_, y_diff_)\n t_2_ = tf.log(ones_ + tf.exp(y_diff_))\n sum_ = tf.add(t_1_, t_2_)\n mult_sum_ = tf.mul(m_, sum_)\n loss_ = tf.reduce_sum(mult_sum_) / tf.reduce_sum(m_)\n return loss_, m_", "def loss(self):\n return 'mse'", "def ddpm_loss(rng,\n state,\n batch,\n ddpm_params,\n train=True,\n optimize_fn=None,\n pmap_axis_name='batch'):\n\n x = batch['image']\n rng1, rng2 = random.split(rng)\n betas = jnp.asarray(ddpm_params['betas'], dtype=jnp.float32)\n sqrt_alphas_cumprod = jnp.asarray(\n ddpm_params['sqrt_alphas_cumprod'], dtype=jnp.float32)\n sqrt_1m_alphas_cumprod = jnp.asarray(\n ddpm_params['sqrt_1m_alphas_cumprod'], dtype=jnp.float32)\n T = random.choice(rng1, len(betas), shape=(x.shape[0],)) # pylint: disable=invalid-name\n\n noise = random.normal(rng2, x.shape)\n\n perturbed_data = sqrt_alphas_cumprod[T, None, None, None] * x + \\\n sqrt_1m_alphas_cumprod[T, None, None, None] * noise\n\n run_rng, _ = random.split(rng2)\n\n @jax.jit\n def loss_fn(model):\n if train:\n with 
nn.stateful(state.model_state) as new_model_state:\n with nn.stochastic(run_rng):\n scores = model(perturbed_data, T, train=train)\n else:\n with nn.stateful(state.model_state, mutable=False):\n with nn.stochastic(run_rng):\n scores = model(perturbed_data, T, train=train)\n\n new_model_state = state.model_state\n\n scores = scores.reshape((scores.shape[0], -1))\n target = noise.reshape((noise.shape[0], -1))\n loss = jnp.mean((scores - target)**2)\n return loss, new_model_state\n\n if train:\n grad_fn = jax.jit(jax.value_and_grad(loss_fn, has_aux=True))\n (loss, new_model_state), grad = grad_fn(state.optimizer.target)\n grad = jax.lax.pmean(grad, axis_name=pmap_axis_name)\n ## WARNING: the gradient clip step differs slightly from the\n ## original DDPM implementation, and seem to be more reasonable.\n ## The impact of this difference on performance is negligible.\n new_optimizer = optimize_fn(state, grad)\n new_params_ema = jax.tree_map(\n lambda p_ema, p: p_ema * state.ema_rate + p * (1. - state.ema_rate),\n state.params_ema, new_optimizer.target.params)\n step = state.step + 1\n new_state = state.replace( # pytype: disable=attribute-error\n step=step,\n optimizer=new_optimizer,\n model_state=new_model_state,\n params_ema=new_params_ema)\n else:\n model_ema = state.optimizer.target.replace(params=state.params_ema)\n loss, _ = loss_fn(model_ema)\n new_state = state\n\n loss = jax.lax.pmean(loss, axis_name=pmap_axis_name)\n return loss, new_state", "def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def get_heat_loss_coefficient_of_partition() -> float:\n return 1 / 0.46", "def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]", "def calculate_loss(self, logits, one_hot_true_labels):\n loss = 0.0\n loss += self.calculate_loss_by_granularity(logits, one_hot_true_labels, end=self.coarse_slice)\n loss += 
self.calculate_loss_by_granularity(logits, one_hot_true_labels, ini=self.coarse_slice,\n end=self.fine_slice)\n loss += self.calculate_loss_by_granularity(logits, one_hot_true_labels, ini=self.fine_slice)\n return loss", "def loss_weights(self):\n return None", "def _compute_unreduced_loss_impl(self, labels, logits, mask=None):\n raise NotImplementedError('Calling an abstract method.')", "def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)", "def loss(self, x, y):\n\n return self.loss_fn(x, y)", "def configure_loss_fn(self) -> nn.Module:\n pass", "def get_loss(self, inputs, targets, dags):\n if not isinstance(dags, list):\n dags = [dags]\n\n loss = 0\n for dag in dags:\n output = self.shared(inputs, dag)\n sample_loss = (self.model_loss(output, targets) /\n self.args.shared_num_sample)\n loss += sample_loss\n\n loss =loss/len(dags)\n\n return loss", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. 
This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. + self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 2 ***\"\n return nn.SquareLoss(self.run(x), y)", "def get_loss_fn():\n return reconstruction", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def make_loss_fn(\n cls, config: ml_collections.ConfigDict\n ) -> Callable[..., Tuple[float, MetricGroups, Dict[str, Any]]]:\n mlm_weight = config.mlm_weight\n el_im_weight = config.el_im_weight\n el_final_weight = config.el_final_weight\n el_score_mode = config.get('el_score_mode', 'dot')\n mtb_im_weight = config.get('mtb_im_weight', 0)\n 
mtb_final_weight = config.get('mtb_final_weight', 0)\n mtb_score_mode = config.get('mtb_score_mode', 'dot')\n\n def loss_fn(\n model_config: ml_collections.FrozenConfigDict,\n model_params: Dict[str, Any],\n model_vars: Dict[str, Any], # pylint: disable=unused-argument\n batch: Dict[str, Any],\n deterministic: bool,\n dropout_rng: Optional[Dict[str, Array]] = None,\n ) -> Tuple[float, MetricGroups, Dict[str, Any]]:\n \"\"\"Task-specific loss function. See BaseTask.\"\"\"\n\n batch_size = batch['text_ids'].shape[0]\n loss_helpers, logging_helpers = cls.build_model(model_config).apply( # pylint: disable=unused-variable\n {'params': model_params},\n batch,\n deterministic=deterministic,\n rngs=dropout_rng)\n mention_target_is_masked = batch['mention_target_is_masked']\n mention_target_is_not_masked = 1 - batch['mention_target_is_masked']\n mention_target_ids = batch['mention_target_ids']\n mention_target_ids = mention_target_ids * batch['mention_target_weights']\n\n mlm_logits = loss_helpers['mlm_logits']\n\n mlm_loss, mlm_denom = metric_utils.compute_weighted_cross_entropy(\n mlm_logits, batch['mlm_target_ids'], batch['mlm_target_weights'])\n\n mlm_correct_mask = jnp.equal(\n jnp.argmax(mlm_logits, axis=-1),\n batch['mlm_target_ids']) * batch['mlm_target_weights']\n mlm_acc = mlm_correct_mask.sum()\n mlm_mention_acc = (mlm_correct_mask *\n batch['mlm_target_is_mention']).sum()\n mlm_mention_denom = (batch['mlm_target_weights'] *\n batch['mlm_target_is_mention']).sum()\n mlm_non_mention_acc = (mlm_correct_mask *\n (1 - batch['mlm_target_is_mention'])).sum()\n mlm_non_mention_denom = (batch['mlm_target_weights'] *\n (1 - batch['mlm_target_is_mention'])).sum()\n\n metrics = {\n 'mlm': {\n 'loss': mlm_loss,\n 'acc': mlm_acc,\n 'denominator': mlm_denom,\n },\n 'mlm_mention': {\n 'acc': mlm_mention_acc,\n 'denominator': mlm_mention_denom,\n },\n 'mlm_non_mention': {\n 'acc': mlm_non_mention_acc,\n 'denominator': mlm_non_mention_denom,\n },\n }\n\n if 'intermediate_mention_encodings' in loss_helpers:\n intermediate_target_mention_encodings = jut.matmul_slice(\n loss_helpers['intermediate_mention_encodings'],\n batch['mention_target_indices'])\n else:\n intermediate_target_mention_encodings = loss_helpers[\n 'im_target_mention_encodings']\n\n if model_config.encoder_config.get('no_entity_attention', False):\n (el_im_loss, el_im_metrics,\n (el_im_acc_per_mention,\n el_im_weight_per_mention)) = mention_losses.entity_linking_loss(\n intermediate_target_mention_encodings,\n loss_helpers['entity_embeddings'], mention_target_ids,\n batch['mention_target_weights'], el_score_mode)\n el_im_denom = el_im_metrics['denominator']\n metrics['el_intermediate'] = el_im_metrics\n metrics['el_intermediate_masked'] = {\n 'acc':\n jnp.dot(el_im_acc_per_mention,\n el_im_weight_per_mention * mention_target_is_masked),\n 'denominator':\n jnp.dot(el_im_weight_per_mention, mention_target_is_not_masked),\n }\n metrics['el_intermediate_non_masked'] = {\n 'acc':\n jnp.dot(el_im_acc_per_mention,\n el_im_weight_per_mention * mention_target_is_masked),\n 'denominator':\n jnp.dot(el_im_weight_per_mention, mention_target_is_not_masked),\n }\n else:\n intermediate_entity_attention = loss_helpers[\n 'intermediate_entity_attention']\n\n # Construct targets and ids for intermediate entity linking loss\n intermediate_target_ids = jnp.zeros_like(batch['mention_mask'])\n intermediate_target_ids = intermediate_target_ids.at[\n batch['mention_target_indices']].add(\n mention_target_ids * batch['mention_target_weights'])\n\n 
intermediate_target_weights = jnp.zeros_like(\n batch['mention_mask'], dtype=intermediate_entity_attention.dtype)\n intermediate_target_weights = intermediate_target_weights.at[\n batch['mention_target_indices']].add(\n batch['mention_target_weights'])\n\n mention_is_masked = jnp.zeros_like(batch['mention_mask'])\n mention_is_masked = mention_is_masked.at[\n batch['mention_target_indices']].add(\n mention_target_is_masked * batch['mention_target_weights'])\n\n el_im_loss, el_im_denom = metric_utils.compute_weighted_cross_entropy(\n intermediate_entity_attention,\n intermediate_target_ids,\n intermediate_target_weights,\n inputs_are_prob=True)\n\n el_im_correct_mask = jnp.equal(\n jnp.argmax(intermediate_entity_attention, axis=-1),\n intermediate_target_ids) * intermediate_target_weights\n el_im_acc = el_im_correct_mask.sum()\n\n el_im_acc, _ = metric_utils.compute_weighted_accuracy(\n intermediate_entity_attention, intermediate_target_ids,\n intermediate_target_weights)\n\n intermediate_entity_cos_sim = loss_helpers[\n 'intermediate_entity_cos_sim'][batch['mention_target_indices'],\n mention_target_ids]\n\n metrics['el_intermediate'] = {\n 'loss':\n el_im_loss,\n 'acc':\n el_im_acc,\n 'cos_sim':\n jnp.dot(intermediate_entity_cos_sim,\n batch['mention_target_weights']),\n 'denominator':\n el_im_denom,\n }\n metrics['el_intermediate_masked'] = {\n 'acc':\n jnp.dot(el_im_correct_mask, mention_is_masked),\n 'denominator':\n jnp.dot(batch['mention_target_weights'],\n batch['mention_target_is_masked']),\n }\n metrics['el_intermediate_non_masked'] = {\n 'acc':\n jnp.dot(el_im_correct_mask, (1 - mention_is_masked)),\n 'denominator':\n jnp.dot(batch['mention_target_weights'],\n (1 - batch['mention_target_is_masked'])),\n }\n\n im_final_mention_encodings_cos_sim = jut.cosine_similarity(\n intermediate_target_mention_encodings,\n loss_helpers['target_mention_encodings'])\n metrics['im_final_mention_encodings'] = {\n 'cos_sim':\n jnp.dot(im_final_mention_encodings_cos_sim,\n batch['mention_target_weights']),\n 'denominator':\n batch['mention_target_weights'].sum(),\n }\n\n (el_final_loss, el_final_metrics,\n (el_final_acc_per_mention,\n el_final_weight_per_mention)) = mention_losses.entity_linking_loss(\n loss_helpers['target_mention_encodings'],\n loss_helpers['entity_embeddings'], mention_target_ids,\n batch['mention_target_weights'], el_score_mode)\n el_final_denom = el_final_metrics['denominator']\n metrics['el_final'] = el_final_metrics\n metrics['el_final_masked'] = {\n 'acc':\n jnp.dot(el_final_acc_per_mention,\n el_final_weight_per_mention * mention_target_is_masked),\n 'denominator':\n jnp.dot(el_final_weight_per_mention, mention_target_is_masked),\n }\n metrics['el_final_non_masked'] = {\n 'acc':\n jnp.dot(\n el_final_acc_per_mention,\n el_final_weight_per_mention * mention_target_is_not_masked),\n 'denominator':\n jnp.dot(el_final_weight_per_mention,\n mention_target_is_not_masked),\n }\n\n loss = mlm_weight * mlm_loss / mlm_denom\n loss += el_im_weight * el_im_loss / el_im_denom\n loss += el_final_weight * el_final_loss / el_final_denom\n\n if mtb_im_weight > 0:\n (mtb_im_loss, mtb_im_metrics) = mention_losses.mtb_loss(\n intermediate_target_mention_encodings,\n batch['mention_target_batch_positions'], mention_target_ids,\n batch_size, mtb_score_mode, mention_target_is_masked, 'im_')\n mtb_im_denom = mtb_im_metrics['im_mtb']['denominator']\n loss += mtb_im_weight * mtb_im_loss / mtb_im_denom\n metrics.update(mtb_im_metrics)\n\n if mtb_final_weight > 0:\n (mtb_final_loss, mtb_final_metrics) 
= mention_losses.mtb_loss(\n loss_helpers['target_mention_encodings'],\n batch['mention_target_batch_positions'], mention_target_ids,\n batch_size, mtb_score_mode, mention_target_is_masked, 'final_')\n mtb_final_denom = mtb_final_metrics['final_mtb']['denominator']\n loss += mtb_final_weight * mtb_final_loss / mtb_final_denom\n metrics.update(mtb_final_metrics)\n\n metrics['agg'] = {\n 'loss': loss,\n 'denominator': 1.0,\n }\n return loss, metrics, {} # pytype: disable=bad-return-type # jax-ndarray\n\n return loss_fn", "def get_loss_fn(self):\n raise NotImplementedError()", "def loss(self, logits, labels):\r\n return tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels,logits))", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def unsupervised_loss_pwcnet(batch_input, module=None, loss_weights_dict=None,\n params=None, return_flow=False, normalization=None, device_ids=0):\n\n full_res = params.get('full_res')\n\n normalization = normalization / 255\n pyramid_loss = params.get('pyramid_loss')\n\n Image1 = batch_input['image1'].to(device=torch.device('cuda:0'))\n Image2 = batch_input['image2'].to(device=torch.device('cuda:0'))\n\n Image1 = Image1 / 255\n Image2 = Image2 / 255\n\n if params.get('border_mask'):\n border_mask = batch_input['border_mask'].to(device=torch.device('cuda:0'))\n else:\n border_mask = None\n\n Image1_norm = Image1 - normalization\n Image2_norm = Image2 - normalization\n\n flow_fw = module(Image1_norm, Image2_norm)\n flow_bw = module(Image2_norm, Image1_norm)\n\n layer_weights = [12.7, 4.35, 3.9, 3.4, 1.1]\n layer_patch_distances = [3, 2, 2, 1, 1]\n\n im1_s = Image1.clone().detach()\n im2_s = Image2.clone().detach()\n mask_s = border_mask.clone().detach()\n\n if full_res:\n layer_weights = [12.7, 5.5, 5.0, 4.35, 3.9, 3.4, 1.1]\n layer_patch_distances = [3, 3] + layer_patch_distances\n final_flow_scale = FLOW_SCALE * 4\n scale_factor = 1.0\n final_flow_fw = flow_fw[0] * final_flow_scale\n final_flow_bw = flow_bw[0] * final_flow_scale\n flow_pair = zip(flow_fw, flow_bw)\n else:\n final_flow_scale = FLOW_SCALE\n scale_factor = 0.25\n final_flow_fw = flow_fw[0] * final_flow_scale * 4\n final_flow_bw = flow_bw[0] * final_flow_scale * 4\n flow_pair = zip(flow_fw[2:], flow_bw[2:])\n\n if pyramid_loss:\n flow_enum = enumerate(flow_pair)\n else:\n flow_enum = [(0, (flow_fw[0], flow_bw[0]))]\n\n mask_occlusion = params.get('mask_occlusion', '')\n\n combined_loss = 0.0\n\n for i, flow in flow_enum:\n flow_scale = final_flow_scale / (2 ** i)\n flow_fw_s, flow_bw_s = flow\n\n im1_s = torch.nn.functional.interpolate(im1_s, scale_factor=scale_factor, mode='bilinear')\n im2_s = torch.nn.functional.interpolate(im2_s, scale_factor=scale_factor, mode='bilinear')\n mask_s = torch.nn.functional.interpolate(mask_s, scale_factor=scale_factor, mode='bilinear')\n\n losses = compute_losses(im1_s, im2_s,\n flow_fw_s * flow_scale, flow_bw_s * flow_scale,\n border_mask=mask_s if params.get('border_mask') else None,\n mask_occlusion=mask_occlusion,\n data_max_distance=layer_patch_distances[i])\n\n layer_loss = 0.0\n\n for loss_type, loss_weight in loss_weights_dict.items():\n name = loss_type.rstrip('_weight')\n layer_loss += loss_weight * losses[name]\n\n combined_loss += layer_weights[i] * layer_loss\n\n scale_factor = 0.5\n\n if not return_flow:\n return combined_loss\n\n return combined_loss, 
final_flow_fw, final_flow_bw" ]
[ "0.7217973", "0.69893515", "0.69603366", "0.68529373", "0.6852118", "0.6713502", "0.66971284", "0.6693088", "0.6590194", "0.6569734", "0.6569734", "0.6549821", "0.6538307", "0.6513913", "0.64711624", "0.64643025", "0.6439348", "0.6421351", "0.640085", "0.6387436", "0.6382425", "0.637107", "0.6364577", "0.6364013", "0.63639396", "0.6360035", "0.6359058", "0.63566315", "0.63346773", "0.6332506", "0.6317075", "0.63084525", "0.62990695", "0.62882036", "0.62837595", "0.62784296", "0.6268909", "0.6260668", "0.6257334", "0.62503016", "0.6243068", "0.6230273", "0.62278473", "0.6211914", "0.62105596", "0.62098515", "0.62081695", "0.6200075", "0.6198826", "0.6198826", "0.61978877", "0.6190886", "0.6183885", "0.6173007", "0.6170978", "0.6162508", "0.6157516", "0.61560565", "0.615493", "0.61537415", "0.6152694", "0.6137915", "0.6133941", "0.6130868", "0.6118289", "0.61120534", "0.6105639", "0.6101697", "0.610013", "0.6098667", "0.6097911", "0.60972667", "0.6080447", "0.6072406", "0.6072138", "0.6052929", "0.60511994", "0.60442865", "0.60360867", "0.60357773", "0.6032033", "0.60291845", "0.6025178", "0.60145134", "0.60105735", "0.6006329", "0.60049105", "0.5996915", "0.5994219", "0.5992575", "0.59876156", "0.5985356", "0.59830666", "0.59823036", "0.5974007", "0.5969568", "0.59524685", "0.595179", "0.5947261", "0.59418404", "0.5934913" ]
0.0
-1
Test vertex_areas. Vertex area is the area of all of the triangles that are in contact with a given vertex
def test_vertex_areas(self, faces, point): number_of_contact_faces = gs.array([3, 5, 5, 5, 5, 5, 3, 5]) triangle_area = 0.5 * 2 * 2 expected = 2 * (number_of_contact_faces * triangle_area) / 3 space = self.Space(faces) result = space.vertex_areas(point) assert result.shape == (8,) assert expected.shape == (8,) assert gs.allclose(result, expected), result point = gs.array([point, point]) expected = gs.array([expected, expected]) result = space.vertex_areas(point) assert point.shape == (2, 8, 3) assert result.shape == (2, 8), result.shape assert gs.allclose(result, expected), result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg", "def vertex_areas(\n points: np.ndarray,\n triangles: np.ndarray,\n tri_areas: Optional[np.ndarray] = None,\n) -> np.ndarray:\n if tri_areas is None:\n tri_areas = triangle_areas(points, triangles)\n v_areas = np.zeros(len(points), dtype=float)\n for a, t in zip(tri_areas / 3, triangles):\n v_areas[t[0]] += a\n v_areas[t[1]] += a\n v_areas[t[2]] += a\n return v_areas", "def vertex_areas(self, point):\n batch_shape = point.shape[:-2]\n n_vertices = point.shape[-2]\n n_faces = self.faces.shape[0]\n area = self._triangle_areas(point)\n id_vertices = gs.broadcast_to(\n gs.flatten(self.faces), batch_shape + (math.prod(self.faces.shape),)\n )\n incident_areas = gs.zeros(batch_shape + (n_vertices,))\n val = gs.reshape(\n gs.broadcast_to(gs.expand_dims(area, axis=-2), batch_shape + (3, n_faces)),\n batch_shape + (-1,),\n )\n incident_areas = gs.scatter_add(\n incident_areas, dim=len(batch_shape), index=id_vertices, src=val\n )\n vertex_areas = 2 * incident_areas / 3.0\n return vertex_areas", "def test_regular_polygon_area(self):\n self.assertEqual(10, regular_polygon_area(\n self.values['perimeter'], self.values['apothem']))", "def test_absolute_areas(self):\n\n assert len(self.test_shape.areas) == 4\n assert len(set([round(i) for i in self.test_shape.areas])) == 3\n assert self.test_shape.areas.count(pytest.approx(60 * math.pi * 2 * 1000)) == 2\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 970)) == 1\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 1030)) == 1", "def compute_triangle_area(vertices):\n v01 = vertices[0] - vertices[1]\n v02 = vertices[0] - vertices[2]\n cross_prod = np.cross(v01, v02)\n area = 0.5 * np.linalg.norm(cross_prod)\n return area", "def _triangle_areas(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge_01 = gs.linalg.norm((vertex_0 - vertex_1), axis=-1)\n half_perimeter = 0.5 * (len_edge_12 + len_edge_02 + len_edge_01)\n return gs.sqrt(\n (\n half_perimeter\n * (half_perimeter - len_edge_12)\n * (half_perimeter - len_edge_02)\n * (half_perimeter - len_edge_01)\n ).clip(min=1e-6)\n )", "def test_absolute_shape_areas(self):\n\n assert self.test_shape.area == pytest.approx((math.pi * (10**2) * 2) + (math.pi * (2 * 10) * 30))\n assert len(self.test_shape.areas) == 3\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 2\n assert self.test_shape.areas.count(pytest.approx(math.pi * (2 * 10) * 30)) == 1", "def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)", "def test_triangle_area(self):\n self.assertEqual(6, triangle_area(\n self.values['base'], self.values['height']))", "def test_polyarea(self):\n\n xcoords, ycoords = [0, 1, 1, 0, 0], [0, 0, 1, 1, 0]\n xycoords = np.stack((xcoords, ycoords), axis=1)\n\n # Area calculation from separately provided x, y coordinates\n self.assertEqual(po.polyarea(x=xcoords, y=ycoords), 1.)\n # Area calculation from combined x, y coordinates\n self.assertEqual(po.polyarea(coords=xycoords), 1.)", "def test_triangle_positive_area(self):\n t = Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 
** 0.5, 6.023))\n self.assertEqual(t.area(1), 4.0,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1),\\\n returned value != 4.0.\")\n self.assertEqual(t.area(), 4.013,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1) failed,\\\n returned value != 4.013.\")\n self.assertEqual(t.area(6), 4.012568,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(6) failed,\\\n returned value != 4.012568.\")", "def test_inside_triangle(self):\n\n # defining triangle vertices\n v1x, v1y = 0, 0\n v2x, v2y = 1, 1\n v3x, v3y = 1, 0\n\n # test vertices are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v1x, v1y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v2x, v2y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v3x, v3y))\n\n # check line segments are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1, 0.5))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.5))\n\n # check an interior point\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.1))\n\n # check an exterior point\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, -0.5, -0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, -0.01))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1.01, 0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.49999, 0.5001))", "def select_area(minArea): \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n \n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n \n # Load mesh\n me = bpy.context.edit_object.data\n bm = bmesh.from_edit_mesh(me)\n # Ensure internal data needed for int subscription is initialized\n bm.faces.ensure_lookup_table()\n\n # Array containing the different areas\n loops = []\n faces = bm.faces\n\n # Loop for detect multiple areas\n while faces:\n faces[0].select_set(True) # Select 1st face\n bpy.ops.mesh.select_linked() # Select all linked faces makes a full loop\n loops.append([f.index for f in faces if f.select])\n bpy.ops.mesh.hide(unselected=False) # Hide the detected loop\n faces = [f for f in bm.faces if not f.hide] # Update faces\n\n # Unhide all faces\n bpy.ops.mesh.reveal()\n print(\"Mesh has {} parts\".format(len(loops)))\n\n print(\"\\nThe face lists are:\")\n for loop in loops:\n print(loop)\n \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n # Switch in object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Loop to select areas are higher than the area min\n area = 0 \n for rows in range(len(loops)):\n area = 0\n for columns in loops[rows]:\n # Calculate the area\n area = area + bpy.context.active_object.data.polygons[columns].area\n print(rows)\n print(area)\n print(minArea)\n # Compare the area with the area min\n if area > minArea:\n for columns in loops[rows]:\n # Select all the faces of the area\n bpy.context.active_object.data.polygons[columns].select = True\n\n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')", "def test_polygon_area(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n 
# Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)", "def inside(self, areas):\n\n poly_orig = geometry.Polygon(self.area_poly)\n poly_origb = affinity.scale(poly_orig, xfact=1.1, yfact=1.1)\n idf = shapely.vectorized.contains(\n poly_origb, areas['RA'], areas['Dec'])\n\n return areas[idf]", "def test(x_0, y_0, x_1, y_1, x_2, y_2):\n \n print(\"A triangle with vertices (\" + str(x_0) + \",\" + str(y_0) + \"),\")\n print(\"(\" + str(x_1) + \",\" + str(y_1) + \"), and\")\n print(\"(\" + str(x_2) + \",\" + str(y_2) + \") has an area of\")\n print(str(triangle_area(x_0, y_0, x_1, y_1, x_2, y_2)) + \".\")", "def calculate_areas(polygon):\n project = ft.partial(pj.transform,\n pj.Proj(init='epsg:4326'),\n pj.Proj('+proj=eck4 +lat_0=' + str(polygon.centroid.y) + ' +lon_0=' + str(polygon.centroid.x)))\n field_projected = transform(project, polygon)\n # convert from square meters to acres\n return uom.Uom(field_projected.area, uom.SquareMeter)", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def get_face_areas(self, idx=-1):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v1, v2, v3 = self.faces[idx]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2)\n b = np.linalg.norm(v1 - v3)\n c = np.linalg.norm(v2 - v3)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area\n else:\n v1, v2, v3 = self.faces[:, 0], self.faces[:, 1], self.faces[:, 2]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2, axis=1)\n b = np.linalg.norm(v1 - v3, axis=1)\n c = np.linalg.norm(v2 - v3, axis=1)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area", "def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = 
self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean", "def test_area():\n\n pt0 = [0, 0]\n pt1 = [5, 5]\n pt2 = [5, 0]\n\n truth = 12.5\n\n assert isclose(truth, area([pt0, pt1, pt2]))", "def compute_mesh_area(mesh):\n vertices = mesh.vertices\n faces = mesh.faces\n areas = [compute_triangle_area(vertices[face]) for face in faces]\n mesh_surface_area = sum(areas)\n return mesh_surface_area", "def planar_intersection_polygon(area_corners, segment_corners):\n # First test each \n lons = np.array([])\n lats = np.array([])\n for segment_corner in segment_corners:\n if planar_point_inside(segment_corner,area_corners):\n currlon = segment_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[segment_corner.lat]))\n log.info('Adding intersection from segment '+str(segment_corner))\n for area_corner in area_corners:\n if planar_point_inside(area_corner,segment_corners):\n currlon = area_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[area_corner.lat]))\n log.info('Adding intersection from area '+str(area_corner))\n\n area_line1 = Line(area_corners[0], area_corners[1])\n area_line2 = Line(area_corners[1], area_corners[2])\n area_line3 = Line(area_corners[2], area_corners[3])\n area_line4 = Line(area_corners[3], area_corners[0])\n\n segment_line1 = Line(segment_corners[0], segment_corners[1])\n segment_line2 = Line(segment_corners[1], segment_corners[2])\n segment_line3 = Line(segment_corners[2], segment_corners[3])\n segment_line4 = Line(segment_corners[3], segment_corners[0])\n\n for i in (area_line1, area_line2, area_line3, area_line4):\n for j in (segment_line1, segment_line2, segment_line3, segment_line4):\n intersect = i.intersection(j)\n if intersect:\n log.info('Adding actual intersection '+str(intersect))\n currlon = intersect.lon\n # MLS use wrap_longitudes?\n if intersect.lon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[intersect.lat]))\n\n minlon = math.degrees(lons.min())\n maxlon = math.degrees(lons.max())\n minlat = math.degrees(lats.min())\n maxlat = math.degrees(lats.max())\n # Coordinate MUST be between -180 and 180\n # MLS use wrap_longitudes?\n if minlon > 180:\n minlon -= 180\n if maxlon > 180:\n maxlon -= 180\n from pyresample.spherical_geometry import Coordinate\n 
return [Coordinate(minlon,maxlat),\n Coordinate(maxlon,maxlat),\n Coordinate(maxlon,minlat),\n Coordinate(minlon,minlat)]", "def test_multi_area(self):\n pass", "def averageInsideVertices(mesh):\r\n cmds.select(mesh)\r\n cmds.polySelectConstraint(m=3, t=0x0001, w=2)\r\n cmds.polySelectConstraint(dis=True)\r\n cmds.polyAverageVertex(i = 10, ch = 0)", "def polygon_area(ppath): # pragma: no cover\n v_ = ppath.vertices\n if len(v_) < 3:\n return 0.0\n x_ = v_[:, 1] - v_[:, 1].mean()\n y_ = v_[:, 0] - v_[:, 0].mean()\n correction = x_[-1] * y_[0] - y_[-1] * x_[0]\n main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])\n return 0.5 * np.abs(main_area + correction)", "def triangle_areas(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n xy = points[triangles]\n # s1 = xy[:, 2, :] - xy[:, 1, :]\n # s2 = xy[:, 0, :] - xy[:, 2, :]\n # s3 = xy[:, 1, :] - xy[:, 0, :]\n # which can be simplified to\n # s = xy[:, [2, 0, 1]] - xy[:, [1, 2, 0]] # 3D\n s = xy[:, [2, 0]] - xy[:, [1, 2]] # 2D\n a = np.linalg.det(s)\n return a * 0.5", "def refinement_func_area(tri_points, area):\r\n max_area = 0.005\r\n return bool(area > max_area)", "def calc_surface_area(faces, verts):\n # Calculate the surface area of a mesh from it's triangle faces.\n # faces: List of all the faces on the surface. Each face indexes three\n # points from verts which make up the triangle face.\n # verts: List of all the vertices on the surface.\n area = 0\n for face in faces:\n # Extract x's and y's from the face's vertices.\n xs = [verts[face[0]][0], verts[face[1]][0], verts[face[2]][0]]\n ys = [verts[face[0]][1], verts[face[1]][1], verts[face[2]][1]]\n # Compute area of face from triangle points.\n base = max(xs) - min(xs)\n height = max(ys) - min(ys)\n area += 0.5 * (base + height)\n return area", "def check_geometry_inside_l_areas(geometry: BaseGeometry, id_area: int, geom_srid: int):\n wkt = geometry.wkt\n return check_wkt_inside_area_id(wkt=wkt, id_area=id_area, wkt_srid=geom_srid)", "def test_rectangle(self):\n result = shape_area.rectangle_area(6,7)\n self.assertEqual(result,26)", "def checkVertices(vertices, limits):\n isWithin = True\n for i,v in enumerate(vertices):\n x = v[0]\n y = v[1]\n z = v[2]\n if x < limits[0][0] or x > limits[0][1]:\n isWithin = False\n break\n if y < limits[1][0] or y > limits[1][1]:\n isWithin = False\n break\n if z < limits[2][0] or z > limits[2][1]:\n isWithin = False\n break\n return isWithin", "def test_area3(self):\n r3 = Rectangle(8, 7, 0, 0, 12)\n self.assertEqual(r3.area(), 56)", "def _triangle_area_at_points(self, p1, p2, p3):\n a = sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n b = sqrt((p2[0] - p3[0]) ** 2 + (p2[1] - p3[1]) ** 2)\n c = sqrt((p1[0] - p3[0]) ** 2 + (p1[1] - p3[1]) ** 2)\n s = (a + b + c) / float(2)\n area = sqrt(s * (s - a) * (s - b) * (s - c))\n return area", "def polygon_area(points):\n def area(triangles):\n \"\"\"\n Area of a spherical triangle. Vectorized version of\n :func:`triangle_area`.\n \"\"\"\n # sides of the triangle\n sides = great_circle_distance(triangles,\n numpy.roll(triangles, 1, axis=1))\n\n assert numpy.all(sides >= 0.)\n\n # s = (a + b + c) / 2.\n s = (numpy.sum(sides, axis=1) / 2.)\n\n # tan(s / 2) * tan((s - a) / 2) * tan((s - b) / 2) * tan((s - c) / 2)\n product = (tan(s / 2.) *\n numpy.prod(tan((s[:, numpy.newaxis] - sides) / 2.), axis=1))\n\n try:\n return 4. 
* arctan(sqrt(product))\n except FloatingPointError:\n # floating point weirdness\n\n def individual(prod):\n \"\"\"\n Area of an individual triangle.\n \"\"\"\n try:\n return 4. * arctan(sqrt(prod))\n except FloatingPointError:\n return 0.\n\n return numpy.array([individual(prod) for prod in product])\n\n triangles = numpy.array(list(decompose_polygon(points)))\n return area(triangles).sum()", "def polygon_area(nodes, edges):\n # extract the (x, y) coordinates of the boundary nodes in the order\n x = []\n y = []\n for e in edges:\n n = nodes[e[0]]\n x.append(n[0])\n y.append(n[1])\n n = nodes[e[1]]\n x.append(n[0])\n y.append(n[1])\n # \"close\" the polygon\n x.append(x[0])\n x.append(x[1])\n y.append(y[0])\n y.append(y[1])\n # compute the area\n a = 0.0\n for i in range(1, len(x)-1):\n a += x[i] * (y[i+1] - y[i-1])\n a /= 2;\n return a", "def test_area_method(self):\n points = np.array([[0, 0],\n [1, 0],\n [1, 1],\n [0, 1]])\n element = FEMOL.elements.Q4(points)\n self.assertTrue(element.area() == 1)", "def showArea(self, surface):\n ps = [tuple(p) for p in self.points]\n if len(ps) > 1:\n surface.draw.polygon(surface.screen, self.area_color, ps, False)", "def buildings_in_area(self, polygon):\n return [b for b in self.buildings if polygon.contains(b.geometry.convex_hull)]", "def insideTriangle(p, p0, p1, p2, area):\n s = (\n 1.0\n / (2.0 * area)\n * (\n p0[1] * p2[0]\n - p0[0] * p2[1]\n + (p2[1] - p0[1]) * p[0]\n + (p0[0] - p2[0]) * p[1]\n )\n )\n t = (\n 1.0\n / (2.0 * area)\n * (\n p0[0] * p1[1]\n - p0[1] * p1[0]\n + (p0[1] - p1[1]) * p[0]\n + (p1[0] - p0[0]) * p[1]\n )\n )\n return s > 0 and t > 0 and 1 - s - t > 0", "def triangle_area(a, b, c):\n\n return 0.5 * abs(\n a[0] * (b[1] - c[1]) +\n b[0] * (c[1] - a[1]) +\n c[0] * (a[1] - b[1])\n )", "def test_areas(self):\n df = self.spark.createDataFrame(\n [\n (Box2d(1, 2, 1.0, 1.0),),\n (Box2d(10, 12, 1.0, 5.0),),\n ],\n [\"bbox\"],\n )\n df = df.withColumn(\"area\", area(col(\"bbox\")))\n self.assertCountEqual((1.0, 5.0), df.select(\"area\").toPandas()[\"area\"])", "def test_random_polygon(self):\n p = g.trimesh.path.polygons.random_polygon()\n assert p.area > 0.0\n assert p.is_valid", "def test_area(self):\n r1 = Rectangle(3, 2)\n self.assertEqual(r1.area(), 6)\n\n r2 = Rectangle(2, 10)\n self.assertEqual(r2.area(), 20)\n\n r3 = Rectangle(10, 10)\n self.assertEqual(r3.area(), 100)", "def Areas(self, with_sign=False, gpoints=None):\n\n assert self.elements is not None\n assert self.element_type is not None\n if gpoints is None:\n assert self.points is not None\n gpoints = self.points\n\n if self.element_type == \"tri\":\n points = np.ones((gpoints.shape[0],3),dtype=np.float64)\n points[:,:2] = gpoints\n # FIND AREAS OF ALL THE ELEMENTS\n area = 0.5*np.linalg.det(points[self.elements[:,:3],:])\n\n elif self.element_type == \"quad\":\n # NODE ORDERING IS IRRELEVANT, AS IT IS THESE AREAS\n # WHICH DETERMINE NODE ORDERING\n # AREA OF QUAD ABCD = AREA OF ABC + AREA OF ACD\n points = np.ones((gpoints.shape[0],3),dtype=np.float64)\n points[:,:2] = gpoints\n # FIND AREAS ABC\n area0 = np.linalg.det(points[self.elements[:,:3],:])\n # FIND AREAS ACD\n area1 = np.linalg.det(points[self.elements[:,[0,2,3]],:])\n # FIND AREAS OF ALL THE ELEMENTS\n area = 0.5*(area0+area1)\n\n elif self.element_type == \"tet\":\n # GET ALL THE FACES\n faces = self.GetFacesTet()\n\n points = np.ones((gpoints.shape[0],3),dtype=np.float64)\n points[:,:2]=gpoints[:,:2]\n area0 = np.linalg.det(points[faces[:,:3],:])\n\n points[:,:2]=gpoints[:,[2,0]]\n area1 = 
np.linalg.det(points[faces[:,:3],:])\n\n points[:,:2]=gpoints[:,[1,2]]\n area2 = np.linalg.det(points[faces[:,:3],:])\n\n area = 0.5*np.linalg.norm(area0+area1+area2)\n\n elif self.element_type == \"hex\":\n\n from Florence.Tensor import unique2d\n C = self.InferPolynomialDegree() - 1\n\n area = 0\n node_arranger = NodeArrangementHex(C)[0]\n for i in range(node_arranger.shape[0]):\n # print node_arranger[i,:]\n # AREA OF FACES\n points = np.ones((gpoints.shape[0],3),dtype=np.float64)\n if i==0 or i==1:\n points[:,:2] = gpoints[:,:2]\n elif i==2 or i==3:\n points[:,:2] = gpoints[:,[0,2]]\n elif i==4 or i==5:\n points[:,:2] = gpoints[:,1:]\n # FIND AREAS ABC\n area0 = np.linalg.det(points[self.elements[:,node_arranger[i,:3]],:])\n # FIND AREAS ACD\n area1 = np.linalg.det(points[self.elements[:,node_arranger[i,1:]],:])\n # FIND AREAS OF ALL THE ELEMENTS\n area += 0.5*np.linalg.norm(area0+area1)\n\n # print area\n raise ValueError('Hex areas implementation requires further checks')\n\n else:\n raise NotImplementedError(\"Computing areas for\", self.element_type, \"elements not implemented yet\")\n\n if with_sign is False:\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n area = np.abs(area)\n elif self.element_type == \"tet\":\n raise NotImplementedError('Numbering order of tetrahedral faces could not be determined')\n\n return area", "def test_generalized_banana_polygon_is_valid():\n park = query_row(db_conf, 'osm_landusages', 7101)\n # geometry is not valid\n assert not park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen0', 7101)\n # but simplified geometies are valid\n assert park['geometry'].is_valid, park\n park = query_row(db_conf, 'osm_landusages_gen1', 7101)\n assert park['geometry'].is_valid, park", "def test_triangle_get_area(self):\n triangle = Triangle(0, 9, 10, 11)\n self.assertEqual(triangle.get_area(), 42.42640687119285)", "def showAllArea(self, surface, **kwargs):\n if not \"area_color\" in kwargs:\n kwargs[\"area_color\"] = self.area_color\n ps = [tuple(p) for p in self.points]\n if len(ps) > 1:\n surface.draw.polygon(surface.screen, kwargs[\"area_color\"], ps, False)", "def test_area():\n N_GRID = 1000\n kind_list = [\"linear\", \"previous\"]\n x_grid = np.linspace(0, 1, N_GRID)\n\n kind = np.random.choice(kind_list)\n\n N = np.random.randint(2, 20)\n x = np.random.rand(N)\n x.sort()\n x[0] = x_grid[0]\n x[-1] = x_grid[-1]\n y = np.random.rand(N)\n\n auc, = util.area(x[None, :], y[None, :], kind)\n\n y_grid = util._interp1d(x_grid, x, y, kind)\n auc2, = util.area(x_grid[None, :], y_grid[None, :], kind)\n\n # Make sure interp1d and area are consistent with each other\n assert np.abs(auc - auc2) <= 10.0 / N_GRID", "def face_areas(self, point):\n surface_metrics_bp = self.surface_metric_matrices(point)\n return gs.sqrt(gs.linalg.det(surface_metrics_bp))", "def calcFaceAreas(x,y,z):\n (nLonP1, nLatP1) = x.shape\n (nLon, nLat) = (nLonP1-1, nLatP1-1)\n\n area = numpy.zeros((nLon, nLat))\n\n for i in range(nLon):\n for j in range(nLat):\n left = distance( (x[i,j], y[i,j], z[i,j]), (x[i,j+1], y[i,j+1], z[i,j+1]) )\n right = distance( (x[i+1,j], y[i+1,j], z[i+1,j]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n top = distance( (x[i,j+1], y[i,j+1], z[i,j+1]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n bot = distance( (x[i,j], y[i,j], z[i,j]), (x[i+1,j], y[i+1,j], z[i+1,j]) )\n \n area[i,j] = 0.5*(left+right) * 0.5*(top+bot)\n\n return area", "def is_clockwise(vertices):\n v = vertices\n area = ((v[1][0] - v[0][0]) * (v[1][1] + v[0][1]) +\n 
(v[2][0] - v[1][0]) * (v[2][1] + v[1][1]) +\n (v[0][0] - v[2][0]) * (v[0][1] + v[2][1])) / 2\n return (area > 0)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def test_areas_locked_ok(self):", "def triangle_area(triangle):\n # sides of the triangle\n a = great_circle_distance(triangle[0], triangle[1])\n b = great_circle_distance(triangle[0], triangle[2])\n c = great_circle_distance(triangle[1], triangle[2])\n\n # it may happen that the triangle is degenerate\n # for the rare case where a fourth generator just\n # touches the circumcircle\n assert a >= 0.\n assert b >= 0.\n assert c >= 0.\n\n s = (a + b + c) / 2.\n\n # does not quite work for extra large polygons\n # where area is ambiguous\n try:\n return 4. * arctan(sqrt(tan(s / 2.) *\n tan((s - a) / 2.) *\n tan((s - b) / 2.) *\n tan((s - c) / 2.)))\n except FloatingPointError:\n # floating point weirdness\n return 0.", "def test_faces(self):\n\n self.test_shape.workplane = \"XY\"\n self.test_shape.rotation_axis = \"Z\"\n\n assert self.test_shape.area == pytest.approx((((math.pi * (10**2)) * 2) + (math.pi * (10 * 2) * 100)) * 8)\n assert len(self.test_shape.areas) == 24\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 16\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10 * 2) * 100)) == 8", "def getArea(rob):\r\n def dfs(visit, i, j):\r\n visit.add((i, j))\r\n for k in range(4):\r\n newi, newj = i + x[k], j + y[k]\r\n if (newi, newj) in visit or not rob.move(k):\r\n continue\r\n dfs(visit, newi, newj)\r\n rob.move((k + 2) % 4)\r\n visit = set()\r\n dfs(visit, 0, 0)\r\n return len(visit)", "def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))", "def refinement_func_location(tri_points, area):\r\n center_tri = np.sum(np.array(tri_points), axis=0)/3.\r\n max_area = 0.005 + lp.norm(np.abs(center_tri) - 1.0) * 0.05\r\n return bool(area > max_area)", "def area(triangles):\n # sides of the triangle\n sides = great_circle_distance(triangles,\n numpy.roll(triangles, 1, axis=1))\n\n assert numpy.all(sides >= 0.)\n\n # s = (a + b + c) / 2.\n s = (numpy.sum(sides, axis=1) / 2.)\n\n # tan(s / 2) * tan((s - a) / 2) * tan((s - b) / 2) * tan((s - c) / 2)\n product = (tan(s / 2.) *\n numpy.prod(tan((s[:, numpy.newaxis] - sides) / 2.), axis=1))\n\n try:\n return 4. * arctan(sqrt(product))\n except FloatingPointError:\n # floating point weirdness\n\n def individual(prod):\n \"\"\"\n Area of an individual triangle.\n \"\"\"\n try:\n return 4. 
* arctan(sqrt(prod))\n except FloatingPointError:\n return 0.\n\n return numpy.array([individual(prod) for prod in product])", "def plot_voronoi_polys_with_points_in_area(ax, area_shape, poly_shapes, points, poly_to_pt_assignments=None,\n area_color='white', area_edgecolor='black',\n voronoi_and_points_cmap='tab20',\n voronoi_color=None, voronoi_edgecolor=None,\n points_color=None, points_markersize=5, points_marker='o',\n voronoi_labels=None, voronoi_label_fontsize=10, voronoi_label_color=None,\n point_labels=None, point_label_fontsize=7, point_label_color=None,\n plot_area_opts=None,\n plot_voronoi_opts=None,\n plot_points_opts=None):\n plot_area_opts = plot_area_opts or {}\n plot_voronoi_opts = plot_voronoi_opts or {'alpha': 0.5}\n plot_points_opts = plot_points_opts or {}\n\n _plot_polygon_collection_with_color(ax, [area_shape], color=area_color, edgecolor=area_edgecolor, **plot_area_opts)\n\n if voronoi_and_points_cmap and poly_to_pt_assignments and \\\n not all(map(bool, (voronoi_color, voronoi_edgecolor, points_color))):\n voronoi_color, points_color = colors_for_voronoi_polys_and_points(poly_shapes, poly_to_pt_assignments,\n cmap_name=voronoi_and_points_cmap)\n\n if voronoi_color is None and voronoi_edgecolor is None:\n voronoi_edgecolor = 'black' # better visible default value\n\n plot_voronoi_polys(ax, poly_shapes, color=voronoi_color, edgecolor=voronoi_edgecolor,\n labels=voronoi_labels, label_fontsize=voronoi_label_fontsize, label_color=voronoi_label_color,\n **plot_voronoi_opts)\n\n plot_points(ax, points, points_markersize, points_marker, color=points_color,\n labels=point_labels, label_fontsize=point_label_fontsize, label_color=point_label_color,\n **plot_points_opts)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def test_convexHullFacetArea(self):\n try:\n import pyhull\n except ImportError:\n self.skipTest(\"Pyhull (optional) is not available so cannot compute facet area.\")\n \n # make points\n N = 8\n pts = [0, 0, 0,\n 3, 0, 0,\n 0, 3, 0,\n 0, 0, 3,\n 3, 3, 0,\n 0, 3, 3,\n 3, 0, 3,\n 3, 3, 3]\n \n # calc volume\n volume, facetArea = clusters.findConvexHullVolume(N, pts)\n \n self.assertAlmostEqual(facetArea, 54.0)", "def test_square_area(self):\n self.assertEqual(4, square_area(self.values['side']))", "def test_polygonize():\n # A collection with one non-zero-area Polygon is returned as a Polygon.\n geom1 = GeometryCollection([POLY, ZERO_POLY])\n result1 = polygonize(geom1)\n assert result1.geom_type == \"Polygon\"\n assert result1.area == 1.0\n\n # A collection with multiple non-zero-area polygons is returned as a MultiPolygon.\n geom2 = GeometryCollection([POLY, POLY])\n result2 = polygonize(geom2)\n assert result2.geom_type == \"MultiPolygon\"\n assert result2.area == 2.0\n\n # Zero-area geometries are not permitted.\n with pytest.raises(ValueError) as err:\n _ = polygonize(ZERO_POLY)\n assert err.match(\"Geometry has zero area\")", "def compare_area(geometry_x, geometry_y):\n arct = CreateGeometryFromWkt(geometry_x)\n pgis = CreateGeometryFromWkt(geometry_y)\n\n intersection_area = Geometry.Area(Geometry.Intersection(arct, pgis))\n arct_area = Geometry.Area(arct)\n pgis_area = Geometry.Area(pgis)\n\n # print('arctern area: %s, postgis area: %s, intersection area: %s' %\n # (str(arct_area), str(pgis_area), str(intersection_area)))\n # result = compare_float(intersection_area, arct_area, pgis_area, EPOCH_SURFACE)\n result = compare3float_relative(pgis_area, arct_area, intersection_area,\n EPOCH_SURFACE_RELATIVE)\n return 
result", "def compute_triangle_area(a,b,c):\n ab = np.sqrt( ((a-b)**2).sum() )\n ac = np.sqrt( ((a-c)**2).sum() )\n bc = np.sqrt( ((b-c)**2).sum() )\n \n s = (ab+ac+bc)/2\n area = np.sqrt(s*(s-ab)*(s-bc)*(s-ac))\n \n return area", "def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)", "def test_area(self):\r\n rect = Rectangle(30, 50, 130, 60)\r\n assert rect.area == 100 * 10\r\n\r\n rect = Rectangle(10.5, 20.7, 11.2, 50.6)\r\n assert abs(rect.area - 20.93) < 1e-10\r\n\r\n rect = Rectangle(-10, -20, 10, 60)\r\n assert rect.area == 20 * 80", "def parse_area(x,y):\r\n # if (x,y) in gone :\r\n # return False\r\n # print(\"!\",end=\"\")\r\n # made useless thanks to the loop's conditions\r\n gone.add((x,y))\r\n if (x,y) in pos_turtle.values() :\r\n return True\r\n else :\r\n for (i,j) in [(x-UNIT,y), (x+UNIT,y), (x,y-UNIT), (x,y+UNIT)] :\r\n if (i,j) in pos_tracker or (i,j) in gone or abs(i)>=RAY or abs(j)>=RAY :\r\n continue\r\n if parse_area(i,j) :\r\n return True\r\n return False", "def area_polygon(polygon):\n o = centroid_points(polygon)\n u = subtract_vectors(polygon[-1], o)\n v = subtract_vectors(polygon[0], o)\n a = 0.5 * length_vector(cross_vectors(u, v))\n for i in range(0, len(polygon) - 1):\n u = v\n v = subtract_vectors(polygon[i + 1], o)\n a += 0.5 * length_vector(cross_vectors(u, v))\n return a", "def poly_area(polygon,sort=True):\n npts = len(polygon)\n if npts < 3: return 0.\n if sort == True:\n (points,angles) = sort_points(*polygon)\n else:\n points = polygon\n \n # now loop through points cyclically computing\n # area of each polygon segment defined by the points\n # [0,0],[x1,y1],[x2,y2]\n A = []\n for j in range(npts):\n p1 = points[j]\n if j == npts - 1:\n p2 = points[0]\n else:\n p2 = points[j+1]\n a = segment_area(p1,p2)\n A.append(a)\n return num.sum(A)", "def test_active_area(get_touchmat):\n touchmat = get_touchmat\n touchmat_model = check_device_types.get_device_model(touchmat)\n\n if touchmat_model == Devices.touchmat_g1:\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area()\n assert 'Functionality not available' in str(execinfo.value)\n\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'enabled': True})\n assert 'Functionality not available' in str(execinfo.value)\n return\n\n x_min = 0\n y_min = 0\n x_max = 15360\n y_max = 8640\n\n cur_area = touchmat.active_area()\n assert isinstance(cur_area['enabled'], bool)\n assert isinstance(cur_area['top_left'], dict)\n assert isinstance(cur_area['bottom_right'], dict)\n assert isinstance(cur_area['top_left']['x'], int)\n assert x_min <= cur_area['top_left']['x'] <= x_max\n assert isinstance(cur_area['top_left']['y'], int)\n assert y_min <= cur_area['top_left']['y'] <= y_max\n assert isinstance(cur_area['bottom_right']['x'], int)\n assert x_min <= cur_area['bottom_right']['x'] <= x_max\n assert isinstance(cur_area['bottom_right']['y'], int)\n assert y_min <= cur_area['bottom_right']['y'] <= y_max\n assert cur_area['top_left']['x'] <= cur_area['bottom_right']['x']\n assert cur_area['top_left']['y'] <= cur_area['bottom_right']['y']\n\n tl_x = random.randint(x_min, x_max-1)\n tl_y = random.randint(y_min, y_max-1)\n br_x = random.randint(tl_x+1, x_max)\n br_y = random.randint(tl_y+1, y_max)\n area = {'enabled': True, 'top_left': {'x': tl_x, 'y': tl_y},\n 'bottom_right': {'x': br_x, 'y': br_y}}\n set_area = touchmat.active_area(area)\n assert set_area == 
area\n assert touchmat.active_area() == area\n\n # Test only changing one key at a time\n set_area = touchmat.active_area({'enabled':False})\n area['enabled'] = False\n assert set_area == area\n assert touchmat.active_area() == area\n\n tl_x = random.randint(x_min, br_x)\n tl_y = random.randint(y_min, br_y)\n set_area = touchmat.active_area({'top_left': {'x': tl_x, 'y': tl_y}})\n area['top_left'] = {'x': tl_x, 'y': tl_y}\n assert set_area == area\n assert touchmat.active_area() == area\n\n br_x = random.randint(tl_x+1, x_max)\n br_y = random.randint(tl_y+1, y_max)\n set_area = touchmat.active_area({'bottom_right': {'x': br_x, 'y': br_y}})\n area['bottom_right'] = {'x': br_x, 'y': br_y}\n assert set_area == area\n assert touchmat.active_area() == area\n\n # Test the edge cases\n area = {'enabled': True, 'top_left': {'x': x_min, 'y': y_min},\n 'bottom_right': {'x': x_max, 'y': y_max}}\n set_area = touchmat.active_area(area)\n assert set_area == area\n assert touchmat.active_area() == area\n\n # Verify that out of range values throw the appropriate errors\n err_msg = 'Valid range is {} <= top_left x <= {}'.format(x_min, x_max)\n # Test top_left x < min value\n bad_x = random.randint(x_min-1000, x_min-1)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': bad_x, 'y': tl_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n # Test top_left x > max value\n bad_x = random.randint(x_max+1, x_max+1000)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': bad_x, 'y': tl_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n\n err_msg = 'Valid range is {} <= bottom_right x <= {}'.format(x_min, x_max)\n # Test bottom_right x < min value\n bad_x = random.randint(x_min-1000, x_min-1)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'bottom_right': {'x': bad_x, 'y': br_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n # Test bottom_right x > max value\n bad_x = random.randint(x_max+1, x_max+1000)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'bottom_right': {'x': bad_x, 'y': br_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n\n err_msg = 'Valid range is {} <= top_left y <= {}'.format(y_min, y_max)\n # Test top_left y < min value\n bad_y = random.randint(y_min-1000, y_min-1)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': tl_x, 'y': bad_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n # Test top_left y > max value\n bad_y = random.randint(y_max+1, y_max+1000)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': tl_x, 'y': bad_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n\n err_msg = 'Valid range is {} <= bottom_right y <= {}'.format(y_min, y_max)\n # Test bottom_right y < min value\n bad_y = random.randint(y_min-1000, y_min-1)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'bottom_right': {'x': br_x, 'y': bad_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n # Test bottom_right y > max value\n bad_y = random.randint(y_max+1, y_max+1000)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'bottom_right': {'x': br_x, 'y': bad_y}})\n assert err_msg in execinfo.value.message\n assert 
touchmat.active_area() == area\n\n # Test bottom_right y < top_left y\n br_y = random.randint(y_min, y_max-1)\n tl_y = random.randint(br_y+1, y_max)\n err_msg = 'top_left y ({}) must be less than bottom_right y ({})'.format(tl_y, br_y)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': x_min, 'y': tl_y},\n 'bottom_right': {'x': x_max, 'y': br_y}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n # Test bottom_right x < top_left x\n br_x = random.randint(x_min, x_max-1)\n tl_x = random.randint(br_x+1, x_max)\n err_msg = 'top_left x ({}) must be less than bottom_right x ({})'.format(tl_x, br_x)\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': {'x': tl_x, 'y': y_min},\n 'bottom_right': {'x': br_x, 'y': y_max}})\n assert err_msg in execinfo.value.message\n assert touchmat.active_area() == area\n\n # Test passing in the wrong types, empty dictionaries, etc...\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'top_left': 7})\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'bottom_right': 'moo'})\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'enabled': 'moo'})\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({'enabled': 3})\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area(\"test\")\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_area({})\n assert 'Invalid parameter' in execinfo.value.message", "def refinement_func_anomaly(tri_points, area):\r\n polygon = Path(refinement_func_anomaly.polygon)\r\n center_tri = np.sum(np.array(tri_points), axis=0)/3.\r\n if area > 0.005:\r\n refine_needed = True\r\n elif (area > 0.002) and polygon.contains_point(center_tri):\r\n refine_needed = True\r\n else:\r\n refine_needed = False\r\n\r\n return refine_needed", "def face_area(lon_b, lat_b, r_sphere = 6.375e6):\n \n # Convert inputs to radians\n lon_b_rad = lon_b * np.pi / 180.0\n lat_b_rad = lat_b * np.pi / 180.0\n \n r_sq = r_sphere * r_sphere\n n_cs = lon_b.shape[1] - 1\n \n # Allocate output array\n cs_area = np.zeros((n_cs,n_cs))\n \n # Ordering\n valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1\n \n for i_lon in range(n_cs):\n for i_lat in range(n_cs):\n lon_corner = np.zeros(4)\n lat_corner = np.zeros(4)\n xyz_corner = np.zeros((4,3))\n for i_vert in range(4):\n x_lon = i_lon + (i_vert > 1)\n x_lat = i_lat + (i_vert == 0 or i_vert == 3)\n lon_corner[i_vert] = lon_b_rad[x_lon,x_lat]\n lat_corner[i_vert] = lat_b_rad[x_lon,x_lat]\n for i_vert in range(4):\n xyz_corner[i_vert,:] = ll2xyz(lon_corner[i_vert],lat_corner[i_vert])\n tot_ang = 0.0\n for i_corner in range(4):\n curr_combo = valid_combo[i_corner,:]\n xyz_mini = np.zeros((3,3))\n for i_mini in range(3):\n xyz_mini[i_mini,:] = xyz_corner[curr_combo[i_mini],:]\n curr_ang = sphere_angle(xyz_mini[0,:],xyz_mini[1,:],xyz_mini[2,:])\n tot_ang += curr_ang\n cs_area[i_lon,i_lat] = r_sq * (tot_ang - (2.0*np.pi))\n \n return cs_area", "def areas(cells):\n return numpy.array([polygon_area(cells[i])\n for i in range(len(cells.keys()))])", "def test_rectangle_area(self):\n self.assertEqual(12, rectangle_area(\n self.values['base'], 
self.values['height']))", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def polygon_area(x, y):\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))", "def test_polygon_to_vertex_arrays(self):\n\n these_vertex_x_coords, these_vertex_y_coords = (\n skeleton_lines._polygon_to_vertex_arrays(POLYGON_OBJECT_XY))\n\n self.assertTrue(numpy.allclose(\n these_vertex_x_coords, VERTEX_X_COORDS, atol=TOLERANCE))\n self.assertTrue(numpy.allclose(\n these_vertex_y_coords, VERTEX_Y_COORDS, atol=TOLERANCE))", "def force_vertices_with_npoints(area_par, bbox, coords, npts, **kwargs):\n fac = 1.02\n count = 0\n max_iter = 20\n while True:\n t = calculate_mesh(\n area_par,\n bbox,\n None,\n get_t=True)\n pt_count = count_points_near_vertices(t, coords, **kwargs)\n if pt_count.min() >= npts:\n break\n area_par *= fac\n count += 1\n if np.mod(count, 2) == 0:\n fac += 0.5\n if np.mod(count, max_iter) == 0:\n e = (\"did not meet vertex requirement \"\n \"after %d iterations\" % max_iter)\n raise MeshLensCorrectionException(e)\n return t, area_par", "def test_find_triangle(self):\n points = np.array([[2.435, -3.37], [2.435, -1.82], [2.635, -2.], [2.535, -1.7]])\n connectivity_list = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.intp)\n point = np.array([2.6, -1.9])\n self.assertEqual(1, find_triangle(point, points, connectivity_list))\n point = np.array([3., 1.]) # outside of defined vertices\n self.assertEqual(-1, find_triangle(point, points, connectivity_list))", "def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4", "def compute_mesh_area_numpy(mesh):\n pass", "def intersectarea(p1,p2,size):\n x1, y1 = p1\n x2, y2 = p2\n ix1, iy1 = max(x1,x2), max(y1,y2)\n ix2, iy2 = min(x1+size,x2+size), min(y1+size,y2+size)\n iarea = abs(ix2-ix1)*abs(iy2-iy1)\n if iy2 < iy1 or ix2 < ix1: iarea = 0\n return iarea", "def compute_triangle_area(triangle_points: np.ndarray) -> float:\n (x1, y1), (x2, y2), (x3, y3) = unpack_triangle_coordinates(triangle_points)\n # Pythagorean theorem\n l1 = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n l2 = sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)\n l3 = sqrt((x3 - x1) ** 2 + (y3 - y1) ** 2)\n # Heron's Formula\n semi_perimeter = (l1 + l2 + l3) / 2\n to_sqrt = semi_perimeter * (semi_perimeter - l1) * (semi_perimeter - l2) * (semi_perimeter - l3)\n to_sqrt = to_sqrt if to_sqrt > 0 else 0\n area = sqrt(to_sqrt)\n return area", "def convex_hull_area( contours, debug= False ):\r\n ret_areas = []\r\n ret_hulls = []\r\n for c in contours:\r\n hull = cv2.convexHull( c )\r\n area = cv2.contourArea( hull 
)\r\n ret_areas.append( area )\r\n ret_hulls.append( hull )\r\n if( debug ):\r\n print( \"Hull area: {0}\".format( area ) )\r\n\r\n return ( ret_areas, ret_hulls )", "def Ez_area(position, angle, detect):\n# a = range(round(-2*Ez_height),round(2*Ez_height))\n# b = range(round(-2*Ez_height),round(2*Ez_height))\n# a_valid = []\n# b_valid= []\n \n # These are the grid points in a coordinate system based on the Ez's angle\n if detect:\n a_valid = [-11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6]\n b_valid = [-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3]\n else:\n a_valid = [-9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4]#[-19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -19, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -18, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -17, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -14, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -12, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -7, -7, -7, 
-7, -7, -7, -7, -7, -7, -7, -7, -7, -7, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9]\n b_valid = [-4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, -2, -1, 0, 1, 2]#[-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\n positions = []\n# for i in a:\n# for j in b:\n# if (i > ((Ez_width/2)-Ez_height-detect_length) and abs(j) < (Ez_width/2+detect_length) and i < 0) or (i > 0 and np.sqrt(i**2 + j**2) < Ez_width/2+detect_length):\n# a_valid.append(i)\n# b_valid.append(j)\n# print('AAAA', a_valid)\n# print(' ')\n# print('BBBB', b_valid)\n# print(' ')\n \n # This is a coordinate transfromation to x,y\n for i in range(len(a_valid)):\n positions.append((int(round(a_valid[i]*np.cos(angle) + 
b_valid[i]*np.sin(angle) + position[0])), int(round(a_valid[i]*np.sin(angle) - b_valid[i]*np.cos(angle) + position[1]))))\n return positions", "def _bounding_area(index, hull):\n unit_vector_p = _unit_vector(hull[index], hull[index + 1])\n unit_vector_o = _orthogonal_vector(unit_vector_p)\n\n dis_p = tuple(np.dot(unit_vector_p, pt) for pt in hull)\n dis_o = tuple(np.dot(unit_vector_o, pt) for pt in hull)\n\n min_p = min(dis_p)\n min_o = min(dis_o)\n len_p = max(dis_p) - min_p\n len_o = max(dis_o) - min_o\n\n return {'area': len_p * len_o,\n 'length_parallel': len_p,\n 'length_orthogonal': len_o,\n 'rectangle_center': (min_p + len_p / 2, min_o + len_o / 2),\n 'unit_vector': unit_vector_p,\n }", "def area(poly):\n if len(poly) < 3: # not a plane - no area\n return 0\n total = [0, 0, 0]\n num = len(poly)\n for i in range(num):\n vi1 = poly[i]\n vi2 = poly[(i+1) % num]\n prod = np.cross(vi1, vi2)\n total[0] += prod[0]\n total[1] += prod[1]\n total[2] += prod[2]\n result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))\n return abs(result/2)", "def get_element_areas(self):\n areas = [\n self._get_area_polygon(\n points_x, points_z\n ) for points_x, points_z in zip(self.grid['x'], self.grid['z'])\n ]\n return np.array(areas)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)", "def test_area(self):\n s1 = Square(3)\n self.assertEqual(9, s1.area())\n s4 = Square(5, 0, 0, 12)\n self.assertEqual(25, s4.area())", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def insideArea(point, area):\n x=point.real\n y=point.imag\n n = len(area)\n inside = False\n p1x = area[0].real\n p1y = area[0].imag\n for i in range(1, n + 1):\n p2x = area[i % n].real\n p2y = area[i % n].imag\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xinters:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside", "def is_in_area(pt, areasize, radius=None):\n if areasize is None:\n return True\n\n node = np.asarray(pt)\n if radius is None:\n radius = 0\n\n if np.all(node > (0 + radius)) and np.all(node < (areasize - radius)):\n return True\n else:\n return False", "def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")" ]
[ "0.7278366", "0.7158183", "0.68438345", "0.67501736", "0.66490245", "0.6619761", "0.658673", "0.6489864", "0.64847827", "0.63019234", "0.6286598", "0.6198661", "0.618283", "0.61759", "0.61708784", "0.61697274", "0.61374867", "0.6110089", "0.606067", "0.59845203", "0.5979044", "0.59579587", "0.59507424", "0.59365904", "0.58778286", "0.58488214", "0.58386195", "0.5832783", "0.58326", "0.5831316", "0.5825578", "0.58230543", "0.5807224", "0.579269", "0.57864916", "0.57757974", "0.5768382", "0.5750605", "0.57503736", "0.5743256", "0.5730785", "0.5697653", "0.5689164", "0.56889623", "0.568807", "0.56723285", "0.5664015", "0.5663899", "0.56543535", "0.5612615", "0.5581475", "0.5570641", "0.5565979", "0.5557224", "0.55423236", "0.5534444", "0.5530529", "0.55217135", "0.55187434", "0.5518402", "0.55147874", "0.5503744", "0.54993445", "0.549724", "0.5487361", "0.5467667", "0.54658735", "0.54618263", "0.54614204", "0.5457161", "0.5451902", "0.5448791", "0.54396313", "0.5438491", "0.5432419", "0.5411642", "0.5410828", "0.54077595", "0.5403064", "0.5397223", "0.5387187", "0.53866464", "0.53804946", "0.5373829", "0.53647053", "0.5360177", "0.53559005", "0.53543586", "0.5353181", "0.5341351", "0.5340994", "0.5338718", "0.5333503", "0.5333503", "0.5331155", "0.5329242", "0.5328348", "0.5326941", "0.53240013", "0.5321001" ]
0.8256436
0
Test normals. We test this on a space whose initializing point is a cube, and we test the function on a cube with sides of length 2 centered at the origin. The cube is meshed with 12 triangles (2 triangles per face.) Recall that the magnitude of each normal vector is equal to the area of the face it is normal to.
def test_normals(self, faces, point): space = self.Space(faces=faces) cube_normals = gs.array( [ [0.0, 0.0, 2.0], [0.0, 0.0, 2.0], [0.0, 2.0, 0.0], [0.0, 2.0, 0.0], [2.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, -2.0, 0.0], [0.0, -2.0, 0.0], [-2.0, 0.0, 0.0], [-2.0, 0.0, 0.0], [0.0, 0.0, -2.0], [0.0, 0.0, -2.0], ] ) expected = cube_normals result = space.normals(point) are_close = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result, expected) ] assert gs.all(are_close) point = gs.array([point, point]) result = space.normals(point) are_close_0 = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result[0], expected) ] are_close_1 = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result[1], expected) ] assert gs.all(gs.array([are_close_0, are_close_1]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))", "def face_normals(xyz, triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def FaceNormals(self):\n\n self.__do_memebers_exist__()\n\n points = np.copy(self.points)\n if points.shape[1] < 3:\n dum = np.zeros((points.shape[0],3))\n dum[:,:points.shape[1]] = points\n points = dum\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n faces = self.faces\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n else:\n raise ValueError(\"Cannot compute face normals on {}\".format(self.element_type))\n\n\n face_coords = self.points[faces[:,:3],:]\n\n p1p0 = face_coords[:,1,:] - face_coords[:,0,:]\n p2p0 = face_coords[:,2,:] - face_coords[:,0,:]\n\n normals = np.cross(p1p0,p2p0)\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n normals[:,2] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetElementsWithBoundaryFaces()\n meds = self.Medians()\n face_element_meds = meds[self.boundary_face_to_element[:,0],:]\n p1pm = face_coords[:,1,:] - face_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n return normals", "def test_normal_unit_length(self):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n normals = np.array(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[3:6])\n lengths = np.sum(normals * normals, axis=0)\n np.testing.assert_almost_equal(np.ones_like(lengths), lengths)", "def compute_face_normals(vertices_zyx, faces, normalize=False):\n # numpy is faster than numba for face normals.\n # Always use numpy.\n return compute_face_normals_numpy(vertices_zyx, faces, normalize)", "def test_normal_always_up(self):\n z_of_normals = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n z_of_normals += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[5])\n np.testing.assert_array_less(np.zeros_like(z_of_normals), z_of_normals)", "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. 
\n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz", "def Normals(self, show_plot=False):\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetBoundaryEdges()\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n normals = self.FaceNormals()\n elif self.element_type == \"tri\" or self.element_type == \"quad\" or self.element_type == \"line\":\n if self.points.shape[1] == 3:\n normals = self.FaceNormals()\n else:\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n edges = self.edges\n elif self.element_type == \"line\":\n edges = self.elements\n\n edge_coords = self.points[edges[:,:2],:]\n p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]\n\n normals = np.zeros_like(p1p0)\n normals[:,0] = -p1p0[:,1]\n normals[:,1] = p1p0[:,0]\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetElementsWithBoundaryEdges()\n meds = self.Medians()\n edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]\n p1pm = edge_coords[:,1,:] - edge_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n\n if show_plot:\n\n if ndim == 2:\n mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])\n\n import matplotlib.pyplot as plt\n figure = plt.figure()\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],\n normals[:,0], normals[:,1],\n color='Teal', headlength=5, width=0.004)\n\n plt.axis('equal')\n plt.axis('off')\n plt.tight_layout()\n plt.show()\n\n\n elif ndim == 3:\n faces = self.faces\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n mid_face_coords = np.sum(self.points[faces,:3],axis=1)/faces.shape[1]\n\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],\n normals[:,0], normals[:,1], normals[:,2],\n color=(0.,128./255,128./255),line_width=5)\n mlab.show()\n\n return normals", "def normalVect(self, n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))", "def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n", "def face_normals(self) -> np.ndarray:\n if self._face_normals is None:\n 
self.compute_face_normals()\n assert self._face_normals is not None\n return self._face_normals", "def parse_normals(lines):\n print \" * Parsing normals\"\n return _parse_vn(lines, \"vn %.6f %.6f %.6f\")", "def compareNormals():\n computeNormals = False\n if computeNormals:\n r1,r2,r3 = read('r1'),read('r2'),read('r3')\n r = [r1,r2,r3]\n x2 = [like(r1),like(r1),like(r1)]\n x3 = [like(r1),like(r1),like(r1)]\n v = [like(r1),like(r1),like(r1)]\n FlattenerUtil.getFrame(r,None,x2,x3)\n FlattenerUtil.cross(x3,x2,v)\n FlattenerUtil.normalize(v,v)\n write('v1',v[0])\n write('v2',v[1])\n write('v3',v[2])\n v1,v2,v3 = read('v1'),read('v2'),read('v3')\n u1,u2,u3 = read('u1'),read('u2'),read('u3')\n display(sub(v1,u1),cmap=rwb,cmin=-0.2,cmax=0.2,name='v1-u1')\n display(sub(v2,u2),cmap=rwb,cmin=-0.2,cmax=0.2,name='v2-u2')\n display(sub(v3,u3),cmap=rwb,cmin=-0.2,cmax=0.2,name='v3-u3')", "def vert_normals(xyz, triangles):\n\n\tB, N, _ = _shape(xyz)\n\tM = _shape(triangles)[-2]\n\ttriangles = _i64(triangles)\n\t\n\tfn = face_normals(xyz, triangles)\n\tbfn = tf.reshape(tf.tile(fn, [1,1,3]), [B*M*3, 3])\n\tbt = tf.reshape(\n\t\ttriangles[tf.newaxis,:,:] + _i64(tf.range(B)[:,tf.newaxis,tf.newaxis] * N),\n\t\t[B*M*3])\n\tvn = tf.reshape(tf.math.unsorted_segment_sum(bfn, bt, B*N), [B,N,3])\n\tvn = tf.math.l2_normalize(vn, -1)\n\treturn vn", "def calculateMeshNormal(mesh_face_vertices):\n mesh_normal = []\n for mesh in mesh_face_vertices:\n v1x = mesh[1, 0] - mesh[0, 0]\n v1y = mesh[1, 1] - mesh[0, 1]\n v1z = mesh[1, 2] - mesh[0, 2]\n v2x = mesh[2, 0] - mesh[1, 0]\n v2y = mesh[2, 1] - mesh[1, 1]\n v2z = mesh[2, 2] - mesh[1, 2]\n \n normal = np.array([v1y * v2z - v1z * v2y, v1z * v2x - v1x * v2z, v1x * v2y - v1y * v2x])\n normal = normal / np.max((np.linalg.norm(normal), 1e-5))\n normal = (normal + 1) * 127.5\n mesh_normal.append(normal)\n return np.array(mesh_normal)", "def test_cube(self):\n\n # No isosurface\n cube_zero = numpy.zeros((2, 2, 2), dtype=numpy.float32)\n\n result = marchingcubes.MarchingCubes(cube_zero, 1.)\n self.assertEqual(result.shape, cube_zero.shape)\n self.assertEqual(result.isolevel, 1.)\n self.assertEqual(result.invert_normals, True)\n\n vertices, normals, indices = result\n self.assertEqual(len(vertices), 0)\n self.assertEqual(len(normals), 0)\n self.assertEqual(len(indices), 0)\n\n # Cube array dimensions: shape = (dim 0, dim 1, dim2)\n #\n # dim 0 (Z)\n # ^\n # |\n # 4 +------+ 5\n # /| /|\n # / | / |\n # 6 +------+ 7|\n # | | | |\n # |0 +---|--+ 1 -> dim 2 (X)\n # | / | /\n # |/ |/\n # 2 +------+ 3\n # /\n # dim 1 (Y)\n\n # isosurface perpendicular to dim 0 (Z)\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((1., 1.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 0], level)\n self.assertAllClose(normals, (1., 0., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 1 (Y)\n cube = numpy.array(\n (((0., 0.), (1., 1.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.2\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(vertices[:, 1], level)\n self.assertAllClose(normals, (0., -1., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 2 (X)\n cube = numpy.array(\n (((0., 1.), (0., 1.)),\n ((0., 1.), (0., 1.))), dtype=numpy.float32)\n level = 0.9\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n 
self.assertAllClose(vertices[:, 2], level)\n self.assertAllClose(normals, (0., 0., 1.))\n self.assertEqual(len(indices), 2)\n\n # isosurface normal in dim1, dim 0 (Y, Z) plane\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(normals[:, 2], 0.)\n self.assertEqual(len(indices), 2)", "def testNorm(self):\n assert(Vector(0, 3, 4).norm() == 5)\n assert(Vector(3, 4).norm() == 5)\n assert Vector(0, 3, 0, 0, 4, 0, size=10).norm() == 5", "def compute_normals(ring):\n # output lists\n normals = []\n points = []\n # create normals half way each segment\n ct = len(ring)\n for i in xrange(ct - 1):\n cur, nxt = ring[i], ring[i+1]\n n = segment_normal(nxt, cur)\n center = mul(add(cur, nxt), 0.5)\n normals.append(n)\n points.append(center)\n # create normals on every point, using normals for every segment\n ct = len(normals)\n for i in xrange(ct):\n cur, nxt = normals[i], normals[(i+1) % ct]\n n = unit(add(cur, nxt))\n pt = ring[i+1]\n normals.append(n)\n points.append(pt)\n return points, normals", "def get_face_normals(self, idx=-1, norm=False):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v1, v2, v3 = self.faces[idx]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n e1 = v2 - v1\n e2 = v3 - v1\n cross = np.cross(e1, e2)\n return cross / np.linalg.norm(cross) if norm else cross\n else:\n f = self.faces\n v = self.vertices\n a = v[f[:, 0], :]\n b = v[f[:, 1], :]\n c = v[f[:, 2], :]\n fn = np.cross(b - a, c - a)\n return fn / np.linalg.norm(fn) if norm else fn", "def get_normals(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n us = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])\n nsyms = 4 if self.halfexts[0] == self.halfexts[1] else 2\n return [(np.dot(r, u), nsyms) for u in us]", "def unit_normals(self):\n return np.stack(self.centers_cartesian(), axis=-1)", "def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length", "def getNormals(vertA, vertB, vertC):\n xA = vertA[0]\n xB = vertB[0]\n xC = vertC[0]\n yA = vertA[1]\n yB = vertB[1]\n yC = vertC[1]\n zA = vertA[2]\n zB = vertB[2]\n zC = vertC[2]\n ABx = xB - xA\n ABy = yB - yA\n ABz = zB - zA\n BCx = xC - xB\n BCy = yC - yB\n BCz = zC - zB\n Nx = ABy * BCz - ABz * BCy\n Ny = ABz * BCx - ABx * BCz\n Nz = ABx * BCy - ABy * BCx\n VecMag = math.sqrt(Nx ** 2 + Ny ** 2 + Nz ** 2)\n Ni = Nx / VecMag\n Nj = Ny / VecMag\n Nk = Nz / VecMag\n return [Ni, Nj, Nk]", "def normals(t, v):\n n = numpy.zeros((len(t), 3))\n for i in range(0, len(t)):\n p = vertices(t[i], v)\n n[i] = triangle.normal(p)\n return n", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] 
= 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def vertex_normals(self) -> np.ndarray:\n\n if self._vertex_normals is None:\n self.compute_vertex_normals()\n assert self._vertex_normals is not None\n return self._vertex_normals", "def calculate_plane_normal(patches):\n normals = []\n for patch in patches:\n normal = get_normal(patch)\n normals.append(normal)\n # Taken naive mean of normals\n # TODO outlier removal\n normals = np.mean(np.array(normals), axis=0)\n return normals", "def normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n o = centroid_points(points)\n a = subtract_vectors(points[-1], o)\n for i in range(p):\n b = subtract_vectors(points[i], o)\n n = cross_vectors(a, b)\n a = b\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l", "def normals(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n normals_at_point = 0.5 * gs.cross(vertex_1 - vertex_0, vertex_2 - vertex_0)\n return normals_at_point", "def compute_vertex_normals(vertices_zyx, faces, weight_by_face_area=False, face_normals=None):\n if face_normals is None:\n face_normals = compute_face_normals_numpy(vertices_zyx, faces, not weight_by_face_area)\n\n # numba is slightly faster for vertex normals, but not face normals\n if _numba_available:\n assert vertices_zyx.dtype == np.float32, \\\n f\"Our numba implementation requires float32 vertices, not {vertices_zyx.dtype}\"\n return compute_vertex_normals_numba(vertices_zyx, faces, weight_by_face_area, face_normals)\n else:\n return compute_vertex_normals_numpy(vertices_zyx, faces, weight_by_face_area, face_normals)", "def get_vertex_normals(self, idx=-1, norm=False):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n neighbours = self.corners[idx]\n areas = np.vectorize(lambda x: self.get_face_areas(x))(neighbours)\n face_norms = np.vectorize(lambda x: self.get_face_normals(x, True), 
signature='()->(n)')(neighbours)\n vertex_normal = np.sum(face_norms * areas[:, np.newaxis], axis=0)\n return vertex_normal / np.linalg.norm(vertex_normal) if norm else vertex_normal\n else:\n fn = self.get_face_normals(norm=False)\n matrix = self._get_vertex_face_adjacency()\n vertex_normal = matrix.dot(fn)\n return vertex_normal / np.linalg.norm(vertex_normal) if norm else vertex_normal", "def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect", "def normalize(m, cube):\n face = cube[:, :, m]\n face_sum = test.np.sum(face)\n for i in range(cube.shape[0]):\n for j in range(cube.shape[1]):\n cube[i][j][m] = cube[i][j][m] / face_sum", "def test_magnitude(self):\n\n # test small magnitudes with regular unit vectors\n u1 = (1,)\n u2 = (0, 1/2, 0, 1/2, 1/2, 0, 0, 0, 1/2)\n u3 = (12/13, 4/13, 3/13)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (u1, u2, u3):\n p = s.make_point(n, d)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n\n # test direction vector normalization\n v1 = (73733,)\n v2 = tuple(range(30))\n v3 = (-11, 1, 0, -1, 11, 1/11)\n for k in (0, -1, 1):\n s = space(fake_curvature=k)\n for d in (0, 1, 1/3, 3/2):\n for n in (v1, v2, v3):\n p = s.make_point(n, d, normalize=True)\n self.assertTrue(isclose(\n abs(p),\n d\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d\n ))\n \n # test elliptic space looping property\n pi_ref = 3.14159265358979323846264338327933\n for r in (1, 2, 3, 1/3):\n k = 1/r\n s = space(fake_curvature=k)\n for j, d in ((2, pi_ref - 2), (pi_ref, 0)):\n j *= r\n d *= r\n for n in (u1, u2, u3):\n p = s.make_point(n, j)\n self.assertTrue(isclose(\n abs(p),\n d,\n abs_tol = 1e-15\n ))\n self.assertTrue(isclose(\n s.distance_between(p, s.make_origin(len(n))),\n d,\n abs_tol = 1e-15\n ))", "def norm(self):\n self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))\n self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))", "def testNormalize(self):\n v1 = Vector.ones(4)\n n = v1.norm()\n assert n == 2\n assert v1.normalize() == [ 0.5, 0.5, 0.5, 0.5 ]", "def normal(X, F, A=None):\n\n n = X.shape[0]\n eps = 1e-6\n\n # generate adjacency matrix\n if not A:\n A = adjacency(F)\n\n # compute vertex degree\n D = np.asarray(A.sum(1)).squeeze()\n\n # compute normals of each face\n Nf = ncrossp(X[F[:, 1], :]-X[F[:, 0], :],\n X[F[:, 2], :]-X[F[:, 0], :])\n\n rows = np.concatenate([F[:, 0], F[:, 1], F[:, 2]])\n cols = np.concatenate([F[:, 0], F[:, 1], F[:, 2]])\n\n d0 = np.concatenate([Nf[:, 0], Nf[:, 0], Nf[:, 0]])\n d1 = np.concatenate([Nf[:, 1], Nf[:, 1], Nf[:, 1]])\n d2 = np.concatenate([Nf[:, 2], Nf[:, 2], Nf[:, 2]])\n\n N = np.zeros((n, 3))\n N[:, 0] = sparse.csr_matrix((d0, (rows, cols)), shape=(n, n)).diagonal()\n N[:, 1] = sparse.csr_matrix((d1, (rows, cols)), shape=(n, 
n)).diagonal()\n N[:, 2] = sparse.csr_matrix((d2, (rows, cols)), shape=(n, n)).diagonal()\n N = N / D[:, None]\n\n dnorm = np.sqrt((N**2).sum(1))\n dnorm[dnorm < eps] = 1\n N = N / dnorm[:, None]\n\n return N", "def make_inward_normal(tetrahedron):\n\n convert_to_np_array = lambda v: np.array([v.x, v.y, v.z])\n np_vertices = list(map(convert_to_np_array, [tetrahedron.get_vertex(i) for i in range(4)]))\n # This is the middle point\n # midpoint = np.mean(np_vertices, axis=0)\n\n midpoint = np_vertices[0]\n for i in range(1, 4):\n midpoint += np_vertices[i]\n midpoint = midpoint / 2.0\n\n for i in range(4):\n face = tetrahedron.get_face(i)\n d = distance(face, midpoint)\n if d < 0:\n face.nx *= -1.0\n face.ny *= -1.0\n face.nz *= -1.0\n face.d *= -1.0", "def _normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n for i in range(-1, p - 1):\n p1 = points[i - 1]\n p2 = points[i]\n p3 = points[i + 1]\n v1 = subtract_vectors(p1, p2)\n v2 = subtract_vectors(p3, p2)\n n = cross_vectors(v1, v2)\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l", "def normal_triangle(triangle, unitized=True):\n assert len(triangle) == 3, \"Three points are required.\"\n a, b, c = triangle\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n if not unitized:\n return n\n lvec = length_vector(n)\n return n[0] / lvec, n[1] / lvec, n[2] / lvec", "def generate_vertex_normals(vertices, index, normalize_result=True):\n v1, v2, v3 = np.rollaxis(vertices[index], axis=-2)\n face_normals = generate_normals(v1, v2, v3, normalize_result=False)\n vertex_normals = np.zeros_like(vertices)\n for i in range(3):\n np.add.at(vertex_normals, index[..., i], face_normals)\n if normalize_result:\n vertex_normals = normalize(vertex_normals)\n return vertex_normals", "def getAveNormals(nodes, elems):\n nodetrilist = []\n for nodenum in range(len(nodes)):\n nodetrilist.append([])\n for elemnum in range(len(elems)):\n if nodenum in elems[elemnum]:\n nodetrilist[nodenum].append(elemnum)\n avenorms = []\n for tri in nodetrilist:\n aveNi = 0.0\n aveNj = 0.0\n aveNk = 0.0\n denom = max(float(len(tri)), 1)\n for elem in tri:\n vert1 = [nodes[elems[elem][0]][0], nodes[elems[elem][0]][1],\n nodes[elems[elem][0]][2]]\n vert2 = [nodes[elems[elem][1]][0], nodes[elems[elem][1]][1],\n nodes[elems[elem][1]][2]]\n vert3 = [nodes[elems[elem][2]][0], nodes[elems[elem][2]][1],\n nodes[elems[elem][2]][2]]\n normals = getNormals(vert1, vert2, vert3)\n aveNi += normals[0]\n aveNj += normals[1]\n aveNk += normals[2]\n avenorms.append([aveNi / denom, aveNj / denom, aveNk / denom])\n return avenorms", "def face_info(xyz, A, B, C, D, average=True, normalize_normals=True, **kwargs):\n if \"normalizeNormals\" in kwargs:\n warnings.warn(\n \"The normalizeNormals keyword argument has been deprecated, please use normalize_normals. 
\"\n \"This will be removed in discretize 1.0.0\",\n DeprecationWarning,\n )\n normalize_normals = kwargs[\"normalizeNormals\"]\n if not isinstance(average, bool):\n raise TypeError(\"average must be a boolean\")\n if not isinstance(normalize_normals, bool):\n raise TypeError(\"normalize_normals must be a boolean\")\n # compute normal that is pointing away from you.\n #\n # A -------A-B------- B\n # | |\n # | |\n # D-A (X) B-C\n # | |\n # | |\n # D -------C-D------- C\n\n AB = xyz[B, :] - xyz[A, :]\n BC = xyz[C, :] - xyz[B, :]\n CD = xyz[D, :] - xyz[C, :]\n DA = xyz[A, :] - xyz[D, :]\n\n def cross(X, Y):\n return np.c_[\n X[:, 1] * Y[:, 2] - X[:, 2] * Y[:, 1],\n X[:, 2] * Y[:, 0] - X[:, 0] * Y[:, 2],\n X[:, 0] * Y[:, 1] - X[:, 1] * Y[:, 0],\n ]\n\n nA = cross(AB, DA)\n nB = cross(BC, AB)\n nC = cross(CD, BC)\n nD = cross(DA, CD)\n\n length = lambda x: np.sqrt(x[:, 0] ** 2 + x[:, 1] ** 2 + x[:, 2] ** 2)\n normalize = lambda x: x / np.kron(np.ones((1, x.shape[1])), mkvc(length(x), 2))\n if average:\n # average the normals at each vertex.\n N = (nA + nB + nC + nD) / 4 # this is intrinsically weighted by area\n # normalize\n N = normalize(N)\n else:\n if normalize_normals:\n N = [normalize(nA), normalize(nB), normalize(nC), normalize(nD)]\n else:\n N = [nA, nB, nC, nD]\n\n # Area calculation\n #\n # Approximate by 4 different triangles, and divide by 2.\n # Each triangle is one half of the length of the cross product\n #\n # So also could be viewed as the average parallelogram.\n #\n # TODO: This does not compute correctly for concave quadrilaterals\n area = (length(nA) + length(nB) + length(nC) + length(nD)) / 4\n\n return N, area", "def createNormaldata(self, face_normals_to_draw):\n mesh_normals = np.repeat(face_normals_to_draw, 3, axis=0)\n data_mesh_normals = mesh_normals.flatten()\n\n return data_mesh_normals", "def test_magnitude_normalize(self):\n\n a1 = vectors.Vector(1, 2, 3)\n self.assertEqual(a1.normalize().magnitude(), 1)", "def volume_tetrahedron(xyz, A, B, C, D):\n\n AD = xyz[A, :] - xyz[D, :]\n BD = xyz[B, :] - xyz[D, :]\n CD = xyz[C, :] - xyz[D, :]\n\n V = (\n (BD[:, 0] * CD[:, 1] - BD[:, 1] * CD[:, 0]) * AD[:, 2]\n - (BD[:, 0] * CD[:, 2] - BD[:, 2] * CD[:, 0]) * AD[:, 1]\n + (BD[:, 1] * CD[:, 2] - BD[:, 2] * CD[:, 1]) * AD[:, 0]\n )\n return V / 6", "def get_surface_normals_o3d(normals, points, scale=2):\n # total number of points:\n N = points.shape[0]\n\n points = np.vstack(\n (points.to_numpy(), points.to_numpy() + scale * normals)\n )\n lines = [[i, i+N] for i in range(N)]\n colors = np.zeros((N, 3)).tolist()\n\n # build pca line set:\n surface_normals_o3d = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n surface_normals_o3d.colors = o3d.utility.Vector3dVector(colors)\n\n return surface_normals_o3d", "def compute_normals(points, mask):\n # Mask invalid points\n points = np.copy(points)\n for i in range(3):\n points[np.logical_not(mask), i] = points[..., i].min()\n\n # Use a dilation filter to \"grow\" each point\n grow_points = np.copy(points)\n for i in range(4):\n grow_points = np.where(np.dstack((mask,)*3),\n grow_points,\n ndi.filters.maximum_filter(grow_points, size=(3,3,1))\n )\n\n # Blur point image\n for i in range(3):\n grow_points[...,i] = ndi.filters.gaussian_filter(grow_points[...,i], 1)\n\n # Compute row-wise and col-wise gradients\n dxdu, dxdv = np.gradient(grow_points[...,0])\n dydu, dydv = np.gradient(grow_points[...,1])\n dzdu, dzdv = np.gradient(grow_points[...,2])\n\n # Compute 3d 
tangent vectors\n row_tangents = np.dstack((dxdu, dydu, dzdu))\n col_tangents = -np.dstack((dxdv, dydv, dzdv)) # NB: -ve since v points along -ve y\n\n # Take cross product to compute normal\n normals = np.cross(row_tangents, col_tangents)\n\n # Normalise normals\n norm_lens = np.sqrt(np.sum(normals ** 2, axis=-1))\n norm_lens[norm_lens == 0] = 1 # Don't touch zero-length normals\n for i in range(3):\n normals[...,i] /= norm_lens\n normals[...,i] *= mask\n\n return normals", "def testRendersSimpleCube(self):\n\n model_transforms = camera_utils.euler_matrices(\n [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]\n\n vertices_world_space = torch.matmul(\n torch.stack([self.cube_vertices, self.cube_vertices]),\n model_transforms.transpose())\n\n normals_world_space = torch.matmul(\n torch.stack([self.cube_normals, self.cube_normals]),\n model_transforms.transpose())\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32)\n image_width = 640\n image_height = 480\n light_positions = torch.tensor([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])\n light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)\n vertex_diffuse_colors = torch.ones_like(vertices_world_space, dtype=torch.float32)\n\n renders = mesh_renderer.mesh_renderer(\n vertices_world_space, self.cube_triangles, normals_world_space,\n vertex_diffuse_colors, eye, center, world_up, light_positions,\n light_intensities, image_width, image_height)\n\n for image_id in range(renders.shape[0]):\n target_image_name = \"Gray_Cube_%i.png\" % image_id\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, renders[image_id, :, :, :])", "def test_other_side_mesh(self):\n layered_volume = np.array(\n [\n [\n [0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1],\n [0, 1, 1, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0],\n ]\n ]\n )\n\n def quad(v1, v2, v3, v4):\n \"\"\"counterclockwise winding faces to make quad\"\"\"\n return [[v3, v2, v1], [v4, v3, v2]]\n\n top_mesh = trimesh.Trimesh(\n vertices=np.array(\n [\n [0, 1, 5],\n [1, 1, 5],\n [0, 1, 2],\n [1, 1, 2],\n [0, 3.5, 1.5],\n [1, 3.5, 1.5],\n ]\n ),\n faces=np.concatenate([quad(0, 1, 3, 2), quad(2, 3, 5, 4)], axis=0),\n )\n\n bot_mesh = trimesh.Trimesh(\n vertices=np.array([[0, 2, 5], [1, 2, 5], [0, 4, 2], [1, 4, 2]]),\n faces=quad(0, 1, 3, 2),\n )\n\n up = [0, -1, 0]\n dup = [0, -np.sqrt(0.5), -np.sqrt(0.5)]\n nanvec = [np.nan, np.nan, np.nan]\n vectors = np.array(\n [\n [\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n [nanvec, nanvec, dup, up, up],\n [nanvec, dup, dup, nanvec, nanvec],\n [nanvec, dup, up, up, nanvec],\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n ]\n ]\n )\n\n distances, something_wrong = tested.distances_from_voxels_to_meshes_wrt_dir(\n layered_volume, [top_mesh, bot_mesh], vectors\n )\n\n npt.assert_array_almost_equal(distances, get_expected_distances_to_meshes())\n assert not np.any(something_wrong)", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = 
self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')", "def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))", "def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def test_norm_vector():\n random_state = np.random.RandomState(0)\n for n in range(1, 6):\n v = pr.random_vector(random_state, n)\n u = pr.norm_vector(v)\n assert_almost_equal(np.linalg.norm(u), 1)", "def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()", "def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)", "def normal(self,points):\n ez=np.array([[0,0,1]])\n v=((points-self.pos()*ez)*self.C-ez)\n return (v/np.linalg.norm(v,axis=1)[:,np.newaxis])#*np.sign(self.C)", "def test_surface_one_forms(self, faces, point):\n space = self.Space(faces=faces)\n\n result = space.surface_one_forms(point=point)\n assert result.shape == (space.n_faces, 2, 3), result.shape\n\n first_vec = result[:, 0, :]\n second_vec = result[:, 1, :]\n inner_prods = gs.einsum(\"ni,ni->n\", first_vec, second_vec)\n result = [prod in [0.0, 4.0] for prod in inner_prods]\n assert gs.all(result)\n\n singleton_point = gs.expand_dims(point, axis=0)\n result = space.surface_one_forms(point=singleton_point)\n assert result.shape == (1, space.n_faces, 2, 3)\n\n point = gs.array([point, point])\n result = space.surface_one_forms(point=point)\n assert result.shape == (2, space.n_faces, 2, 3)\n\n first_vec = result[:, :, 0, :]\n second_vec = result[:, :, 1, :]\n inner_prods = gs.einsum(\"mni,mni->mn\", first_vec, second_vec)\n result = []\n for inner_prod in inner_prods:\n result.append([prod in [0.0, 4.0] for prod in inner_prod])\n assert gs.all(result)", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), 
(0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n 
(0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize(self.Z)\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/4, 1/4, 2/6, 2/6, 3/4, 3/4, 4/6, 4/6])", "def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()", "def normalize_face_landmarks(face_landmarks):\r\n\tface_landmarks_norm = np.zeros(face_landmarks.shape)\r\n\t\r\n\tfor (i, lm) in enumerate(face_landmarks):\r\n\t\tface_landmarks_norm[i] = lm - lm[nose_center_idx]\r\n\t\t\t\r\n\tstd_x = np.std(face_landmarks_norm[:,:,0].reshape((-1,)))\r\n\tstd_y = np.std(face_landmarks_norm[:,:,1].reshape((-1,)))\r\n\t\r\n\tface_landmarks_norm[:,:,0] = np.multiply(face_landmarks_norm[:,:,0], 1./std_x)\r\n\tface_landmarks_norm[:,:,1] = np.multiply(face_landmarks_norm[:,:,1], 1./std_y)\r\n\t\r\n\treturn face_landmarks_norm", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.normal((2,2), -1, 2))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def normal(self):\n M = numpy.sqrt(self.magnitude())\n self.pure = self.pure / M\n self.real = self.real / M", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.Y, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/10, 1/10, 2/10, 2/10, 3/10, 3/10, 4/10, 4/10])", "def vec_normal(vec):\r\n n = sqrt(sum(x ** 2 for x in vec)) or 1\r\n return [x / n for x in vec]", "def testThatCubeRotates(self):\n image_height = 480\n image_width = 640\n initial_euler_angles = [[0.0, 0.0, 0.0]]\n\n euler_angles = torch.tensor(initial_euler_angles, requires_grad=True)\n model_rotation = 
camera_utils.euler_matrices(euler_angles)[0, :3, :3]\n model_rotation.requires_grad = True\n\n vertices_world_space = torch.reshape(\n torch.matmul(self.cube_vertices, model_rotation.transpose()),\n [1, 8, 3])\n\n normals_world_space = torch.reshape(\n torch.matmul(self.cube_normals, model_rotation.transpose()),\n [1, 8, 3])\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0]], dtype=torch.float32)\n\n vertex_diffuse_colors = torch.ones_like(vertices_world_space)\n light_positions = torch.reshape(eye, [1, 1, 3])\n light_intensities = torch.ones([1, 1, 3], dtype=torch.float32)\n\n # Pick the desired cube rotation for the test:\n test_model_rotation = camera_utils.euler_matrices([[-20.0, 0.0, 60.0]])[0, :3, :3]\n\n desired_vertex_positions = torch.reshape(\n torch.matmul(self.cube_vertices, test_model_rotation.transpose())\n [1, 8, 3])\n desired_normals = torch.reshape(\n torch.matmul(self.cube_normals, test_model_rotation.transpose()),\n [1, 8, 3])\n\n optimizer = torch.optim.SGD([euler_angles], lr=0.7, momentum=0.1)\n for _ in range(35):\n optimizer.zero_grad()\n render = mesh_renderer.mesh_renderer(\n vertices_world_space,\n self.cube_triangles,\n normals_world_space,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height)\n desired_render = mesh_renderer.mesh_renderer(\n desired_vertex_positions,\n self.cube_triangles,\n desired_normals,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height)\n loss = torch.mean(torch.abs(render - desired_render))\n loss.backward()\n optimizer.step()\n\n render = torch.reshape(render, [image_height, image_width, 4])\n desired_render = torch.reshape(desired_render, [image_height, image_width, 4])\n target_image_name = \"Gray_Cube_0.png\"\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, desired_render)\n test_utils.expect_image_file_and_render_are_near(\n self,\n baseline_image_path,\n render,\n max_outlier_fraction=0.01,\n pixel_error_threshold=0.04)", "def vector_3d_magnitude(x, y, z):\n return math.sqrt((x * x) + (y * y) + (z * z))", "def test_double_normalization(self):\n\n v = Vector({\"x\": 3, \"y\": 1.2, \"z\": -2})\n v.normalize()\n w = v.copy()\n w.normalize()\n self.assertEqual(v.dimensions, w.dimensions)", "def test_antinormal_reflection(self):\n n1 = 1.0\n n2 = 1.5\n normal = (0.0, 0.0, -1.0)\n angle = 0.0\n ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)\n fresnel = FresnelReflection()\n assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)\n new_ray = fresnel.transform(ray, {\"normal\": normal})\n assert np.allclose(flip(ray.direction), new_ray.direction)", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Z])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/8, 1/12, 2/8, 2/12, 3/8, 3/12, 4/8, 4/12])", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), 
(1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def polyNormalPerVertex(*args, allLocked: bool=True, deformable: bool=True, freezeNormal:\n bool=True, normalX: Union[float, bool]=0.0, normalXYZ: Union[List[float,\n float, float], List[List[float, float, float]], bool]=None, normalY:\n Union[float, bool]=0.0, normalZ: Union[float, bool]=0.0, relative:\n bool=True, unFreezeNormal: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[bool, Any]:\n pass", "def testAlphaTwoNllsMatchANormalDistribution(self):\n x = jnp.linspace(-10, 10, 1000)\n scale = 1.7\n nll = self.variant(self._distribution.nllfun)(x, 2, scale)\n nll_true = -scipy.stats.norm(0., scale).logpdf(x)\n chex.assert_tree_all_close(nll, nll_true, atol=1e-5, rtol=1e-5)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def triplet_loss_normals(self, normals):\n batch, T, dim = normals.size()\n similarity_list = []\n for i in range(T):\n for j in range(i + 1, T):\n similarity = torch.pow(torch.cosine_similarity(normals[:, i, :], normals[:, j, :]), 10).unsqueeze(0)\n similarity_list.append(similarity)\n sim_tensor = torch.cat(similarity_list)\n sim_tensor = torch.transpose(sim_tensor, 0, 1)\n sim_tensor = torch.sum(sim_tensor, dim=1).unsqueeze(1)\n # sim_tensor = sim_tensor.mean(-1) #in case we want average over all combinations\n zeros = torch.zeros(batch).unsqueeze(1).to(self.device)\n losses = torch.max(sim_tensor, zeros)\n return losses", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), (0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def surface(func, umin=0, umax=2*np.pi, ucount=64, urepeat=1.0,\n vmin=0, vmax=2*np.pi, vcount=64, vrepeat=1.0):\n\n vtype = [('position', np.float32, 3),\n ('texcoord', np.float32, 2),\n ('normal', np.float32, 3)]\n itype = np.uint32\n\n # umin, umax, ucount = 0, 2*np.pi, 64\n # vmin, vmax, vcount = 0, 2*np.pi, 64\n\n vcount += 1\n ucount += 1\n n = vcount*ucount\n\n Un = np.repeat(np.linspace(0, 1, ucount, endpoint=True), vcount)\n Vn = np.tile (np.linspace(0, 1, vcount, endpoint=True), ucount)\n U = umin+Un*(umax-umin)\n V = vmin+Vn*(vmax-vmin)\n\n vertices = np.zeros(n, dtype=vtype)\n for i,(u,v) in enumerate(zip(U,V)):\n vertices[\"position\"][i] = func(u,v)\n\n vertices[\"texcoord\"][:,0] = Un*urepeat\n vertices[\"texcoord\"][:,1] = Vn*vrepeat\n\n indices = []\n for i in range(ucount-1):\n for j in range(vcount-1):\n indices.append(i*(vcount) + j )\n indices.append(i*(vcount) + j+1 )\n indices.append(i*(vcount) + j+vcount+1)\n indices.append(i*(vcount) + j+vcount )\n indices.append(i*(vcount) + j+vcount+1)\n 
indices.append(i*(vcount) + j )\n indices = np.array(indices, dtype=itype)\n vertices[\"normal\"] = normals(vertices[\"position\"],\n indices.reshape(len(indices)//3,3))\n\n return vertices.view(gloo.VertexBuffer), indices.view(gloo.IndexBuffer)", "def get_face_normal(self):\n if self.mesh is None:\n self.load_mesh()\n self.mesh.set_face_normal()", "def MillerNormalVectors_111():\r\n k_111 = np.array(\r\n [[1, 1, 1], [-1, 1, 1], [1, -1, 1], [1, 1, -1]])/np.sqrt(3.0)\r\n return np.array([k_111[0, :], k_111[1, :], k_111[2, :], k_111[3, :],\r\n -k_111[0, :], -k_111[1, :], -k_111[2, :], -k_111[3, :]])", "def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn", "def getNormalizedNormalVec(self):\n TriPos = self.position\n # calc normalized normal vecor for Tri\n # get vectors Vert1Vert2 & Vert2Vert3\n TriVectors = np.subtract(TriPos[1:],TriPos[:-1])\n # get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)\n TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0\n # get length of surface normal\n length = np.linalg.norm(TriNorm)\n # divide each component of surface normal by length (= normalized surface normal)\n NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise different values, equals not found\n # create string of tuple for segment dict \n #SegmDict = str(tuple(NormalizedNormalVec))\n return NormalizedNormalVec.tolist()", "def get_normal_vectors(self, p, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value = self._get_normal_vectors(p, x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value)", "def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)", "def normal(self, uv):\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if not res.IsNormalDefined():\n return (0, 0, 0)\n normal = geom_utils.gp_to_numpy(res.Normal())\n if self.reversed():\n normal = -normal\n return normal", "def test_marching_points(self):\n try:\n from skimage import measure # NOQA\n except ImportError:\n g.log.warning('no skimage, skipping marching cubes test')\n return\n\n # get some points on the surface of an icosahedron\n points = g.trimesh.creation.icosahedron().sample(1000)\n # make the pitch proportional to scale\n pitch = points.ptp(axis=0).min() / 10\n # run marching cubes\n mesh = g.trimesh.voxel.ops.points_to_marching_cubes(\n points=points, pitch=pitch)\n\n # mesh should have faces\n assert len(mesh.faces) > 0\n # mesh should be roughly centered\n assert (mesh.bounds[0] < -.5).all()\n assert (mesh.bounds[1] > .5).all()", "def normalize_test_4(self):\n\n res = self.XYZ_factor_n.normalize(self.X)\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/2, 1/2, 2/4, 2/4, 3/6, 3/6, 4/8, 4/8])", "def volume_polyhedron(polyhedron):\n V = 0\n for fkey in polyhedron.face:\n vertices = polyhedron.face_vertices(fkey, ordered=True)\n if len(vertices) == 3:\n faces = [vertices]\n else:\n faces = []\n for i in range(1, len(vertices) - 1):\n faces.append(vertices[0:1] + vertices[i:i + 2])\n for face in faces:\n a = polyhedron.vertex_coordinates(face[0])\n b = polyhedron.vertex_coordinates(face[1])\n c = polyhedron.vertex_coordinates(face[2])\n ab = subtract_vectors(b, a)\n ac 
= subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n V += dot_vectors(a, n)\n return V / 6.", "def test_perpendicular_to_vectors():\n random_state = np.random.RandomState(0)\n a = pr.norm_vector(pr.random_vector(random_state))\n a1 = pr.norm_vector(pr.random_vector(random_state))\n b = pr.norm_vector(pr.perpendicular_to_vectors(a, a1))\n c = pr.norm_vector(pr.perpendicular_to_vectors(a, b))\n assert_almost_equal(pr.angle_between_vectors(a, b), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(a, c), np.pi / 2.0)\n assert_almost_equal(pr.angle_between_vectors(b, c), np.pi / 2.0)\n assert_array_almost_equal(pr.perpendicular_to_vectors(b, c), a)\n assert_array_almost_equal(pr.perpendicular_to_vectors(c, a), b)", "def normal(self) -> 'MultiVector':\n\n return self / np.sqrt(abs(self.mag2()))", "def test_random_sphere_vector():\n\ttest_vector = o_gen_instance.generate_random_sphere_vector()\n\tassert isinstance(test_vector, np.ndarray)\n\tassert test_vector.shape == (3,)\n\tfor component in test_vector:\n\t\tassert component != 0.\n\tassert np.isclose(np.linalg.norm(test_vector), 1.0)", "def test_magnitude(self):\n\n a1 = vectors.Vector(1, 2, 3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(-1, -2, -3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(1, 0, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 1, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 0, 1)\n self.assertEqual(a1.magnitude(), 1)", "def extract_normals(points, search_radius = 0.1):\n cloud = pclpy.pcl.PointCloud.PointXYZ(points)\n cloud_normal_estimator = pclpy.pcl.features.NormalEstimationOMP.PointXYZ_Normal()\n tree = pclpy.pcl.search.KdTree.PointXYZ()\n cloud_normal_estimator.setInputCloud(cloud)\n cloud_normal_estimator.setSearchMethod(tree)\n cloud_normal_estimator.setRadiusSearch(search_radius)\n normals = pclpy.pcl.PointCloud.Normal()\n cloud_normal_estimator.compute(normals)\n return normals", "def on_sphere():\n vec = np.random.standard_normal(3)\n return vec / np.linalg.norm(vec)", "def normalize_test_6(self):\n\n res = self.XYZ_factor_n.normalize([self.X, self.Y])\n assert(res.rand_vars == [self.X, self.Y, self.Z] and\n res.values == [1/6, 1/6, 2/6, 2/6, 3/20, 3/20, 4/20, 4/20])", "def test_sphere3(self):\n fun = get_problem('sphere3', dimension=2, lower=-65.536, upper=65.536)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)" ]
[ "0.7417501", "0.7163587", "0.71501", "0.7141003", "0.6889132", "0.6833686", "0.6778833", "0.66802526", "0.6537442", "0.65106905", "0.64550817", "0.64479667", "0.6442539", "0.6430552", "0.6334144", "0.6329478", "0.6270688", "0.62549996", "0.6243213", "0.6220221", "0.61403275", "0.6138672", "0.613805", "0.61320025", "0.60644686", "0.6056448", "0.60092866", "0.59946376", "0.5993378", "0.59783924", "0.59600085", "0.5924578", "0.58938414", "0.5861656", "0.58511186", "0.58480376", "0.58467025", "0.5839233", "0.58345103", "0.5828732", "0.57934827", "0.57863057", "0.5785902", "0.57774514", "0.5771695", "0.5766326", "0.5749029", "0.57400805", "0.56986064", "0.568109", "0.56590295", "0.5653561", "0.56470287", "0.56228966", "0.5621856", "0.56135815", "0.55908656", "0.5590858", "0.5587914", "0.55721647", "0.5557104", "0.5553281", "0.55323523", "0.5526479", "0.54790235", "0.54677755", "0.546464", "0.5463785", "0.5454099", "0.5453137", "0.54353815", "0.54266447", "0.541642", "0.5411297", "0.5410763", "0.54073083", "0.5404962", "0.53984153", "0.5384784", "0.536098", "0.5356605", "0.53534395", "0.53521055", "0.5351853", "0.53485066", "0.5342855", "0.5339623", "0.53367263", "0.5335021", "0.5327403", "0.5315681", "0.5315242", "0.5314254", "0.5309935", "0.5298663", "0.52896756", "0.5288111", "0.528619", "0.5259934", "0.5258041" ]
0.7896348
0
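The record above closes with its per-negative scores, the positive document's score, and its rank. As a purely illustrative sketch — not part of the dataset, with the embedding step elided and every name here (cosine, triplet_margin_loss, the toy vectors) assumed only for demonstration — a "triplet" objective of the kind declared in each record's metadata could consume such a (query, document, negatives) record roughly like this:

import numpy as np

def cosine(a, b):
    # Cosine similarity between two embedding vectors.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def triplet_margin_loss(query_vec, doc_vec, negative_vecs, margin=0.1):
    # Hinge-style loss: the positive document should beat every negative
    # by at least `margin` in similarity to the query.
    pos = cosine(query_vec, doc_vec)
    gaps = [max(0.0, margin - (pos - cosine(query_vec, n))) for n in negative_vecs]
    return sum(gaps) / len(gaps)

# Toy usage with random vectors standing in for text embeddings.
rng = np.random.default_rng(0)
query_vec, doc_vec = rng.normal(size=64), rng.normal(size=64)
negative_vecs = [rng.normal(size=64) for _ in range(4)]
print(triplet_margin_loss(query_vec, doc_vec, negative_vecs))

The per-negative similarities computed this way are analogous in spirit to the negative_scores field above, and ranking the positive document against the negatives by similarity corresponds to the document_rank field.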
Test surface one forms.
def test_surface_one_forms(self, faces, point): space = self.Space(faces=faces) result = space.surface_one_forms(point=point) assert result.shape == (space.n_faces, 2, 3), result.shape first_vec = result[:, 0, :] second_vec = result[:, 1, :] inner_prods = gs.einsum("ni,ni->n", first_vec, second_vec) result = [prod in [0.0, 4.0] for prod in inner_prods] assert gs.all(result) singleton_point = gs.expand_dims(point, axis=0) result = space.surface_one_forms(point=singleton_point) assert result.shape == (1, space.n_faces, 2, 3) point = gs.array([point, point]) result = space.surface_one_forms(point=point) assert result.shape == (2, space.n_faces, 2, 3) first_vec = result[:, :, 0, :] second_vec = result[:, :, 1, :] inner_prods = gs.einsum("mni,mni->mn", first_vec, second_vec) result = [] for inner_prod in inner_prods: result.append([prod in [0.0, 4.0] for prod in inner_prod]) assert gs.all(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_contains_forms(self):\n response = self.client.get(self.url)\n \n first_author_form = response.context.get('first_author_form')\n authors_fs = response.context.get('authors_fs')\n book_form = response.context.get('book_form')\n language_form = response.context.get('language_form')\n \n self.assertIsInstance(first_author_form, AuthorForm)\n self.assertIsInstance(authors_fs, AuthorFormSet)\n self.assertIsInstance(book_form, BookForm)\n self.assertIsInstance(language_form, LanguageForm)", "def test_make_form():", "def test_afancy_form(self):\n\n sel = self.selenium\n\n self.open(\"@@p.a.jqt.testPage/\", look_for=\"id=taform\")\n\n sel.click(\"taform\")\n self.waitForElement(\"div.overlay-ajax form\")\n time.sleep(1)\n self.assertTrue(sel.is_text_present(\"Test Form\"))\n self.assertFalse(sel.is_text_present(\"exact:Should not show\"))\n self.assertTrue(sel.is_text_present(\"exact:ajax_load:\"))\n\n sel.type(\"Password\", \"xxx\")\n sel.click(\"//input[@name='Check' and @value='3']\")\n sel.click(\"//input[@name='Radio' and @value='3']\")\n sel.click(\"submitButton\")\n time.sleep(3)\n\n self.assertTrue(sel.is_text_present(\"exact:ajax_load:\"))\n self.assertTrue(sel.is_text_present(\"exact:Multiple:one\"))\n self.assertTrue(sel.is_text_present(\"exact:Name:MyName1\"))\n self.assertTrue(sel.is_text_present(\"exact:Single2:A\"))\n self.assertTrue(sel.is_text_present(\"exact:Single:one\"))\n self.assertTrue(sel.is_text_present(\"exact:Radio:3\"))\n self.assertTrue(sel.is_text_present(\"exact:Text:This is Form1\"))\n self.assertTrue(sel.is_text_present(\"exact:submitButton:Submit1\"))\n self.assertTrue(sel.is_text_present(\"exact:Hidden:hiddenValue\"))\n self.assertTrue(sel.is_text_present(\"exact:Password:xxx\"))\n self.assertTrue(sel.is_text_present(\"exact:Check:3\"))\n\n # Make sure we can handle other submit methods, and that the\n # value of the submit button is in the request\n sel.click(\"//input[@name='submitButton' and @value='Submit2']\")\n time.sleep(3)\n self.assertTrue(sel.is_text_present(\"exact:submitButton:Submit2\"))\n sel.click(\"//button[@name='submitButton']\")\n time.sleep(3)\n self.assertTrue(sel.is_text_present(\"exact:submitButton:Submit5\"))\n\n # pushing submit6 should close the overlay\n sel.click(\"//input[@name='submitButton6']\")\n time.sleep(3)\n self.assertFalse(sel.is_text_present(\"Test Form\"))", "def test_form(self):\n\t\tform = self.resp.context['form']\n\t\tself.assertIsInstance(form, PadawanForm)", "def testOpe_titreForm(self):\n ope = models.Ope_titre.objects.get(id=2)\n form = gsb_forms.Ope_titreForm(instance=ope)\n self.assertTrue('titre' in [k for k in list(form.fields.keys())])\n self.assertTrue('compte' in [k for k in list(form.fields.keys())])", "def test_has_form(self):\n form = self.response.context['form']\n self.assertIsInstance(form, UserForm)", "def test_standardForm(self):\n\n a, b, t, M = self.cs.standardForm\n self.assertEqual((a, b, t[0, 0], t[1, 0]), (1., 1., 0., 0.))", "def test_get_form(self):\n form_class = Mock()\n self.view.banner = Mock()\n\n form = self.view.get_form(form_class)\n eq_(form, form_class.return_value)\n form_class.assert_called_with(self.view.banner, foo='bar', baz=1)", "def test_contains_form(self):\n form = self.response.context.get('form')\n self.assertIsInstance(form, UserCreationForm)", "def test_build_forms_from_questionnaire(self):\n self.view._build_forms_from_questionnaire()\n self.assertIsInstance(self.view.form_list, types.DictType)\n 
self.assertIs(self.view.form_list.get(str(len(self.view.form_list) - 1)), TempLanguageForm)", "def test_qualifierForm(self):\n print 'Running %s ...' % getName()\n \n s1 = self.sequenceListingFixture.create_sequence_instance(self.sequenceListing)\n \n f1 = Feature.objects.create(sequence=s1, \n featureKey='modified_base', \n location='7')\n qf1 = QualifierForm(feature=f1, \n data={'qualifierName': 'note',\n 'qualifierValue':'test for value'})\n \n self.assertTrue(qf1.is_valid())\n self.assertEqual('note', qf1.cleaned_data['qualifierName']) \n \n qf2 = QualifierForm(feature=f1, \n data={'qualifierName': 'xxx',\n 'qualifierValue':'test for xxx value'})\n \n self.assertTrue(qf2.is_valid())", "def test_submit_form_using_valid_data():", "def test_create_entry_route_has_form(testapp):\n response = testapp.get('/journal/new-entry', status=200)\n html = response.html\n assert len(html.find_all(\"form\")) == 1", "def isConstantForm(self, form):", "def test_unbound_form(self):\r\n self.assertTrue(self.elements, 'No input fields in form')\r\n self.assertFalse(self.unbound_form.is_bound)\r\n self.check_form_fields(self.unbound_form)\r\n self.check_form_fields(self.unbound_form.sub1)\r\n self.check_form_fields(self.unbound_form.sub2)", "def display_form(self):\n\n result = self.client.get(\"/submit_image\")\n self.assertIn(b\"multipart/form-data\", result.data)", "def test_has_form(self):\n form = self.response.context['form']\n self.assertIsInstance(form, UploadFileForm)", "def default_unittest(formdef):\n formdef(None) \n return\n ## I'd like to do an automatic check on validation but how?\n #f.defaults = {}\n #request = build_request('form',f.request_data)\n #f.validate(request)", "def test_all_forms():\n for forms in os.listdir('tests/forms/'):\n if forms.endswith(\".ql\"):\n report = debug_grammar(\"tests/forms/arithmetic.ql\")\n if \"line\" in report:\n print(report)\n assert False\n # todo: Make a folder with forms that should fail\n # elif forms.endswith('fails.ql'):\n\n else:\n assert True", "def testFormValidates(self):\n sdq1 = getattr(self.s1, 'sdq1')\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors == {}, \"Validation error raised: %s\" % controller_state.getErrors()", "def test_get_form_class(self):\n # Single label\n self.view.learning_model = TestSingleLabelClassifierModel()\n self.assertEqual(self.view.get_form_class(), SingleLabelClassifierForm)\n\n # Multi label\n self.view.learning_model = TestMultiLabelClassifierModel()\n self.assertEqual(self.view.get_form_class(), MultiLabelClassifierForm)", "def test_new_route_has_form(testapp, login_testcase):\n response = testapp.get('/journal/new-entry', status=200)\n html = response.html\n assert len(html.find_all('form')) == 1", "def test_Ope_titre_dividendeForm1(self):\n form_data = {'date': \"02/09/2012\", 'titre': \"3\", 'compte_titre': '5', 'compte_espece': '2', 'montant': '10'}\n cpt_titre = models.Compte.objects.get(id=1)\n form = gsb_forms.Ope_titre_dividendeForm(data=form_data, cpt=cpt_titre)\n r = form.is_valid()\n self.assertTrue(r)", "def test_main_config_form_same_casestatus(self):\n\n # create obects\n casestatus_1 = Casestatus.objects.create(\n 
casestatus_name='casestatus_1'\n ).casestatus_id\n casestatus_2 = Casestatus.objects.create(\n casestatus_name='casestatus_2'\n ).casestatus_id\n casestatus_3 = Casestatus.objects.create(\n casestatus_name='casestatus_3'\n ).casestatus_id\n casestatus_4 = Casestatus.objects.create(\n casestatus_name='casestatus_4'\n ).casestatus_id\n casestatus_5 = Casestatus.objects.create(\n casestatus_name='casestatus_5'\n ).casestatus_id\n # get object\n form = MainConfigForm(\n data={\n 'statushistory_entry_numbers': 2,\n 'cron_export_path': '/tmp',\n 'cron_username': 'cron',\n 'main_overview': 'main_overview_system',\n 'casestatus_start': [\n casestatus_1,\n casestatus_2,\n casestatus_3,\n ],\n 'casestatus_end': [\n casestatus_3,\n casestatus_4,\n casestatus_5,\n ],\n }\n )\n # compare\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['casestatus_start'],\n ['Same casestatus were chosen for start and end time.'],\n )\n self.assertEqual(\n form.errors['casestatus_end'],\n ['Same casestatus were chosen for start and end time.'],\n )", "def test_create_new_form(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n new_survey = SurveyForm.get(self.test_survey_name)\n assert new_survey is not None\n assert new_survey.form == self.test_form", "def fl_check_forms():\n _fl_check_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_check_forms\", \\\n cty.POINTER(xfdata.FL_OBJECT), [], \\\n \"\"\"FL_OBJECT * fl_check_forms() \"\"\")\n library.check_if_flinitialized()\n retval = _fl_check_forms()\n return retval", "def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertIs(self.view.form_class, TempLanguageForm)\n self.assertEqual(self.view.template_name, \"resources/templanguage_form.html\")", "def test_form_has_fields(self):\n form = self.response.context['form']\n self.assertSequenceEqual(['arquivo1', 'arquivo2'], list(form.fields))", "def test_simple(self):\n form = Form2()\n form(self.data, initial_data=self.initial_data)\n self.assertEqual({\n 'name1': ['one', ],\n 'name2': ['iname2', ],\n }, form.data)", "def test_main_config_form_same_artifactstatus(self):\n\n # create obects\n artifactstatus_1 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_1'\n ).artifactstatus_id\n artifactstatus_2 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_2'\n ).artifactstatus_id\n artifactstatus_3 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_3'\n ).artifactstatus_id\n artifactstatus_4 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_4'\n ).artifactstatus_id\n artifactstatus_5 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_5'\n ).artifactstatus_id\n # get object\n form = MainConfigForm(\n data={\n 'statushistory_entry_numbers': 4,\n 'cron_export_path': '/tmp',\n 'cron_username': 'cron',\n 'main_overview': 'main_overview_system',\n 'artifactstatus_requested': [\n artifactstatus_1,\n artifactstatus_2,\n artifactstatus_3,\n ],\n 'artifactstatus_acquisition': [\n artifactstatus_3,\n artifactstatus_4,\n artifactstatus_5,\n ],\n }\n )\n # compare\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['artifactstatus_requested'],\n ['Same artifactstatus were chosen for requested and acquisition time.'],\n )\n self.assertEqual(\n form.errors['artifactstatus_acquisition'],\n ['Same artifactstatus were chosen for requested and acquisition time.'],\n )", "def test_request_form_successful(self):\n response = 
self.client.get(reverse(\n 'form', kwargs={'slug': self.agency.slug}))\n self.assertContains(response, self.agency.name)", "def testAllFormsSubmitted(self):\n # both forms are submitted\n self.profile.student_data.tax_form = blobstore.BlobKey('fake key')\n self.profile.student_data.enrollment_form = blobstore.BlobKey('fake key')\n forms_submitted = profile_logic.allFormsSubmitted(self.profile.student_data)\n self.assertTrue(forms_submitted)", "def test_template(self):\n self.assertTemplateUsed(self.response, 'formularios.html')", "async def test_form(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] == {}\n # Patch functions\n with _patch_wizlight(), patch(\n \"homeassistant.components.wiz.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, patch(\n \"homeassistant.components.wiz.async_setup\", return_value=True\n ) as mock_setup:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n TEST_CONNECTION,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == \"WiZ Dimmable White ABCABC\"\n assert result2[\"data\"] == {\n CONF_HOST: \"1.1.1.1\",\n }\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def assertEqualForms(self, form1, form2):\n # in case of failures the messages could be nicer ...\n\n self.assertEqual([x.getId() for x in form1.get_fields()],\n [x.getId() for x in form2.get_fields()])\n for field in form1.get_fields():\n self.assertTrue(form2.has_field(field.getId()))\n field2 = getattr(form2, field.getId())\n # test if values are the same\n self.assertEqual(field.values, field2.values)\n # test if default renderings are the same\n self.assertEqual(field.render(), field2.render())\n\n self.assertEqual(form1.title, form2.title)\n # self.assertEqual(form1.row_lenght, form2.row_lenght) # not\n # initialized ?\n self.assertEqual(form1.name, form2.name)\n self.assertEqual(form1.action, form2.action)\n self.assertEqual(form1.method, form2.method)\n self.assertEqual(form1.enctype, form2.enctype)\n self.assertEqual(form1.encoding, form2.encoding)\n self.assertEqual(form1.stored_encoding, form2.stored_encoding)\n self.assertEqual(form1.unicode_mode, form2.unicode_mode)\n self.assertEqual(form1.i18n_domain, form2.i18n_domain)\n\n self.assertEqual(form1.get_groups(), form2.get_groups())\n\n # if we have forgotten something, this will usually remind us ;-)\n self.assertEqual(form1.render(), form2.render())", "def test_display_form(self):\n\n result = self.client.get(\"/registration\")\n self.assertIn(b\"password\", result.data)", "def test_get_category_forms(self):\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])", "def test_main_config_form_different_casestatus(self):\n\n # create obects\n casestatus_1 = Casestatus.objects.create(\n casestatus_name='casestatus_1'\n ).casestatus_id\n casestatus_2 = Casestatus.objects.create(\n casestatus_name='casestatus_2'\n ).casestatus_id\n casestatus_3 = Casestatus.objects.create(\n casestatus_name='casestatus_3'\n ).casestatus_id\n casestatus_4 = Casestatus.objects.create(\n casestatus_name='casestatus_4'\n ).casestatus_id\n casestatus_5 = Casestatus.objects.create(\n casestatus_name='casestatus_5'\n ).casestatus_id\n casestatus_6 = Casestatus.objects.create(\n 
casestatus_name='casestatus_6'\n ).casestatus_id\n # get object\n form = MainConfigForm(\n data={\n 'statushistory_entry_numbers': 3,\n 'cron_export_path': '/tmp',\n 'cron_username': 'cron',\n 'main_overview': 'main_overview_system',\n 'casestatus_start': [\n casestatus_1,\n casestatus_2,\n casestatus_3,\n ],\n 'casestatus_end': [\n casestatus_4,\n casestatus_5,\n casestatus_6,\n ],\n }\n )\n # compare\n self.assertTrue(form.is_valid())", "def test_make_form_hidden():", "def test_form_missing(self):\n self.step_data[\"basics\"] = {\"advanced\": True}\n resp = self.post_step(\"basics\")\n self.assertWizardFailure(resp, \"name\")", "def test_fields(self):\n tags = (\n ('<form', 1),\n ('<input', 6),\n ('type=\"text\"', 3),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, number in tags:\n with self.subTest():\n self.assertContains(self.reps, text, number)", "def test_home(self):\n\n resp = self.client.get('/')\n self.assertTrue('form' in resp.context)\n self.assertEqual(resp.status_code, 200)", "def test_form_fields(self):\n\n response = self.client.get(reverse('edit-poi', kwargs={'id': '1'}))\n\n fields = {\n \"name\": \"Newport Lighthouse\",\n \"alt_name\": \"\",\n \"latitude\": 43.966874,\n \"longitude\": -124.10534,\n \"description\": \"A pretty nice lighthouse\",\n \"history\": \"It was built at some time in the past\",\n \"facts\": \"It's a lighthouse\",\n \"street\": \"123 Fake St\",\n \"city\": \"Newport\",\n \"state\": \"Oregon\",\n \"location_description\": \"out on the cape over there\",\n \"zip\": \"11234\",\n \"website\": \"\",\n \"email\": \"\",\n \"phone\": None,\n }\n\n form = response.context['poi_form']\n\n for field in fields:\n self.assertEqual(fields[field], form[field].value())", "def test_fields(self):\n form = self._get_form(data=None)\n self.assertEquals(len(form.fields), 5)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('name' in form.fields)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('tags' in form.fields)", "def test_review_form(self):\n\n result = self.client.get(\"/brand/P87985432\")\n self.assertIn(b\"review_form\", result.data)", "def test_get_start_form_data(self):\n pass", "def test_display_form(self):\n response = self.client.get(reverse('tracking:internal-register'))\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertContains(\n response,\n '<h1>Registro de Entrada Comunidad Anáhuac</h1>',\n html=True\n )", "def testQuestionField(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['showYMD'] = False\n self.app.REQUEST.form['showHM'] = False\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors != {}, \"Validation error not raised\"\n assert errors.has_key('showYMD')\n assert errors.has_key('showHM')", "def test_match(self):\n form_data = self.form_data('r*A9x=^2hg&v7u?u9tg?u',\n 'A=r7-%=K?K@B^!9Q8=C+')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_display_form(self):\n\n result = self.client.get(\"/login\")\n self.assertIn(b\"Email address\", result.data)", "def test_fields(self):\n form = 
self._get_form(data=None)\n self.assertEquals(len(form.fields), 4)\n self.assertTrue('tests_url' in form.fields)\n self.assertTrue('repo_url' in form.fields)\n self.assertTrue('pkg_type' in form.fields)\n self.assertTrue('tags' in form.fields)", "def test_empty_ui(self):", "def test_html_structure(self):\n self.assertContains(self.response, '<form', 1)\n self.assertContains(self.response, '<input', 3)\n #3 pois são 2 filefield mais o csrf\n self.assertContains(self.response, 'type=\"file\"', 1)\n self.assertContains(self.response, 'type=\"submit\"', 1)", "def test_form_fields(self):\n self.login()\n\n res = self.client.get(url_for('create_user'))\n fields = ['form', 'input', 'Username', 'Password', 'Display Name',\n 'Email', 'Site Admin', 'Site Spectator', 'Site Manager',\n 'Active']\n\n for field in fields:\n assert field in res.get_data()", "def test_form_inputs(self):\n self.assertContains(self.response, '<input', 4)\n self.assertContains(self.response, 'type=\"text\"', 1)\n self.assertContains(self.response, 'type=\"password\"', 2)", "def test_main_config_form_different_artifactstatus(self):\n\n # create obects\n artifactstatus_1 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_1'\n ).artifactstatus_id\n artifactstatus_2 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_2'\n ).artifactstatus_id\n artifactstatus_3 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_3'\n ).artifactstatus_id\n artifactstatus_4 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_4'\n ).artifactstatus_id\n artifactstatus_5 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_5'\n ).artifactstatus_id\n artifactstatus_6 = Artifactstatus.objects.create(\n artifactstatus_name='artifactstatus_6'\n ).artifactstatus_id\n # get object\n form = MainConfigForm(\n data={\n 'statushistory_entry_numbers': 5,\n 'cron_export_path': '/tmp',\n 'cron_username': 'cron',\n 'main_overview': 'main_overview_system',\n 'artifactstatus_requested': [\n artifactstatus_1,\n artifactstatus_2,\n artifactstatus_3,\n ],\n 'artifactstatus_acquisition': [\n artifactstatus_4,\n artifactstatus_5,\n artifactstatus_6,\n ],\n }\n )\n # compare\n self.assertTrue(form.is_valid())", "def test_has_form(self):\n form = self.resp.context['form']\n self.assertIsInstance(form, SubscriptionForm)", "async def test_form(opp):\n result = await opp.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] is None\n\n with patch(\n \"openpeerpower.components.coolmaster.config_flow.CoolMasterNet.status\",\n return_value={\"test_id\": \"test_unit\"},\n ), patch(\n \"openpeerpower.components.coolmaster.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await opp.config_entries.flow.async_configure(\n result[\"flow_id\"], _flow_data()\n )\n await opp.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == \"1.1.1.1\"\n assert result2[\"data\"] == {\n \"host\": \"1.1.1.1\",\n \"port\": 10102,\n \"supported_modes\": AVAILABLE_MODES,\n }\n assert len(mock_setup_entry.mock_calls) == 1", "async def test_form(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] == {}\n\n with patch(\n 
\"homeassistant.components.volumio.config_flow.Volumio.get_system_info\",\n return_value=TEST_SYSTEM_INFO,\n ), patch(\n \"homeassistant.components.volumio.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n TEST_CONNECTION,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == \"TestVolumio\"\n assert result2[\"data\"] == {**TEST_SYSTEM_INFO, **TEST_CONNECTION}\n\n assert len(mock_setup_entry.mock_calls) == 1", "def test_main_config_form_empty(self):\n\n # get object\n form = MainConfigForm(\n data={\n 'cron_export_path': '/tmp',\n }\n )\n # compare\n self.assertFalse(form.is_valid())", "def test_Ope_titre_add_venteForm2(self):\n form_data = {'date': \"01/07/2011\", 'titre': \"3\", 'compte_titre': '5', 'compte_espece': '2', 'nombre': '1000',\n 'cours': '1.56', 'frais': '10', }\n cpt_titre = models.Compte.objects.get(id=5)\n form = gsb_forms.Ope_titre_add_venteForm(data=form_data, cpt=cpt_titre)\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors, {'titre': [\"titre pas en portefeuille\", ]})", "def test_form_ope_normal(self):\n form_data = {'compte': \"1\", 'date': \"02/09/2012\", 'date_val': \"\", 'montant': decimal.Decimal(24), 'tiers': \"1\",\n 'cat': \"3\", \"notes\": \"\", 'moyen': \"1\", \"num_cheque\": \"\", 'rapp': \"\", \"exercice\": \"\", \"ib\": \"\",\n \"piece_comptable\": \"\", \"nouveau_tiers\": \"\", \"operation_mere\": \"\", }\n form = gsb_forms.OperationForm(data=form_data, initial=form_data)\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['compte'].id, 1)\n self.assertEqual(form.cleaned_data['date'], utils.strpdate(\"2012-09-02\"))\n self.assertEqual(form.cleaned_data['montant'], decimal.Decimal(-24))\n self.assertEqual(form.cleaned_data['tiers'].id, 1)\n self.assertEqual(form.cleaned_data['cat'].id, 3)\n self.assertEqual(form.cleaned_data['moyen'].id, 1)\n self.assertEqual(form.cleaned_data['rapp'], None)", "def test_Ope_titre_dividendeForm3(self):\n form_data = {'date': \"02/09/2012\", 'titre': \"1\", 'compte_titre': '5', 'compte_espece': '2', 'montant': '10'}\n cpt_titre = models.Compte.objects.get(id=5)\n form = gsb_forms.Ope_titre_dividendeForm(data=form_data, cpt=cpt_titre)\n r = form.is_valid()\n self.assertFalse(r)\n self.assertEqual(form.errors,\n {'titre': ['Sélectionnez un choix valide. Ce choix ne fait pas partie de ceux disponibles.']})", "def test_context_data_with_empty_form(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIsInstance(context['form'], forms.SourceTraitSearchMultipleStudiesForm)\n self.assertFalse(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_blank(self):\n form_data = self.form_data('')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_form_errors(self):\n form = self.response.context.get('form')\n self.assertTrue(form.errors)", "def render_form():", "def make_form(self):", "def test_CRMA080_CT001(self):\n self.oHelper.WaitShow(\"Configuração de Filtros - Funil de Vendas\")\n self.oHelper.ScrollGrid(column=\"Processo\",match_value=\"FAT004\",grid_number=2)\n self.oHelper.ClickBox(fields=\"Processo\", contents_list=\"FAT004\", select_all=False, grid_number=2)\n self.oHelper.SetValue(\"Dt. 
Início\",\"18/07/2019\")\n self.oHelper.ClickCheckBox(\"Todos Vendedores\")\n self.oHelper.SetButton(\"Confirmar\")\n self.oHelper.WaitShow(\"Consulta - Funil de Vendas\")\n self.oHelper.ClickGridCell(column=\"Oportunidade\",row=2,grid_number=2)\n self.oHelper.SetButton(\"Outras Ações\",\"Visualizar Oportunidade\")\n self.oHelper.WaitShow(\"Oportunidade de Venda - Visualizar Oportunidade\")\n self.oHelper.SetButton(\"Fechar\")\n self.oHelper.WaitShow(\"Consulta - Funil de Vendas\")\n self.oHelper.SetButton(\"Outras Ações\",\"Configurar Filtros\")\n self.oHelper.WaitShow(\"Configuração de Filtros - Funil de Vendas\")\n self.oHelper.SetButton(\"Confirmar\")\n self.oHelper.WaitShow(\"Consulta - Funil de Vendas\")\n self.oHelper.SetButton(\"Cancelar\")\n self.oHelper.WaitHide(\"Consulta - Funil de Vendas\")\n self.oHelper.AssertTrue()", "def test_context_data_with_empty_form(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIsInstance(context['form'], forms.SourceTraitSearchForm)\n self.assertFalse(context['form'].is_bound)\n self.assertFalse(context['has_results'])\n self.assertIn('results_table', context)", "def test_form_entity(admin_client):\n entity_datas = factory.build(dict, FACTORY_CLASS=factories.EntityFormFactory)\n\n form = EntityForm(data=entity_datas)\n\n assert form.is_valid() == True\n\n form.save()\n\n assert Entity.objects.count() == 1", "def i_check_that_the_form_has_been_subimtted():\n driver.find_element_by_id(\"submit_message\").click()\n assert \"Contact Confirmation\" in driver.title", "def fl_check_only_forms():\n _fl_check_only_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_check_only_forms\", \\\n cty.POINTER(xfdata.FL_OBJECT), [], \\\n \"\"\"FL_OBJECT * fl_check_only_forms() \"\"\")\n library.check_if_flinitialized()\n retval = _fl_check_only_forms()\n return retval", "def test_Ope_titre_addForm1(self):\n form_data = {'date': \"02/09/2012\", 'titre': \"1\", 'compte_titre': '4', 'compte_espece': '2', 'nombre': '0',\n 'cours': '1.56', 'frais': '10'}\n form = gsb_forms.Ope_titre_addForm(data=form_data)\n r = form.is_valid()\n self.assertFalse(r)\n self.assertEqual(form.errors, {'nombre': ['le nombre de titre ne peut être nul']})", "def test_preview_post(self):\n pass", "def test_make_form_field():", "def test_form_ope_sans_tiers(self):\n form_data = {'compte': \"1\", 'date': \"02/09/2012\", 'date_val': \"\", 'montant': \"24\", 'tiers': \"\", 'cat': \"3\",\n \"notes\": \"\", 'moyen': \"1\", \"num_cheque\": \"\", 'rapp': \"\", \"exercice\": \"\", \"ib\": \"\",\n \"piece_comptable\": \"\", \"nouveau_tiers\": \"\", \"operation_mere\": \"\", }\n form = gsb_forms.OperationForm(data=form_data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors,\n {'nouveau_tiers': ['si vous ne choisissez pas un tiers, vous devez taper le nom du nouveau'],\n 'tiers': [\n \"si vous ne choisissez pas un tiers, vous devez taper le nom du nouveau dans le champs 'nouveau tiers'\"]})", "def test_context_data(self):\n response = self.client.get(self.get_url())\n context = response.context\n self.assertIn('form', context)\n self.assertIsInstance(context['form'], forms.SourceObjectLookupForm)", "def test_form_has_fields(self):\r\n self.form = SubscriptionForm()\r\n expect = ['name', 'cpf', 'email', 'phone']\r\n self.assertSequenceEqual(expect, list(self.form.fields))", "def test__InteractionForm__eq():\n title = 'important'\n components = [Component(ComponentType.button, label = 'chata')]\n custom_id = 'lie'\n \n 
keyword_parameters = {\n 'title': title,\n 'components': components,\n 'custom_id': custom_id,\n }\n \n interaction_form = InteractionForm(**keyword_parameters)\n vampytest.assert_eq(interaction_form, interaction_form)\n vampytest.assert_ne(interaction_form, object())\n \n for field_name, field_value in (\n ('title', 'fire'),\n ('components', None),\n ('custom_id', 'heart'),\n ):\n test_interaction_form = InteractionForm(**{**keyword_parameters, field_name: field_value})\n vampytest.assert_ne(interaction_form, test_interaction_form)", "def test_search_4(self):\n\n # search for \"cheese\"\n form = FrontSearchForm()\n form.search_box.set_value('cheese')\n form.submit.click()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def test_form_pass(self):\n resp = self.post_step(\"basics\")\n self.assertEqual(resp.status_code, 200)\n\n resp = self.post_step(\"config\", session=list(resp._request.session.items()))\n\n self.assertIsInstance(resp, HttpResponseRedirect)\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp[\"location\"], \"/projects/foobar/\")\n\n proj = Project.objects.get(name=\"foobar\")\n self.assertIsNotNone(proj)\n for key, val in list(self.step_data[\"basics\"].items()):\n self.assertEqual(getattr(proj, key), val)\n self.assertEqual(proj.documentation_type, \"sphinx\")", "async def test_form(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == \"form\"\n assert result[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.rituals_perfume_genie.config_flow.Account\",\n side_effect=_mock_account,\n ), patch(\n \"homeassistant.components.rituals_perfume_genie.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_EMAIL: TEST_EMAIL,\n CONF_PASSWORD: VALID_PASSWORD,\n },\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == \"create_entry\"\n assert result2[\"title\"] == TEST_EMAIL\n assert isinstance(result2[\"data\"][ACCOUNT_HASH], str)\n assert len(mock_setup_entry.mock_calls) == 1", "def test_virement_forms(self):\n form_data = {'date': \"02/09/2012\", 'compte_origine': '1', 'moyen_origine': '5', 'compte_destination': '2',\n 'moyen_destination': '5', 'montant': decimal.Decimal(\"13.50\"), 'notes': 'ceci est des notes',\n 'pointe': \"\"\n\n }\n form = gsb_forms.VirementForm(data=form_data)\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['montant'], decimal.Decimal(\"13.50\"))\n self.assertEqual(form.cleaned_data['compte_destination'].id, 2)\n form.save()\n self.assertEqual(models.Ope.objects.count(), 15)\n self.assertEqual(str(models.Ope.objects.filter(id__in=(14, 15)).order_by('id')),\n \"[<Ope: (14) le 02/09/2012 : -13.50 EUR tiers: cpte1 => cptb2 cpt: cpte1>, <Ope: (15) le 02/09/2012 : 13.50 EUR tiers: cpte1 => cptb2 cpt: cptb2>]\")", "def test_document_form_has_right_fields(self):\n form = DocumentForm()\n self.assertEqual(len(form.fields.keys()), 2)\n self.assertIn('file', form.fields.keys())\n self.assertIn('doctype', form.fields.keys())", "async def test_form(hass: HomeAssistant) -> None:\n result1 = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result1[\"type\"] == RESULT_TYPE_FORM\n assert result1[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", 
return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\n \"aussiebb.asyncio.AussieBB.get_services\", return_value=[FAKE_SERVICES[0]]\n ), patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result2 = await hass.config_entries.flow.async_configure(\n result1[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result2[\"title\"] == TEST_USERNAME\n assert result2[\"data\"] == FAKE_DATA\n assert result2[\"options\"] == {CONF_SERVICES: [\"12345678\"]}\n assert len(mock_setup_entry.mock_calls) == 1", "def test_Ope_titre_add_venteForm4(self):\n form_data = {'date': \"01/01/2013\", 'titre': \"3\", 'compte_titre': '5', 'compte_espece': '2', 'nombre': '1000',\n 'cours': '1.56', 'frais': '10', }\n cpt_titre = models.Compte.objects.get(id=5)\n form = gsb_forms.Ope_titre_add_venteForm(data=form_data, cpt=cpt_titre)\n self.assertFalse(form.is_valid())\n self.assertEqual(form.errors,\n {'titre': [\"titre pas assez en portefeuille pour que l'opération puisse s'effectuer\", ]})", "def test_incomplete_form(self):\n page = self.get_assert_200(self.url, user=self.voting_user1.username)\n form = page.forms[\"student-vote-form\"]\n self.fill_form(form, fill_complete=False)\n response = form.submit()\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"vote for all rating questions\", response)\n\n form = page.forms[\"student-vote-form\"]\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_text_question)].value, \"some text\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_likert_question)].value, \"1\")\n self.assertEqual(form[question_id(self.course.general_contribution, self.general_questionnaire, self.general_grade_question)].value, \"3\")\n\n self.assertEqual(form[question_id(self.contribution1, self.contributor_questionnaire, self.contributor_text_question)].value, \"some other text\")\n self.assertEqual(form[question_id(self.contribution1, self.contributor_questionnaire, self.contributor_likert_question)].value, \"4\")\n\n self.assertEqual(form[question_id(self.contribution2, self.contributor_questionnaire, self.contributor_text_question)].value, \"some more text\")", "def test_individual_ACH(self):\n form_data = self.form_data()\n form_data['payment_type'] = 'DirectDebit'\n form = DonationPaymentForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_Ope_titre_addForm3(self):\n form_data = {'date': \"02/09/2012\", 'titre': \"1\", 'compte_titre': '4', 'compte_espece': '2', 'nombre': '10',\n 'cours': '1.56', 'frais': '10'}\n form = gsb_forms.Ope_titre_addForm(data=form_data)\n r = form.is_valid()\n self.assertTrue(r)\n self.assertEqual(form.cleaned_data['frais'], -10)", "def show_madlib_form():\n\n game_response = request.args.get(\"play_game\")\n\n if game_response == \"yes\":\n return render_template(\"game.html\")\n else:\n return render_template(\"goodbye.html\")", "def test_charter_form_incomplete(self):\n\n data = {}\n charter_form = CharterForm(data=data)\n result = charter_form.is_valid()\n self.assertFalse(result)", "def test_form_not_found_none(self):\n\n assert SurveyForm.get(self.test_survey_name, throw_if_not_found=False) is None", "def test_compare_outputs_surface_form(self):\n # load models\n options = [\n {\"surface form\": cap} for cap in [\"false\", 
\"differential\", \"algebraic\"]\n ]\n model_combos = [\n ([pybamm.lead_acid.LOQS(opt) for opt in options]),\n ([pybamm.lead_acid.Full(opt) for opt in options]),\n ]\n\n for models in model_combos:\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 5, \"x_s\": 5, \"x_p\": 5}\n\n # discretise models\n discs = {}\n for model in models:\n geometry = model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n discs[model] = disc\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 20, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # compare outputs\n comparison = StandardOutputComparison(solutions)\n comparison.test_all(skip_first_timestep=True)", "def test_valid_form_true(self):\n form = UserRegisterForm(data=self.data)\n self.assertTrue(form.is_valid())", "def show_form():\n\n prompts = story.prompts\n\n return render_template(\"base.html\", prompts = prompts )", "def show_madlib_form():\n\n answer = request.args.get(\"game-choice\")\n\n if answer == \"No\":\n return render_template(\"goodbye.html\")\n else:\n return render_template(\"game.html\")", "def test_Ope_titre_add_venteForm1(self):\n form_data = {'date': \"02/09/2012\", 'titre': \"3\", 'compte_titre': '5', 'compte_espece': '2', 'nombre': '10',\n 'cours': '1.56', 'frais': '10', }\n form = gsb_forms.Ope_titre_add_venteForm(data=form_data)\n self.assertTrue(form.is_valid())\n self.assertQueryset_list(form.fields['titre'].queryset, models.Titre.objects.all().order_by('pk').values_list('pk', flat=True))", "def test_constructor(self):\n f = ListingForm()\n self.assertEqual('form-control', f.fields['comments'].widget.attrs['class'])\n self.assertEqual('form-control', f.fields['asking_price'].widget.attrs['class'])", "def test_valid_post_data(self):\n data = {\n # first_author_form\n 'name': 'Test author 1',\n \n # authors_fs --> 'form-0-name', ..., 'form-3-name' + ManagementForm\n 'form-0-name': 'Test author 2',\n 'form-1-name': '',\n 'form-2-name': '',\n 'form-3-name': '',\n 'form-TOTAL_FORMS': ['4'],\n 'form-INITIAL_FORMS': ['0'],\n 'form-MIN_NUM_FORMS': ['0'],\n 'form-MAX_NUM_FORMS': ['1000'],\n \n # language_form\n 'code': 'xx',\n \n # book_form\n 'title': 'Test title 1',\n 'pub_date': '2000-1-1',\n 'pages': '200',\n 'isbn': '9780575079212',\n 'cover_url': 'http://127.0.0.1:8000/',\n }\n self.assertTrue(Book.objects.count() == 0)\n self.assertTrue(Author.objects.count() == 0)\n self.assertTrue(Language.objects.count() == 0)\n \n self.client.post(self.url, data)\n \n self.assertTrue(Book.objects.count() == 1)\n self.assertTrue(Author.objects.count() == 2)\n self.assertTrue(Language.objects.count() == 1)", "def test_Fieldform_has_fields(self):\n self.assertSequenceEqual(\n [\n \"date\",\n \"start_time\",\n \"end_time\",\n \"temperature\",\n \"humidity\",\n \"coordinator\",\n \"staff\",\n \"parcel_id\",\n ],\n list(self.Fieldform.fields),\n )" ]
[ "0.67187005", "0.66679955", "0.6620312", "0.6506094", "0.6404817", "0.63322777", "0.6299859", "0.62120825", "0.6142838", "0.6095488", "0.6078851", "0.6059996", "0.6051694", "0.6024828", "0.6017514", "0.59854054", "0.5980818", "0.59628683", "0.5926908", "0.59222543", "0.5915682", "0.59151053", "0.5909431", "0.5891898", "0.58892375", "0.58809274", "0.5862783", "0.5858445", "0.584944", "0.5841367", "0.58144695", "0.57990247", "0.579689", "0.5790615", "0.5759528", "0.5747735", "0.5722214", "0.57191294", "0.57055545", "0.5700707", "0.56999743", "0.5688971", "0.56742144", "0.5673919", "0.5671842", "0.56708586", "0.5665376", "0.5651421", "0.564804", "0.56479275", "0.5645855", "0.5638962", "0.5627505", "0.5610505", "0.559397", "0.55886376", "0.5585066", "0.55806476", "0.557713", "0.5576055", "0.5574327", "0.55670184", "0.556656", "0.5564192", "0.55547774", "0.55445606", "0.5543916", "0.55413425", "0.5536898", "0.5526403", "0.55231494", "0.55225414", "0.55173117", "0.55171305", "0.5515865", "0.5509008", "0.5505511", "0.55004865", "0.5498138", "0.54979616", "0.5497943", "0.5485023", "0.54812", "0.5477839", "0.54751617", "0.546621", "0.5456699", "0.54552996", "0.54535496", "0.54349476", "0.54342496", "0.54299814", "0.54271626", "0.54244196", "0.5415822", "0.5404595", "0.5400351", "0.53995734", "0.5387249", "0.5380998", "0.5380873" ]
0.0
-1
Test surface metric matrices.
def test_surface_metric_matrices(self, faces, point):
        space = self.Space(faces=faces)

        result = space.surface_metric_matrices(point=point)
        assert result.shape == (
            space.n_faces,
            2,
            2,
        ), result.shape

        point = gs.array([point, point])
        result = space.surface_metric_matrices(point=point)
        assert result.shape == (2, space.n_faces, 2, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def surface_metric_matrices(self, point):\n one_forms = self.surface_one_forms(point)\n\n return self._surface_metric_matrices_from_one_forms(one_forms)", "def test_symmetry_surface_average_2(self):\n\n def test(grid, basis, true_avg=1):\n transform = Transform(grid, basis)\n\n # random data with specified average on each surface\n coeffs = np.random.rand(basis.num_modes)\n coeffs[np.where((basis.modes[:, 1:] == [0, 0]).all(axis=1))[0]] = 0\n coeffs[np.where((basis.modes == [0, 0, 0]).all(axis=1))[0]] = true_avg\n\n # compute average for each surface in grid\n values = transform.transform(coeffs)\n numerical_avg = surface_averages(grid, values, expand_out=False)\n if isinstance(grid, ConcentricGrid):\n # values closest to axis are never accurate enough\n numerical_avg = numerical_avg[1:]\n np.testing.assert_allclose(\n numerical_avg,\n true_avg,\n err_msg=str(type(grid)) + \" \" + str(grid.sym),\n )\n\n M = 10\n M_grid = 23\n test(\n QuadratureGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n LinearGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )", "def test_compare_outputs_surface_form(self):\n # load models\n options = [\n {\"surface form\": cap} for cap in [\"false\", \"differential\", \"algebraic\"]\n ]\n model_combos = [\n ([pybamm.lead_acid.LOQS(opt) for opt in options]),\n ([pybamm.lead_acid.Full(opt) for opt in options]),\n ]\n\n for models in model_combos:\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 5, \"x_s\": 5, \"x_p\": 5}\n\n # discretise models\n discs = {}\n for model in models:\n geometry = model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n discs[model] = disc\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 20, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # compare outputs\n comparison = StandardOutputComparison(solutions)\n comparison.test_all(skip_first_timestep=True)", "def test_comp_surface(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def test_symmetry_surface_average_1(self):\n\n def test(grid):\n r = grid.nodes[:, 0]\n t = grid.nodes[:, 1]\n z = grid.nodes[:, 2] * grid.NFP\n true_surface_avg = 5\n function_of_rho = 1 / (r + 0.35)\n f = (\n true_surface_avg\n + np.cos(t)\n - 0.5 * np.cos(z)\n + 3 * np.cos(t) * np.cos(z) ** 2\n - 2 * np.sin(z) * np.sin(t)\n ) * function_of_rho\n np.testing.assert_allclose(\n surface_averages(grid, f),\n true_surface_avg * function_of_rho,\n rtol=1e-15,\n err_msg=type(grid),\n )\n\n # these tests should be run on relatively low resolution grids,\n # 
or at least low enough so that the asymmetric spacing test fails\n L = [3, 3, 5, 3]\n M = [3, 6, 5, 7]\n N = [2, 2, 2, 2]\n NFP = [5, 3, 5, 3]\n sym = np.asarray([True, True, False, False])\n # to test code not tested on grids made with M=.\n even_number = 4\n n_theta = even_number - sym\n\n # asymmetric spacing\n with pytest.raises(AssertionError):\n theta = 2 * np.pi * np.asarray([t**2 for t in np.linspace(0, 1, max(M))])\n test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False))\n\n for i in range(len(L)):\n test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i]))\n test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n # nonuniform spacing when sym is False, but spacing is still symmetric\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )", "def _surface_metric_matrices_from_one_forms(one_forms):\n ndim = one_forms.ndim\n transpose_axes = tuple(range(ndim - 2)) + tuple(reversed(range(ndim - 2, ndim)))\n transposed_one_forms = gs.transpose(one_forms, axes=transpose_axes)\n return gs.matmul(one_forms, transposed_one_forms)", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def test_surface_one_forms(self, faces, point):\n space = self.Space(faces=faces)\n\n result = space.surface_one_forms(point=point)\n assert result.shape == (space.n_faces, 2, 3), result.shape\n\n first_vec = result[:, 0, :]\n second_vec = result[:, 1, :]\n inner_prods = gs.einsum(\"ni,ni->n\", first_vec, second_vec)\n result = [prod in [0.0, 4.0] for prod in inner_prods]\n assert gs.all(result)\n\n singleton_point = gs.expand_dims(point, axis=0)\n result = space.surface_one_forms(point=singleton_point)\n assert result.shape == (1, space.n_faces, 2, 3)\n\n point = gs.array([point, point])\n result = space.surface_one_forms(point=point)\n assert result.shape == (2, space.n_faces, 2, 3)\n\n first_vec = result[:, :, 0, :]\n second_vec = result[:, :, 1, :]\n inner_prods = gs.einsum(\"mni,mni->mn\", first_vec, second_vec)\n result = []\n for inner_prod in inner_prods:\n result.append([prod in [0.0, 4.0] for prod in inner_prod])\n assert 
gs.all(result)", "def metric_test(self):\n k = 10\n latent_factor = 10\n n_users = 10\n n_items = 12\n\n interactions, user_features, item_features = util.generate_dummy_data_with_indicator (num_users=n_users, num_items=n_items, interaction_density=.5)\n print (\"interactiosn shape={}\".format( np.shape(interactions) ))\n print (\"user features shape={}\".format( np.shape(user_features.toarray()) ))\n print (\"item features shape={}\".format( np.shape(item_features.toarray()) ))\n\n model = TensorRec(n_components=latent_factor)\n\n model.fit(interactions, user_features, item_features, epochs=19)\n\n ranks = model.predict_rank(user_features=user_features, item_features=item_features)\n\n print (\"Ranks shape={}\".format(np.shape(ranks)))\n\n self.assertTrue(np.shape(interactions) == np.shape(ranks))\n\n tr_recall_result = eval.recall_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print (tr_recall_result.mean())\n\n tr_precision_result = eval.precision_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print(tr_precision_result.mean())\n\n # we need csr for interactions data\n interactions_ = interactions.tocsr()\n recall_result = metrics.recall_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print(recall_result.mean())\n\n precision_result = metrics.precision_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print (precision_result.mean())\n\n self.assertTrue (tr_recall_result.mean() == recall_result.mean())\n self.assertTrue (tr_precision_result.mean() == precision_result.mean())", "def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), 
(0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)", "def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)", "def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n 
res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)", "def UnitCubeTest(P):\n above = 0\n below = 0\n for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]:\n s = P.test(a, b, c)\n if s > 0:\n above = 1\n elif s < 0:\n below = 1\n return above - below", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))", "def test_model(model, dataObj, index):\n\t(s,m,l), img = dataObj.__getitem__(index)\n\timg = img.float().unsqueeze(0)\n\t\n\tif next(model.parameters()).is_cuda:\n\t\toutput = model(img.cuda()) \n\telse:\n\t\toutput = model(img)\n\n\ts_pred,m_pred,l_pred = output[0].squeeze(0).cpu(), output[1].squeeze(0).cpu(), output[2].squeeze(0).cpu()\n\ts_pred = s_pred.detach().numpy()\n\tm_pred = m_pred.detach().numpy()\n\tl_pred = 
l_pred.detach().numpy()\n\n\timg = img.float().squeeze(0)\n\timg = img.permute(1,2,0)\n\n\tfor j in range(22):\n\t\tvisualize(img, s[j], m[j], l[j], s_pred[j], m_pred[j], l_pred[j])\n\t\tk = np.array(s[j])", "def test_matrix_stats1(self):\r\n headers_list = [['a', 'c', 'b'], ['a', 'c', 'b']]\r\n d1 = numpy.array([[0, .2, .9],\r\n [.2, 0, .8],\r\n [.9, .8, 0]], 'float')\r\n d2 = numpy.array([[0, .3, 1.1],\r\n [.3, 0, .8],\r\n [1.1, .8, 0]], 'float')\r\n distmats_list = [d1, d2]\r\n\r\n exp_mean = numpy.array([[0, .25, 1.0],\r\n [.25, 0, .8],\r\n [1.0, .8, 0]], 'float')\r\n exp_median = numpy.array([[0, .25, 1.0],\r\n [.25, 0, .8],\r\n [1.0, .8, 0]], 'float')\r\n exp_std = numpy.array([[0, .05, .1],\r\n [.05, 0, 0],\r\n [.1, 0, 0]], 'float')\r\n results = matrix_stats(headers_list, distmats_list)\r\n assert_almost_equal(results[1:], [exp_mean, exp_median, exp_std])\r\n self.assertEqual(results[0], ['a', 'c', 'b'])", "def test_surf():\n def f(x, y):\n sin, cos = numpy.sin, numpy.cos\n return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)\n\n x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]\n s = surf(x, y, f)\n mlab.show()\n #cs = contour_surf(x, y, f, contour_z=0)\n return", "def test(self):\n bs = verif.metric.Bs()\n bsrel = verif.metric.BsRel()\n bsres = verif.metric.BsRes()\n bsunc = verif.metric.BsUnc()\n bss = verif.metric.Bss()\n obs = [[0],\n [0],\n [0],\n [1],\n [0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n fcst = [[0],\n [1],\n [0.3],\n [0.1],\n [0.21, 0.21, 0.21, 0.91, 0.91],\n [0.06, 0.61, 0.45, 0.87, 0.13, 0.61, 0.79, 0.61, 0.06, 0.06, 0.79, 0.61, 0.13, 0.13, 0.79, 0.21, 0.06, 0.55, 0.37, 0.37]]\n ans = {bs: [0, 1, 0.09, 0.81, 0.1457, 0.34928],\n bsrel: [0, 1, 0.09, 0.81, 0.01236667, 0.2076133],\n bsres: [0, 0, 0, 0, 0.1066667, 0.1083333],\n bsunc: [0, 0, 0, 0, 0.24, 0.25],\n bss: [np.nan, np.nan, np.nan, np.nan, 0.3929167, -0.39712]}\n for i in range(len(obs)):\n o = np.array(obs[i])\n f = np.array(fcst[i])\n for key in ans:\n print(key, i)\n calculated = key.compute_from_obs_fcst(o, f)\n expected = ans[key][i]\n if np.isnan(expected):\n self.assertTrue(np.isnan(expected), np.isnan(calculated))\n else:\n self.assertAlmostEqual(expected, calculated, places=5)", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 
0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def test_basic_property_of_random_matrix():\n for name, random_matrix in all_random_matrix.items():\n print(name)\n\n check_input_size_random_matrix(random_matrix)\n check_size_generated(random_matrix)\n if name != \"random_subsample_normalized\":\n check_zero_mean_and_unit_norm(random_matrix)\n check_approximate_isometry(random_matrix)", "def test_basic(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertIsInstance(result, iris.cube.Cube)", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def test_basic(self):\n data = get()\n metrics = [verif.metric.Within(),\n verif.metric.A(), # Hit\n verif.metric.B(), # FA\n verif.metric.C(), # Miss\n verif.metric.D(), # Correct rejection\n verif.metric.Hit(),\n verif.metric.Threat(),\n verif.metric.Conditional(),\n verif.metric.XConditional(func=np.median),\n ]\n intervals = [verif.interval.Interval(-np.inf, 0, True, True), # [-inf, 0]\n verif.interval.Interval(-np.inf, 1, True, True),\n verif.interval.Interval(-np.inf, 2, True, True),\n ]\n obs = [0, 1.5, 2]\n fcst = [3.1, 1.1, -2.1]\n N = len(obs)*1.0\n\n # Each line is one metric (one number for each threshold)\n expected = [[0/N, 100/N, 100/N], # Within\n [0/N, 0/N, 2/N], # Hit\n 
[1/N, 1/N, 0/N], # FA\n [1/N, 1/N, 1/N], # Miss\n [1/N, 1/N, 0/N], # Correct rejection\n [0, 0, 2.0/3], # Hit rate\n [0, 0, 2.0/3], # Threat score\n [3.1, 3.1, 0.7], # Average fcst given obs in interval\n [0, 0, 1.5], # Average obs given obs in interval\n ]\n\n for m in range(len(metrics)):\n metric = metrics[m]\n for i in range(len(intervals)):\n value = metric.compute_from_obs_fcst(np.array(obs), np.array(fcst), intervals[i])\n ex = expected[m][i] * 1.0\n if np.isnan(value):\n self.assertTrue(np.isnan(ex))\n else:\n self.assertAlmostEqual(ex, value)", "def test_3(self):\n for _ in range(10):\n\n # Draw random requests for testing purposes.\n num_draws_emax = np.random.randint(2, 1000)\n dim = np.random.randint(1, 6)\n\n matrix = np.random.uniform(size=dim ** 2).reshape(dim, dim)\n cov = np.dot(matrix, matrix.T)\n\n # PDF of normal distribution\n args = np.random.normal(size=3)\n args[-1] **= 2\n\n f90 = fort_debug.wrapper_normal_pdf(*args)\n py = norm.pdf(*args)\n\n assert_almost_equal(py, f90)\n\n # Singular Value Decomposition\n py = scipy.linalg.svd(matrix)\n f90 = fort_debug.wrapper_svd(matrix, dim)\n\n for i in range(3):\n assert_allclose(py[i], f90[i])\n\n # Pseudo-Inverse\n py = np.linalg.pinv(matrix)\n f90 = fort_debug.wrapper_pinv(matrix, dim)\n\n assert_allclose(py, f90)\n\n # Inverse\n py = np.linalg.inv(cov)\n f90 = fort_debug.wrapper_inverse(cov, dim)\n assert_allclose(py, f90)\n\n # Determinant\n py = np.linalg.det(cov)\n f90 = fort_debug.wrapper_determinant(cov)\n\n assert_allclose(py, f90)\n\n # Trace\n py = np.trace(cov)\n f90 = fort_debug.wrapper_trace(cov)\n\n assert_allclose(py, f90)\n\n # Random normal deviates. This only tests the interface, requires\n # visual inspection in IPYTHON notebook as well.\n fort_debug.wrapper_standard_normal(num_draws_emax)\n\n # Clipping values below and above bounds.\n num_values = np.random.randint(1, 10000)\n lower_bound = np.random.randn()\n upper_bound = lower_bound + np.random.ranf()\n values = np.random.normal(size=num_values)\n\n f90 = fort_debug.wrapper_clip_value(\n values, lower_bound, upper_bound, num_values\n )\n py = np.clip(values, lower_bound, upper_bound)\n\n assert_almost_equal(py, f90)\n\n # Spectral condition number\n py = _spectral_condition_number(cov)\n fort = fort_debug.wrapper_spectral_condition_number(cov)\n assert_almost_equal(py, fort)", "def par_test_12(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XYW_factor)\n par_res = self.XYZ_par_factor.mult(self.XYW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_voxel(self):\n for m in [g.get_mesh('featuretype.STL'),\n g.trimesh.primitives.Box(),\n g.trimesh.primitives.Sphere()]:\n for pitch in [.1, .1 - g.tol.merge]:\n surface = m.voxelized(pitch=pitch)\n\n # make sure the voxelized pitch is similar to passed\n assert g.np.allclose(surface.pitch, pitch)\n\n for fill_method in ('base', 'orthographic'):\n solid = surface.copy().fill(method=fill_method)\n\n assert len(surface.encoding.dense.shape) == 3\n assert surface.shape == surface.encoding.dense.shape\n assert surface.volume > 0.0\n\n assert isinstance(surface.filled_count, int)\n assert surface.filled_count > 0\n\n box_surface = surface.as_boxes()\n box_solid = solid.as_boxes()\n\n assert isinstance(box_surface, g.trimesh.Trimesh)\n assert abs(box_solid.volume - solid.volume) < g.tol.merge\n\n assert g.trimesh.util.is_shape(\n surface.sparse_indices, (-1, 3))\n assert 
len(\n solid.sparse_indices) >= len(\n surface.sparse_indices)\n assert solid.sparse_indices.shape == solid.points.shape\n outside = m.bounds[1] + m.scale\n for vox in surface, solid:\n assert vox.sparse_indices.shape == vox.points.shape\n assert g.np.all(vox.is_filled(vox.points))\n assert not vox.is_filled(outside)\n\n try:\n cubes = surface.marching_cubes\n assert cubes.area > 0.0\n except ImportError:\n g.log.info('no skimage, skipping marching cubes test')\n\n g.log.info('Mesh volume was %f, voxelized volume was %f',\n m.volume,\n surface.volume)", "def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 
10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])", "def test_cube(self):\n\n # No isosurface\n cube_zero = numpy.zeros((2, 2, 2), dtype=numpy.float32)\n\n result = marchingcubes.MarchingCubes(cube_zero, 1.)\n self.assertEqual(result.shape, cube_zero.shape)\n self.assertEqual(result.isolevel, 1.)\n self.assertEqual(result.invert_normals, True)\n\n vertices, normals, indices = result\n self.assertEqual(len(vertices), 0)\n self.assertEqual(len(normals), 0)\n self.assertEqual(len(indices), 0)\n\n # Cube array dimensions: shape = (dim 0, dim 1, dim2)\n #\n # dim 0 (Z)\n # ^\n # |\n # 4 +------+ 5\n # /| /|\n # / | / |\n # 6 +------+ 7|\n # | | | |\n # |0 +---|--+ 1 -> dim 2 (X)\n # | / | /\n # |/ |/\n # 2 +------+ 3\n # /\n # dim 1 (Y)\n\n # isosurface perpendicular to dim 0 (Z)\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((1., 1.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 0], level)\n self.assertAllClose(normals, (1., 0., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 1 (Y)\n cube = numpy.array(\n (((0., 0.), (1., 1.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.2\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(vertices[:, 1], level)\n self.assertAllClose(normals, (0., -1., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 2 (X)\n cube = numpy.array(\n (((0., 1.), (0., 1.)),\n ((0., 1.), (0., 1.))), dtype=numpy.float32)\n level = 0.9\n vertices, 
normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 2], level)\n self.assertAllClose(normals, (0., 0., 1.))\n self.assertEqual(len(indices), 2)\n\n # isosurface normal in dim1, dim 0 (Y, Z) plane\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(normals[:, 2], 0.)\n self.assertEqual(len(indices), 2)", "def test_predictor():", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, 
sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), 
g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def setUp(self):\n\n self.thresholds = np.array([276, 277], dtype=np.float32)\n self.rain_name = \"probability_of_falling_rain_level_above_surface\"\n self.snow_name = \"probability_of_falling_snow_level_below_surface\"\n\n rain_prob = np.array(\n [\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n [[0.5, 0.1, 1.0], [0.0, 0.2, 0.5], [0.1, 0.1, 0.3]],\n ],\n dtype=np.float32,\n )\n self.rain_prob_cube = set_up_probability_cube(\n rain_prob, self.thresholds, variable_name=self.rain_name\n )\n\n snow_prob = np.array(\n [\n [[0.0, 0.4, 0.0], [0.5, 0.3, 
0.1], [0.0, 0.4, 0.3]],\n [[0.0, 0.4, 0.0], [0.5, 0.3, 0.1], [0.0, 0.4, 0.3]],\n ],\n dtype=np.float32,\n )\n self.snow_prob_cube = set_up_probability_cube(\n snow_prob, self.thresholds, variable_name=self.snow_name\n )\n\n high_prob = np.array(\n [\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n [[1.0, 0.7, 0.2], [0.8, 0.8, 0.7], [0.9, 0.9, 0.7]],\n ],\n dtype=np.float32,\n )\n self.high_prob_cube = set_up_probability_cube(\n high_prob, self.thresholds, variable_name=self.snow_name\n )", "def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"", "def qsurface(zmatrix, nx, ny):\n dislin.qplsur(zmatrix, ny, ny)", "def testBasics(self):\n for imageClass in (afwImage.ImageF, afwImage.ImageD):\n inImage = makeRampImage(bbox=self.bbox, start=-5, stop=2500, imageClass=imageClass)\n\n measImage = inImage.Factory(inImage, True)\n linSq = LinearizeSquared()\n linRes = linSq(image=measImage, detector=self.detector)\n desNumLinearized = np.sum(self.sqCoeffs.flatten() > 0)\n self.assertEqual(linRes.numLinearized, desNumLinearized)\n self.assertEqual(linRes.numAmps, len(self.detector.getAmpInfoCatalog()))\n\n refImage = inImage.Factory(inImage, True)\n refLinearizeSquared(image=refImage, detector=self.detector)\n\n self.assertImagesAlmostEqual(refImage, measImage)\n\n # make sure logging is accepted\n log = Log.getLogger(\"ip.isr.LinearizeSquared\")\n linRes = linSq(image=measImage, detector=self.detector, log=log)", "def _test(self):\n self.pytorch_layer.eval()\n pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu()\n image_w_h = int(self.input_size ** 0.5)\n input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h)\n output_tensor = pytorch_layer(input_image)[0]\n for channel in range(self.n_in_channels):\n current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy()\n normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt(\n self.running_var[channel] + self.epsilon\n )\n if self.affine:\n output_numpy = (self.weights[channel] * normalized_data) + self.bias[\n channel\n ]\n else:\n output_numpy = normalized_data\n\n assert np.isclose(\n output_numpy,\n output_tensor[channel].detach().flatten().cpu().numpy(),\n atol=1e-6,\n ).all()", "def test_model_multi(self, feature_matrix, cfg, embed_model):\n\n device = self.device\n batch_size = cfg.batch_size\n batches = int(np.ceil(len(feature_matrix) / batch_size))\n\n with torch.no_grad():\n self.eval()\n running_map = 0.0\n for i in range(batches):\n batch_pos = torch.tensor(\n feature_matrix.iloc[i * batch_size:(i + 1) * batch_size][\"pos\"].values.astype(int)).to(device)\n batch_target = feature_matrix.iloc[i * batch_size:(i + 1) * batch_size][self.class_columns].astype(int)\n\n if i == batches - 1:\n batch_pos = torch.tensor(\n feature_matrix.iloc[i * batch_size:][\"pos\"].values.astype(int)).to(device)\n batch_target = feature_matrix.iloc[i * batch_size:][self.class_columns].astype(int)\n\n batch_embed = embed_model.pos_embed(batch_pos.long())\n\n \"Forward Pass\"\n batch_pred = self.forward(batch_embed, \"test\")\n batch_pred = batch_pred.cpu().numpy()\n mAP = average_precision_score(batch_target, batch_pred)\n\n running_map += mAP\n\n mean_mAP = running_map / batches\n print('mAP : %s' % (mean_mAP))\n return mean_mAP, self", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = 
test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def testing(model):\n from matplotlib import colors as matplot_colors\n import six\n colors = list(six.iteritems(matplot_colors.cnames))\n colors = zip(*colors)[1]\n\n files = [\"novHighC.csv\", \"surface.csv\"]\n markers = [\"|\", \"_\"]\n data = np.empty([0,2])\n for idx, fname in enumerate(files):\n print fname\n col = pd.read_csv(\"data/\"+fname, header=None, error_bad_lines=False, warn_bad_lines=False, names=[fname],\n dtype=np.float64)\n print \"convert to matrix\"\n col = col.as_matrix()\n print \"col is read\"\n col = np.append(col, col, 1) # this to append col to col, so it will result in a matrix with two identical cols\n print \"col is appended\"\n data = np.append(data, col, axis=0)\n print \"appended to data\"\n predicted_u = model.predict(col)\n model.draw_points(ax=plt, X=col, colors=colors, u=predicted_u, marker=markers[idx], lw=50)\n print \"now will predict\"\n predicted_u = model.predict(data)\n print \"predicted\"\n\n #model.draw_points(ax=plt, X=data, colors=colors, u=predicted_u, marker=\"+\", lw=50)\n #return data, predicted_u", "def unit_test():\n # Fixed filename\n ROOT = '/home/knmac/projects/vid_time_model/data/EDTCN_results/50Salads/'\\\n 'mid/mid_motionconstraint_nomotion_g0/nepoch_200'\n RUN = 'run_11'\n SPLIT = 'Split_1'\n FNAME = os.path.join(ROOT, RUN, SPLIT+'.mat')\n\n # Load computed results\n content = open(os.path.join(ROOT, RUN, 'trials.txt')).read().splitlines()\n for line in content:\n if SPLIT in line:\n break\n tokens = line.split(' ')\n acc_rec = tokens[2].replace('accuracy:', '').replace(',', '')\n edit_rec = tokens[3].replace('edit_score:', '').replace(',', '')\n f1_rec = tokens[4].replace('overlap_f1:', '').replace(',', '')\n\n # Load data\n data = scipy.io.loadmat(FNAME)\n P, S, Y = data['P'].squeeze(), data['S'].squeeze(), data['Y'].squeeze()\n P = [x.squeeze() for x in P]\n S = S.tolist()\n Y = [x.squeeze() for x in Y]\n\n # Compute metrics\n acc = accuracy(P, Y)\n edit = edit_score(P, Y, norm=True, bg_class=0)\n f1 = overlap_f1(P, Y, n_classes=18, bg_class=0)\n _, mAP = mid_mAP(P, Y, S, bg_class=0)\n\n # Print out\n print('Testing metrics...')\n print(' Acc: computed={:.02f} - recorded={}'.format(acc, acc_rec))\n print(' Edit: computed={:.02f} - recorded={}'.format(edit, edit_rec))\n print(' F1@10: computed={:.02f} - recorded={}'.format(f1, f1_rec))\n print(' mAP: computed={:.02f}'.format(mAP))\n return 0", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 
0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 
0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 
1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def test_density_multiple(self):\n earth = PREM()\n radii = np.linspace(0, 6500e3, 6501)\n expected = [earth.density(r) for r in radii]\n assert np.array_equal(earth.density(radii), expected)", "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )", "def calculate3_onemetric(pred_ccm, pred_ad, truth_ccm, truth_ad, rnd=0.01, method=\"orig\", verbose=False, full_matrix=True, in_mat=2):\n # Get the cousin matrices\n truth_cous = 1 - truth_ccm - truth_ad - truth_ad.T\n pred_cous = 1 - pred_ccm - pred_ad - pred_ad.T\n if verbose:\n if(np.amax(truth_cous) > 1 or np.amin(truth_cous) < 0):\n Warning(\"Cousin Truth is wrong. Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0\")\n if(np.amax(pred_cous) > 1 or np.amin(pred_cous) < 0):\n Warning(\"Cousin Predicted is wrong. 
Maximum matrix entry is greater than 1 or minimum matrix entry is less than 0\")\n\n # Calculate the metric measure for each specified matrix\n func = method_funcs[method]\n results = []\n ccm_res, ad_res, ad_res_t, cous_res = [float('nan')] * 4\n if method in (\"pseudoV\",\n \"simpleKL\",\n \"sym_pseudoV\"):\n if in_mat != 2:\n ccm_res = func(pred_ccm, truth_ccm, rnd, full_matrix=full_matrix)\n results.append(ccm_res)\n if in_mat != 3:\n ad_res = func(pred_ad, truth_ad, rnd, full_matrix=full_matrix)\n results.append(ad_res)\n if in_mat != 4:\n ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), rnd, full_matrix=full_matrix)\n results.append(ad_res_t)\n if in_mat != 5:\n cous_res = func(pred_cous, truth_cous, rnd, full_matrix=full_matrix)\n results.append(cous_res)\n else:\n if in_mat != 2:\n ccm_res = func(pred_ccm, truth_ccm, full_matrix=full_matrix)\n results.append(ccm_res)\n if in_mat != 3:\n ad_res = func(pred_ad, truth_ad, full_matrix=full_matrix)\n results.append(ad_res)\n if in_mat != 4 or method in ('mcc',\n 'pearson',\n 'spearman'):\n ad_res_t = func(np.transpose(pred_ad), np.transpose(truth_ad), full_matrix=full_matrix)\n results.append(ad_res_t)\n if in_mat != 5:\n cous_res = func(pred_cous, truth_cous, full_matrix=full_matrix)\n results.append(cous_res)\n\n res = 0\n n = 0\n for r in results: # TODO: fix the NA's\n if not math.isnan(r):\n n += 1\n res += r\n if n > 0:\n res = res / float(n)\n\n if verbose:\n print(\"%s for Matrices\\nCC: %s, AD: %s, AD Transpose: %s, Cousin: %s\\nResult: %s\" %\n (method, str(ccm_res), str(ad_res), str(ad_res_t), str(cous_res), str(res)))\n return res", "def test_perspective_transform():\n # TODO: write this\n assert(True)", "def test_is_unital_isometry_true():\n v_mat = np.array([[1, 0, 0], [0, 1, 0]])\n np.testing.assert_equal(is_unital([v_mat], dim=[3, 2]), True)", "def test_get_routing_matrix_example_1():\n assert np.allclose(\n get_routing_matrix(\n lambda_2=1,\n lambda_1_1=1,\n lambda_1_2=1,\n mu_1=1,\n mu_2=1,\n num_of_servers_1=3,\n num_of_servers_2=3,\n system_capacity_1=3,\n system_capacity_2=3,\n buffer_capacity_1=2,\n buffer_capacity_2=2,\n alpha=0.5,\n ),\n np.array([[0.5, 0.0, 0.0], [1.0, 0.5, 0.0], [1.0, 1.0, 0.5]]),\n )", "def test_evaluate(self):\n # test normalized by 'bbox_size'\n jhmdb_pck_metric = JhmdbPCKAccuracy(thr=0.5, norm_item='bbox')\n jhmdb_pck_metric.process(self.data_batch, self.data_samples)\n pck_results = jhmdb_pck_metric.evaluate(self.batch_size)\n target = {\n 'Head PCK': 1.0,\n 'Sho PCK': 1.0,\n 'Elb PCK': 1.0,\n 'Wri PCK': 1.0,\n 'Hip PCK': 1.0,\n 'Knee PCK': 1.0,\n 'Ank PCK': 1.0,\n 'PCK': 1.0,\n }\n self.assertDictEqual(pck_results, target)\n\n # test normalized by 'torso_size'\n jhmdb_tpck_metric = JhmdbPCKAccuracy(thr=0.2, norm_item='torso')\n jhmdb_tpck_metric.process(self.data_batch, self.data_samples)\n tpck_results = jhmdb_tpck_metric.evaluate(self.batch_size)\n target = {\n 'Head tPCK': 1.0,\n 'Sho tPCK': 1.0,\n 'Elb tPCK': 1.0,\n 'Wri tPCK': 1.0,\n 'Hip tPCK': 1.0,\n 'Knee tPCK': 1.0,\n 'Ank tPCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck_results, target)", "def test_matrix_stats2(self):\r\n headers_list = [['a', 'c', 'b'], ['b', 'c', 'a']]\r\n d1 = numpy.array([[0, .2, .9],\r\n [.2, 0, .8],\r\n [.9, .8, 0]], 'float')\r\n d2 = numpy.array([[0, .3, 1.1],\r\n [.3, 0, .8],\r\n [1.1, .8, 0]], 'float')\r\n distmats_list = [d1, d2]\r\n\r\n exp_mean = numpy.array([[0, .25, 1.0],\r\n [.25, 0, .8],\r\n [1.0, .8, 0]], 'float')\r\n exp_median = numpy.array([[0, .25, 1.0],\r\n [.25, 0, 
.8],\r\n [1.0, .8, 0]], 'float')\r\n exp_std = numpy.array([[0, .05, .1],\r\n [.05, 0, 0],\r\n [.1, 0, 0]], 'float')\r\n self.assertRaises(\r\n ValueError,\r\n matrix_stats,\r\n headers_list,\r\n distmats_list)", "def test_kernel_matrix(kernel, sample):\n sample = [ele for ele in sample] # consumed several times\n\n potato = KernelMethod(kernel)\n mat = potato.matrix(sample)\n assert np.all(np.linalg.eigvals(mat) > 0) or np.isclose(\n [np.min(np.linalg.eigvals(mat))], [0]\n )", "def test_verify(perfectModelEnsemble_initialized_control):\n assert perfectModelEnsemble_initialized_control.verify(\n metric=\"mse\", comparison=\"m2e\", dim=[\"init\", \"member\"]\n )", "def test_fstatisticsAndReshapedSpectrum(self):\n data = _load_mtdata('v22_174_series.dat.gz')\n # Calculate the spectra.\n spec, freq, jackknife, fstatistics, _ = mtspec(\n data, 4930., 3.5, nfft=312, number_of_tapers=5, statistics=True,\n rshape=0, fcrit=0.9)\n # No NaNs are supposed to be in the output.\n self.assertEqual(np.isnan(spec).any(), False)\n self.assertEqual(np.isnan(freq).any(), False)\n self.assertEqual(np.isnan(jackknife).any(), False)\n self.assertEqual(np.isnan(fstatistics).any(), False)\n # Load the good data.\n datafile = os.path.join(os.path.dirname(__file__), 'data',\n 'fstatistics.npz')\n record = np.load(datafile)\n spec2 = record['spec']\n jackknife2 = record['jackknife']\n fstatistics2 = record['fstatistics']\n freq2 = np.arange(157) * 6.50127447e-07\n # Compare.\n np.testing.assert_almost_equal(freq, freq2)\n np.testing.assert_almost_equal(spec / spec, spec2 / spec)\n np.testing.assert_almost_equal(jackknife / jackknife,\n jackknife2 / jackknife, 5)\n np.testing.assert_almost_equal(fstatistics / fstatistics,\n fstatistics2 / fstatistics, 5)", "def test_zero_matrix(self, backend, mat):\n mat.zero()\n expected_matrix = numpy.zeros((4,4), dtype=valuetype)\n eps=1.e-14\n assert_allclose(mat.values, expected_matrix, eps)", "def test_mw_test(self):\r\n U, p = mw_test(self.x, self.y)\r\n self.assertFloatEqual(U, 123.5)\r\n self.assertTrue(0.02 <= p <= 0.05)", "def test042_2d_numerical_comparison_on_fprop_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test3(self):\r\n a = T.matrix()\r\n self.assertTrue(None == _as_scalar(a))\r\n self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],\r\n [0, 'x', 1])(a)))", "def test_build_game_using_payoff_matrices_example_2():\n game = build_game_using_payoff_matrices(\n lambda_2=5,\n lambda_1_1=1,\n lambda_1_2=1,\n mu_1=3,\n mu_2=3,\n num_of_servers_1=2,\n num_of_servers_2=2,\n system_capacity_1=4,\n system_capacity_2=5,\n buffer_capacity_1=2,\n buffer_capacity_2=2,\n target=2,\n )\n\n assert np.allclose(\n game.payoff_matrices[0],\n 1\n + np.array(\n [\n [-0.00224433, -0.00224433, -0.00224433, -0.00224433, -0.00224433],\n [-0.00221647, -0.00222381, -0.00222728, -0.00223013, -0.00223415],\n [-0.00205908, -0.00211616, -0.00214196, -0.00216115, -0.00218337],\n [-0.00187811, -0.00197168, -0.00202778, -0.00206889, -0.00211227],\n ]\n ),\n )\n\n assert np.allclose(\n game.payoff_matrices[1],\n 1\n + np.array(\n [\n [-0.00224261, -0.00221144, -0.00203882, 
-0.00178084, -0.00151419],\n [-0.00224261, -0.00221978, -0.00210315, -0.00192509, -0.00169457],\n [-0.00224261, -0.00222403, -0.00213345, -0.0019975, -0.00182025],\n [-0.00224261, -0.00222935, -0.00216478, -0.0020671, -0.00193602],\n ]\n ),\n )", "def test_zero_beta(self):\n # Since the Euler calculations are all done using matrices, it's easier\n # to construct the test cases by directly using matrices as well. We\n # assume gamma is 0 since, due to gimbal lock, only either alpha+gamma\n # or alpha-gamma is a relevant parameter, and we just scan the other\n # possible values. The actual function is defined such that gamma will\n # always be zero in those cases. We define the matrices using lambda\n # functions to support sweeping a range of values for alpha and beta,\n # specifically to test cases where signs flip e.g. cos(0) vs cos(pi).\n # These sign flips lead to changes in the rotation angles that must be\n # tested.\n mats_euler_intrinsic = [\n (\n \"xzx\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(beta), 0, 0],\n [0, np.cos(beta) * np.cos(alpha), -np.sin(alpha)],\n [0, np.cos(beta) * np.sin(alpha), np.cos(alpha)],\n ],\n ),\n (\n \"xyx\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(beta), 0, 0],\n [0, np.cos(alpha), -np.cos(beta) * np.sin(alpha)],\n [0, np.sin(alpha), np.cos(beta) * np.cos(alpha)],\n ],\n ),\n (\n \"yxy\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(alpha), 0, np.cos(beta) * np.sin(alpha)],\n [0, np.cos(beta), 0],\n [-np.sin(alpha), 0, np.cos(beta) * np.cos(alpha)],\n ],\n ),\n (\n \"yzy\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(beta) * np.cos(alpha), 0, np.sin(alpha)],\n [0, np.cos(beta), 0],\n [-np.cos(beta) * np.sin(alpha), 0, np.cos(alpha)],\n ],\n ),\n (\n \"zyz\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(beta) * np.cos(alpha), -np.sin(alpha), 0],\n [np.cos(beta) * np.sin(alpha), np.cos(beta), 0],\n [0, 0, np.cos(beta)],\n ],\n ),\n (\n \"zxz\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(alpha), -np.cos(beta) * np.sin(alpha), 0],\n [np.sin(alpha), np.cos(beta) * np.cos(beta), 0],\n [0, 0, np.cos(beta)],\n ],\n ),\n ]\n\n mats_tb_intrinsic = [\n (\n \"xzy\",\n \"intrinsic\",\n lambda alpha, beta: [\n [0, -np.sin(beta), 0],\n [np.sin(beta) * np.cos(alpha), 0, -np.sin(alpha)],\n [np.sin(beta) * np.sin(alpha), 0, np.cos(alpha)],\n ],\n ),\n (\n \"xyz\",\n \"intrinsic\",\n lambda alpha, beta: [\n [0, 0, np.sin(beta)],\n [np.sin(beta) * np.sin(alpha), np.cos(alpha), 0],\n [-np.sin(beta) * np.cos(alpha), np.sin(alpha), 0],\n ],\n ),\n (\n \"yxz\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(alpha), np.sin(beta) * np.sin(alpha), 0],\n [0, 0, -np.sin(beta)],\n [-np.sin(alpha), np.sin(beta) * np.cos(alpha), 0],\n ],\n ),\n (\n \"yzx\",\n \"intrinsic\",\n lambda alpha, beta: [\n [0, -np.sin(beta) * np.cos(alpha), np.sin(alpha)],\n [np.sin(beta), 0, 0],\n [0, np.sin(beta) * np.sin(alpha), np.cos(alpha)],\n ],\n ),\n (\n \"zyx\",\n \"intrinsic\",\n lambda alpha, beta: [\n [0, -np.sin(alpha), np.sin(beta) * np.cos(alpha)],\n [0, np.cos(alpha), np.sin(beta) * np.sin(alpha)],\n [-np.sin(beta), 0, 0],\n ],\n ),\n (\n \"zxy\",\n \"intrinsic\",\n lambda alpha, beta: [\n [np.cos(alpha), 0, np.sin(beta) * np.sin(alpha)],\n [np.sin(alpha), 0, -np.sin(beta) * np.cos(alpha)],\n [0, -1, 0],\n ],\n ),\n ]\n\n # Extrinsic rotations can be tested identically to intrinsic rotations\n # in the case of proper Euler angles.\n mats_euler_extrinsic = [(m[0], \"extrinsic\", m[2]) for m in mats_euler_intrinsic]\n\n # For Tait-Bryan 
angles, extrinsic rotations axis order must be\n # reversed (since axes 1 and 3 are not identical), but more\n # importantly, due to the sum/difference of alpha and gamma that\n # arises, we need to test the negative of alpha to catch the dangerous\n # cases. In practice we get the same results since we're sweeping alpha\n # values in the tests below, but it's useful to set this up precisely.\n mats_tb_extrinsic = [\n (m[0][::-1], \"extrinsic\", lambda alpha, beta: m[2](-alpha, beta))\n for m in mats_tb_intrinsic\n ]\n\n # Since angle representations may not be unique, checking that\n # quaternions are equal may not work. Instead we perform rotations and\n # check that they are identical. For simplicity, we rotate the\n # simplest vector with all 3 components (otherwise tests won't catch\n # the problem because there's no component to rotate).\n test_vector = [1, 1, 1]\n\n mats_intrinsic = (mats_euler_intrinsic, mats_tb_intrinsic)\n mats_extrinsic = (mats_euler_extrinsic, mats_tb_extrinsic)\n\n # The beta angles are different for proper Euler angles and Tait-Bryan\n # angles because the relevant beta terms will be sines and cosines,\n # respectively.\n all_betas = ((0, np.pi), (np.pi / 2, -np.pi / 2))\n alphas = (0, np.pi / 2, np.pi, 3 * np.pi / 2)\n\n for mats in (mats_intrinsic, mats_extrinsic):\n for betas, mat_set in zip(all_betas, mats):\n for convention, axis_type, mat_func in mat_set:\n quaternions = []\n for beta in betas:\n for alpha in alphas:\n mat = mat_func(alpha, beta)\n if np.linalg.det(mat) == -1:\n # Some of these will be improper rotations.\n continue\n quat = rowan.from_matrix(mat)\n quaternions.append(quat)\n euler = rowan.to_euler(quat, convention, axis_type)\n converted = rowan.from_euler(\n *euler, convention=convention, axis_type=axis_type\n )\n correct_rotation = rowan.rotate(quat, test_vector)\n test_rotation = rowan.rotate(converted, test_vector)\n self.assertTrue(\n np.allclose(correct_rotation, test_rotation, atol=1e-6),\n msg=\"\"\"\n Failed for convention {},\n axis type {},\n alpha = {},\n beta = {}.\n Expected quaternion: {}.\n Calculated: {}.\n Expected vector: {}.\n Calculated vector: {}.\"\"\".format(\n convention,\n axis_type,\n alpha,\n beta,\n quat,\n converted,\n correct_rotation,\n test_rotation,\n ),\n )\n\n # For completeness, also test with broadcasting.\n quaternions = np.asarray(quaternions).reshape(-1, 4)\n all_euler = rowan.to_euler(quaternions, convention, axis_type)\n converted = rowan.from_euler(\n all_euler[..., 0],\n all_euler[..., 1],\n all_euler[..., 2],\n convention,\n axis_type,\n )\n self.assertTrue(\n np.allclose(\n rowan.rotate(quaternions, test_vector),\n rowan.rotate(converted, test_vector),\n atol=1e-6,\n )\n )", "def test_get_meshgrid(mock_grid):\n\n windows = [corr_window.CorrWindow(j, 0, WS=127) for j in range(0, 129, 64)]\n mock_grid._array[0] = windows\n windows = [corr_window.CorrWindow(0, i, WS=127) for i in range(0, 193, 64)]\n for ii in range(1, 4):\n mock_grid._array[ii][0] = windows[ii]\n\n expx, expy = np.meshgrid([0, 64, 128], [0, 64, 128, 192])\n actx, acty = mock_grid.get_meshgrid()\n\n assert np.all(actx == expx)\n assert np.all(acty == expy)", "def test_schwefel221(self):\n fun = get_problem('schwefel221', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_basic(self):\n scale_factor = 1.0\n expected = self.cube_uv_down.data.copy()\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def Test():\n 
x=np.array([[4,-100],[1,50],[4,50]])\n x_norm=z_score(x)\n print(x_norm)\n return", "def test_has_alpha(self):\n image_3d = np.array([[ # One image with shape (1, 2, 3)\n [1, 2, 3],\n [4, 5, 6]\n ]])\n image_4d = np.array([[ # One image with shape (1, 3, 4)\n [1, 2, 3, 4],\n [4, 5, 6, 7],\n [8, 9, 10, 11]\n ]])\n image_5d = np.array([[ # One image with shape (1, 1, 5)\n [1, 2, 3, 4, 5]\n ]])\n self.assertEqual(localHDR.has_alpha(image_3d), False)\n self.assertEqual(localHDR.has_alpha(image_4d), True)\n self.assertEqual(localHDR.has_alpha(image_5d), False)", "def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)", "def test_cube_values(self):\n t_min, t_max, t_increment = 183.15, 338.15, 10.0\n expected = [\n 0.0096646,\n 0.0546844,\n 0.2613554,\n 1.0799927,\n 3.9333663,\n 12.8286096,\n 37.9714586,\n 103.1532749,\n 259.6617372,\n 610.6359361,\n 1227.0888425,\n 2337.0801979,\n 4242.7259947,\n 7377.3294046,\n 12338.9996048,\n 19925.4362844,\n ]\n result = SaturatedVapourPressureTable(\n t_min=t_min, t_max=t_max, t_increment=t_increment\n ).process()\n\n self.assertArrayAlmostEqual(result.data, expected)", "def test_el_small_surface_instability():\n levels = np.array([959., 931.3, 925., 899.3, 892., 867.9, 850., 814.,\n 807.9, 790., 779.2, 751.3, 724.3, 700., 655., 647.5,\n 599.4, 554.7, 550., 500.]) * units.mbar\n temperatures = np.array([22.2, 20.2, 19.8, 18.4, 18., 17.4, 17., 15.4, 15.4,\n 15.6, 14.6, 12., 9.4, 7., 2.2, 1.4, -4.2, -9.7,\n -10.3, -14.9]) * units.degC\n dewpoints = np.array([20., 18.5, 18.1, 17.9, 17.8, 15.3, 13.5, 6.4, 2.2,\n -10.4, -10.2, -9.8, -9.4, -9., -15.8, -15.7, -14.8, -14.,\n -13.9, -17.9]) * units.degC\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def test_scores(self) -> np.ndarray:\n return np.asarray(self.test_metric_dict[self.metric_name])", "def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)", "def test_9(self):\n for _ in range(1000):\n num_free = np.random.randint(1, 100)\n values = np.random.uniform(-1000.0, 1000.0, size=num_free)\n py = get_scales_magnitudes(values)\n f90 = fort_debug.wrapper_get_scales_magnitude(values, num_free)\n assert_almost_equal(py, f90)", "def test_cube_attributes(self):\n t_min, t_max, t_increment = 200.15, 220.15, 10.0\n result = SaturatedVapourPressureTable(\n t_min=t_min, t_max=t_max, t_increment=t_increment\n ).process()\n self.assertEqual(result.attributes[\"minimum_temperature\"], t_min)\n self.assertEqual(result.attributes[\"maximum_temperature\"], t_max)\n self.assertEqual(result.attributes[\"temperature_increment\"], t_increment)\n 
self.assertEqual(result.units, Unit(\"Pa\"))", "def test_near_surface_consistency():\n bead_diameter = 0.5\n shared_pars = {\n \"bead_diameter\": bead_diameter,\n \"viscosity\": 1.1e-3,\n \"temperature\": 25,\n \"rho_sample\": 997.0,\n \"rho_bead\": 1040.0,\n \"distance_to_surface\": bead_diameter,\n }\n sim_pars = {\n \"sample_rate\": 78125,\n \"stiffness\": 0.1,\n \"pos_response_um_volt\": 0.618,\n \"driving_sinusoid\": (500, 31.95633),\n \"diode\": (0.6, 15000),\n }\n\n np.random.seed(1337)\n volts, stage = generate_active_calibration_test_data(\n 10, hydrodynamically_correct=True, **sim_pars, **shared_pars\n )\n power_spectrum = calculate_power_spectrum(volts, sim_pars[\"sample_rate\"])\n\n active_pars = {\n \"force_voltage_data\": volts,\n \"driving_data\": stage,\n \"sample_rate\": 78125,\n \"driving_frequency_guess\": 32,\n }\n\n def fit_spectrum(active, hydro):\n model = (\n ActiveCalibrationModel(**active_pars, **shared_pars, hydrodynamically_correct=hydro)\n if active\n else PassiveCalibrationModel(**shared_pars, hydrodynamically_correct=hydro)\n )\n return fit_power_spectrum(power_spectrum, model)\n\n parameters_of_interest = {\n \"kappa\": sim_pars[\"stiffness\"],\n \"Rd\": sim_pars[\"pos_response_um_volt\"],\n \"Rf\": sim_pars[\"stiffness\"] * sim_pars[\"pos_response_um_volt\"] * 1e3,\n }\n\n for active_calibration in (True, False):\n for hydrodynamic_model in (True, False):\n fit = fit_spectrum(active_calibration, hydrodynamic_model)\n\n # Note that the corner frequency of the hydrodynamic model is specified in bulk, while\n # the regular model has its corner frequency specified at the current height.\n fc_bulk = (\n fit[\"fc\"].value\n if hydrodynamic_model\n else fit[\"fc\"].value\n * faxen_factor(shared_pars[\"distance_to_surface\"] * 1e-6, bead_diameter * 1e-6 / 2)\n )\n np.testing.assert_allclose(fc_bulk, 3070.33, rtol=2e-2)\n for param, ref_value in parameters_of_interest.items():\n np.testing.assert_allclose(fit[param].value, ref_value, rtol=2e-2)", "def par_test_11(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XYZ_factor)\n par_res = self.XYZ_par_factor.mult(self.XYZ_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_frechet_distance_univariate(self):\n mu_x = torch.rand((1,), device=self.device)\n sigma_x = torch.rand((1, 1), device=self.device)\n\n mu_y = torch.rand((1,), device=self.device)\n sigma_y = torch.rand((1, 1), device=self.device)\n\n # Matrix square root reduces to scalar square root.\n expected = (mu_x - mu_y) ** 2 + sigma_x + sigma_y - 2 * torch.sqrt(sigma_x * sigma_y)\n expected = expected.item()\n actual = F.frechet_distance(mu_x, sigma_x, mu_y, sigma_y)\n\n self.assertEqual(expected, actual)", "def return_MatchUpTest___w():\n\n ####################################################################################################################\n # 1. 
Initialise test data\n ####################################################################################################################\n\n w1 = array([[0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])\n w2 = array([[0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],\n [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])\n\n u1 = array([1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0])\n u2 = array([2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0])\n\n values = array([5.0, 3.0, 3.0, 2.5, 6.0, 3.0, 2.0, 4.0, 3.0, 4.0])\n unc = [Uncertainty(3, (0, 0)),\n Uncertainty(3, (1, 1))]\n ks = array([1.2, 1.7, 1.3, 1.4, 1.3])\n unck = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25]))]\n idx = {\"Nm\": [5],\n \"cNm\": [0, 5, 10],\n \"Im\": [[1, 2]],\n \"sensors\": [1, 2],\n \"sensor_ms\": [1],\n \"n_sensor\": [1, 2],\n \"n_mu\": [1, 1],\n \"n_cov\": [1, 1],\n \"N_var\": [5, 5],\n \"idx\": [0, 5, 10],\n \"Ia\": [1, 1, 1, 2, 2, 2]}\n a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])\n w_matrices = [csr_matrix(w1), csr_matrix(w2)]\n u_matrices = [u1, u2]\n\n ####################################################################################################################\n # 3. Initialise MatchUp object\n ####################################################################################################################\n\n MatchUpTest = MatchUp()\n MatchUpTest.values = values\n MatchUpTest.unc = unc\n MatchUpTest.ks = ks\n MatchUpTest.unck = unck\n MatchUpTest.idx = idx\n MatchUpTest.a = a\n MatchUpTest.w_matrices = w_matrices\n MatchUpTest.u_matrices = u_matrices\n\n return MatchUpTest", "def evaluate_matrices(self, precision_matrix, recall_matrix):\n\t\tground_truth_count = precision_matrix.shape[0]\n\t\tdetection_count = precision_matrix.shape[1]\n\t\tground_truth_sets_precision = defaultdict(set) # number of ground truth items that match a particular detection in the precision matrix\n\t\tdetection_sets_precision = defaultdict(set) # number of detection items that match a particular ground truth in the precision matrix\n\t\tground_truth_sets_recall = defaultdict(set) # number of ground truth items that match a particular detection in the recall matrix\n\t\tdetection_sets_recall = defaultdict(set) # number of detection items that match a particular ground truth in the recall matrix\n\n\t\tfor gt_index in range(ground_truth_count):\n\t\t\tfor det_index in range(detection_count):\n\t\t\t\tif precision_matrix[gt_index, det_index] >= self.precision_threshold:\n\t\t\t\t\tground_truth_sets_precision[det_index].add(gt_index)\n\t\t\t\t\tdetection_sets_precision[gt_index].add(det_index)\n\t\t\t\tif recall_matrix[gt_index, det_index] >= self.recall_threshold:\n\t\t\t\t\tground_truth_sets_recall[det_index].add(gt_index)\n\t\t\t\t\tdetection_sets_recall[gt_index].add(det_index)\n\n\t\tmatch_ground_truth = 0. 
# sum of MatchG\n\t\tmatch_detection = 0. # sum of MatchD\n\n\t\tone_to_one_precision = set()\n\t\tfor gt_index in detection_sets_precision:\n\t\t\tmatching_detections_precision = detection_sets_precision[gt_index]\n\t\t\tif len(matching_detections_precision) == 1:\n\t\t\t\t(detection_precision, ) = matching_detections_precision\n\t\t\t\tif len(ground_truth_sets_precision[detection_precision]) == 1:\n\t\t\t\t\tone_to_one_precision.add((gt_index, detection_precision))\n\t\t\telse:\n\t\t\t\t# one-to-many (one ground truth to many detections)\n\t\t\t\tgt_sum = 0.\n\t\t\t\tfor detection_precision in matching_detections_precision:\n\t\t\t\t\tgt_sum += recall_matrix[gt_index, detection_precision]\n\t\t\t\tif gt_sum >= self.recall_threshold:\n\t\t\t\t\t#print(\"1:N ~ GT {} : DT {}\".format(gt_index,matching_detections_precision))\n\t\t\t\t\tmatch_ground_truth += self.scatter_punishment(matching_detections_precision)\n\t\t\t\t\tmatch_detection += len(matching_detections_precision) * self.scatter_punishment(matching_detections_precision)\n\n\t\tone_to_one_recall = set()\n\t\tfor det_index in ground_truth_sets_recall:\n\t\t\tmatching_ground_truths_recall = ground_truth_sets_recall[det_index]\n\t\t\tif len(matching_ground_truths_recall) == 1:\n\t\t\t\t(ground_truth_recall, ) = matching_ground_truths_recall\n\t\t\t\tif len(detection_sets_recall[ground_truth_recall]) == 1:\n\t\t\t\t\tone_to_one_recall.add((ground_truth_recall, det_index))\n\t\t\telse:\n\t\t\t\t# many-to-one (many ground truths covered by one detection)\n\t\t\t\tdet_sum = 0\n\t\t\t\tfor ground_truth_recall in matching_ground_truths_recall:\n\t\t\t\t\tdet_sum += precision_matrix[ground_truth_recall, det_index]\n\t\t\t\tif det_sum >= self.precision_threshold:\n\t\t\t\t\t#print(\"N:1 ~ DT {} : GT {}\".format(det_index,matching_ground_truths_recall))\n\t\t\t\t\tmatch_detection += self.scatter_punishment(matching_ground_truths_recall)\n\t\t\t\t\tmatch_ground_truth += len(matching_ground_truths_recall) * self.scatter_punishment(matching_ground_truths_recall)\n\n\t\tone_to_one_matches = one_to_one_precision & one_to_one_recall\n\t\tmatch_ground_truth += len(one_to_one_matches)\n\t\tmatch_detection += len(one_to_one_matches)\n\n\t\trecall = match_ground_truth / float(ground_truth_count)\n\t\tprecision = match_detection / float(detection_count)\n\t\treturn (precision, recall, (match_detection, float(detection_count)), (match_ground_truth, float(ground_truth_count)))", "def test_sum_squares(self):\n fun = get_problem('sum_squares', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_metrics(self):\n # Check the route\n self.check_metrics(self.test_metrics_submission_id, False, \"award\")\n self.check_metrics(self.test_metrics_submission_id, True, \"award_financial\")\n self.check_metrics(self.test_metrics_submission_id, True, \"appropriations\")", "def test_schwefel222(self):\n fun = get_problem('schwefel222', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_marching(self):\n try:\n from skimage import measure # NOQA\n except ImportError:\n g.log.warning('no skimage, skipping marching cubes test')\n return\n\n # make sure offset is correct\n matrix = g.np.ones((3, 3, 3), dtype=bool)\n mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(matrix=matrix)\n assert mesh.is_watertight\n\n mesh = g.trimesh.voxel.ops.matrix_to_marching_cubes(\n matrix=matrix).apply_scale(3.0)\n assert mesh.is_watertight", "def test_jaccard_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = 
np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = jaccard_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[1/3, 1/4],\n [1/3, 2/3]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_none_alpha_results() -> None:\n estimator = LinearRegression()\n estimator.fit(X, y)\n y_pred_est = estimator.predict(X)\n mapie = MapieRegressor(estimator=estimator, cv=\"prefit\")\n mapie.fit(X, y)\n y_pred_mapie = mapie.predict(X)\n np.testing.assert_allclose(y_pred_est, y_pred_mapie)", "def test_suite():\n test(calc_det([[2, 1],[3, 4]]), 5)", "def test_param(self):\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedLinearDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n featurizer=None,\n discrete_treatment=True)\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def test_real_trace_transform(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=83)\n su4, _ = dec._u4_to_su4(u4)\n real_map = dec._real_trace_transform(su4)\n self.assertTrue(dec._cx2_test(real_map @ su4))", "def test_multi_return(self):\r\n c = AlphaDiversityCalc(osd)\r\n res = c(data_path=self.otu_table1_fp)\r\n assert_almost_equal(res, array([[2, 1, 1],\r\n [4, 4, 0],\r\n [0, 0, 0]]))", "def test_wl_metric():\n z1 = np.random.normal(size=int(1e5)) + 1\n z2 = np.random.normal(size=int(1e5)) + 2\n res = pval.wl_metric(z1, z2)\n np.testing.assert_almost_equal(res, 1, 2)", "def compute_metrics(mat,language='English',method ='dimensional',output='data_frame'):\n language = language.lower()\n method = method.lower()\n if language == 'english':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] / mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['TokenCount'] = mat[:,:,6]\n out_dict['ValSq'] = mat[:,:,7]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = 
np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DetectCount'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['function_Percent'] = mat.function / mat.TokenCount\n mat['pronoun_Percent'] = mat.pronoun / mat.TokenCount\n mat['ppron_Percent'] = mat.ppron / mat.TokenCount\n mat['i_Percent'] = mat.i / mat.TokenCount\n mat['we_Percent'] = mat.we / mat.TokenCount\n mat['you_Percent'] = mat.you / mat.TokenCount\n mat['shehe_Percent'] = mat.shehe / mat.TokenCount\n mat['they_Percent'] = mat.they / mat.TokenCount\n mat['ipron_Percent'] = mat.ipron / mat.TokenCount\n mat['article_Percent'] = mat.article / mat.TokenCount\n mat['prep_Percent'] = mat.prep / mat.TokenCount\n mat['auxverb_Percent'] = mat.auxverb / mat.TokenCount\n mat['adverb_Percent'] = mat.adverb / mat.TokenCount\n mat['conj_Percent'] = mat.conj / mat.TokenCount\n mat['negate_Percent'] = mat.negate / mat.TokenCount\n mat['verb_Percent'] = mat.verb / mat.TokenCount\n mat['adj_Percent'] = mat.adj / mat.TokenCount\n mat['compare_Percent'] = mat.compare / mat.TokenCount\n mat['interrog_Percent'] = mat.interrog / mat.TokenCount\n mat['number_Percent'] = mat.number / mat.TokenCount\n mat['quant_Percent'] = mat.quant / mat.TokenCount\n mat['affect_Percent'] = mat.affect / mat.TokenCount\n mat['posemo_Percent'] = mat.posemo / mat.TokenCount\n mat['negemo_Percent'] = mat.negemo / mat.TokenCount\n mat['anx_Percent'] = mat.anx / mat.TokenCount\n mat['anger_Percent'] = mat.anger / mat.TokenCount\n mat['sad_Percent'] = mat.sad / mat.TokenCount\n mat['social_Percent'] = mat.social / mat.TokenCount\n mat['family_Percent'] = mat.family / mat.TokenCount\n mat['friend_Percent'] = mat.friend / mat.TokenCount\n mat['female_Percent'] = mat.female / mat.TokenCount\n mat['male_Percent'] = mat.male / mat.TokenCount\n mat['cogproc_Percent'] = mat.cogproc / mat.TokenCount\n mat['insight_Percent'] = mat.insight / mat.TokenCount\n mat['cause_Percent'] = mat.cause / mat.TokenCount\n mat['discrep_Percent'] = mat.discrep / mat.TokenCount\n mat['tentat_Percent'] = mat.tentat / mat.TokenCount\n mat['certain_Percent'] = mat.certain / mat.TokenCount\n mat['differ_Percent'] = mat.differ / mat.TokenCount\n mat['percept_Percent'] = mat.percept / mat.TokenCount\n mat['see_Percent'] = mat.see / mat.TokenCount\n mat['hear_Percent'] = mat.hear / mat.TokenCount\n mat['feel_Percent'] = mat.feel / mat.TokenCount\n mat['bio_Percent'] = mat.bio / mat.TokenCount\n mat['body_Percent'] = mat.body / mat.TokenCount\n mat['health_Percent'] = mat.health / mat.TokenCount\n mat['sexual_Percent'] = mat.sexual / mat.TokenCount\n mat['ingest_Percent'] = mat.ingest / mat.TokenCount\n mat['drives_Percent'] = mat.drives / mat.TokenCount\n mat['affiliation_Percent'] = mat.affiliation / mat.TokenCount\n 
mat['achieve_Percent'] = mat.achieve / mat.TokenCount\n mat['power_Percent'] = mat.power / mat.TokenCount\n mat['reward_Percent'] = mat.reward / mat.TokenCount\n mat['risk_Percent'] = mat.risk / mat.TokenCount\n mat['focuspast_Percent'] = mat.focuspast / mat.TokenCount\n mat['focuspresent_Percent'] = mat.focuspresent / mat.TokenCount\n mat['focusfuture_Percent'] = mat.focusfuture / mat.TokenCount\n mat['relativ_Percent'] = mat.relativ / mat.TokenCount\n mat['motion_Percent'] = mat.motion / mat.TokenCount\n mat['space_Percent'] = mat.space / mat.TokenCount\n mat['time_Percent'] = mat.time / mat.TokenCount\n mat['work_Percent'] = mat.work / mat.TokenCount\n mat['leisure_Percent'] = mat.leisure / mat.TokenCount\n mat['home_Percent'] = mat.home / mat.TokenCount\n mat['money_Percent'] = mat.money / mat.TokenCount\n mat['relig_Percent'] = mat.relig / mat.TokenCount\n mat['death_Percent'] = mat.death / mat.TokenCount\n mat['informal_Percent'] = mat.informal / mat.TokenCount\n mat['swear_Percent'] = mat.swear / mat.TokenCount\n mat['netspeak_Percent'] = mat.netspeak / mat.TokenCount\n mat['assent_Percent'] = mat.assent / mat.TokenCount\n mat['nonflu_Percent'] = mat.nonflu / mat.TokenCount\n mat['filler_Percent'] = mat.filler / mat.TokenCount\n mat['Detect_Percent'] = mat.DetectCount / mat.TokenCount\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,21]\n out_dict['Posemo'] = mat[:,:,22]\n out_dict['Negemo'] = mat[:,:,23]\n out_dict['Anx'] = mat[:,:,24]\n out_dict['Anger'] = mat[:,:,25]\n out_dict['Sad'] = mat[:,:,26]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\")\n elif language == 'german':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = 
mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['MeanPotency'] = mat['Potency'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] / mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['Imagine'] = mat[:,:,6]\n out_dict['Potency'] = mat[:,:,7]\n out_dict['DomPot_Count'] = mat[:,:,8]\n out_dict['TokenCount'] = mat[:,:,9]\n out_dict['ValSq'] = mat[:,:,10]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DomPot_Count'])\n out_dict['MeanPotency'] = np.divide(out_dict['Potency'],out_dict['DomPot_Count'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['Pronoun_Percent'] = mat.Pronoun / mat.TokenCount\n mat['I_Percent'] = mat.I / mat.TokenCount\n mat['We_Percent'] = mat.We / mat.TokenCount\n mat['Self_Percent'] = mat.Self / mat.TokenCount\n mat['You_Percent'] = mat.You / mat.TokenCount\n mat['Other_Percent'] = mat.Other / mat.TokenCount\n mat['Negate_Percent'] = mat.Negate / mat.TokenCount\n mat['Assent_Percent'] = mat.Assent / mat.TokenCount\n mat['Article_Percent'] = mat.Article / mat.TokenCount\n mat['Preps_Percent'] = mat.Preps / mat.TokenCount\n mat['Number_Percent'] = mat.Number / mat.TokenCount\n mat['Affect_Percent'] = mat.Affect / mat.TokenCount\n mat['Posemo_Percent'] = mat.Posemo / 
mat.TokenCount\n mat['Posfeel_Percent'] = mat.Posfeel / mat.TokenCount\n mat['Optim_Percent'] = mat.Optim / mat.TokenCount\n mat['Negemo_Percent'] = mat.Negemo / mat.TokenCount\n mat['Anx_Percent'] = mat.Anx / mat.TokenCount\n mat['Anger_Percent'] = mat.Anger / mat.TokenCount\n mat['Sad_Percent'] = mat.Sad / mat.TokenCount\n mat['Cogmech_Percent'] = mat.Cogmech / mat.TokenCount\n mat['Cause_Percent'] = mat.Cause / mat.TokenCount\n mat['Insight_Percent'] = mat.Insight / mat.TokenCount\n mat['Discrep_Percent'] = mat.Discrep / mat.TokenCount\n mat['Inhib_Percent'] = mat.Inhib / mat.TokenCount\n mat['Tentat_Percent'] = mat.Tentat / mat.TokenCount\n mat['Certain_Percent'] = mat.Certain / mat.TokenCount\n mat['Senses_Percent'] = mat.Senses / mat.TokenCount\n mat['See_Percent'] = mat.See / mat.TokenCount\n mat['Hear_Percent'] = mat.Hear / mat.TokenCount\n mat['Feel_Percent'] = mat.Feel / mat.TokenCount\n mat['Social_Percent'] = mat.Social / mat.TokenCount\n mat['Comm_Percent'] = mat.Comm / mat.TokenCount\n mat['Othref_Percent'] = mat.Othref / mat.TokenCount\n mat['Friends_Percent'] = mat.Friends / mat.TokenCount\n mat['Family_Percent'] = mat.Family / mat.TokenCount\n mat['Humans_Percent'] = mat.Humans / mat.TokenCount\n mat['Time_Percent'] = mat.Time / mat.TokenCount\n mat['Past_Percent'] = mat.Past / mat.TokenCount\n mat['Present_Percent'] = mat.Present / mat.TokenCount\n mat['Future_Percent'] = mat.Future / mat.TokenCount\n mat['Space_Percent'] = mat.Space / mat.TokenCount\n mat['Up_Percent'] = mat.Up / mat.TokenCount\n mat['Down_Percent'] = mat.Down / mat.TokenCount\n mat['Incl_Percent'] = mat.Incl / mat.TokenCount\n mat['Excl_Percent'] = mat.Excl / mat.TokenCount\n mat['Motion_Percent'] = mat.Motion / mat.TokenCount\n mat['Occup_Percent'] = mat.Occup / mat.TokenCount\n mat['School_Percent'] = mat.School / mat.TokenCount\n mat['Job_Percent'] = mat.Job / mat.TokenCount\n mat['Achieve_Percent'] = mat.Achieve / mat.TokenCount\n mat['Leisure_Percent'] = mat.Leisure / mat.TokenCount\n mat['Home_Percent'] = mat.Home / mat.TokenCount\n mat['Sports_Percent'] = mat.Sports / mat.TokenCount\n mat['TV_Percent'] = mat.TV / mat.TokenCount\n mat['Music_Percent'] = mat.Music / mat.TokenCount\n mat['Money_Percent'] = mat.Money / mat.TokenCount\n mat['Metaph_Percent'] = mat.Metaph / mat.TokenCount\n mat['Relig_Percent'] = mat.Relig / mat.TokenCount\n mat['Death_Percent'] = mat.Death / mat.TokenCount\n mat['Physcal_Percent'] = mat.Physcal / mat.TokenCount\n mat['Body_Percent'] = mat.Body / mat.TokenCount\n mat['Sexual_Percent'] = mat.Sexual / mat.TokenCount\n mat['Eating_Percent'] = mat.Eating / mat.TokenCount\n mat['Sleep_Percent'] = mat.Sleep / mat.TokenCount\n mat['Groom_Percent'] = mat.Groom / mat.TokenCount\n mat['Swear_Percent'] = mat.Swear / mat.TokenCount\n mat['Nonfl_Percent'] = mat.Nonfl / mat.TokenCount\n mat['Fillers_Percent'] = mat.Fillers / mat.TokenCount\n mat['Swiss_Percent'] = mat.Swiss / mat.TokenCount\n mat['Ideo_Percent'] = mat.Ideo / mat.TokenCount\n mat['Personalpronomina_Percent'] = mat.Personalpronomina / mat.TokenCount\n mat['Indefinitpronomina_Percent'] = mat.Indefinitpronomina / mat.TokenCount\n mat['AuxiliaryVerbs_Percent'] = mat.AuxiliaryVerbs / mat.TokenCount\n mat['Konjunktionen_Percent'] = mat.Konjunktionen / mat.TokenCount\n mat['Adverbien_Percent'] = mat.Adverbien / mat.TokenCount\n mat['Detect_Percent'] = mat.LIWC_Counter / mat.TokenCount\n mat['Bedrohung_Percent'] = mat.Bedrohung / mat.TokenCount\n return(mat)\n\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = 
mat[:,:,11]\n out_dict['Posemo'] = mat[:,:,12]\n out_dict['Posfeel'] = mat[:,:,13]\n out_dict['Optim'] = mat[:,:,14]\n out_dict['Negemo'] = mat[:,:,15]\n out_dict['Anx'] = mat[:,:,16]\n out_dict['Anger'] = mat[:,:,17]\n out_dict['Sad'] = mat[:,:,18]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['DetectCount'])\n out_dict['OverallPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['TokenCount'])\n out_dict['DetectOptimPercent'] = np.divide(out_dict['Optim'], out_dict['DetectCount'])\n out_dict['OverallOptimPercent'] = np.divide(out_dict['Optim'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") \n elif language == 'chinese':\n if method == 'dimensional':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n print(\"Error: This combination doesn't exist yet!\")\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,30]\n out_dict['Posemo'] = mat[:,:,31]\n out_dict['Negemo'] = mat[:,:,32]\n out_dict['Anx'] = mat[:,:,33]\n out_dict['Anger'] = mat[:,:,34]\n out_dict['Sad'] = mat[:,:,35]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,41]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], 
out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'], out_dict['TokenCount'])\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") \n else:\n print(\"Error: Language not found!\")", "def test_distr_evaluate(normal, metric, multivariate):\n y_pred = normal.create_test_instance()\n y_true = y_pred.sample()\n\n m = metric(multivariate=multivariate)\n\n if not multivariate:\n expected_cols = y_true.columns\n else:\n expected_cols = [\"score\"]\n\n res = m.evaluate_by_index(y_true, y_pred)\n assert isinstance(res, pd.DataFrame)\n assert (res.columns == expected_cols).all()\n assert res.shape == (y_true.shape[0], len(expected_cols))\n\n res = m.evaluate(y_true, y_pred)\n assert isinstance(res, pd.DataFrame)\n assert (res.columns == expected_cols).all()\n assert res.shape == (1, len(expected_cols))", "def test_run_multi_r__(self):\n\n # Test Description\n # ================\n #\n # 1. This test intialises an example *eopy.matchup.matchupIO.MatchUp* object\n #\n # 2. Compare transformed dataset to expected value\n\n ################################################################################################################\n # 1. Initialise Test Data Object\n ################################################################################################################\n\n MatchUpTest = return_MatchUpTest_r__()\n\n ################################################################################################################\n # 2. 
Define expected values\n ################################################################################################################\n\n # Original dataset values (should be unchanged)\n MatchUpOriginal_expected = return_MatchUpTest_r__()\n\n # Transformed dataset\n values_expected = array([294.0625, 480.3733333, 300.6, 227.3846154, 210.1533333,\n 22.74193548, 22.0625, 21.96875, 22.80645161, 23.5,\n 21.66666667, 21.05882353, 23, 22.40625,\n 38.33333333, 36.63636364, 36.5, 38.42857143,\n 30.1, 32.14893617, 29.37254902, 28.88461538, 28.56603774,\n 33.45238095, 32.81395349, 31.77272727, 32.60465116,\n 40.125, 43.54054054, 38.59090909, 34.08510638,\n 13.72727273, 12, 14.1, 11.79069767, 17.53846154,\n 12.69565217, 31.16666667, 12.26086957, 11.52272727,\n 8.8125, 12, 7.4, 10.13207547])\n unc_expected = [Uncertainty(1, array([1.6, 1.5, 1.5, 1.3, 1.5])),\n Uncertainty(1, array([3.1, 3.2, 3.2, 3.1, 3.0])),\n Uncertainty(1, array([3.3, 3.4, 3.1, 3.2])),\n Uncertainty(1, array([2.1, 2.2, 2.2, 2.1])),\n Uncertainty(1, array([5.0, 4.7, 5.1, 5.2, 5.3])),\n Uncertainty(1, array([4.2, 4.3, 4.4, 4.3])),\n Uncertainty(1, array([4.0, 3.7, 4.4, 4.7])),\n Uncertainty(1, array([2.2, 1.7, 2.0, 4.3, 2.6])),\n Uncertainty(1, array([2.3, 1.2, 2.3, 4.4])),\n Uncertainty(1, array([3.2, 2.7, 3.0, 5.3]))]\n ks_expected = array([4.8, 6.8, 5.2, 5.6, 5.2, 12.10287443, 13.99394856, 12.48108926, 12.85930408])\n unck_expected = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25])),\n Uncertainty(1, array([0.2644, 0.2644, 0.2644, 0.2644]))]\n idx_expected = {\"Nm\": [5, 4],\n \"cNm\": [0, 5, 9],\n \"Im\": [[0, 1], [1, 2]],\n \"sensors\": [-1, 1, 2],\n \"sensor_ms\": [1, 3, 3],\n \"n_sensor\": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],\n \"n_mu\": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],\n \"n_cov\": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"N_var\": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],\n \"idx\": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],\n \"Ia\": [1, 1, 1, 2, 2, 2]}\n a_expected = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])\n w_matrices_expected = []\n u_matrices_expected = []\n\n ################################################################################################################\n # 3. Run Transform2NormInd.run()\n ################################################################################################################\n\n Transform2NormIndOp = Transform2NormInd()\n MatchUpTransform = Transform2NormIndOp.run(MatchUpTest)\n\n values_test = MatchUpTransform.values\n unc_test = MatchUpTransform.unc\n w_matrices_test = MatchUpTransform.w_matrices\n u_matrices_test = MatchUpTransform.u_matrices\n ks_test = MatchUpTransform.ks\n unck_test = MatchUpTransform.unck\n idx_test = MatchUpTransform.idx\n\n ################################################################################################################\n # 4. Compare retrieve values to expect values\n ################################################################################################################\n\n # Test transformed data object attribute by attribute\n\n # a. values\n for i, (value_expected, value_test) in enumerate(zip(values_expected, values_test)):\n self.assertAlmostEqual(value_expected, value_test, places=5, msg=str(i))\n\n # b. unc\n for block_unc_test, block_unc_expected in zip(unc_test, unc_expected):\n self.assertEqual(block_unc_expected.typeID, block_unc_test.typeID)\n self.assertEqual(block_unc_expected.uR.tolist(), block_unc_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(w_matrices_test, w_matrices_expected)\n\n # d. 
u_matrices\n self.assertEqual(u_matrices_test, u_matrices_expected)\n\n # e. ks\n for k_expected, k_test in zip(ks_expected, ks_test):\n self.assertAlmostEqual(k_test.tolist(), k_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_test, block_unck_expected in zip(unck_test, unck_expected):\n self.assertEqual(block_unck_expected.typeID, block_unck_test.typeID)\n self.assertEqual(block_unck_expected.uR.tolist(), block_unck_test.uR.tolist())\n\n # h. idx\n self.assertEqual(set(idx_expected.keys()), set(idx_test.keys()))\n for key in idx_expected.keys():\n idx_i_test = idx_test[key]\n idx_i_expected = idx_expected[key]\n if isinstance(idx_i_expected, ndarray):\n self.assertEqual(idx_i_test.tolist(), idx_i_expected.tolist())\n else:\n self.assertEqual(idx_i_test, idx_i_expected)\n\n # Test original data object preserved attribute by attribute\n\n # a. values\n for i, (value_original_expected, value_original_test) in enumerate(zip(MatchUpOriginal_expected.values, MatchUpTest.values)):\n self.assertAlmostEqual(value_original_expected, value_original_test, places=5)\n\n # b. unc\n for block_unc_original_expected, block_unc_original_test in zip(MatchUpOriginal_expected.unc, MatchUpTest.unc):\n self.assertEqual(block_unc_original_expected.typeID, block_unc_original_test.typeID)\n self.assertEqual(block_unc_original_expected.uR.tolist(), block_unc_original_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(MatchUpOriginal_expected.w_matrices, MatchUpTest.w_matrices)\n\n # d. u_matrices\n self.assertEqual(MatchUpOriginal_expected.u_matrices, MatchUpTest.u_matrices)\n\n # e. ks\n for k_original_expected, k_original_test in zip(MatchUpOriginal_expected.ks, MatchUpTest.ks):\n self.assertAlmostEqual(k_original_test.tolist(), k_original_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_original_expected, block_unck_original_test in zip(MatchUpOriginal_expected.unck, MatchUpTest.unck):\n self.assertEqual(block_unck_original_expected.typeID, block_unck_original_test.typeID)\n self.assertEqual(block_unck_original_expected.uR.tolist(), block_unck_original_test.uR.tolist())\n\n # h. 
idx\n self.assertEqual(set(MatchUpOriginal_expected.idx), set(MatchUpTest.idx))\n for key in MatchUpOriginal_expected.idx.keys():\n idx_i_original_test = MatchUpTest.idx[key]\n idx_i_original_expected = MatchUpOriginal_expected.idx[key]\n if isinstance(idx_i_original_expected, ndarray):\n self.assertEqual(idx_i_original_test.tolist(), idx_i_original_expected.tolist())\n else:\n self.assertEqual(idx_i_original_test, idx_i_original_expected)", "def is_perfect_square():", "def test_get_routing_matrix_example_2():\n assert np.allclose(\n get_routing_matrix(\n lambda_2=10,\n lambda_1_1=0,\n lambda_1_2=5,\n mu_1=1,\n mu_2=1,\n num_of_servers_1=4,\n num_of_servers_2=4,\n system_capacity_1=3,\n system_capacity_2=3,\n buffer_capacity_1=2,\n buffer_capacity_2=4,\n alpha=0.5,\n ),\n np.array(\n [\n [1.0, 0.95206422, 0.16897752],\n [1.0, 0.98501658, 0.51821881],\n [1.0, 1.0, 0.66397863],\n ]\n ),\n )", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def test_measure(self):\n\n result = qubit.measure(polarization)\n self.assertEqual(0, result)", "def test_basic(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result, Cube)\n self.assertEqual(result.name(), \"air_temperature\")", "def test_basic(self):\n result = NonLinearWeights(0.85).nonlinear_weights(3)\n self.assertIsInstance(result, np.ndarray)", "def test_marching_points(self):\n try:\n from skimage import measure # NOQA\n except ImportError:\n g.log.warning('no skimage, skipping marching cubes test')\n return\n\n # get some points on the surface of an icosahedron\n points = g.trimesh.creation.icosahedron().sample(1000)\n # make the pitch proportional to scale\n pitch = points.ptp(axis=0).min() / 10\n # run marching cubes\n mesh = g.trimesh.voxel.ops.points_to_marching_cubes(\n points=points, pitch=pitch)\n\n # mesh should have faces\n assert len(mesh.faces) > 0\n # mesh should be roughly centered\n assert (mesh.bounds[0] < -.5).all()\n assert (mesh.bounds[1] > .5).all()", "def _runTest(self, wtmode, dowtsp, testspw, interpolation=\"\", spwmap=[],\n atol=1.e-5, rtol=1.e-5):\n had_wtsp = self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\")\n had_sigsp = self._column_exists(self.inputms, \"SIGMA_SPECTRUM\")\n initweights(vis=self.inputms,wtmode=wtmode,\n tsystable=self.tsystable,\n interp=interpolation,spwmap=spwmap, dowtsp=dowtsp)\n # Test existence of MS and columns\n if self.verbose: print(\"Test if MS exists.\")\n self._check_file(self.inputms)\n # WEIGHT_SPECTRUM should exist when dowtsp=True or it pre-exists in MS\n if (dowtsp or had_wtsp) and not wtmode == \"delwtsp\":\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM does not exist even though dowtsp=True\")\n else:\n if self.verbose: print(\"Verify WEIGHT_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"WEIGHT_SPECTRUM\"),\n \"WEIGHT_SPECTRUM exists when it shouldn't\")\n # test if SIGMA_SPECTRUM column exists\n # The column should exist if\n # (a) dowtsp = True AND wtmode='tsys' or 'tinttsys', OR\n # (b) SIGMA_SPECTRUM pre-exists and 
wtmode='delwtsp'\n # otherwise, the column will be removed from MS if exists\n sigsp_should_exists = (dowtsp and wtmode.find('tsys') > -1) or \\\n (had_sigsp and wtmode=='delwtsp')\n if sigsp_should_exists:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM exists in MS after operation\")\n self.assertTrue(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM does not exist\")\n else:\n if self.verbose: print(\"Verify SIGMA_SPECTRUM does NOT exist in MS after operation\")\n self.assertFalse(self._column_exists(self.inputms, \"SIGMA_SPECTRUM\"),\n \"SIGMA_SPECTRUM exists when it shouldn't\")\n # more tests\n \n # if running on MMS, the following checks do not work because of\n # the different sorting order between MS and MMS\n if not self.testmms:\n self._test_results(wtmode, dowtsp, testspw, interpolation, atol, rtol)" ]
[ "0.6212602", "0.60263664", "0.6011479", "0.59304386", "0.5858633", "0.5743929", "0.5703311", "0.5686523", "0.5677409", "0.56035334", "0.55897456", "0.5561343", "0.5554519", "0.55176467", "0.5503147", "0.5501849", "0.5489476", "0.5488244", "0.5445561", "0.5432928", "0.54146785", "0.5406405", "0.5403345", "0.5401758", "0.539838", "0.53982306", "0.53784627", "0.53756195", "0.53690004", "0.53382355", "0.5301992", "0.52946293", "0.5291788", "0.5271924", "0.52622586", "0.525791", "0.52452964", "0.5241411", "0.52389026", "0.52223283", "0.5213437", "0.52127296", "0.5211367", "0.5207752", "0.5199324", "0.51949936", "0.51944846", "0.5181894", "0.51796514", "0.5175316", "0.51697314", "0.5163354", "0.51501703", "0.51483214", "0.51456594", "0.51407033", "0.5140321", "0.51400155", "0.51373714", "0.5133119", "0.5129247", "0.51234066", "0.5121301", "0.5120379", "0.5118815", "0.5110313", "0.51066476", "0.5099693", "0.5097507", "0.5096341", "0.509605", "0.5092448", "0.50900227", "0.50834185", "0.50819415", "0.50811124", "0.5081033", "0.50808716", "0.5079568", "0.5078618", "0.5073369", "0.50683373", "0.5063656", "0.50559163", "0.505025", "0.50496817", "0.5047036", "0.50374764", "0.50356036", "0.5034216", "0.5033486", "0.5031482", "0.50264364", "0.5014547", "0.5013476", "0.50134665", "0.50130874", "0.50104374", "0.50088775", "0.5008644" ]
0.743953
0
Check that energy of a path of surfaces is positive at each timestep.
def test_path_energy_per_time_is_positive( self, space, a0, a1, b1, c1, d1, a2, path, atol ): n_times = len(path) space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy_per_time(path) self.assertAllEqual(energy.shape, (n_times - 1, 1)) result = gs.all(energy > -1 * atol) self.assertTrue(result) expected_shape = (2, n_times - 1, 1) path = gs.array([path, path]) energy = space.metric.path_energy_per_time(path) self.assertAllEqual(energy.shape, expected_shape) result = gs.all(energy > -1 * atol) self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, ())\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n path = gs.array([path, path])\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, (2,))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)", "def is_positive(self, example_path):\n candidate_planets = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']\n return example_path in candidate_planets['lightcurve_path'].values", "def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def verify_shocksine(controller):\n import numpy as np\n import os\n\n test_solution = controller.solution.state.get_q_global()\n\n if test_solution is not None:\n thisdir = os.path.dirname(__file__)\n expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))\n test_density = test_solution[0,:]\n test_err = np.linalg.norm(expected_density-test_density)\n return check_diff(0, test_err, abstol=1.e-4)", "def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False", "def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0", "def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def sign_of_path(path):\n vectors = [(a[0] - b[0], a[1] - b[1]) for b, a in pairwise(path)]\n sign_exp = 0\n for idx, vector in enumerate(vectors):\n if vector == (0, 1):\n sign_exp += len([v for v in vectors[idx + 1:] if v == (1, 0)])\n return (-1) ** (sign_exp)", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, 
d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def test_steps(self, model):\r\n model.fs.unit.initialize()\r\n\r\n # Add disturbances\r\n for t in model.fs.time:\r\n if 300 <= t < 600:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 - 10)\r\n elif 600 <= t < 900:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n elif 900 <= t < 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 + 10)\r\n elif t >= 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n\r\n # Transient solution\r\n solver.solve(model)\r\n\r\n times = [0, 300, 600, 900, 1200, 1500]\r\n sco2_exp = [305.2, 304.9, 305.1, 306.5, 305.7, 305.2]\r\n air_exp = [370.4, 373.1, 370.3, 365.9, 370.7, 370.4]\r\n wall_exp = [339.4, 338.7, 339.1, 340.7, 339.9, 339.4]\r\n\r\n self.check_temperatures(model, times, sco2_exp, air_exp, wall_exp)", "def validate_edges(attack_surface_graph, admissible_path, starting_points):\n for i in range(len(admissible_path)-1):\n for edge in attack_surface_graph.edges(data=True):\n if edge[0] == admissible_path[i] and edge[1] == admissible_path[i+1]:\n descriptors = edge[2]\n if find_violation(descriptors) == [] and edge[0] not in starting_points:\n return False\n return True", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n 
self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def testSplineCurveInverseIsCorrect(self):\n x_knot = jnp.arange(0, 16, 0.01)\n alpha = self.variant(distribution.inv_partition_spline_curve)(x_knot)\n x_recon = self.variant(distribution.partition_spline_curve)(alpha)\n chex.assert_tree_all_close(x_recon, x_knot, atol=1e-5, rtol=1e-5)", "def checkPointInLampsReach(self, p):\n v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)\n v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)\n\n q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)\n s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)\n t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)\n\n return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)", "def test_e0_ts(self):\n self.assertAlmostEqual(self.tunneling.E0_TS.value_si * 0.001, self.E0_TS, 4)", "def check_if_in_shadow(psi, a_sat_vector, sun_pos):\n dot_prod = np.dot(a * n_vector.T, unit_sun_r(sun_pos))\n\n check = np.zeros((len(a_sat_vector)))\n if np.cos(psi) < 0 and a_sat_vector < r_earth and dot_prod <= r_earth:\n check = True\n\n return check", "def test_despike(curve):\n assert curve.df.max()[0] - curve.despike(50, z=1).df.max()[0] - 91.83918 < 0.001", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def check_these(phi, chi, omega):\n #Calculate the leg position from these angles\n self.getplatepos(phi, chi, omega)\n #Make sure the legs are not in fault\n self.check_limits()\n return (not any(self.leg_fault))", "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"", "def can_throw(self):\n if self.round_points == 0:\n return False\n return True", "def is_positive_definite(x):\n return np.all(np.linalg.eigvals(x) > 0)", "def s_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += -1 - ayxx\n return running_total", "def __eq__(self, line):\n \n return abs( 1 - np.dot(sm.unitvec(self.vec), sm.unitvec(line.vec))) < 10*_eps", "def test_energy():\n # Test something\n \n from nose.tools import assert_equal\n assert_equal(energy([0.0,0.0],1.0), 0)\n assert_equal(energy([4.0,5.0],1.0), 10)", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not 
c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def has_negative(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_negative = np.argwhere(tensor_numpy < 0)\n\n if verbose:\n for idx in where_negative:\n value = float(tensor_numpy[idx])\n print(f\"Encountered negative value: {value:.5f}\")\n\n negative_count = len(where_negative)\n negative = negative_count != 0\n\n if verbose and negative:\n print(f\"Encountered {negative_count} negative values\")\n\n return negative", "def hasEnergyExpended(self, flags):\r\n return (flags & 0x08) != 0", "def _check_dr_sign(self, alpha=np.pi/2):\n u1, u3, gamma2 = self.emitter.get_rotation_velocities()\n iota, eta = self.iota, self.eta\n\n d1 = -gamma2 * u1 + (1 + gamma2 ** 2 * u1 ** 2 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n (gamma2 ** 2 * u1 * u3 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n d3 = -gamma2 * u1 + (gamma2 ** 2 * u1 * u3 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n (1 + gamma2 ** 2 * u3 ** 2 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n c1 = np.cos(alpha) * d1 + np.sin(alpha) * d3\n\n return self.chi * c1", "def test_absolute_volume(self):\n\n assert self.test_shape.volume() == pytest.approx(50 * 60 * math.pi * 2 * 1000)", "def trig_conditions(tr):\n \n\n \n ##############\n #TRACE RELATED\n ##############\n avg_tr = np.average(tr)\n \n shifted_tr = tr - avg_tr \n \n abs_tr = abs(shifted_tr)\n \n sort_tr = np.sort(abs_tr)\n \n tr_99 = sort_tr[int(0.999*len(sort_tr))]\n\n #95% noise to signal area ratio calculation\n area_big = tr_99 * int(0.95*len(sort_tr))\n \n area_small = np.sum(sort_tr[:int(0.95*len(sort_tr))])\n \n \n area_condition = area_small / area_big; print area_condition\n \n # if signal is quite noisy, use recursive STALTA, else use classic STALTA\n \n df = tr.stats.sampling_rate\n \n \n if area_condition <= 0.1:\n \n print(\"CLEAN SIGNAL!\\n\")\n\n ctf = abs(1-carlSTATrig(tr.data, int(5 * df), int(10 * df), 0.0001, 0.0001))\n \n sort_ctf = np.sort(ctf)\n \n ctf_low = sort_ctf[int(0.80*len(sort_ctf))]\n \n ctf_99 = sort_ctf[int(0.999*len(sort_ctf))]\n \n max_trig = ctf_99\n \n trig_on = ctf_low + 0.4 * (max_trig - ctf_low)\n\n trig_off = 0.8 * trig_on\n \n if not max_trig > 1.25 * ctf_low:\n trig_on = 0; trig_off = 0;\n \n \n \n elif 0.1 < area_condition < 0.16:\n \n print(\"NOISY SIGNAL!\\n\")\n\n ctf = abs(1-recSTALTA(tr.data, int(5 * df), int(10 * df)))\n \n sort_ctf = np.sort(ctf)\n \n ctf_low = sort_ctf[int(0.90*len(sort_ctf))]\n \n ctf_99 = sort_ctf[int(0.999*len(sort_ctf))]\n \n max_trig = ctf_99\n \n trig_on = ctf_low + 0.4 * (max_trig - ctf_low)\n\n trig_off = 0.8 * trig_on\n \n if not max_trig > 1.25 * ctf_low:\n trig_on = 0; 
trig_off = 0;\n \n else:\n print(\"TRACE TOO NOISY TO DETECT SIGNAL!\")\n trig_on = 0; trig_off = 0;\n ctf = abs(1-recSTALTA(tr.data, int(5 * df), int(10 * df)))\n\n \n\n\n\n \n\n \n \n ############\n #ctf RELATED\n ############\n\n \n #set conditional for noise. This is MINIMUM threshold.\n #ctf_avg = np.average(ctf)\n #ctf_mode = scipy.stats.mode(ctf)\n #ctf_std = np.std(ctf)\n \n\n \n \n #plt.figure()\n #plt.plot(sort_ctf)\n #plt.show()\n\n \n\n \n \n \n return trig_on, trig_off, ctf", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "def test_accurate(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n times = self.G.pattern.nonzero()[1]*self.dt\n self.assertTrue(np.allclose(sorted(times), M.t))\n for (i, t) in zip(M.i, M.t):\n self.assertTrue(self.G.pattern[i, int_r(t/self.dt)])", "def test_spike_negative_vals(self):\n thresholds = (25, 50)\n\n arr = [-10, -12, -999.99, -13, -15, -40, -9, -9]\n\n # First and last elements should always be good data, unless someone\n # has set a threshold to zero.\n expected = [1, 4, 4, 4, 1, 3, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=self.suspect_threshold,\n fail_threshold=self.fail_threshold\n ),\n expected\n )", "def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. 
At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def test_triangle_positive_is_equilateral_property(self):\n a = Point(-9, 10)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertTrue(t.is_equilateral,\n \"Test of Triangle(Point(-9, 10), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != True.\")\n a = Point(-9, 21)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertFalse(t.is_equilateral,\n \"Test of Triangle(Point(-9, 21), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != False.\")", "def is_zero(self):\n return -0.0001 <= self.l2_norm() <= 0.0001", "def test_to_delta_time_positive_difference(with_tf_random_seed, np_time_points):\n time_points = tf.constant(np_time_points, dtype=default_float())\n\n with pytest.raises(InvalidArgumentError) as exp:\n to_delta_time(time_points)\n\n assert exp.value.message.find(\"Condition x >= y\") >= 0", "def testgradsorientation(self):\r\n # since z-coordinates of atomcoords are all 0 for dvb, z-values of grads should be all 0\r\n assert numpy.alltrue(numpy.abs(self.data.grads[:,:,2]) < 1e-14)", "def calc(path):\n if len(df_times)==0:\n return 0\n i = np.where(timegrid == t)[0][0]\n x_t = path[i]\n discount = np.vectorize(lambda T: model.zerobond(T, t, x_t))\n dfs = discount(df_times)\n # Calculate fixed leg npv\n fix_leg_npv = np.sum(fixed_amounts * dfs[fix_idx])\n # Estimate the index fixings\n index_fixings = (dfs[accrual_start_idx] / dfs[accrual_end_idx] - 1) \n index_fixings /= float_dcf\n # Calculate the floating leg npv\n float_leg_npv = np.sum(nominals * index_fixings * float_dcf * dfs[float_idx])\n # Calculate the already fixed accrual period of the floating leg\n t_f = accrual_start_time_ffp\n i = np.where(timegrid == t_f)[0][0]\n x_f = path[i]\n df_e = model.zerobond(accrual_end_time_ffp, t_f, x_f)\n npv_accrualperiod = (1. 
/ df_e - 1) * nominals_ffp * model.zerobond(paytime_ffp, t, x_t)\n # Calculate swap npv\n npv = float_leg_npv + npv_accrualperiod - fix_leg_npv\n return npv", "def test_SemiF47_level_0_7(self):\n self.assertEqual(viol_check(self.vol,7), [[90, 130], [174, 235]])", "def test_sign_test(self):\r\n v = [(\"two sided\", 26, 50, 0.88772482734078251),\r\n (\"less\", 26, 50, 0.6641),\r\n (\"l\", 10, 50, 1.193066583837777e-05),\r\n (\"hi\", 30, 50, 0.1013193755322703),\r\n (\"h\", 0, 50, 1.0),\r\n (\"2\", 30, 50, 0.20263875106454063),\r\n (\"h\", 49, 50, 4.5297099404706387e-14),\r\n (\"h\", 50, 50, 8.8817841970012543e-16)\r\n ]\r\n for alt, success, trials, p in v:\r\n result = sign_test(success, trials, alt=alt)\r\n self.assertFloatEqual(result, p, eps=1e-5)", "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def _is_eruption_in(self, days, from_time):\n for te in self.tes:\n if 0 < (te-from_time).total_seconds()/(3600*24) < days:\n return 1.\n return 0.", "def test_fluxes(self):\n\n t, x = self.t, self.x_edge\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[0]), 0, decimal=3)\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[-1]), 0, decimal=3)", "def is_path_valid(self,path):\n null_state=[0 for i in range(len(self.node_names))]\n null_state_matrix=np.matrix(null_state).T\n new_state=np.matrix(self.state).T\n for index,edge in enumerate(path):\n #print index\n #print edge\n edge_position=self.edges.index(edge)\n move_matrix=self.edge_matrices[edge_position]\n #print move_matrix\n new_state=move_matrix*new_state\n if new_state.any()==null_state_matrix.any():\n #print new_state\n #print null_state_matrix\n return False\n return True", "def testscfenergy(self):\r\n scf = self.data.scfenergies[-1]\r\n ref = self.b3lyp_energy\r\n tol = self.b3lyp_tolerance\r\n msg = f\"Final SCF energy: {scf:f} not {int(ref)} +- {int(tol)}eV\"\r\n assert abs(scf-ref) < 40, msg", "def penumbral_eclipse(sat, earth, sun, time):\n\n theta, theta_e, theta_s = eclipse_parameters(sat, earth, sun, time)\n return np.logical_and(np.abs(theta_e - theta_s) < theta,\n theta < (theta_e + theta_s))", "def test_low_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.lowvoltage_rule.is_broken(data))", "def _lowess_tricube(t):\n #t = (1-np.abs(t)**3)**3\n t[:] = np.absolute(t) #, out=t) #numpy version?\n _lowess_mycube(t)\n t[:] = np.negative(t) #, out = t)\n t += 1\n _lowess_mycube(t)", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def check_energies(self, zero_degenerate_impropers=True, skip_assert=False):\n if zero_degenerate_impropers is True:\n self.zero_degenerate_impropers(self.torsion_force0)\n xyz = self.simulation0.context.getState(getPositions=True).getPositions()\n self.simulation0.context.reinitialize()\n self.simulation0.context.setPositions(xyz)\n\n self.zero_degenerate_impropers(self.torsion_force1)\n xyz = self.simulation1.context.getState(getPositions=True).getPositions()\n self.simulation1.context.reinitialize()\n self.simulation1.context.setPositions(xyz)\n\n state0 = self.simulation0.context.getState(getEnergy=True)\n energy0 = state0.getPotentialEnergy()\n\n state1 = 
self.simulation1.context.getState(getEnergy=True)\n energy1 = state1.getPotentialEnergy()\n\n if not skip_assert:\n delta = abs(energy0 - energy1)\n assert delta < ENERGY_EPSILON, \"Error, energy difference (%f kJ/mol) is greater than %f kJ/mol\" % (delta / u.kilojoules_per_mole, ENERGY_EPSILON / u.kilojoules_per_mole)\n\n return energy0, energy1", "def metropolis_hastings_accept(energy_prev, energy_next, s_rng):\r\n ediff = energy_prev - energy_next\r\n return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def calculate_edges_zero(self, verbose = False):\n\n ## calculates the first and last wavelength that has non-zero\n # w = np.where(self.throughput > 0)[0]\n # if verbose: print(w)\n # self._upper_edge = self.wavelength[w[-1]]\n # self._lower_edge = self.wavelength[w[0]]\n\n w = np.where(self.throughput > 0)[0]\n if verbose: print(w)\n if w[0] - 1 < 0:\n w_low = 0\n else:\n w_low = w[0] - 1\n\n if w[-1] + 1 == len(self.throughput):\n w_high = w[-1]\n else:\n w_high = w[-1] + 1\n\n self._upper_edge = self.wavelength[w_high]\n self._lower_edge = self.wavelength[w_low]", "def is_close_to_zero(value: Union[float, np.ndarray]) -> Union[bool, np.ndarray]:\n return abs(value) < 1.0e-10", "def test_numprops_different_sign(self):\n # Perform diff.\n df = Differ(key=\"name\", deltas={\"energy\": Delta(\"+-\")})\n d = df.diff(*self.engines)\n # Calculate expected results.\n is_different = lambda a, b: a < 0 < b or b < 0 < a\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n # Check results.\n self.assertEqual(len(d[Differ.CHANGED]), changed)", "def is_solved(self):\n return self.to_grid == self.from_grid", "def path_energy_per_time(self, path):\n n_times, _, _ = path.shape\n surface_diffs = path[1:, :, :] - path[:-1, :, :]\n surface_midpoints = path[: n_times - 1, :, :] + surface_diffs / 2\n energy = []\n for diff, midpoint in zip(surface_diffs, surface_midpoints):\n energy.extend([n_times * self.squared_norm(diff, midpoint)])\n return gs.array(energy)", "def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # 
print(sol)\r\n return sol", "def update_status(self):\n if len(self.invalid) != 0:\n return False\n for row in self.grid:\n for num in row:\n if num == 0:\n return False\n self.solved = True\n print(\"solved\")\n return True", "def trajectories(t_upper=3600*24*687, h=100, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n # We check if parameters are all positive\n\n list_parameters = [t_upper, h, m1, m2, m3,\n a1, a2]\n\n for parameters in list_parameters:\n\n if parameters < 0:\n print(f'You have entered a negative parameter')\n\n # initial values for planet 1 in x, y and z direction\n x_i1 = a1\n y_i1 = 0\n v_x1i = 0\n v_y1i = 29779.301841746023\n z_i1 = 0\n v_z1i = 0\n\n # initial values for planet 2 in x, y and z direction\n x_i2 = a2\n y_i2 = 0\n v_x2i = 0\n v_y2i = 24154.203325249873\n z_i2 = 0\n v_z2i = 0\n\n # initial values for Sun in x, y and z direction\n x_i3 = 0\n y_i3 = 0\n v_x3i = 0\n v_y3i = 0\n z_i3 = 0\n v_z3i = 0\n\n# Initial positions and velocities\n r = np.array([x_i1, y_i1, v_x1i, v_y1i, x_i2,\n y_i2, v_x2i, v_y2i, x_i3, y_i3, v_x3i, v_y3i,\n z_i1, z_i2, z_i3, v_z1i, v_z2i, v_z3i])\n\n # We create vectors which will contains the trajectories\n # and velocities of each bodies\n x_pnts1 = [x_i1]\n y_pnts1 = [y_i1]\n v_x_pnts1 = [v_x1i]\n v_y_pnts1 = [v_y1i]\n\n x_pnts2 = [x_i2]\n y_pnts2 = [y_i2]\n v_x_pnts2 = [v_x2i]\n v_y_pnts2 = [v_y2i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n z_pnts1 = [z_i1]\n z_pnts2 = [z_i2]\n z_pnts3 = [z_i3]\n\n v_z_pnts1 = [v_z1i]\n v_z_pnts2 = [v_z2i]\n v_z_pnts3 = [v_z3i]\n\n m1 = m1\n m2 = m2\n m3 = m3\n a1 = a1\n a2 = a2\n\n # We create a vector which will contain the time\n # Initial value\n t_i = 0.0\n t_values = [t_i]\n\n for t in range(0, t_upper, h):\n\n # We used the RK4 formula here\n k1 = h*derivative(r=r, t=0, m1=5.972e+24, m2=m2, m3=1.989e+30,\n a1=a1, a2=1.52*1.496e+11)\n k2 = h*derivative(r=r + 0.5*k1, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k3 = h*derivative(r=r + 0.5*k2, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k4 = h*derivative(r=r + h*k3, t=t+h, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11)\n\n # We calculate the new vector r\n r += (k1 + 2*k2 + 2*k3 + k4)*(1.0/6.0)\n\n # We add the new points calculated\n x_pnts1.append(r[0])\n y_pnts1.append(r[1])\n\n v_x_pnts1.append(r[2])\n v_y_pnts1.append(r[3])\n\n x_pnts2.append(r[4])\n y_pnts2.append(r[5])\n v_x_pnts2.append(r[6])\n v_y_pnts2.append(r[7])\n\n x_pnts3.append(r[8])\n y_pnts3.append(r[9])\n v_x_pnts3.append(r[10])\n v_y_pnts3.append(r[11])\n\n z_pnts1.append(r[12])\n z_pnts2.append(r[13])\n z_pnts3.append(r[14])\n\n v_z_pnts1.append(r[15])\n v_z_pnts2.append(r[16])\n v_z_pnts3.append(r[17])\n\n t_values.append(t)\n\n # We return all the trajectories\n return x_pnts1, y_pnts1, x_pnts2, y_pnts2, x_pnts3, y_pnts3, z_pnts1, z_pnts2, z_pnts3", "def steadyYet(newg, oldg, newe, olde, newh, oldh, newf, oldf, tolerance):\n steady_yet = True\n if oldg == 0 or (abs(newg-oldg)/oldg > tolerance or\n abs(newe-olde)/olde > tolerance or\n abs(newh-oldh)/oldh > tolerance or\n abs(newf-oldf)/oldf > tolerance):\n steady_yet = False\n return steady_yet", "def test_el_small_surface_instability():\n levels = np.array([959., 931.3, 925., 899.3, 892., 867.9, 850., 814.,\n 807.9, 
790., 779.2, 751.3, 724.3, 700., 655., 647.5,\n 599.4, 554.7, 550., 500.]) * units.mbar\n temperatures = np.array([22.2, 20.2, 19.8, 18.4, 18., 17.4, 17., 15.4, 15.4,\n 15.6, 14.6, 12., 9.4, 7., 2.2, 1.4, -4.2, -9.7,\n -10.3, -14.9]) * units.degC\n dewpoints = np.array([20., 18.5, 18.1, 17.9, 17.8, 15.3, 13.5, 6.4, 2.2,\n -10.4, -10.2, -9.8, -9.4, -9., -15.8, -15.7, -14.8, -14.,\n -13.9, -17.9]) * units.degC\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def _check_zero(self, h, i, j, u, v, w):\n return self._.p[u, h, i] != 0 and self._.p[v, h, j] != 0 and \\\n self._.p[w, i, j] != 0", "def test_badly_conditioned_spline(tmpdir):\n\n gulp_input = u\"\"\"single\n\ncell\n5.468 5.468 5.468 90.0 90.0 90.0\n\nfrac\nU 0 0 0\nU 1/2 1/2 0\nU 1/2 0 1/2\nU 0 1/2 1/2\n\nO 1/4 1/4 1/4\nO 1/4 3/4 1/4\nO 3/4 3/4 1/4\nO 3/4 1/4 1/4\n\nO 1/4 1/4 3/4\nO 1/4 3/4 3/4\nO 3/4 3/4 3/4\nO 3/4 1/4 3/4\n\n\nspecies\nU 2.4\nO -1.2\n\ninclude potentials.lib\"\"\"\n\n # First calculate the expected energy using GULP's built-in analytical potentials\n with tmpdir.join(\"potentials.lib\").open(\"w\") as potfile:\n potfile.write(\"buck\\n\")\n potfile.write(\"O O 1633.01 0.3270196735 3.94879 10.0\\n\")\n potfile.write(\"U U 294.640906285709 0.327022 0.0 10.0\\n\")\n potfile.write(\"O U 693.650933805978 0.327022 0.0 10.0\\n\")\n potfile.write(\"\\n\")\n potfile.write(\"morse\\n\")\n potfile.write(\"O U 0.577189831995 1.65 2.369 10.0\\n\")\n\n\n gulp_infile = io.StringIO(gulp_input)\n gulp_infile.seek(0)\n\n gulp_outfile = io.StringIO()\n runGULP(gulp_infile, gulp_outfile, cwd = tmpdir.strpath)\n\n gulp_outfile.seek(0)\n expect = extractGULPEnergy(gulp_outfile)\n\n tmpdir.join(\"potentials.lib\").remove()\n assert not tmpdir.join(\"potentials.lib\").exists()\n\n # Now build a potential model and tabulate it - then re-run the calculation and check the energies match.\n aspot = io.StringIO(u\"\"\"\n[Tabulation]\ntarget : GULP\ncutoff : 10.0\nnr : 1000\n\n[Pair]\nO-O = as.buck 1633.010242995040 0.327022 3.948787\nU-U = as.buck 294.640906285709 0.327022 0.0\nO-U = sum(as.buck 693.650933805978 0.327022 0.0, \n\t\t as.morse 1.65 2.369 0.577189831995)\n\"\"\"\n )\n\n aspot.seek(0)\n\n from atsim.potentials.config import Configuration\n tabulation = Configuration().read(aspot)\n\n with tmpdir.join(\"potentials.lib\").open(\"w\") as potfile:\n tabulation.write(potfile)\n\n gulp_infile.seek(0)\n\n gulp_outfile = io.StringIO()\n runGULP(gulp_infile, gulp_outfile, cwd = tmpdir.strpath)\n\n gulp_outfile.seek(0)\n actual = extractGULPEnergy(gulp_outfile)\n assert pytest.approx(expect, rel=1e-4) == actual\n\n tmpdir.join(\"potentials.lib\").remove()\n assert not tmpdir.join(\"potentials.lib\").exists()", "def testfunction(expr,n):\n \n if expr == g_plus: init, expr = R1d.SpinUp, g_plus\n elif expr == g_minus: init, expr = R1d.SpinDown, g_minus\n else: return \"error\"\n \n a = McLaurin(expr, n) \n \n bool_list = []\n for n in range(5):\n for i in range(-n,n+1):\n bool_list.append(Coef(a,n,i) == R1d.a(n,i, init))\n if bool_list[-1] == False:\n print(\"Step: \", n, \" pos: \", i)\n return all(bool_list)", "def test_absolute_shape_volume(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 30)" ]
[ "0.728073", "0.589016", "0.58340615", "0.56767166", "0.5647796", "0.56435287", "0.56361413", "0.5609778", "0.5564585", "0.55382043", "0.5497689", "0.5426183", "0.5391233", "0.5343787", "0.5343612", "0.5332643", "0.5260981", "0.5248749", "0.5233796", "0.52206767", "0.5216022", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.520243", "0.51864827", "0.51822317", "0.51787674", "0.5177978", "0.5173952", "0.51577944", "0.5146629", "0.5145757", "0.51403624", "0.51402485", "0.51375484", "0.5136832", "0.513223", "0.51268375", "0.51268005", "0.5121923", "0.51217884", "0.5121653", "0.5114184", "0.5111297", "0.5106145", "0.51038104", "0.5091454", "0.5086065", "0.5086065", "0.5086065", "0.5086065", "0.50757706", "0.50710887", "0.506629", "0.50637454", "0.50625646", "0.50623196", "0.5059657", "0.5054004", "0.50499016", "0.504539", "0.5044682", "0.5025977", "0.5017595", "0.5008913", "0.50077164", "0.50041497", "0.5000771", "0.49944177", "0.4993814", "0.4990697", "0.49734765", "0.4967922", "0.4966804", "0.49589676", "0.49558723", "0.49518397", "0.49466768", "0.49460375", "0.49384868", "0.49338064", "0.4931038", "0.4928861", "0.49265155", "0.49197364", "0.4918906" ]
0.719298
1
Check that the energy of a path of surfaces is positive at each timestep.
def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol): space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy(path) self.assertAllEqual(energy.shape, ()) result = gs.all(energy > -1 * atol) self.assertTrue(result) path = gs.array([path, path]) energy = space.metric.path_energy(path) self.assertAllEqual(energy.shape, (2,)) result = gs.all(energy > -1 * atol) self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_path_energy_per_time_is_positive(\n self, space, a0, a1, b1, c1, d1, a2, path, atol\n ):\n n_times = len(path)\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy_per_time(path)\n\n self.assertAllEqual(energy.shape, (n_times - 1, 1))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n expected_shape = (2, n_times - 1, 1)\n path = gs.array([path, path])\n energy = space.metric.path_energy_per_time(path)\n self.assertAllEqual(energy.shape, expected_shape)\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)", "def is_positive(self, example_path):\n candidate_planets = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']\n return example_path in candidate_planets['lightcurve_path'].values", "def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def verify_shocksine(controller):\n import numpy as np\n import os\n\n test_solution = controller.solution.state.get_q_global()\n\n if test_solution is not None:\n thisdir = os.path.dirname(__file__)\n expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))\n test_density = test_solution[0,:]\n test_err = np.linalg.norm(expected_density-test_density)\n return check_diff(0, test_err, abstol=1.e-4)", "def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False", "def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0", "def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def sign_of_path(path):\n vectors = [(a[0] - b[0], a[1] - b[1]) for b, a in pairwise(path)]\n sign_exp = 0\n for idx, vector in enumerate(vectors):\n if vector == (0, 1):\n sign_exp += len([v for v in 
vectors[idx + 1:] if v == (1, 0)])\n return (-1) ** (sign_exp)", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def test_steps(self, model):\r\n model.fs.unit.initialize()\r\n\r\n # Add disturbances\r\n for t in model.fs.time:\r\n if 300 <= t < 600:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 - 10)\r\n elif 600 <= t < 900:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n elif 900 <= t < 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 + 10)\r\n elif t >= 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n\r\n # Transient solution\r\n solver.solve(model)\r\n\r\n times = [0, 300, 600, 900, 1200, 1500]\r\n sco2_exp = [305.2, 304.9, 305.1, 306.5, 305.7, 305.2]\r\n air_exp = [370.4, 373.1, 370.3, 365.9, 370.7, 370.4]\r\n wall_exp = [339.4, 338.7, 339.1, 340.7, 339.9, 339.4]\r\n\r\n self.check_temperatures(model, times, sco2_exp, air_exp, wall_exp)", "def validate_edges(attack_surface_graph, admissible_path, starting_points):\n for i in range(len(admissible_path)-1):\n for edge in attack_surface_graph.edges(data=True):\n if edge[0] == admissible_path[i] and edge[1] == admissible_path[i+1]:\n descriptors = edge[2]\n if find_violation(descriptors) == [] and edge[0] not in starting_points:\n return False\n return True", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", 
"def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def testSplineCurveInverseIsCorrect(self):\n x_knot = jnp.arange(0, 16, 0.01)\n alpha = self.variant(distribution.inv_partition_spline_curve)(x_knot)\n x_recon = self.variant(distribution.partition_spline_curve)(alpha)\n chex.assert_tree_all_close(x_recon, x_knot, atol=1e-5, rtol=1e-5)", "def checkPointInLampsReach(self, p):\n v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)\n v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)\n\n q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)\n s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)\n t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)\n\n return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)", "def test_e0_ts(self):\n self.assertAlmostEqual(self.tunneling.E0_TS.value_si * 0.001, self.E0_TS, 4)", "def check_if_in_shadow(psi, a_sat_vector, sun_pos):\n dot_prod = np.dot(a * n_vector.T, unit_sun_r(sun_pos))\n\n check = np.zeros((len(a_sat_vector)))\n if np.cos(psi) < 0 and a_sat_vector < r_earth and dot_prod <= r_earth:\n check = True\n\n return check", "def test_despike(curve):\n assert curve.df.max()[0] - curve.despike(50, z=1).df.max()[0] - 91.83918 < 0.001", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def check_these(phi, chi, omega):\n #Calculate the leg position from these angles\n self.getplatepos(phi, chi, omega)\n #Make sure the legs are not in fault\n self.check_limits()\n return (not any(self.leg_fault))", "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"", "def can_throw(self):\n if self.round_points == 0:\n return False\n return True", "def is_positive_definite(x):\n return np.all(np.linalg.eigvals(x) > 0)", "def s_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += -1 - ayxx\n return running_total", "def __eq__(self, line):\n \n return abs( 1 - np.dot(sm.unitvec(self.vec), sm.unitvec(line.vec))) < 10*_eps", "def test_energy():\n # Test something\n \n from nose.tools import assert_equal\n assert_equal(energy([0.0,0.0],1.0), 0)\n assert_equal(energy([4.0,5.0],1.0), 10)", "def convergence_check(self):\n air = 
self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def has_negative(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_negative = np.argwhere(tensor_numpy < 0)\n\n if verbose:\n for idx in where_negative:\n value = float(tensor_numpy[idx])\n print(f\"Encountered negative value: {value:.5f}\")\n\n negative_count = len(where_negative)\n negative = negative_count != 0\n\n if verbose and negative:\n print(f\"Encountered {negative_count} negative values\")\n\n return negative", "def hasEnergyExpended(self, flags):\r\n return (flags & 0x08) != 0", "def _check_dr_sign(self, alpha=np.pi/2):\n u1, u3, gamma2 = self.emitter.get_rotation_velocities()\n iota, eta = self.iota, self.eta\n\n d1 = -gamma2 * u1 + (1 + gamma2 ** 2 * u1 ** 2 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n (gamma2 ** 2 * u1 * u3 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n d3 = -gamma2 * u1 + (gamma2 ** 2 * u1 * u3 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n (1 + gamma2 ** 2 * u3 ** 2 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n c1 = np.cos(alpha) * d1 + np.sin(alpha) * d3\n\n return self.chi * c1", "def test_absolute_volume(self):\n\n assert self.test_shape.volume() == pytest.approx(50 * 60 * math.pi * 2 * 1000)", "def trig_conditions(tr):\n \n\n \n ##############\n #TRACE RELATED\n ##############\n avg_tr = np.average(tr)\n \n shifted_tr = tr - avg_tr \n \n abs_tr = abs(shifted_tr)\n \n sort_tr = np.sort(abs_tr)\n \n tr_99 = sort_tr[int(0.999*len(sort_tr))]\n\n #95% noise to signal area ratio calculation\n area_big = tr_99 * int(0.95*len(sort_tr))\n \n area_small = np.sum(sort_tr[:int(0.95*len(sort_tr))])\n \n \n area_condition = area_small / area_big; print area_condition\n \n # if signal is quite noisy, use recursive STALTA, else use classic STALTA\n \n df = tr.stats.sampling_rate\n \n \n if area_condition <= 0.1:\n \n print(\"CLEAN SIGNAL!\\n\")\n\n ctf = abs(1-carlSTATrig(tr.data, int(5 * df), int(10 * df), 0.0001, 0.0001))\n \n sort_ctf = np.sort(ctf)\n \n ctf_low = sort_ctf[int(0.80*len(sort_ctf))]\n \n ctf_99 = sort_ctf[int(0.999*len(sort_ctf))]\n \n max_trig = ctf_99\n \n trig_on = ctf_low + 0.4 * (max_trig - ctf_low)\n\n trig_off = 0.8 * trig_on\n \n if not max_trig > 1.25 * ctf_low:\n trig_on = 0; trig_off = 0;\n \n \n \n elif 0.1 < area_condition < 0.16:\n \n print(\"NOISY SIGNAL!\\n\")\n\n ctf = abs(1-recSTALTA(tr.data, int(5 * df), int(10 * df)))\n \n sort_ctf = np.sort(ctf)\n \n ctf_low = sort_ctf[int(0.90*len(sort_ctf))]\n \n ctf_99 = sort_ctf[int(0.999*len(sort_ctf))]\n \n max_trig = ctf_99\n \n trig_on = 
ctf_low + 0.4 * (max_trig - ctf_low)\n\n trig_off = 0.8 * trig_on\n \n if not max_trig > 1.25 * ctf_low:\n trig_on = 0; trig_off = 0;\n \n else:\n print(\"TRACE TOO NOISY TO DETECT SIGNAL!\")\n trig_on = 0; trig_off = 0;\n ctf = abs(1-recSTALTA(tr.data, int(5 * df), int(10 * df)))\n\n \n\n\n\n \n\n \n \n ############\n #ctf RELATED\n ############\n\n \n #set conditional for noise. This is MINIMUM threshold.\n #ctf_avg = np.average(ctf)\n #ctf_mode = scipy.stats.mode(ctf)\n #ctf_std = np.std(ctf)\n \n\n \n \n #plt.figure()\n #plt.plot(sort_ctf)\n #plt.show()\n\n \n\n \n \n \n return trig_on, trig_off, ctf", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "def test_accurate(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n times = self.G.pattern.nonzero()[1]*self.dt\n self.assertTrue(np.allclose(sorted(times), M.t))\n for (i, t) in zip(M.i, M.t):\n self.assertTrue(self.G.pattern[i, int_r(t/self.dt)])", "def test_spike_negative_vals(self):\n thresholds = (25, 50)\n\n arr = [-10, -12, -999.99, -13, -15, -40, -9, -9]\n\n # First and last elements should always be good data, unless someone\n # has set a threshold to zero.\n expected = [1, 4, 4, 4, 1, 3, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=self.suspect_threshold,\n fail_threshold=self.fail_threshold\n ),\n expected\n )", "def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. 
At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def test_triangle_positive_is_equilateral_property(self):\n a = Point(-9, 10)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertTrue(t.is_equilateral,\n \"Test of Triangle(Point(-9, 10), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != True.\")\n a = Point(-9, 21)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertFalse(t.is_equilateral,\n \"Test of Triangle(Point(-9, 21), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != False.\")", "def is_zero(self):\n return -0.0001 <= self.l2_norm() <= 0.0001", "def test_to_delta_time_positive_difference(with_tf_random_seed, np_time_points):\n time_points = tf.constant(np_time_points, dtype=default_float())\n\n with pytest.raises(InvalidArgumentError) as exp:\n to_delta_time(time_points)\n\n assert exp.value.message.find(\"Condition x >= y\") >= 0", "def testgradsorientation(self):\r\n # since z-coordinates of atomcoords are all 0 for dvb, z-values of grads should be all 0\r\n assert numpy.alltrue(numpy.abs(self.data.grads[:,:,2]) < 1e-14)", "def calc(path):\n if len(df_times)==0:\n return 0\n i = np.where(timegrid == t)[0][0]\n x_t = path[i]\n discount = np.vectorize(lambda T: model.zerobond(T, t, x_t))\n dfs = discount(df_times)\n # Calculate fixed leg npv\n fix_leg_npv = np.sum(fixed_amounts * dfs[fix_idx])\n # Estimate the index fixings\n index_fixings = (dfs[accrual_start_idx] / dfs[accrual_end_idx] - 1) \n index_fixings /= float_dcf\n # Calculate the floating leg npv\n float_leg_npv = np.sum(nominals * index_fixings * float_dcf * dfs[float_idx])\n # Calculate the already fixed accrual period of the floating leg\n t_f = accrual_start_time_ffp\n i = np.where(timegrid == t_f)[0][0]\n x_f = path[i]\n df_e = model.zerobond(accrual_end_time_ffp, t_f, x_f)\n npv_accrualperiod = (1. 
/ df_e - 1) * nominals_ffp * model.zerobond(paytime_ffp, t, x_t)\n # Calculate swap npv\n npv = float_leg_npv + npv_accrualperiod - fix_leg_npv\n return npv", "def test_SemiF47_level_0_7(self):\n self.assertEqual(viol_check(self.vol,7), [[90, 130], [174, 235]])", "def test_sign_test(self):\r\n v = [(\"two sided\", 26, 50, 0.88772482734078251),\r\n (\"less\", 26, 50, 0.6641),\r\n (\"l\", 10, 50, 1.193066583837777e-05),\r\n (\"hi\", 30, 50, 0.1013193755322703),\r\n (\"h\", 0, 50, 1.0),\r\n (\"2\", 30, 50, 0.20263875106454063),\r\n (\"h\", 49, 50, 4.5297099404706387e-14),\r\n (\"h\", 50, 50, 8.8817841970012543e-16)\r\n ]\r\n for alt, success, trials, p in v:\r\n result = sign_test(success, trials, alt=alt)\r\n self.assertFloatEqual(result, p, eps=1e-5)", "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def _is_eruption_in(self, days, from_time):\n for te in self.tes:\n if 0 < (te-from_time).total_seconds()/(3600*24) < days:\n return 1.\n return 0.", "def test_fluxes(self):\n\n t, x = self.t, self.x_edge\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[0]), 0, decimal=3)\n np.testing.assert_array_almost_equal(self.N_e_hat(t, x[-1]), 0, decimal=3)", "def is_path_valid(self,path):\n null_state=[0 for i in range(len(self.node_names))]\n null_state_matrix=np.matrix(null_state).T\n new_state=np.matrix(self.state).T\n for index,edge in enumerate(path):\n #print index\n #print edge\n edge_position=self.edges.index(edge)\n move_matrix=self.edge_matrices[edge_position]\n #print move_matrix\n new_state=move_matrix*new_state\n if new_state.any()==null_state_matrix.any():\n #print new_state\n #print null_state_matrix\n return False\n return True", "def testscfenergy(self):\r\n scf = self.data.scfenergies[-1]\r\n ref = self.b3lyp_energy\r\n tol = self.b3lyp_tolerance\r\n msg = f\"Final SCF energy: {scf:f} not {int(ref)} +- {int(tol)}eV\"\r\n assert abs(scf-ref) < 40, msg", "def penumbral_eclipse(sat, earth, sun, time):\n\n theta, theta_e, theta_s = eclipse_parameters(sat, earth, sun, time)\n return np.logical_and(np.abs(theta_e - theta_s) < theta,\n theta < (theta_e + theta_s))", "def test_low_voltage_passing_signal(self):\n data = gen_random_data(-0.5, 0.5, self.channels)\n self.assertFalse(self.lowvoltage_rule.is_broken(data))", "def _lowess_tricube(t):\n #t = (1-np.abs(t)**3)**3\n t[:] = np.absolute(t) #, out=t) #numpy version?\n _lowess_mycube(t)\n t[:] = np.negative(t) #, out = t)\n t += 1\n _lowess_mycube(t)", "def test(self, grid, flag):\n x = self.x+SPEED_X[flag]\n y = self.y+SPEED_Y[flag]\n return 0 <= x < self.n and 0 <= y < self.n and grid[y][x] == 1", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def check_energies(self, zero_degenerate_impropers=True, skip_assert=False):\n if zero_degenerate_impropers is True:\n self.zero_degenerate_impropers(self.torsion_force0)\n xyz = self.simulation0.context.getState(getPositions=True).getPositions()\n self.simulation0.context.reinitialize()\n self.simulation0.context.setPositions(xyz)\n\n self.zero_degenerate_impropers(self.torsion_force1)\n xyz = self.simulation1.context.getState(getPositions=True).getPositions()\n self.simulation1.context.reinitialize()\n self.simulation1.context.setPositions(xyz)\n\n state0 = self.simulation0.context.getState(getEnergy=True)\n energy0 = state0.getPotentialEnergy()\n\n state1 = 
self.simulation1.context.getState(getEnergy=True)\n energy1 = state1.getPotentialEnergy()\n\n if not skip_assert:\n delta = abs(energy0 - energy1)\n assert delta < ENERGY_EPSILON, \"Error, energy difference (%f kJ/mol) is greater than %f kJ/mol\" % (delta / u.kilojoules_per_mole, ENERGY_EPSILON / u.kilojoules_per_mole)\n\n return energy0, energy1", "def metropolis_hastings_accept(energy_prev, energy_next, s_rng):\r\n ediff = energy_prev - energy_next\r\n return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def calculate_edges_zero(self, verbose = False):\n\n ## calculates the first and last wavelength that has non-zero\n # w = np.where(self.throughput > 0)[0]\n # if verbose: print(w)\n # self._upper_edge = self.wavelength[w[-1]]\n # self._lower_edge = self.wavelength[w[0]]\n\n w = np.where(self.throughput > 0)[0]\n if verbose: print(w)\n if w[0] - 1 < 0:\n w_low = 0\n else:\n w_low = w[0] - 1\n\n if w[-1] + 1 == len(self.throughput):\n w_high = w[-1]\n else:\n w_high = w[-1] + 1\n\n self._upper_edge = self.wavelength[w_high]\n self._lower_edge = self.wavelength[w_low]", "def is_close_to_zero(value: Union[float, np.ndarray]) -> Union[bool, np.ndarray]:\n return abs(value) < 1.0e-10", "def test_numprops_different_sign(self):\n # Perform diff.\n df = Differ(key=\"name\", deltas={\"energy\": Delta(\"+-\")})\n d = df.diff(*self.engines)\n # Calculate expected results.\n is_different = lambda a, b: a < 0 < b or b < 0 < a\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n # Check results.\n self.assertEqual(len(d[Differ.CHANGED]), changed)", "def is_solved(self):\n return self.to_grid == self.from_grid", "def path_energy_per_time(self, path):\n n_times, _, _ = path.shape\n surface_diffs = path[1:, :, :] - path[:-1, :, :]\n surface_midpoints = path[: n_times - 1, :, :] + surface_diffs / 2\n energy = []\n for diff, midpoint in zip(surface_diffs, surface_midpoints):\n energy.extend([n_times * self.squared_norm(diff, midpoint)])\n return gs.array(energy)", "def one_step(self):\r\n assert (self.uv_vol is not None)\r\n assert (self.guv_vol is not None)\r\n assert (self.uv_bound is not None)\r\n assert (self.vf_vect_bound is not None)\r\n assert (self.vF_vect_vol is not None)\r\n # Shape checks\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])\r\n assert (self.uv_vol.shape == self.guv_vol.shape)\r\n assert (self.uv_vol.shape == self.uv_bound.shape)\r\n assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])\r\n \r\n if self.step == 0:\r\n self.check_k_matrix_stability()\r\n # print(\"Epsilon is :\"+str(self.Epsilon))\r\n # print(\"Beta is :\"+str(self.Beta))\r\n\r\n # Form \"Stiffness\" matrix:\r\n K = self.make_k_matrix()\r\n # Form \"Force\" vector: \r\n f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound\r\n\r\n # print(\"FORCE VECTOR:\")\r\n # print(f)\r\n # print(\"STIFFNESS MATRIX\")\r\n # print(K)\r\n # print(\"UV_VOL\")\r\n # print(self.uv_vol)\r\n # print(\"EPSILON * GUV_VOL\")\r\n # print(self.Epsilon * self.guv_vol)\r\n # print(\"UV_BOUND * COEFF\")\r\n # print((self.Epsilon / self.Beta) * self.uv_bound)\r\n sol = scipy_sparse_linsolve(K, f)\r\n # print(\"SOLUTION\")\r\n # 
print(sol)\r\n return sol", "def update_status(self):\n if len(self.invalid) != 0:\n return False\n for row in self.grid:\n for num in row:\n if num == 0:\n return False\n self.solved = True\n print(\"solved\")\n return True", "def trajectories(t_upper=3600*24*687, h=100, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n # We check if parameters are all positive\n\n list_parameters = [t_upper, h, m1, m2, m3,\n a1, a2]\n\n for parameters in list_parameters:\n\n if parameters < 0:\n print(f'You have entered a negative parameter')\n\n # initial values for planet 1 in x, y and z direction\n x_i1 = a1\n y_i1 = 0\n v_x1i = 0\n v_y1i = 29779.301841746023\n z_i1 = 0\n v_z1i = 0\n\n # initial values for planet 2 in x, y and z direction\n x_i2 = a2\n y_i2 = 0\n v_x2i = 0\n v_y2i = 24154.203325249873\n z_i2 = 0\n v_z2i = 0\n\n # initial values for Sun in x, y and z direction\n x_i3 = 0\n y_i3 = 0\n v_x3i = 0\n v_y3i = 0\n z_i3 = 0\n v_z3i = 0\n\n# Initial positions and velocities\n r = np.array([x_i1, y_i1, v_x1i, v_y1i, x_i2,\n y_i2, v_x2i, v_y2i, x_i3, y_i3, v_x3i, v_y3i,\n z_i1, z_i2, z_i3, v_z1i, v_z2i, v_z3i])\n\n # We create vectors which will contains the trajectories\n # and velocities of each bodies\n x_pnts1 = [x_i1]\n y_pnts1 = [y_i1]\n v_x_pnts1 = [v_x1i]\n v_y_pnts1 = [v_y1i]\n\n x_pnts2 = [x_i2]\n y_pnts2 = [y_i2]\n v_x_pnts2 = [v_x2i]\n v_y_pnts2 = [v_y2i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n z_pnts1 = [z_i1]\n z_pnts2 = [z_i2]\n z_pnts3 = [z_i3]\n\n v_z_pnts1 = [v_z1i]\n v_z_pnts2 = [v_z2i]\n v_z_pnts3 = [v_z3i]\n\n m1 = m1\n m2 = m2\n m3 = m3\n a1 = a1\n a2 = a2\n\n # We create a vector which will contain the time\n # Initial value\n t_i = 0.0\n t_values = [t_i]\n\n for t in range(0, t_upper, h):\n\n # We used the RK4 formula here\n k1 = h*derivative(r=r, t=0, m1=5.972e+24, m2=m2, m3=1.989e+30,\n a1=a1, a2=1.52*1.496e+11)\n k2 = h*derivative(r=r + 0.5*k1, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k3 = h*derivative(r=r + 0.5*k2, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k4 = h*derivative(r=r + h*k3, t=t+h, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11)\n\n # We calculate the new vector r\n r += (k1 + 2*k2 + 2*k3 + k4)*(1.0/6.0)\n\n # We add the new points calculated\n x_pnts1.append(r[0])\n y_pnts1.append(r[1])\n\n v_x_pnts1.append(r[2])\n v_y_pnts1.append(r[3])\n\n x_pnts2.append(r[4])\n y_pnts2.append(r[5])\n v_x_pnts2.append(r[6])\n v_y_pnts2.append(r[7])\n\n x_pnts3.append(r[8])\n y_pnts3.append(r[9])\n v_x_pnts3.append(r[10])\n v_y_pnts3.append(r[11])\n\n z_pnts1.append(r[12])\n z_pnts2.append(r[13])\n z_pnts3.append(r[14])\n\n v_z_pnts1.append(r[15])\n v_z_pnts2.append(r[16])\n v_z_pnts3.append(r[17])\n\n t_values.append(t)\n\n # We return all the trajectories\n return x_pnts1, y_pnts1, x_pnts2, y_pnts2, x_pnts3, y_pnts3, z_pnts1, z_pnts2, z_pnts3", "def steadyYet(newg, oldg, newe, olde, newh, oldh, newf, oldf, tolerance):\n steady_yet = True\n if oldg == 0 or (abs(newg-oldg)/oldg > tolerance or\n abs(newe-olde)/olde > tolerance or\n abs(newh-oldh)/oldh > tolerance or\n abs(newf-oldf)/oldf > tolerance):\n steady_yet = False\n return steady_yet", "def test_el_small_surface_instability():\n levels = np.array([959., 931.3, 925., 899.3, 892., 867.9, 850., 814.,\n 807.9, 
790., 779.2, 751.3, 724.3, 700., 655., 647.5,\n 599.4, 554.7, 550., 500.]) * units.mbar\n temperatures = np.array([22.2, 20.2, 19.8, 18.4, 18., 17.4, 17., 15.4, 15.4,\n 15.6, 14.6, 12., 9.4, 7., 2.2, 1.4, -4.2, -9.7,\n -10.3, -14.9]) * units.degC\n dewpoints = np.array([20., 18.5, 18.1, 17.9, 17.8, 15.3, 13.5, 6.4, 2.2,\n -10.4, -10.2, -9.8, -9.4, -9., -15.8, -15.7, -14.8, -14.,\n -13.9, -17.9]) * units.degC\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def _check_zero(self, h, i, j, u, v, w):\n return self._.p[u, h, i] != 0 and self._.p[v, h, j] != 0 and \\\n self._.p[w, i, j] != 0", "def test_badly_conditioned_spline(tmpdir):\n\n gulp_input = u\"\"\"single\n\ncell\n5.468 5.468 5.468 90.0 90.0 90.0\n\nfrac\nU 0 0 0\nU 1/2 1/2 0\nU 1/2 0 1/2\nU 0 1/2 1/2\n\nO 1/4 1/4 1/4\nO 1/4 3/4 1/4\nO 3/4 3/4 1/4\nO 3/4 1/4 1/4\n\nO 1/4 1/4 3/4\nO 1/4 3/4 3/4\nO 3/4 3/4 3/4\nO 3/4 1/4 3/4\n\n\nspecies\nU 2.4\nO -1.2\n\ninclude potentials.lib\"\"\"\n\n # First calculate the expected energy using GULP's built-in analytical potentials\n with tmpdir.join(\"potentials.lib\").open(\"w\") as potfile:\n potfile.write(\"buck\\n\")\n potfile.write(\"O O 1633.01 0.3270196735 3.94879 10.0\\n\")\n potfile.write(\"U U 294.640906285709 0.327022 0.0 10.0\\n\")\n potfile.write(\"O U 693.650933805978 0.327022 0.0 10.0\\n\")\n potfile.write(\"\\n\")\n potfile.write(\"morse\\n\")\n potfile.write(\"O U 0.577189831995 1.65 2.369 10.0\\n\")\n\n\n gulp_infile = io.StringIO(gulp_input)\n gulp_infile.seek(0)\n\n gulp_outfile = io.StringIO()\n runGULP(gulp_infile, gulp_outfile, cwd = tmpdir.strpath)\n\n gulp_outfile.seek(0)\n expect = extractGULPEnergy(gulp_outfile)\n\n tmpdir.join(\"potentials.lib\").remove()\n assert not tmpdir.join(\"potentials.lib\").exists()\n\n # Now build a potential model and tabulate it - then re-run the calculation and check the energies match.\n aspot = io.StringIO(u\"\"\"\n[Tabulation]\ntarget : GULP\ncutoff : 10.0\nnr : 1000\n\n[Pair]\nO-O = as.buck 1633.010242995040 0.327022 3.948787\nU-U = as.buck 294.640906285709 0.327022 0.0\nO-U = sum(as.buck 693.650933805978 0.327022 0.0, \n\t\t as.morse 1.65 2.369 0.577189831995)\n\"\"\"\n )\n\n aspot.seek(0)\n\n from atsim.potentials.config import Configuration\n tabulation = Configuration().read(aspot)\n\n with tmpdir.join(\"potentials.lib\").open(\"w\") as potfile:\n tabulation.write(potfile)\n\n gulp_infile.seek(0)\n\n gulp_outfile = io.StringIO()\n runGULP(gulp_infile, gulp_outfile, cwd = tmpdir.strpath)\n\n gulp_outfile.seek(0)\n actual = extractGULPEnergy(gulp_outfile)\n assert pytest.approx(expect, rel=1e-4) == actual\n\n tmpdir.join(\"potentials.lib\").remove()\n assert not tmpdir.join(\"potentials.lib\").exists()", "def testfunction(expr,n):\n \n if expr == g_plus: init, expr = R1d.SpinUp, g_plus\n elif expr == g_minus: init, expr = R1d.SpinDown, g_minus\n else: return \"error\"\n \n a = McLaurin(expr, n) \n \n bool_list = []\n for n in range(5):\n for i in range(-n,n+1):\n bool_list.append(Coef(a,n,i) == R1d.a(n,i, init))\n if bool_list[-1] == False:\n print(\"Step: \", n, \" pos: \", i)\n return all(bool_list)", "def test_absolute_shape_volume(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 30)" ]
[ "0.719298", "0.589016", "0.58340615", "0.56767166", "0.5647796", "0.56435287", "0.56361413", "0.5609778", "0.5564585", "0.55382043", "0.5497689", "0.5426183", "0.5391233", "0.5343787", "0.5343612", "0.5332643", "0.5260981", "0.5248749", "0.5233796", "0.52206767", "0.5216022", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.520243", "0.51864827", "0.51822317", "0.51787674", "0.5177978", "0.5173952", "0.51577944", "0.5146629", "0.5145757", "0.51403624", "0.51402485", "0.51375484", "0.5136832", "0.513223", "0.51268375", "0.51268005", "0.5121923", "0.51217884", "0.5121653", "0.5114184", "0.5111297", "0.5106145", "0.51038104", "0.5091454", "0.5086065", "0.5086065", "0.5086065", "0.5086065", "0.50757706", "0.50710887", "0.506629", "0.50637454", "0.50625646", "0.50623196", "0.5059657", "0.5054004", "0.50499016", "0.504539", "0.5044682", "0.5025977", "0.5017595", "0.5008913", "0.50077164", "0.50041497", "0.5000771", "0.49944177", "0.4993814", "0.4990697", "0.49734765", "0.4967922", "0.4966804", "0.49589676", "0.49558723", "0.49518397", "0.49466768", "0.49460375", "0.49384868", "0.49338064", "0.4931038", "0.4928861", "0.49265155", "0.49197364", "0.4918906" ]
0.728073
0
Goes through each neuron; each neuron has a chance of mutating equal to the learning rate of the network. There is a 20% chance of a physical mutation.
def mutate(self): #First, mutate masses for neuronNum in range(self.neuronCounter - 1): if self.learningRate > random.random(): self.neurons[neuronNum].mutate() else: continue #Now determine physical mutations if random.random() < 0.2: try: physMutation = random.choice(['a','l','c']) if physMutation == 'a': self.addNeuron(random.choice([0,1,2])) elif physMutation == 'l': begin = random.randint(1,self.neuronCounter - 1) end = random.randint(1, self.neuronCounter - 1) self.link(begin, end) else: begin = random.randint(1,self.neuronCounter - 1) end = random.choice(self.neurons[begin].outDic.keys()) self.cut(begin, end) except: return self return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"", "def mutate(self, chance, amount):\r\n for layer in self.layers:\r\n for row in range(layer.output_size):\r\n for col in range(layer.input_size+1):\r\n if np.random.rand() < chance:\r\n new_val = layer.weights[row, col] + np.random.uniform(-amount, amount)\r\n new_val = min(max(-1, new_val), 1)\r\n layer.weights[row, col] = new_val", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in 
range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def mutation(child_weights):\n for index, _ in enumerate(child_weights):\n # Add a chance for random mutation\n has_mutation = random.uniform(0, 1)\n if has_mutation <= .1:\n child_weights[index] *= random.randint(0, 5)", "def mutate(self, perturbing_probability):\n for con in self.connections.values():\n if random() < perturbing_probability:\n con.weight *= random_gaussian()\n else:\n con.weight = random(-1, 1)", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def mutate1(self, probability):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = np.random.normal(0, 1)", "def mutatePopulation(population, rate, strength, assetList):\n if rate < 0 or rate > 1:\n print('Mutation rate has to lie in [0,1].')\n return\n\n if strength <= 0 or strength > 1:\n print('Mutation strength has to lie in (0,1].')\n return\n\n mutatedPopulation = []\n\n for individual in population:\n if random.random() < rate:\n mutatedPopulation.append(mutateIndividual(individual, strength, assetList))\n else:\n mutatedPopulation.append(individual)\n\n return mutatedPopulation", "def mutation(self):\n\n for r in range(self.pop_num*3, 5): # Mutation.\n for w in range(0,self.length): \n if random.random()<0.2: \n self.par_and_sons[r].A[w] = self.par_and_sons[r].A[w] + np.random.randint(-20, 20) # Offset + -20 pixels.", "def __mutate(self, chromosomes, mutation_probability):\n\n for chromosome in chromosomes:\n for i in range(self.chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n logging.getLogger().debug(\n \"---> Mutation in Chromosome \" + str(\n chromosome.chromosome_id) + \"in gene \" + str(i)\n + \" <---\")\n chromosome.genes[i] = random.choice(self.gene_pool)", "def mutate(self):\n \n # Mutate each weight\n self.w1 = self.w1 + np.random.normal(0, 1, 8).reshape((2,4))\n self.b1 = self.b1 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w2 = self.w2 + np.random.normal(0, 1, 4).reshape((2,2))\n self.b2 = self.b2 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w3 = self.w3 + np.random.normal(0, 1, 2).reshape((1,2))\n self.b3 = self.b3 + np.random.normal(0, 1, 1)\n \n # Return thyself\n return self", "def 
modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):\n\n name = str(seed)\n\n np.random.seed(seed2)\n tf.random.set_random_seed(seed2)\n random.seed(seed2)\n\n if not rnd: # If randomness is not applied\n print(ranks.sum(axis=1))\n if (ranks.sum(axis=1) == 0).any(): # If there are any network in the bottom three in importance in all objectives\n probs = (ranks.sum(axis=1) == 0) * probs # Only accept a network as modifiable if they rank between 3 least important networks in all three objectives\n probs = probs / np.sum(probs) # Update probabilities once the networks more important than bottom three have been taken away\n trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)\n else:\n trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)\n else: # Random application\n comp = np.random.choice(nets)\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n mutations = [con for con in conns if is_deletable(desc, con)]\n\n mutations += [\"add_con\", \"divide_con\", \"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n\n mutation = np.random.choice(mutations)\n res, trainables = mutate(mutation, desc, comp, conns)\n print(mutation)\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, load=None, init=False, random_seed=seed2, lr=0.001)\n\n model.initialize(load=True, load_path=\"\", variables=trainables)\n\n model.convergence_train(hypers[\"btch_sz\"], iter_lim//100, conv_param, proportion, iter_lim//20, display_step=-1)\n\n results = evaluate_model(model)\n\n del model\n\n if rnd == 1:\n n = \"resultsrandom\"\n else:\n n = \"results\"\n\n np.save(n + str(seed) + \"_\" + str(seed2) + \".npy\", np.concatenate((results, [res, mutation, comp], reaching_outs)))", "def explore(self):\n for k, v in self._hyperparameters.items():\n mutation = random.choice([0.8, 1.2])\n self._hyperparameters[k] = mutation * v", "def __init__(self, neurons, random=True, silent=False):\n\n self.neurons = neurons\n self.silent = silent\n\n # Set weights\n lastneuron = 0\n self.weights = []\n\n for neuron in self.neurons:\n if lastneuron != 0:\n x = np.random.rand(neuron, lastneuron) * 2.0 - 1.0\n if not random:\n for y in range(len(x)):\n for z in range(len(x[y])):\n x[y][z] = 0.0\n self.weights.append(x)\n lastneuron = neuron", "def reducing_mutations(nets, probs, desc):\n\n if (np.isnan(probs)).any(): # If probabilites could not be computed or mutation has to be randomly applied, apply random probabilities\n print(\"NaN prob\")\n probs = np.array([1/probs.shape[0]]*probs.shape[0])\n if rnd == 1:\n probs = np.array([1 / probs.shape[0]] * probs.shape[0])\n\n comp = np.random.choice(nets, p=probs) # Choose network to which area the mutation is going to be applied\n\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n mutations = [con for con in conns if is_deletable(desc, con)] # Add deletable connections to the mutation pool\n mutations += [\"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n mutation = 
np.random.choice(mutations) # Choose mutation\n\n res, trainables = mutate(mutation, desc, comp, conns)\n\n return trainables, res, mutation, comp, reaching_outs", "def run(self, i):\n j = 0\n sum = 0\n probability = 0\n count = self.neuron_count\n\n sum = 0\n for j in range(0, count):\n sum += self.get_weight(i, j) * (1 if (self.current_state[j] > 0) else 0)\n\n sum -= self.threshold[i]\n probability = 1 / (1 + math.exp(-sum / self.temperature))\n if np.random.rand() <= probability:\n self.current_state[i] = 1.0\n else:\n self.current_state[i] = 0.0", "def _mutate(self, offspring):\n weight_idx = random.choice(range(len(offspring)))\n mutation_modifier = 1 + random.uniform(-self.mutation_delta, self.mutation_delta)\n offspring[weight_idx] *= mutation_modifier\n return self._normalize_weights(offspring)", "def multiplication_test():\r\n\r\n def fitness_function(neural_net):\r\n \"\"\"Calculate the fitness of a neural_net.\"\"\"\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness\r\n\r\n gen_size = 50\r\n net_size = (2, 1)\r\n genetic_algorithm = GeneticAlgorithm(gen_size, net_size, mutation_rate=0.3, mutation_chance=0.5)\r\n\r\n highest_so_far = 0\r\n while True:\r\n # Testing creatures\r\n for neural_net in genetic_algorithm.population:\r\n neural_net.fitness = fitness_function(neural_net)\r\n\r\n # Sorting creatures\r\n genetic_algorithm.calculate_stats()\r\n\r\n print(\"Gen\", genetic_algorithm.current_generation, \":\")\r\n print(\"Max fitness\", genetic_algorithm.stats.max_fitness)\r\n print(\"Mean fitness\", genetic_algorithm.stats.mean_fitness)\r\n highest_so_far = max(genetic_algorithm.stats.max_fitness, highest_so_far)\r\n print(\"Highest so far\", highest_so_far)\r\n\r\n\r\n # Starting next generation\r\n if genetic_algorithm.stats.max_fitness < 24.9 and genetic_algorithm.current_generation < 1000:\r\n genetic_algorithm.next_generation()\r\n else:\r\n break\r\n\r\n\r\n quit()\r\n\r\n\r\n for net in genetic_algorithm.sorted_population:\r\n print(net.fitness)\r\n best_neural_net = genetic_algorithm.sorted_population[0]\r\n print(\"Weights:\")\r\n print(best_neural_net.layers[0].weights[0])\r\n while True:\r\n print()\r\n in_a = input(\"Give net first number: \")\r\n in_b = input(\"Give net second number: \")\r\n answer = best_neural_net.calculate([np.log(float(in_a)), np.log(float(in_b))])[0]\r\n print(\"Net's answer:\", np.exp(answer))", "def mutate(weights,gen):\n mutated_weights = []\n for weight in weights:\n new_weight = np.random.normal(loc=weight, scale=0.5/(gen+1))\n if new_weight >= -1 and new_weight <= 1:\n mutated_weights.append(new_weight)\n elif new_weight < -1:\n mutated_weights.append(-1)\n else:\n mutated_weights.append(1)\n return np.array(mutated_weights)", "def mutate(self):\n #inlined 'flip_coin' for speed\n if prng.random() < self.mutation_rate:\n self._value = self.mutator.evaluate(self)\n return 1\n return 0", "def _mutate(self, noise_generator, sigma):\n\n mutation_indexes = torch.distributions.categorical.Categorical(\n torch.tensor([self.mutation_prob, 1 - self.mutation_prob])).sample([self.population_size]) > 0.5\n\n noise = noise_generator.sample([self.population_size, len(self.population[0])]).squeeze(-1)\n self.population[mutation_indexes] += noise[mutation_indexes] * sigma", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = 
Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('D', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def anneal():\n best_sol = list(range(SIZE))\n best_sum = get_sum(best_sol)\n shuffle(best_sol)\n\n temp = 10000000\n cool_rate = 0.0003\n\n counter = 0\n while temp > 1:\n new_sol = best_sol.copy()\n i, j = randint(0, SIZE - 1), randint(0, SIZE - 1)\n new_sol[i], new_sol[j] = new_sol[j], new_sol[i]\n new_energy = get_sum(new_sol)\n cur_energy = best_sum\n if calculate_probability(cur_energy, new_energy, temp) > random():\n best_sol = new_sol.copy()\n best_sum = new_energy\n temp *= 1 - cool_rate\n counter += 1\n\n print(counter)\n\n print(best_sol)\n print(best_sum)\n return best_sol, best_sum", "def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n 
mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def mutate(net, seed, noise_std, copy_net=True):\r\n # copy current net\r\n mutated_net = copy.deepcopy(net) if copy_net else net\r\n # set seed for mutation\r\n np.random.seed(seed)\r\n for param in mutated_net.parameters():\r\n noise = torch.tensor(np.random.normal(size=param.shape).astype(np.float32))\r\n param.data += noise * noise_std\r\n \r\n return mutated_net", "def evolve(self, elitism='on', save='off', probability=0.05, rate=0.05):\n if self.state == 'dead':\n\n self.member_fitness = [self.members[i].fitness for i in range(self.size)]\n\n self.fittest_brain = self.members[self.member_fitness.index(max(self.member_fitness))]\n\n if save == 'on':\n self.fittest_brain.save_as('fittest_brain')\n\n self.total_population_fitness = sum(self.member_fitness)\n\n print('Total population fitness is %s' % (self.total_population_fitness))\n\n self.mating_pool = [[self.members[i]] * round(self.member_fitness[i] * 1000 / self.total_population_fitness) for i in range(self.size)]\n\n self.mating_pool = [brain for sublist in self.mating_pool for brain in sublist]\n\n self.children = []\n\n if elitism == 'on':\n\n self.children.append(self.fittest_brain)\n\n for i in range(self.size - 1):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n else:\n for i in range(self.size):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n\n self.members = self.children\n\n self.members[0].state = 'alive'\n\n self.state = 'alive'\n self.generation += 1\n\n else:\n print('Cannot evolve: some members are still alive')", "def mutate(lr_schedule):\n for i in range(2): # mutate two to increase variance\n # Choose a random key.\n idx = randint(0, len(lr_schedule)-1)\n # Mutate one of the params. 
Will be within (1/10, 10)*Old param 68% of the time\n lr_schedule[idx] = lr_schedule[idx]*math.pow(10, random.normalvariate(0, 1.5)) \n return lr_schedule", "def mut_individual(individual, pexist):\n \n network = individual.network\n for i in network.index.values:\n age = network.loc[i,'age']\n if random.random() < AGEDEP(age, pexist):\n if network.loc[i,'in'] == 1:\n network.loc[i, :] = 0\n network.loc[:, i] = 0\n \n if network.loc[i,'in'] == 0:\n network.loc[i,'in'] = 1\n network.loc[i,'age'] = 1\n for j in network.columns.values[2:]:\n if random.random() < 0.1 and i != j:\n network.loc[i,j] = 1\n network.loc[j,i] = network.at[i,j]\n \n relevant = network.loc[network['in']==1]\n for _ in range(10):\n i = random.choice(relevant.index.values)\n j = random.choice(relevant.columns.values[2:])\n network.loc[i,j] = abs(network.at[i,j]-1)\n network.loc[j,i] = network.at[i,j]\n \n if network.loc[i][1:].sum() == 0:\n network.loc[i,'in'] = 0 \n network.loc[i,'age'] = 0\n \n individual.network = network\n individual.age = 1\n return individual,", "def increasing_mutations(nets, probs, desc):\n inverse = [] # After the \"while\", this variable will contain the list of networks in reverse order of \"candidateness\" to be mutated.\n\n # Decide the place in which the mutation is performed\n\n while len(nets) != 0: # While there are networks which have not been added to the inverse list\n net = nets[np.argmax(probs)] # Determine the \"most useless\" network still available\n cands = [net] + desc.comp_by_input(net) + desc.comp_by_output(net) # Candidates to be added to the inverse list (the most useless one and the ones around it, because they dont need more modeling poer, as the \"most useless network appears to be not needed)\n inverse += [x for x in cands if (x not in inverse) and (\"i\" not in x) and (\"o\" not in x)] # Add to inverse list if they are networks and are not wet in\n\n # Update original lists.\n probs = [probs[i] for i in range(len(nets)) if nets[i] not in inverse]\n nets = [nets[i] for i in range(len(nets)) if nets[i] not in inverse]\n\n for comp in reversed(inverse): # Try mutation near the networks according to the previous arrangement (could happen that some places cannot fit mutations).\n\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n _, in_conns, out_conns, _ = desc.get_net_context(comp) # Connections near the selected network (which could be affected by the mutation)\n conns = in_conns + out_conns # Checka si esto da error\n for mutation in np.random.permutation([\"add_con\", \"divide_con\"]): # Try both mutations in case the first one does not work\n res, trainables = mutate(mutation, desc, comp, conns)\n if res != -1:\n return trainables, res, mutation, comp, reaching_outs # If the mutation is successful, return, else try the second mutation or the next network.", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def run(self):\n count = self.neuron_count\n for i in range(0, count):\n self.run(i)", "def mutate(self):\n mutate_forward = self.forward.copy()\n mutate_reverse = self.reverse.copy()\n\n # Between 3% and 6% of the C's of the forward half strand mutates into T's\n number_of_mutations = round(random.randint(3, 6) / 100 * mutate_forward['C'])\n mutate_forward['C'] -= number_of_mutations\n mutate_forward['T'] += number_of_mutations\n\n return Gen(\n self.epoch,\n mutate_forward,\n mutate_reverse\n )", "def mc_micro_sweep(self): \n for i 
in range(self.N):\n if random.random()>0.3:\n self.mc_update_micro_fixed(i,xy = True)", "def runconnectome(self, ):\n for ps in self.postSynaptic:\n if ps[:3] not in self.muscles and abs(self.postSynaptic[ps][self.thisState]) > self.threshold:\n self.fireNeuron(ps)\n self.motorcontrol()\n for ps in self.postSynaptic:\n # if self.postSynaptic[ps][thisState] != 0:\n # print ps\n # print \"Before Clone: \", self.postSynaptic[ps][thisState]\n\n # fired neurons keep getting reset to previous weight\n # wtf deepcopy -- So, the concern is that the deepcopy doesnt\n # scale up to larger neural networks?? \n self.postSynaptic[ps][self.thisState] = copy.deepcopy(self.postSynaptic[ps][self.nextState]) \n\n # this deep copy is not in the functioning version currently.\n # print \"After Clone: \", self.postSynaptic[ps][thisState]\n\n self.thisState, self.nextState = self.nextState, self.thisState", "def mutate(offspring, individuals, params, *args):\n\n prob_mut = params.get(\"prob_mutation\", 0.3)\n prob_stand = 1 / 3 * prob_mut\n prob_point = 1 / 3 * prob_mut\n prob_mono = prob_mut - prob_stand - prob_point\n prob_replace = prob_mut\n r = np.random.rand()\n\n for ind in offspring:\n if r <= prob_stand:\n # Standard mutation\n #\n # This picks a random subtree anywhere within the tree\n rand_node = choice(ind.nodes[1:])\n tree = ind.grow_tree(method=\"grow\", depth=rand_node.depth, ind=rand_node)\n rand_node.value = tree.value\n rand_node.roots = tree.roots\n\n # This picks a whole subtree at depth=1 under the linear node\n # rand_subtree = np.random.randint(len(ind.roots))\n # del ind.roots[rand_subtree]\n # ind.grow_tree(method=\"grow\", ind=ind)\n\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_point + prob_stand:\n # Small mutation\n for node in ind.nodes[1:]:\n if np.random.rand() < prob_replace and callable(node.value):\n value = choice(node.function_set)\n while node.value.__code__.co_argcount != value.__code__.co_argcount:\n value = choice(node.function_set)\n node.value = value\n elif np.random.rand() < prob_replace:\n node.value = choice(node.terminal_set)\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_mono + prob_point + prob_stand:\n # Mono parental\n swap_nodes = sample(ind.nodes[1:], 2)\n tmp_value = swap_nodes[0].value\n tmp_roots = swap_nodes[0].roots\n swap_nodes[0].value = swap_nodes[1].value\n swap_nodes[0].roots = swap_nodes[1].roots\n swap_nodes[1].value = tmp_value\n swap_nodes[1].roots = tmp_roots\n ind.nodes = ind.get_sub_nodes()\n\n else:\n pass", "def dwindle(self, rate):\n\n if self.generation % rate == 0:\n self.mutation_prob /= 2", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def testRandomForwards(self):\n with higher.innerloop_ctx(self.target_net, self.opt) as (fnet, _):\n for i in range(10):\n fast_named_weights = OrderedDict(\n (name, torch.rand(p.shape, requires_grad=True))\n for name, p in self.reference_net.named_parameters()\n )\n fast_weights = [p for _, p in fast_named_weights.items()]\n inputs = torch.rand(\n self.batch_size, self.num_in_channels, self.in_h, self.in_w\n )\n self.assertTrue(\n torch.equal(\n self.reference_net(inputs, params=fast_named_weights),\n fnet(inputs, params=fast_weights)\n )\n )", "def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy 
everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_gru_node_gene(0, cfg)\n gene1_act = deepcopy(gene1.activation)\n gene1_bias = deepcopy(gene1.bias)\n gene1_bias_hh = deepcopy(gene1.bias_hh)\n gene1_bias_ih = deepcopy(gene1.bias_ih)\n gene1_weight_hh = deepcopy(gene1.weight_hh)\n gene1_weight_ih = deepcopy(gene1.weight_ih)\n gene1_weight_ih_full = deepcopy(gene1.weight_ih_full)\n gene2_act = deepcopy(gene2.activation)\n gene2_bias = deepcopy(gene2.bias)\n gene2_bias_hh = deepcopy(gene2.bias_hh)\n gene2_bias_ih = deepcopy(gene2.bias_ih)\n gene2_weight_hh = deepcopy(gene2.weight_hh)\n gene2_weight_ih = deepcopy(gene2.weight_ih)\n gene2_weight_ih_full = deepcopy(gene2.weight_ih_full)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.add_input_key(cfg=cfg, k=-1)\n gene3.update_weight_ih()\n gene3.activation = 'c'\n gene3.bias = -10\n gene3.bias_hh[0] = -10 # Make modifications directly on the vector\n gene3.bias_ih[0] = -10 # Make modifications directly on the vector\n gene3.weight_hh[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih[0, 0] = -10 # Make modifications directly on the vector\n gene3.weight_ih_full[0, 0] = -10 # Make modifications directly on the vector\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(np.linalg.norm(gene1.bias_hh - gene1_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.bias_ih - gene1_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_hh - gene1_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih - gene1_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene1.weight_ih_full - gene1_weight_ih_full), 0)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.bias, gene2_bias)\n self.assertEqual(np.linalg.norm(gene2.bias_hh - gene2_bias_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.bias_ih - gene2_bias_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_hh - gene2_weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih - gene2_weight_ih), 0)\n self.assertEqual(np.linalg.norm(gene2.weight_ih_full - gene2_weight_ih_full), 0)", "def update(self, samples, agent_number):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = samples\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n \n with torch.no_grad():\n q_next = agent.target_critic(next_obs_full, target_actions.view(-1, 4))\n\n y = reward[:,agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[:, agent_number].view(-1, 1))\n q = agent.critic(obs_full, action.view(-1, 4))\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n \n agent_obs = obs[:, agent_number]\n agent_actions = agent.actor(agent_obs)\n q_input = action.clone()\n q_input[:, 
agent_number] = agent_actions\n\n # get the policy gradient\n actor_loss = -agent.critic(obs_full, q_input.view(-1, 4)).mean()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n \n return al, cl", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if 
self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def make_simulations(self):\n pass", "def update(self, samples, agent_number, logger):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n\n obs_full = torch.stack(obs_full)\n next_obs_full = torch.stack(next_obs_full)\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n target_actions = torch.cat(target_actions, dim=1)\n \n target_critic_input = torch.cat((next_obs_full.t(),target_actions), dim=1).to(device)\n \n with torch.no_grad():\n q_next = agent.target_critic(target_critic_input)\n \n y = reward[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[agent_number].view(-1, 1))\n action = torch.cat(action, dim=1)\n critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n q = agent.critic(critic_input)\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \\\n else self.maddpg_agent[i].actor(ob).detach()\n for i, ob in enumerate(obs) ]\n \n q_input = torch.cat(q_input, dim=1)\n # combine all the actions and observations for input to critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n q_input2 = torch.cat((obs_full.t(), q_input), dim=1)\n \n # get the policy gradient\n actor_loss = -agent.critic(q_input2).mean()\n actor_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n logger.add_scalars('agent%i/losses' % agent_number,\n {'critic loss': cl,\n 'actor_loss': al},\n self.iter)", "def eval_randoms(count):\n\t\tfor person in Simulation.community:\n\t\t\tSimulation.community[person].eval_random_strategy(count)", "def test_net_weight_update(self):\n nn = NeuralNet(0, 0, '', 
'', blank=True)\n nn.create_net(2, 1, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n for node in nn.layers[3].nodes:\n node.weights = [1.0, 1.0]\n\n nn.update_weights([2, 3], [0], test=True)\n\n test_weight = nn.layers[-1].nodes[0].weights[0]\n self.assertEqual(round(test_weight, 4), 0.9901)", "def run(self, iterations):\n # print(f'Before:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome before: {self.best_genome.array}, fitness={self.best_genome.fitness} ')\n\n mutator = Rand1MutationOperator(self.population, self.bounds, 0.2)\n mixer = ExponentialCrossoverOperator(self.minfun)\n replacer = ElitistReplacementOperator()\n\n for _ in range(iterations):\n candidate_population = Population(None, None, 0)\n for target in self.population.collection:\n # List with genomes who will be the donors\n mutant = mutator.apply(target)\n # Genome modified by replacing a few random positions\n candidate_genome = mixer.apply(target, mutant)\n\n candidate_population.add(candidate_genome)\n\n # Targets are replaced by candidates from the population if candidate has less fitness than target\n self.population = replacer.apply(self.population, candidate_population)\n\n # print(f'After:\\n {self.population}\\n')\n # self.best()\n # print(f'Best Genome after: {self.best_genome.array}, fitness={self.best_genome.fitness} ')", "def mutate(self, number_of_mutations):\n self.mutated.clear()\n mutations = []\n for i in range(number_of_mutations+1):\n old_gene = random.choice(self.genes)\n while old_gene in mutations:\n old_gene = random.choice(self.genes)\n # print(self.max_time)\n old_gene.start_time = random.choice(range(self.max_time - old_gene.finish))\n self.mutated.append(self.genes.index(old_gene))", "def simulate(self):\r\n while self.t < self.T:\r\n plays = [(int)(player.play()) for player in self.players] # plays of all players\r\n obs, rews = self.simulate_single_step(plays) # observations of all players\r\n for i in range(self.M):\r\n self.players[i].update(plays[i], obs[i]) # update strategies of all player\r\n if self.players[i].phase == self.players[i].COMMUNICATION: # If communication starts\r\n self.communication_flag = True\r\n reward_one_round = self.reward_function(rews)\r\n self.rewards_record.append(reward_one_round) # list of rewards\r\n self.t += 1\r\n if self.communication_flag:\r\n self.communication()\r\n self.communication_flag = False", "def __add_multipliers(self) -> None:\n num_players = len(self.players)\n num_multipliers = random.randint(num_players - 1, num_players)\n for i in range(num_multipliers):\n self.__add_random_multiplier()", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]", "def simulated_annealing(x0, energy, label_set, label_weights=None, epochs=1, T=20.0, eta=0.99995, max_change=3,\n subset=1, verbose=False, sample=None):\n np.random.seed(7)\n\n N = x0.shape[0]\n\n energies = []\n state_perm = x0\n perm = state_perm.copy()\n\n label_dict = {l: i for i, l in enumerate(label_set)}\n label_mapper = np.vectorize(lambda ind: label_dict.get(ind))\n\n energ = np.ones(len(label_set))\n\n if label_weights is None:\n label_weights = np.ones(len(label_set)) / len(label_set)\n\n labels, new_energ = energy(perm)\n 
label_inds = label_mapper(labels)\n\n energ[label_mapper(labels)] = new_energ\n energies.append((perm.copy(), energ.copy(), T))\n\n samples = []\n\n T_n = T\n for e in range(epochs):\n if verbose:\n print('\\r\\r Epoch %d, T=%.9f: \\n perm %s, \\n energy %s' % (e, T_n, str(state_perm), str(energies[-1][1].mean())))\n\n print({l: energ[i] for l, i in label_dict.items()})\n\n for idx in np.random.permutation(N):\n\n if max_change > 1:\n n_inds = np.random.choice(range(1, max_change + 1), 1)\n inds = np.r_[np.random.choice(np.setdiff1d(np.arange(N), np.array([idx])), n_inds - 1), idx]\n\n else:\n inds = [idx]\n\n if len(inds) == 1:\n new_label = np.random.choice(np.setxor1d(label_set, perm[idx]), 1)[0]\n else:\n new_label = np.random.choice(label_set, 1)[0]\n\n labels, new_energ = energy(perm, inds=inds, new_label=new_label, subset=subset)\n label_inds = label_mapper(labels)\n\n diff = np.sum((new_energ - energ[label_inds]) * label_weights[label_inds]) / np.sum(label_weights[label_inds])\n p = min(1, np.exp(- (diff.max() / T_n)))\n b = np.random.binomial(1, p, 1)\n\n if b == 1:\n state_perm[inds] = new_label\n energ[label_inds] = new_energ\n else:\n pass\n\n if sample is not None and e > epochs - sample:\n samples.append(state_perm.copy())\n\n perm = state_perm.copy()\n energ = energ.copy()\n\n T_n *= eta\n\n energies.append((state_perm, energ, T_n))\n\n return np.asarray(state_perm), energies, T_n, samples", "def test_mutate(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n\n self.assertFalse(ga.generations[-1].new)\n\n for i in range(10):\n ga.mutate()\n\n self.assertTrue(ga.generations[-1].new)", "def update_weights(self, BMU, currentIteration, input_data, lambda1):\n # Learning rate selection for each epoch\n lr = self.currentLearningRate(currentIteration, lambda1)\n \n # Neighborhood radius selection for each epoch\n radius = self.currentNeighbourhoodRadius(currentIteration, lambda1)\n \n # Iterating through randomly initialized weights and update weights\n for i in range(len(self.weights[0])):\n for j in range(len(self.weights)):\n tmpDist = np.power(BMU[0] - i, 2) + np.power(BMU[1] - j, 2)\n theta = np.exp(-tmpDist / (2*np.power(radius, 2)))\n for k in range(self.input_dimension):\n self.weights[i][j][k] = self.weights[i][j][k] + lr * theta * (input_data[k] - self.weights[i][j][k])", "def mutation(population, m_rate=.5, kw=None, **kwargs):\n _population = []\n for individual in population:\n rnd_rate = random.uniform(0, 1)\n if rnd_rate > m_rate:\n logger.debug(\"Mutating Individual!\\nRandom rate is: %s\\nM Rate is: %s\", rnd_rate, m_rate)\n split_point = random.randint(1, len(individual['notes']) - 1)\n start, stop = random_sampling(0, len(individual['notes']), split_point)\n\n # generate a new corpus via cached markov chain\n settings = cache_get('settings')\n key = \"{}:{}\".format(settings['artist'], settings['song'])\n new_corpus = cache_get(key)['markov'][start:stop]\n logger.debug(\"New corpus is %s\", new_corpus)\n\n # tuples are immuteable so we need to remake the tuple of notes/chords\n individual['notes'] = individual['notes'][:start] + tuple(map(tuple, new_corpus)) + individual['notes'][stop:]\n _population.append(individual['notes'])\n else:\n logger.debug(\"Not mutating\\nRandom rate is: %s\\nM Rate is: %s\", rnd_rate, m_rate)\n _population.append(individual['notes'])\n return _population", "def simulate(self, hot, traversal):\n hot = self.eval(hot, traversal)\n traversal[self] = traversal.get(self, self.State.NEUTRAL) or hot\n 
for neuron, polarity in self.outputs.iteritems():\n neuron.simulate(polarity.resolve(hot), traversal)", "def test_prop_learning_rate(self):\n tmax = 10.0\n dt = 1.0\n\n learning_rate1 = 0.1\n learning_rate2 = 0.5\n\n ini_rate = 80.0\n\n tutor = SimpleNeurons(1, out_fct=lambda _: ini_rate+20.0)\n reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=learning_rate1,\n use_tutor_baseline=False)\n\n sim1 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim1.run(tmax)\n\n drates1 = tutor_rule.rates - ini_rate\n\n tutor_rule.reset_rates()\n tutor_rule.learning_rate = learning_rate2\n\n sim2 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim2.run(tmax)\n\n drates2 = tutor_rule.rates - ini_rate\n\n self.assertLess(np.max(np.abs(learning_rate2*drates1 -\n learning_rate1*drates2)), 1e-6)", "def next_generation(self):\r\n self.calculate_stats()\r\n\r\n self.population = []\r\n\r\n # Getting amounts for different types of neural net replacements\r\n random_size = self.random_round(self.population_size * self.settings[\"random_offspring\"])\r\n elitism_size = self.random_round(self.population_size * self.settings[\"elitism_offspring\"])\r\n crossover_size = self.population_size - random_size - elitism_size\r\n\r\n # Keeping best neural nets (elitism)\r\n self.population.extend(self.sorted_population[i].copy() for i in range(elitism_size))\r\n\r\n # Adding neural nets with crossover\r\n\r\n probs = self._get_selection_probabilities()\r\n crossovers = (self._uniform_crossover(*np.random.choice(self.sorted_population, 2, replace=False, p=probs)) for _ in range(crossover_size))\r\n self.population.extend(crossovers)\r\n\r\n # Mutating neural nets\r\n for neural_net in self.population:\r\n if np.random.rand() < self.settings[\"mutation_rate\"]:\r\n neural_net.mutate(self.settings[\"mutation_chance\"], self.settings[\"mutation_amount\"])\r\n\r\n # Adding random nets\r\n self.population.extend(self._random_child() for _ in range(random_size))\r\n\r\n # Shuffling new population\r\n np.random.shuffle(self.population)\r\n\r\n # Increment current generation\r\n self.current_generation += 1", "def batch_anneal(self, times=10):\n for i in range(1, times + 1):\n print(f\"Iteration {i}/{times} -------------------------------\")\n self.T = self.T_save\n self.iteration = 1\n self.cur_solution, self.cur_fitness = self.initial_solution()\n self.anneal()", "def mutate_link_weights(self, perturb_prob=.9, cold_prob=.1):\n # genetics.cpp:737 - Looks like they either just add a random value\n # in (-1,1) or they make the weight a value (-1,1). This seems a bit\n # odd. Also, not sure why they say \"GAUSSIAN\" since I think they are\n # using a uniform value. This is complicated somewhat by the power and\n # powermod, but randposneg()*randfloat() just yields a random number in\n # (-1,1). These functions are defined in networks.h\n\n # Their code for this section contains much more than was described in\n # the paper. 
For now, I'm implementing it as it sounds from the paper\n # \"There was an 80% chance of a genome having its connection weights\n # mutated, in which case each weight had a 90% chance of being\n # uniformly perturbed and a 10% chance of being assigned a new random\n # value.\n\n if perturb_prob + cold_prob > 1:\n raise ValueError('perturb_prob + cold_prob cannot be greater than 1')\n for g in self.link_genes:\n r = random.random()\n weight_change = random.uniform(-1,1)\n if r < perturb_prob:\n g.weight += weight_change\n elif r < perturb_prob+cold_prob:\n g.weight = weight_change\n # Else do nothing to that weight", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def mutate(ways, multiply):\n mutated=ways[:]\n for way in ways:\n for i in range(multiply):\n shuffle = [random.randrange(len(way)),random.randrange(len(way)-1)]\n shuffle[1] = shuffle[1] if shuffle[1]<shuffle[0] else shuffle[1]+1\n shuffle.sort()\n new_way = way[:shuffle[0]]+[way[shuffle[1]]]+way[shuffle[0]+1:shuffle[1]]+[way[shuffle[0]]]+way[shuffle[1]+1:]\n if new_way not in mutated:\n mutated.append(new_way)\n return mutated", "def apply_neurons(self):\n for neuron in range(self.n_outputs):\n self.uf_activate(neuron)", "def process_simulation(self):\n for i in range(self._n):\n probability = self._alpha / float(self._alpha + i - 1)\n tmp = np.random.uniform(size=(1,))\n if tmp < probability:\n self._results.append(np.random.normal(1))\n else:\n self._results.append(np.random.choice(self._results[:i-1], 1)[0])", "def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total", "def mutate(self):\n #mutation_size = max(1,int(round(random.gauss(15,4))))/100\n\n\n\n mutation_size = max(1,int(round(random.gauss(15,4))))/100\n \"\"\"\n Changed the mutation by using random.randint rather than the gaussian one \n after observing that the gaussian random never really gave an output of more than 0.25\n \"\"\"\n\n #Decide what will be 
mutated, just randomly picking onr of the three params\n mutation_type = random.choice(self.params)\n\n #Mutate the thing\n if mutation_type == \"diameter\":\n \"\"\"\n Over here, what we are providing a range between self.diameter*x where x=1-mutation size and self.diameter*y where =1+mutation size\n Basically we add or subtract from 1 because the mutation has to be small\n \"\"\"\n self.diameter = max(1,random.randint(int(self.diameter*(1-mutation_size)),int(self.diameter*(1+mutation_size))))\n return self.diameter\n #same thing here\n elif mutation_type == \"pos\":\n x = max(0,random.randint(int(self.pos.x*(1-mutation_size)),int(self.pos.x*(1+mutation_size))))\n y = max(0,random.randint(int(self.pos.y*(1-mutation_size)),int(self.pos.y*(1+mutation_size))))\n self.pos = Point(min(x,self.size[0]),min(y,self.size[1]))\n return self.pos\n elif mutation_type == \"color\":\n r = min(max(0,random.randint(int(self.color.r*(1-mutation_size)),int(self.color.r*(1+mutation_size)))),255)\n g = min(max(0,random.randint(int(self.color.g*(1-mutation_size)),int(self.color.g*(1+mutation_size)))),255)\n b = min(max(0,random.randint(int(self.color.b*(1-mutation_size)),int(self.color.b*(1+mutation_size)))),255)\n self.color = Color(r,g,b)\n return self.color", "def test_update():\n learner = optlearner.VolatilityLearner()\n\n for reward in [0, 1]:\n slow_pIk = slow_update(learner, reward)\n learner._update(reward)\n yield npt.assert_array_equal, slow_pIk, learner.pIk\n learner.reset()", "def run(self):\n\n # initializing random network activity\n s_rand_T = np.zeros((self.T, self.N_rand))\n p_rand_T = np.zeros((self.T, self.N_rand))\n r_rand_T = np.zeros((self.T, self.N_rand))\n\n s_rand_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_rand))\n\n # initializing sensory networks\n s_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n p_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n r_sens_T = np.zeros((self.T, self.N_sensory_nets * self.N_sensory))\n s_sens_T[0, :] = np.random.uniform(low=0, high=0.01, size=(self.N_sensory_nets * self.N_sensory))\n\n # extend input to be T timesteps and only nonzero for 100 ts\n s_ext_T = np.broadcast_to(self.s_ext, (self.T, self.N_sensory * self.N_sensory_nets)).copy()\n # stimulus is presented for 100 ms\n stim_T = int(200/self.rand_net.dt)\n s_ext_T[:100] = 0\n s_ext_T[100+stim_T:] = 0\n # s_ext_T *= 0\n\n for t in range(1, self.T):\n if (t + 1) % 100 == 0:\n print(f'step {t} of {self.T}')\n s_sens_prev = s_sens_T[t - 1]\n s_rand_prev = s_rand_T[t - 1]\n p_rand_prev = p_rand_T[t - 1]\n s_ext = s_ext_T[t - 1]\n step = self.forward(s_ext=s_ext, s_rand_prev=s_rand_prev, s_sens_prev=s_sens_prev, p_rand_prev=p_rand_prev)\n s_sens_T[t] = step['s_sens']\n p_sens_T[t] = step['p_sens']\n r_sens_T[t] = step['r_sens']\n s_rand_T[t] = step['s_rand']\n r_rand_T[t] = step['r_rand']\n p_rand_T[t] = step['p_rand']\n\n p_sens_T = p_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_ext_T = s_ext_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n r_sens_T = r_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n s_sens_T = s_sens_T.reshape(self.T, self.N_sensory_nets, self.N_sensory)\n\n return dict(\n n_sensory=self.N_sensory,\n n_rand=self.N_rand,\n mus=self.mus,\n sigma=self.sigma,\n s_ext=s_ext_T,\n s_sens=s_sens_T,\n r_sens=r_sens_T,\n p_sens=p_sens_T,\n s_rand=s_rand_T,\n r_rand=r_rand_T,\n p_rand=p_rand_T\n )", "def fitness_function(neural_net):\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 
6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness", "def mute(individual):\n mutatePt=random.randint(0,len(individual)-1)\n if mutatePt==0:\n individual[mutatePt]=random.uniform(kNN.features_min[0], kNN.features_max[0])\n elif mutatePt==2:\n individual[mutatePt]=random.uniform(kNN.features_min[1], kNN.features_max[1])\n elif mutatePt==3:\n individual[mutatePt]=random.uniform(kNN.features_min[2], kNN.features_max[2])\n elif mutatePt==4:\n individual[mutatePt]=random.uniform(kNN.features_min[3], kNN.features_max[3])\n elif mutatePt==5:\n individual[mutatePt]=random.uniform(kNN.features_min[4], kNN.features_max[4])\n\n return individual,", "def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in 
range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def mutate(self, solutions):\r\n k = ceil(self.mutation_rules.mutation_rate * len(solutions))\r\n solutions_to_mutate = sample(solutions, k)\r\n for solution in solutions_to_mutate:\r\n new_color = choice(ALL_COLORS)\r\n polygon_num = randrange(1, NUM_OF_GENETIC_UNITS + 1)\r\n polygon = solution.genetic_units[polygon_num]\r\n polygon.color = new_color", "def simulated_annealing_replacement(random, population, parents, offspring, args):\r\n try:\r\n temp = args['temperature']\r\n cooling_rate = args['cooling_rate']\r\n temp = temp * cooling_rate\r\n args['temperature'] = temp\r\n except KeyError:\r\n try:\r\n num_evals = args['_ec'].num_evaluations\r\n max_evals = args['max_evaluations']\r\n temp = float(max_evals - num_evals) / float(max_evals)\r\n except KeyError:\r\n num_gens = args['_ec'].num_generations\r\n max_gens = args['max_generations']\r\n temp = 1 - float(max_gens - num_gens) / float(max_gens)\r\n \r\n new_pop = []\r\n for p, o in zip(parents, offspring):\r\n if o >= p:\r\n new_pop.append(o)\r\n elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):\r\n new_pop.append(o)\r\n else:\r\n new_pop.append(p)\r\n \r\n return new_pop", "def _sample_lam(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_lam = self.lam\n \n # modify the feature ownership matrix\n self.lam = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.lam = old_lam", "def evolve(self, generations=10000):\n\n for gen in range(generations):\n # run the tournament\n self.tournament()\n\n # generate the next generation\n self.p = self.nextGen()", "def eval_fitness(genomes, config):\n for _, genome in genomes:\n cppn = neat.nn.FeedForwardNetwork.create(genome, config)\n network = ESNetwork(SUBSTRATE, cppn, DYNAMIC_PARAMS)\n net = network.create_phenotype_network()\n\n sum_square_error = 0.0\n\n for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS):\n new_xor_input = xor_inputs + (1.0,)\n net.reset()\n\n for _ in range(network.activations):\n xor_output = net.activate(new_xor_input)\n\n sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0\n\n genome.fitness = 1 - sum_square_error", "def batch_simulation(self, iters=10000):\n power_cnt = 0\n correct_sign_cnt = 0\n\n for i in range(iters):\n if (self.verbose) and (i>0) and (i % (iters/10) == 0):\n print(i, \" / \", iters)\n f_stat, p_value, effect_point_estimates = self.simulate()\n power_cnt += (p_value < self.alpha)\n 
correct_sign_cnt += (effect_point_estimates * self.absolute_effect > 0)\n\n if self.verbose: print(iters, \" / \", iters)\n power = round(power_cnt / float(iters), 5)\n pct_correct_sign = round(correct_sign_cnt / float(iters), 5)\n return power, pct_correct_sign", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update(self, samples, agent_number, logger):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n # obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n obs, action, reward, next_obs, done = map(transpose_to_tensor, samples)\n\n # import pdb; pdb.set_trace()\n\n # obs_full = torch.stack(obs_full)\n # next_obs_full = torch.stack(next_obs_full)\n\n # adj = [self.get_adj(i) for i in np.swapaxes(np.array([x.numpy() for x in obs]), 1, 0)]\n adj = np.ones((len(obs[0]), len(obs), len(obs)), dtype=float)\n adj = torch.Tensor(adj).to(device)\n obs_full = torch.cat(obs, dim=1)\n next_obs_full = torch.cat(next_obs, dim=1)\n\n # TODO: review if .to(device) no_agents\n\n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n non_zeros = 0\n for name, param in agent.critic.gat2.named_parameters():\n if 'out_att.W' in name:\n x = param.detach()\n x[x < 0] = 0\n print(x.cpu().numpy())\n non_zeros = np.count_nonzero(x.cpu().numpy())\n\n\n\n # ---------------------------- update critic ---------------------------- #\n # Get predicted next-state actions and Q values from target models\n # critic loss = batch mean of (y- Q(s,a) from target network)^2\n # y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions_next = self.target_act(next_obs)\n target_actions_next_cat = torch.cat(target_actions_next, dim=1)\n # target_critic_input = torch.cat((next_obs_full.t(),target_actions_next), dim=1).to(device)\n target_critic_input = torch.cat((next_obs_full, target_actions_next_cat), dim=1).to(device)\n\n target_critic_input_gat = torch.cat( (torch.stack(obs), torch.stack(target_actions_next)), dim=2).permute(1,0,2).to(device)\n with torch.no_grad():\n q_next = agent.target_critic(target_critic_input_gat, adj)\n\n # Compute Q targets (y) for current states (y_i)\n\n y = reward[agent_number].view(-1, 1).to(device) + self.discount_factor * q_next * (\n 1 - done[agent_number].view(-1, 1)).to(device)\n\n # Compute Q expected (q)\n action_cat = torch.cat(action, dim=1)\n # critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n critic_input = torch.cat((obs_full, action_cat), dim=1).to(device)\n stack_obs = torch.stack(next_obs).to(device)\n stack_act = torch.stack(action).to(device)\n critic_input_gat = torch.cat((stack_obs, stack_act), dim=2).permute(1, 0, 2).to(device)\n q = agent.critic(critic_input_gat, adj) # doing forward(...)\n\n # Priorized Experience Replay\n # aux = abs(q - y.detach()) + 0.1 #we introduce a fixed small constant number to avoid priorities = 0.\n # aux = np.matrix(aux.detach().numpy())\n # new_priorities = np.sqrt(np.diag(aux*aux.T))\n\n # import pdb; pdb.set_trace()\n # Compute critic loss\n # huber_loss = torch.nn.SmoothL1Loss()\n # critic_loss = huber_loss(q, y.detach())\n # Compute critic loss\n loss_mse = torch.nn.MSELoss()\n critic_loss = loss_mse(q, y.detach())\n\n # CHECK IF EXPLODING GRADIENTS IS HAPPENING\n # register_hooks(critic_loss)\n # Minimize the loss\n critic_loss.backward()\n 
torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n # torch.nn.utils.clip_grad_value_(agent.critic.parameters(), 1.0)\n agent.critic_optimizer.step()\n\n # ---------------------------- update actor ---------------------------- #\n # update actor network using policy gradient\n # Compute actor loss\n agent.actor_optimizer.zero_grad()\n # make input to agent\n obs_input = obs[agent_number].to(device)\n curr_q_input = self.maddpg_agent[agent_number].actor(obs_input)\n # use Gumbel-Softmax sample\n # curr_q_input = gumbel_softmax(curr_q_input, hard = True) # this should be used only if the action is discrete (for example in comunications, but in general the action is not discrete)\n # detach the other agents to save computation\n # saves some time for computing derivative\n # q_input = [ self.maddpg_agent[i].actor(ob.to(device)) if i == agent_number \\\n # else self.maddpg_agent[i].actor(ob.to(device)).detach()\n # for i, ob in enumerate(obs) ]\n q_input = [curr_q_input if i == agent_number \\\n else self.maddpg_agent[i].actor(ob.to(device)).detach()\n for i, ob in enumerate(obs)]\n\n q_input_cat = torch.cat(q_input, dim=1).to(device)\n # combine all the actions and observations for input to critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n # q_input2 = torch.cat((obs_full.t(), q_input), dim=1)\n q_input2 = torch.cat((obs_full.to(device), q_input_cat), dim=1)\n obs_st = torch.stack(obs).to(device)\n q_st = torch.stack(q_input).to(device)\n q_input_gat = torch.cat((obs_st, q_st), dim=2).permute(1, 0, 2).to(device)\n actor_loss = -agent.critic(q_input_gat, adj).mean() # get the policy gradient # TODO: add the adjacency\n # modification from https://github.com/shariqiqbal2810/maddpg-pytorch/blob/master/algorithms/maddpg.py\n actor_loss += (curr_q_input).mean() * 1e-3\n\n # Minimize the loss\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.actor.parameters(), 0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n logger.add_scalars('agent%i/losses' % agent_number,\n {'critic loss': cl,\n 'actor_loss': al,\n 'non_zeros' : non_zeros\n },\n self.iter)\n\n # return (new_priorities)", "def run_random(self):\n manager = Manager()\n self.all_probabilities = manager.list()\n self.total_iterations = manager.Value('d', 0)\n num_counterexamples = manager.Value('d', 0)\n counter_lock = manager.Lock()\n all_probabilities_lock = manager.Lock()\n\n file_q = manager.Queue()\n\n self.mug_pipeline.set_folder_names(self.folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.RANDOM)\n pool = Pool(self.num_processes + 1, maxtasksperchild=60)\n\n filename = '{}/logs/results_{}.csv'.format(self.folder_name, self.trial_folder)\n watcher = Process(target=self.listener, args=(file_q, filename))\n watcher.start()\n\n iter_num = 0\n start_time = time.time()\n max_time_per_map = 60*60\n\n try:\n # TODO: change this from while true to terminate by timeout (try/except)\n while ((self.retrain_with_random and self.total_iterations.value < self.max_added_to_training) or\n (self.retrain_with_counterexamples and num_counterexamples.value < self.max_added_to_training)):\n result = None\n\n while result is None:\n try:\n result = func_timeout(max_time_per_map, pool.starmap,\n args=(self.mug_pipeline.run_inference,\n zip(self.generate_all_mug_initial_poses(), \n range(iter_num, iter_num + self.num_processes),\n repeat(self.all_probabilities), repeat(all_probabilities_lock), \n 
repeat(self.total_iterations), repeat(num_counterexamples),\n repeat(counter_lock), repeat(file_q), repeat(False), repeat(False))))\n except FunctionTimedOut:\n print('FUNCTION TIMED OUT, MORE THAN {} SECONDS!!!!'.format(max_time_per_map))\n\n # all_mug_initial_poses = []\n # for j in range(self.num_processes):\n # mug_initial_poses = []\n # for i in range(self.num_mugs):\n # mug_initial_poses += \\\n # RollPitchYaw(np.random.uniform(0.0, 2.0*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n # [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n # all_mug_initial_poses.append(mug_initial_poses)\n\n # result = pool.starmap(self.mug_pipeline.run_inference,\n # zip(all_mug_initial_poses, \n # range(iter_num, iter_num + self.num_processes),\n # repeat(self.all_probabilities), repeat(all_probabilities_lock), \n # repeat(self.total_iterations), repeat(num_counterexamples),\n # repeat(counter_lock), repeat(file_q), repeat(False), repeat(False)))\n\n iter_num += self.num_processes\n print('new iter_num: {}'.format(iter_num), flush=True)\n total_min = (time.time() - start_time)/60.0\n print('avg min/image: {}, total minutes: {}'.format(total_min/(iter_num + 1), total_min))\n print('------------------------------------------------', flush=True)\n sys.stdout.flush()\n except Exception as e:\n raise e\n\n pool.close()\n pool.join()\n\n sys.stdout.flush()", "def simulated_annealing_replacement(random, population, parents, offspring, args):\n try:\n temp = args['temperature']\n cooling_rate = args['cooling_rate']\n temp = temp * cooling_rate\n args['temperature'] = temp\n except KeyError:\n try:\n num_evals = args['_ec'].num_evaluations\n max_evals = args['max_evaluations']\n temp = float(max_evals - num_evals) / float(max_evals)\n except KeyError:\n num_gens = args['_ec'].num_generations\n max_gens = args['max_generations']\n temp = 1 - float(max_gens - num_gens) / float(max_gens)\n \n new_pop = []\n for p, o in zip(parents, offspring):\n if o >= p:\n new_pop.append(o)\n elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):\n new_pop.append(o)\n else:\n new_pop.append(p)\n \n return new_pop", "def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,\n weight_decay=1.e-5):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n self.n_seed = np.random.seed(seed)\n self.num_agents = num_agents\n self.update_times = update_times\n self.n_step = 0\n self.TAU = 1e-3\n\n self.noise = []\n for i in range(num_agents):\n self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))\n\n # critic local and target network (Q-Learning)\n self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)\n\n self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)\n self.critic_target.load_state_dict(self.critic_local.state_dict())\n\n # actor local and target network (Policy gradient)\n self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)\n self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)\n self.actor_target.load_state_dict(self.actor_local.state_dict())\n\n # optimizer for critic and actor network\n self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)\n self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)\n\n # Replay memory\n self.memory = 
ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n self.a_step = 0", "def mutation(self, ind):\n if random.random() > self.mutationRate:\n return\n indexA = random.randint(0, self.genSize-1)\n indexB = random.randint(0, self.genSize-1)\n\n tmp = ind.genes[indexA]\n ind.genes[indexA] = ind.genes[indexB]\n ind.genes[indexB] = tmp\n\n ind.computeFitness()\n self.updateBest(ind)", "def run_evolutionary_generations(self):\n \n # Evolve the generation.\n for i in range(self.generations):\n logging.info(\"***Doing generation %d of %d***\" %\n (i + 1, self.generations))\n \n self.train_networks(self.networks)\n \n if self.is_classification:\n average_accuracy, highest_accuracy, lowest_accuracy, highest_scoring_network = self.get_accuracy_stats(self.networks) \n \n if highest_scoring_network is not None:\n highest_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_acc%f\" % (i, highest_accuracy)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_accuracy * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_accuracy * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_accuracy * 100))\n logging.info('-'*80)\n else:\n average_loss, highest_loss, lowest_loss, best_scoring_network = self.get_loss_stats(self.networks) \n if best_scoring_network is not None:\n best_scoring_network.save_trained_model(os.path.join(self.save_directory, self.dataset + \"_best_network_at_iteration_%d_loss%f\" % (i, lowest_loss)))\n \n logging.info(\"Generation average: %.2f%%\" % (average_loss * 100))\n logging.info(\"Generation best: %.2f%%\" % (highest_loss * 100))\n logging.info(\"Generation worst: %.2f%%\" % (lowest_loss * 100))\n logging.info('-'*80)\n # Evolve, except on the last iteration.\n if i != self.generations - 1:\n self.networks = self.optimizer.evolve(self.networks)\n \n self.save_network_objects(self.networks)\n \n if self.is_classification:\n self.networks = sorted(self.networks, key=lambda x: x.accuracy, reverse=True)\n else:\n self.networks = sorted(self.networks, key=lambda x: x.loss, reverse=False)\n \n self.print_networks(self.networks[:5])\n \n self.save_trained_network_models(self.dataset, self.networks[:5])", "def learn(self):\n if self.step_count < self.learn_start_step or self.step_count % self.learn_interval != 0:\n return\n\n s, a, r, s_, t = self.sample()\n self.update_critics(s, a, r, t, s_)\n self.update_actor_alpha(s)\n self.update_target()\n self.learn_cur += 1", "def run():\n\n Number_repetitions = 1\n Rate = np.zeros((Number_repetitions,1))\n Rate20 = np.zeros((Number_repetitions,1))\n Penalty20 = np.zeros((Number_repetitions, 1))\n\n # Loop to average\n for idx in np.arange(0,Number_repetitions,1):\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n # I've edited the 
enviroment variable to do the plot creating an completions array\n completions = np.array(e.completions)\n rate = float(completions.sum())/float((len(completions)))\n rate20 = float(completions[-20:].sum())/20\n\n Rate[idx] = rate\n Rate20[idx] = rate20\n\n Wrong = np.array(a.wrong_moves_per_run[-20:]).mean()\n Penalty20[idx] = Wrong\n\n plt.scatter(np.arange(0,len(completions)),completions)\n plt.plot(Wrong)\n plt.xlabel('Trial')\n plt.ylabel('1 = Get in the destination, 0 = did not get')\n plt.title('Reiforcement learning progress')\n plt.legend(['Rate of completion: ' + str(rate) + '. Rate last 20: ' + str(rate20) + '.Mean penalty last 20: ' + str(Wrong)])\n plt.show()\n\n #print 'Accuracy: ' + str(Rate) + '. Mean: ' + str(np.mean(Rate))\n #print 'Mean 20: ' + str(np.mean(Rate20))#'Accuracy 20: ' + str(Rate20) + '. Mean 20: ' + str(np.mean(Rate20))\n #print 'Mean_penalty: ' + str(np.mean(Penalty20))\n\n # Print state table with actions\n #t = 0\n #for state in a.states:\n #print 'State ' + str(state) + '. Best action: ' + str((str(np.argmax(a.QTable[t][:]))))\n #t += 1", "def mutate(self, mutations: int):\n\n # Thus, our objective is to sample in such a way that:\n # Na' = Na + x - y\n # NA' = NA + z - w\n # NAa' = NAa + (y + w) - (x + z)\n # Na', NA', NAa' >= 0\n # 0 <= x, y, z\n # x + y + z <= n\n\n # The algorithm here is a probabilistic algorithm that attempts mutation in the general case,\n # or removes selectively from one bin if the others are empty (e.g. when starting a simulation)\n\n p = np.array([1 / 4, 1 / 4, 1 / 4, 1 / 4], dtype=np.float64)\n for gene in range(self.num_genes):\n gene_array = self._store[gene]\n for i in range(10):\n # Note: numba does not support random states\n swaps = np.random.multinomial(mutations, p)\n # Does destructuring work in numba?\n x = swaps[0]\n y = swaps[1]\n z = swaps[2]\n w = swaps[3]\n\n new_array = gene_array + np.array(\n [x - y, z - w, y + w - x - z], dtype=np.float64\n )\n if np.all(new_array >= 0.0):\n self._store[gene] = new_array", "def totem_random():\n random_head()\n random_head()\n random_head()", "def test_feature_computation(self):\n k = [2, 3, 4, 5, 6]\n mn = self.create_chain_model(k)\n d = 4\n\n for i in range(len(k)):\n mn.set_unary_weights(i, np.random.randn(k[i], d))", "def test_bayes_updates_permuted(self):\r\n for obs, exp in zip(bayes_updates(self.permuted), self.result):\r\n self.assertFloatEqualAbs(obs, exp, 1e-11)", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. 
And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. \n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def get_random_inhibitory_weights(self):\n \n self.W_ei=np.zeros((self.N_e,self.N_i))\n self.W_ie=np.zeros((self.N_i,self.N_e)) \n self. 
W_ii=np.zeros((self.N_i,self.N_i))\n\n \n # connections to the excitatory neurons \n for row_idx in xrange(self.N_e):\n \n # from ihibitory\n all_idxs_ei=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ei)\n self.W_ei[row_idx,all_idxs_ei[0:self.num_conns_ei]]=self.W_max_ei \n \n # connections to inhibitory neurons\n for row_idx in range(self.N_i):\n \n # from exitatory \n all_idxs_ie=np.arange(self.N_e)\n np.random.shuffle(all_idxs_ie)\n self.W_ie[row_idx,all_idxs_ie[0:self.num_conns_ie]]=self.W_max_ie\n \n # from inhibitory\n all_idxs_ii=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ii)\n self.W_ii[row_idx,all_idxs_ii[0:self.num_conns_ii]]=self.W_max_ii\n \n \n self.W[:self.N_e,self.N_e:]=self.W_ei\n self.W[self.N_e:,:self.N_e]=self.W_ie\n self.W[self.N_e:,self.N_e:]=self.W_ii", "def scramble_mutate(\n self, crossover_pop_dict, test=False, mutation_prob={}\n ):\n\n print('Performing mutations')\n\n # Initialises dictionary of mutated child networks\n mutated_pop_dict = OrderedDict()\n\n # Scrambles the amino acid identities of randomly selected nodes\n for network_num in list(crossover_pop_dict.keys()):\n G = copy.deepcopy(crossover_pop_dict[network_num])\n\n scrambled_nodes = []\n aa_ids = []\n for node in list(G.nodes):\n if G.nodes()[node]['type'] == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = mutation_prob[network_num][node]\n if random_number <= self.mutation_prob:\n scrambled_nodes.append(node)\n aa_ids.append(G.nodes()[node]['aa_id'])\n\n if test is False:\n random.shuffle(aa_ids)\n else:\n aa_ids = aa_ids[::-1]\n attributes = OrderedDict({\n node: {'aa_id': aa_id} for node, aa_id in zip(scrambled_nodes, aa_ids)\n })\n nx.set_node_attributes(G, values=attributes)\n\n mutated_pop_dict[network_num] = G\n\n return mutated_pop_dict", "def mutate(offspring):\n\n # get the children and their genes\n offspring = offspring\n for child in offspring:\n\n # don't mutate every child, make it 50% of the offspring\n if np.random.uniform(0,0.4,1) < mutation:\n for gene in range(0, len(child)-1):\n\n # pick a random number between 0-1, mutate if < mutation rate\n if np.random.uniform(0,1,1) < mutation:\n\n # change the gene by a small number from a very narrow normal distribution\n child[gene] += np.random.normal(0, 0.2, 1)\n\n # make sure the genes don't get values outside of the limits\n if child[gene] > dom_u:\n child[gene] = dom_u\n if child[gene] < dom_l:\n child[gene] = dom_l\n\n return offspring", "def simulated_annealing(instance, n_commodity, temp, temp_step=False, iteration=1000, check=False, network_gif=False):\n if network_gif:\n fig = plt.figure()\n camera = Camera(fig)\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n colour_map = colour_node(instance)\n\n pos = nx.spring_layout(G, k=0.5, iterations=100)\n nx.draw_networkx(G, pos=pos, node_color=colour_map, with_labels=False, node_size=50, width=0.5)\n\n plt.annotate(\"0\", xy=(0.9, 0.9))\n\n camera.snap()\n\n circ_isntance = copy.deepcopy(instance)\n\n ##opmtised data\n # empty array with columns for (rewire no., percent waste, waste per product)\n opt_arr = np.empty((0, 3), int)\n # empty array with columns for (rewire no. 
time, temperature, probability, r)\n opt_ad = np.empty((0, 4), int)\n\n ##all data\n # empty array with columns for (rewire no., percent waste, waste per product, time, temp, p)\n all_arr = np.empty((0, 6), int)\n\n rewire_count = 0\n count = 0\n\n # calc waste for rewire 0 and add to matrix's\n waste, resources, intmed_products = circ_isntance.amenities()\n opt_arr = np.append(opt_arr, np.array(\n [[rewire_count, percent_waste(waste, resources, intmed_products), waste_per_product(waste, n_commodity)]]),\n axis=0)\n opt_ad = np.append(opt_ad, np.array([[0, temp, 1, 0]]), axis=0)\n\n # all\n all_arr = np.append(all_arr, np.array([[rewire_count, percent_waste(waste, resources, intmed_products),\n waste_per_product(waste, n_commodity), 0, temp, 1]]), axis=0)\n\n # temperature decrease iterations\n\n if temp_step:\n t_iter = round(iteration * 0.01)\n else:\n t_iter = 1\n\n # start iterations\n while count < iteration:\n if check:\n print(\"count:\", count)\n\n for i in range(0, t_iter):\n\n start = time.time()\n\n # make copy of reactions\n rewire_test = copy.deepcopy(circ_isntance)\n\n # make copy of opt matrix\n opt_arr_cp = copy.deepcopy(opt_arr)\n\n # greedy search for reaction rewire\n rewire_test.greedy_search(check)\n\n rewire_count += 1\n count += 1\n\n # calculate new rewire waste and add to copy of matrix\n waste, resources, intmed_products = rewire_test.amenities()\n opt_arr_cp = np.append(opt_arr, np.array([[rewire_count, percent_waste(waste, resources, intmed_products),\n waste_per_product(waste, n_commodity)]]), axis=0)\n\n # define old(g) and new circulaity (waste) measurements (g_)\n g = opt_arr_cp[rewire_count - 1, 1]\n g_ = opt_arr_cp[rewire_count, 1]\n\n if g_ <= g:\n p = 1\n r = 0\n # stop = time.time()\n\n elif g_ > g:\n if temp == 0:\n p = 0\n r = 1\n else:\n p = math.exp((-1 * (g_ - g)) / temp)\n r = random.uniform(0, 1)\n\n # if r < p:\n # optimise = True\n # stop = time.time()\n # else:\n # p = 0\n\n if r < p:\n stop = time.time()\n duration = stop - start\n\n # time for rewire is the time for previous rewire + the duration of current rewire\n t = opt_ad[rewire_count - 1, 0] + duration\n\n # merge instance with copy\n circ_isntance = rewire_test\n\n # merge copy with original array\n opt_arr = opt_arr_cp\n\n # add data to addition data array\n opt_ad = np.append(opt_ad, np.array([[t, temp, p, r]]), axis=0)\n\n # add to all matrix for rewire 0\n all_arr = np.append(all_arr, np.array([[rewire_count, percent_waste(waste, resources, intmed_products),\n waste_per_product(waste, n_commodity), t, temp, p]]), axis=0)\n\n if network_gif:\n G, mapping = instance.network()\n node_dict = instance.network_nodes_species()\n colour_map = colour_node(instance)\n\n pos = nx.spring_layout(G, k=0.5, iterations=100)\n nx.draw_networkx(G, pos=pos, node_color=colour_map, with_labels=False, node_size=50, width=0.5)\n\n plt.annotate(\"0\", xy=(0.9, 0.9))\n\n camera.snap()\n\n else:\n stop = time.time()\n duration = stop - start\n\n # time for rewire is the time for previous rewire + the duration of current rewire\n t = opt_ad[rewire_count - 1, 0] + duration\n\n all_arr = np.append(all_arr, np.array([[rewire_count, percent_waste(waste, resources, intmed_products),\n waste_per_product(waste, n_commodity), t, temp, p]]), axis=0)\n rewire_count -= 1\n\n # decrease temperature\n temp = 0.9 * temp\n\n opt_arr_cat = np.concatenate((opt_arr, opt_ad), axis=1)\n\n # dataframes\n opt_data = pd.DataFrame(data=opt_arr_cat[0:, 0:], index=None,\n columns=[\"Rewire no.\", \"Percent waste (%)\", \"Waste per 
product\", \"Time (s)\", \"Temperature\",\n \"Probability\", \"R\"])\n all_data = pd.DataFrame(data=all_arr[0:, 0:], index=None,\n columns=[\"Rewire no.\", \"Percent waste (%)\", \"Waste per product\", \"Time (s)\", \"Temperature\",\n \"Probability\"])\n\n if network_gif:\n animation = camera.animate()\n return animate\n else:\n return circ_isntance, opt_data, all_data", "def update(self, samples, agent_number):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n #obs, obs_full, action, reward, next_obs, next_obs_full, done = samples\n #obs, action, reward, next_obs, done = map(transpose_to_tensor, [obs, action, reward, next_obs, done])\n #obs_full = torch.from_numpy( obs_full ).float().to(device)\n #next_obs_full = torch.from_numpy( next_obs_full ).float().to(device)\n\n obs_full = torch.stack(obs_full)\n next_obs_full = torch.stack(next_obs_full)\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n #pdb.set_trace() #########################################################################################\n target_actions = torch.cat(target_actions, dim=1)\n #pdb.set_trace() #########################################################################################\n \n #target_critic_input = torch.cat((next_obs_full.t(),target_actions), dim=1).to(device)\n \n with torch.no_grad():\n #q_next = agent.target_critic(target_critic_input)\n q_next = agent.target_critic(next_obs_full.t(), target_actions)\n \n y = reward[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[agent_number].view(-1, 1))\n action = torch.cat(action, dim=1)\n #critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n #q = agent.critic(critic_input)\n q = agent.critic(obs_full.t(), action)\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \\\n else self.maddpg_agent[i].actor(ob).detach()\n for i, ob in enumerate(obs) ]\n \n q_input = torch.cat(q_input, dim=1)\n # combine all the actions and observations for input to critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n #q_input2 = torch.cat((obs_full.t(), q_input), dim=1)\n \n # get the policy gradient\n #actor_loss = -agent.critic(q_input2).mean()\n actor_loss = -agent.critic(obs_full.t(), q_input).mean()\n actor_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed" ]
[ "0.7139885", "0.6918553", "0.6862937", "0.67987317", "0.6480224", "0.6446079", "0.64306605", "0.63981", "0.6360692", "0.6357444", "0.62559646", "0.62238425", "0.61995184", "0.61872697", "0.6183759", "0.6120707", "0.6096974", "0.6002629", "0.60015005", "0.599701", "0.59915847", "0.5989076", "0.59830415", "0.5968423", "0.59647167", "0.59471655", "0.5940044", "0.592021", "0.5907812", "0.58886296", "0.5886999", "0.58681107", "0.5854273", "0.58335763", "0.58288896", "0.5826624", "0.58130157", "0.5811398", "0.579638", "0.57874286", "0.57856137", "0.57754266", "0.5758206", "0.57452106", "0.5742394", "0.57314897", "0.5715838", "0.57156485", "0.5711922", "0.57104206", "0.5697035", "0.5677112", "0.5674251", "0.5672507", "0.5670179", "0.5650961", "0.563091", "0.56285346", "0.56253326", "0.5616036", "0.5605354", "0.55885726", "0.5586925", "0.5585503", "0.55821025", "0.555035", "0.5549283", "0.5544078", "0.55285805", "0.5527642", "0.5526307", "0.5523532", "0.5505321", "0.54983824", "0.54915255", "0.54786205", "0.54681367", "0.54662323", "0.5466113", "0.5456692", "0.5452582", "0.5449554", "0.544143", "0.5436492", "0.54356754", "0.54351574", "0.5432875", "0.5432715", "0.5429421", "0.5428376", "0.5425071", "0.5424747", "0.5424365", "0.5411927", "0.5411039", "0.54070735", "0.5407016", "0.5397037", "0.5393771", "0.5390706" ]
0.78212637
0
Create an entry for the flow.
async def async_oauth_create_entry(self, data):\n await self.async_set_unique_id(unique_id=f"{DOMAIN}Cloud")\n return self.async_create_entry(title=f"{DOMAIN}Cloud", data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def create_entry(validator):\n entry = ValidationEntry()\n entry.setValidator(validator.build(entry))\n return entry", "def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? 
Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def test_Entry_creation(self):\n test_entry = self.create_Entry()\n self.assertTrue(isinstance(test_entry, Entry))", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def _create_input_entry(self, course_id=None):\r\n to_option = SEND_TO_ALL\r\n course_id = course_id or self.course.id\r\n course_email = CourseEmail.create(course_id, self.instructor, to_option, \"Test Subject\", \"<p>This is a test message</p>\")\r\n task_input = {'email_id': course_email.id} # pylint: disable=E1101\r\n task_id = str(uuid4())\r\n instructor_task = InstructorTaskFactory.create(\r\n course_id=course_id,\r\n requester=self.instructor,\r\n task_input=json.dumps(task_input),\r\n task_key='dummy value',\r\n task_id=task_id,\r\n )\r\n return instructor_task", "def create_entry(number, name, type_1, type_2, health_points, attack, defense, special_attack, special_defense, speed,\n generation, is_legendary):\n battle_stats = {'HP': health_points, 'Attack': attack, 'Defense': defense, 'Sp. Atk': special_attack, 'Sp. 
Def': special_defense, 'Speed': speed}\n if type_2 == \"\":\n types = (type_1, None)\n else:\n types = (type_1, type_2)\n entry = {'Number': number, \"Name\": name, 'Types': types, 'Battle Stats': battle_stats, 'Generation': generation, 'Legendary': is_legendary}\n return entry", "def create_entry(cls, title, date, timeSpent, learned, resources):\n try:\n with DATABASE.transaction():\n cls.create(\n title=title,\n date=date,\n timeSpent=timeSpent,\n learned=learned,\n resources=resources\n )\n except IntegrityError:\n raise ValueError(\"Entry already exists\")", "def generate_entry(self, params):\n try:\n self.assert_well_formed()\n self.assert_sane_params(params)\n except EmailAssertionError as e:\n logger.exception(\"The EmailKind: {ek} is not well formed or was badly called with: {pa}\"\\\n .format(ek=self, pa=params))\n raise e\n\n context = params.get('context', None) or self.default_context\n sender = params.get('sender', None) or self.default_sender\n recipients = ','.join(params.get('recipients', [])) or \\\n self.default_recipients\n subject = params.get('subject', None) or self.default_subject\n reply_to = ','.join(params.get('reply_to', [])) or \\\n self.default_reply_to\n\n\n with transaction.atomic():\n entry = EmailEntry.objects.create(\n kind=self,\n customer_id=params.get('customer_id', ''),\n context=context,\n sender=sender,\n recipients=recipients,\n subject=subject,\n reply_to=reply_to,\n send_at=params.get('send_at', None),\n check_url=params.get('check_url', ''),\n backend=params.get('backend', ''),\n metadata=params.get('meta_fields', {}),\n )\n\n attachs = params.get('attachs', [])\n for attach in attachs:\n uploaded_file = SimpleUploadedFile(\n attach['filename'],\n give_me_bytes(base64.b64decode(attach['content'])),\n attach['content_type']\n )\n Attachment.objects.create(entry=entry, attached_file=uploaded_file,\n content_type=attach['content_type'],\n name=attach['filename'])\n\n return entry", "def add_entry(unique_ID,value,label):\n\t\ttry:\n\t\t\tdata[unique_ID].appendEntry(value,label)\n\t\texcept InvalidInput:\n\t\t\t#deal with bad input\n\t\t\tpass", "def add_entry(self, scenario_info):\n print(\"--> Adding entry in execute table on server\")\n entry = \"%s,created\" % scenario_info[\"id\"]\n command = \"echo %s >> %s\" % (entry, self._server_path)\n err_message = \"Failed to update %s on server\" % self._EXECUTE_LIST\n _ = self._execute_and_check_err(command, err_message)", "def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry", "def createAtomEntry(self, postLink, atomNewEntry): #$NON-NLS-1$\r\n atomRequest = self._createNewEntryRequest(postLink, atomNewEntry)\r\n self._sendAtomEntry(atomRequest, atomNewEntry)\r\n atomEntry = atomRequest.getEntry()\r\n del atomRequest\r\n return atomEntry", "def __call__(self, config):\n entry = Entry(self.name, make_key(config), config, None, None, None)\n if not hasattr(_CONTEXT, \"on_entry\"):\n return entry\n on_entry = _CONTEXT.on_entry\n if on_entry:\n on_entry(entry)\n return entry", "async def create_entry_from_data(self):\n self._auth_data[\"access_token\"] = self._gateway.sense_access_token\n self._auth_data[\"user_id\"] = self._gateway.sense_user_id\n self._auth_data[\"device_id\"] = self._gateway.device_id\n self._auth_data[\"refresh_token\"] = self._gateway.refresh_token\n self._auth_data[\"monitor_id\"] = self._gateway.sense_monitor_id\n 
existing_entry = await self.async_set_unique_id(self._auth_data[CONF_EMAIL])\n if not existing_entry:\n return self.async_create_entry(\n title=self._auth_data[CONF_EMAIL], data=self._auth_data\n )\n\n self.hass.config_entries.async_update_entry(\n existing_entry, data=self._auth_data\n )\n await self.hass.config_entries.async_reload(existing_entry.entry_id)\n return self.async_abort(reason=\"reauth_successful\")", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def WriteFlowLogEntry(self, entry: rdf_flow_objects.FlowLogEntry) -> None:\n key = (entry.client_id, entry.flow_id)\n\n if key not in self.flows:\n raise db.UnknownFlowError(entry.client_id, entry.flow_id)\n\n entry = entry.Copy()\n entry.timestamp = rdfvalue.RDFDatetime.Now()\n\n self.flow_log_entries.setdefault(key, []).append(entry)", "def create_recipe(*, recipe_in: RecipeCreate) -> dict:\n new_entry_id = len(RECIPES) + 1\n recipe_entry = Recipe(\n id=new_entry_id,\n label=recipe_in.label,\n source=recipe_in.source,\n url=recipe_in.url,\n )\n RECIPES.append(recipe_entry.dict())\n\n return recipe_entry", "def _async_create_entry_from_vars(self):\n return self.async_create_entry(\n title=TITLE,\n data={\n CONF_USB_PATH: self.usb_path,\n CONF_NETWORK_KEY: self.network_key,\n CONF_USE_ADDON: self.use_addon,\n CONF_INTEGRATION_CREATED_ADDON: self.integration_created_addon,\n },\n )", "def create_entry(number, name, type_1, type_2, health_points, attack, defense, special_attack, special_defense, speed,\n generation, is_legendary):\n if type_2 == '':\n type_2 = None\n\n types = (type_1, type_2)\n\n battle_stats = {\"HP\": health_points, \"Attack\": attack, \"Defense\": defense, \"Sp. Atk\": special_attack,\n \"Sp. 
Def\": special_defense, \"Speed\": speed, }\n entry = {\n \"Number\": number,\n \"Name\": name,\n \"Types\": types,\n \"Battle Stats\": battle_stats,\n \"Generation\": generation,\n \"Legendary\": is_legendary\n }\n\n return entry", "def _create_entry(self, task_state=QUEUING, task_output=None, student=None):\r\n task_id = str(uuid4())\r\n progress_json = json.dumps(task_output) if task_output is not None else None\r\n task_input, task_key = encode_problem_and_student_input(self.problem_url, student)\r\n\r\n instructor_task = InstructorTaskFactory.create(course_id=TEST_COURSE_KEY,\r\n requester=self.instructor,\r\n task_input=json.dumps(task_input),\r\n task_key=task_key,\r\n task_id=task_id,\r\n task_state=task_state,\r\n task_output=progress_json)\r\n return instructor_task", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. 
Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def create():", "def create():", "def add_workflow_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='WORKFLOW',\n entry_message=entry_message,\n data=data)", "def feed(self, entry):\r\n pass", "def create(self, *args, **kwargs):\n pass", "def _create_input_entry(self, student_ident=None, use_problem_url=True, course_id=None):\r\n task_id = str(uuid4())\r\n task_input = {}\r\n if use_problem_url:\r\n task_input['problem_url'] = self.location\r\n if student_ident is not None:\r\n task_input['student'] = student_ident\r\n\r\n course_id = course_id or self.course.id\r\n instructor_task = InstructorTaskFactory.create(course_id=course_id,\r\n requester=self.instructor,\r\n task_input=json.dumps(task_input, cls=i4xEncoder),\r\n task_key='dummy value',\r\n task_id=task_id)\r\n return instructor_task", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()", "def add_step_entry(entry_message, data=''):\n return partial(__add_entry,\n event_type='STEP',\n entry_message=entry_message,\n data='')", "def init_new_entry(args, page=False):\n\n buildingfor = \"posts\"\n if (page):\n buildingfor = \"pages\"\n\n def _remove_temporary_entries(entries):\n result = {}\n for key, value in processed_entries.items():\n if (not \"_\" in key):\n result[key] = value\n\n return result\n\n def _get_new_entry(final_header):\n default_entry = \"---\\n\" + yaml.dump(final_header, allow_unicode=True,\n default_flow_style=False) + \"---\"\n return default_entry\n\n # Get configs\n user_config = configurator.get_config(os.path.join(args.src, paths.CFG_FILE))\n if (not user_config):\n logging.error(\"Error, could not find user config at {}\".format(\n os.path.join(args.src, paths.CFG_FILE)))\n return\n\n theme_headers = defaults.DEFAULT_THEME_HEADERS\n theme_headers_file = os.path.join(args.src, paths.THEMES_PATH,\n user_config[\"theme\"], paths.THEME_HEADERS_FILE)\n if (os.path.isfile(theme_headers_file)):\n tmp = configurator.get_yaml(theme_headers_file)\n # theme headers file might only define entries for posts/pages\n if (tmp[buildingfor]):\n theme_headers = tmp\n\n # Parse remainder (header content)\n processed_entries = _process_header_dict(theme_headers[buildingfor], args.header_content)\n final_entries = _remove_temporary_entries(processed_entries)\n\n # Generate entry file name from user / default template\n file_name = _get_new_entry_path(args, user_config, processed_entries, page)\n\n logging.debug(\"Creating new entry file at \" + file_name)\n\n with open(file_name, 'w+') as stream:\n stream.write(_get_new_entry(final_entries))\n\n logging.debug(\"Done creating entry.\")", "def __init_entry(self, utterance: dict, usernames: tuple) -> JsonDialogue:\n return 
JsonDialogue(self.topic, utterance['Title'], utterance['CreationDate_post'], usernames, self.sid)", "def build(self, name, opened, entry):\n raise NotImplementedError()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def add_entry():\n\n host = input('\\nEnter Mail Server Host: ')\n email = input('\\nEnter Email ID: ')\n password = getpass(prompt='\\nEnter Password: ')\n mailbox = input('\\nEnter MailBox: ')\n mobile = input('\\nEnter Mobile Number: ')\n\n if not isfile('data.json'):\n print('No input data.json found...')\n create_input_file()\n\n append_entry(host, email, password, mailbox)", "def create(self):\n\n pass", "def _create_uefi_entry(self, target, psci_enable, entry_name):\n self._wait_for_vemsd_mount(target)\n try:\n selection_pattern = '\\[([0-9]+)\\] *'\n\n # Identify and select boot manager menu item.\n target.expect(selection_pattern + 'Boot Manager', timeout=15)\n bootmanager_item = target.match.group(1)\n target.sendline(bootmanager_item)\n\n # Identify and select 'add new entry'.\n target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)\n new_entry_item = target.match.group(1)\n target.sendline(new_entry_item)\n\n # Identify and select BootMonFs.\n target.expect(selection_pattern + 'NOR Flash .*', timeout=15)\n BootMonFs_item = target.match.group(1)\n target.sendline(BootMonFs_item)\n\n # Specify the parameters of the new entry.\n target.expect('.+the kernel', timeout=5)\n target.sendline(self.config.kernel) # kernel path\n target.expect('Has FDT support\\?.*\\[y\\/n\\].*', timeout=5)\n time.sleep(0.5)\n target.sendline('y') # Has Fdt support? -> y\n target.expect('Add an initrd.*\\[y\\/n\\].*', timeout=5)\n time.sleep(0.5)\n target.sendline('y') # add an initrd? -> y\n target.expect('.+the initrd.*', timeout=5)\n time.sleep(0.5)\n target.sendline(self.config.initrd) # initrd path\n target.expect('.+to the binary.*', timeout=5)\n time.sleep(0.5)\n _slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary\n time.sleep(0.5)\n target.expect('.+new Entry.+', timeout=5)\n _slow_sendline(target, entry_name) # Entry name\n target.expect('Choice.+', timeout=15)\n time.sleep(2)\n except pexpect.TIMEOUT:\n raise DeviceError('Timed out while creating UEFI entry.')\n self._perform_uefi_reboot(target)", "def add_entry(self, account):\n def txn():\n entry = self.entries.filter('account =', account).get()\n if not entry:\n entry = Entry(account=account, parent=self)\n entry.put()\n created = True\n else:\n created = False\n return entry, created\n return db.run_in_transaction(txn)", "async def async_oauth_create_entry(self, data: dict[str, Any]) -> FlowResult:\n api = GeocachingApi(\n environment=ENVIRONMENT,\n token=data[\"token\"][\"access_token\"],\n session=async_get_clientsession(self.hass),\n )\n status = await api.update()\n if not status.user or not status.user.username:\n return self.async_abort(reason=\"oauth_error\")\n\n if existing_entry := await self.async_set_unique_id(\n status.user.username.lower()\n ):\n self.hass.config_entries.async_update_entry(existing_entry, data=data)\n await self.hass.config_entries.async_reload(existing_entry.entry_id)\n return self.async_abort(reason=\"reauth_successful\")\n return self.async_create_entry(title=status.user.username, data=data)", "def flowdetail_create(name, wf, fd_id=None):\n return IMPL.flowdetail_create(name, wf, fd_id)", "def __call__(self, entry):\n return self", "def create_new_event(self):\n pass", "def add_entry(self, entry: 
str) -> None:\n self.entries.append(f\"{self.count}: {entry}\")\n self.count += 1", "def create(self):\n ...", "async def create_flow(self, flow: \"FlowObject\") -> UUID:\n return await self.create_flow_from_name(flow.name)", "def create_entry(comment: str, config_index: int) -> str:\n\n def env_var_name(config_entry):\n return 'SW_' + config_entry.upper()\n\n configuration = list(OPTIONS.keys())[config_index]\n type_ = OPTIONS[configuration][1]\n default_val = OPTIONS[configuration][0]\n\n # special case for randomly generated default value\n if configuration == 'agent_instance_name':\n default_val = \"str(uuid.uuid1()).replace('-', '')\"\n return f'| {configuration} | {env_var_name(configuration)} | {str(type_)} | {default_val} | {comment} |'", "def new_entry(request, stock_id):\n stock= Stock.objects.get(id= stock_id)\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form= EntryForm()\n else:\n # POST data submitted; process data.\n form= EntryForm(data= request.POST)\n if form.is_valid():\n new_entry= form.save(commit= False)\n new_entry.stock= stock\n new_entry.save()\n return redirect('stock_trackers:stock', stock_id= stock_id)\n\n # display a blank or invalid form\n context= {'stock':stock, 'form': form}\n return render(request, 'stock_trackers/new_entry.html', context)", "def create(self,params=None, headers=None):\n path = '/mandate_import_entries'\n \n if params is not None:\n params = {self._envelope_key(): params}\n\n response = self._perform_request('POST', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n entry: pulumi.Input[str],\n instance_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n pulumi.set(__self__, \"entry\", entry)\n pulumi.set(__self__, \"instance_id\", instance_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def post_entry(self, body, link=None, to=None, **args):\n args.update(body=body)\n if link: args.update(link=link)\n if to: args.update(to=to)\n return self.fetch(\"/entry\", post_args=args)", "def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n 
BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def GetNewItem(self):\n if not self.category.Value:\n cat = 'None'\n else:\n cat = self.category.Value\n \n return Entry(self.name.Value, self.username.Value, self.password.Value, \n cat, self.comments.Value)", "def post(self, request, *args, **kwargs):\n self.create_flow_file_db_entry()\n self.handle_chunk(request)\n return self.return_response(self.flow_file.identifier)", "async def add_entry(self, **values):\r\n query = \"INSERT OR IGNORE INTO {table_name} ({table_headers}) VALUES({entry_values})\"\r\n\r\n headers = \", \".join([e for e in values.keys()])\r\n entry_val = \", \".join(\"?\"*len(values.values()))\r\n attrs = [e for e in values.values()]\r\n\r\n query = query.format(table_name = self.name, table_headers=headers, entry_values=entry_val)\r\n\r\n await self.data.db.execute(query, attrs)\r\n await self.data.db.commit()", "def create_entry(hass: HomeAssistant, device_id: str = DEVICE_UNIQUE_ID) -> ConfigEntry:\n entry = MockConfigEntry(\n domain=DOMAIN,\n title=\"Anova\",\n data={\n CONF_USERNAME: \"[email protected]\",\n CONF_PASSWORD: \"sample\",\n \"devices\": [(device_id, \"type_sample\")],\n },\n unique_id=\"[email protected]\",\n )\n entry.add_to_hass(hass)\n return entry", "def new_entry(path, name):\n\n default_config = {'prompt': \"Select command to run:\", 'choices': {}}\n with open(path, 'w') as f:\n json.dump(default_config, f)\n\n add_entry_to_database(path, name)", "def _create_entry():\r\n entry_widget = tk.Entry(password_window, bd=0, font=('Helvetica', 16), width=40,\r\n bg='gray15', fg='white', insertbackground='white')\r\n entry_widget.place(x=10, y=105)\r\n\r\n entry_widget.focus()\r\n\r\n return entry_widget", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if endpoint_type is not None:\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n if entry is not None:\n pulumi.set(__self__, \"entry\", entry)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def create(self):", "def _create_failure_entry(self):\r\n # view task entry for task failure\r\n progress = {'message': TEST_FAILURE_MESSAGE,\r\n 'exception': TEST_FAILURE_EXCEPTION,\r\n }\r\n return self._create_entry(task_state=FAILURE, task_output=progress)", "def test_create_valid_entry(self):\n url = reverse('airlines:aircraft-list')\n response = self.client.post(url, self.valid_payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['user_defined_id'], 6)", "def create(ctx):\n pass", "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def create_flow_event(self, source, sink, instance):\n try: # Check the cache\n flow = self.flow_lookup[(source, sink)]\n if flow:\n flow.add_event(instance, source, sink)\n except KeyError:\n for flow in self.flows:\n if 
flow.add_event(instance, source, sink): break\n else:\n flow = None\n # Cache the result\n self.flow_lookup[(source, sink)] = flow", "def make_entry(line):\n #focus on relevant parts\n parts = line.split(\" - \")\n visitor_id = parts[0]\n subparts = parts[1].split('\"')\n method_and_uri = subparts[1]\n method_and_uri_parts = method_and_uri.split(\" \")\n method = method_and_uri_parts[0]\n uri = method_and_uri_parts[1]\n d = dict()\n d[\"visitor_id\"] = visitor_id\n d[\"method\"] = method\n d[\"uri\"] = uri\n return d", "def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)", "def create():\n pass", "def submit_entry(self, entry_type=\"Log\"):\n # Gratitude List\n gratitude_li = []\n for grat_entry in self.grat_entry_li:\n grat_txt = grat_entry.get()\n if grat_txt != \"\":\n gratitude_li.append(grat_txt.replace(\",\", \"\\comma\"))\n\n # Goals List\n goals_li = []\n\n for goal_entry in self.goals_entry_li:\n goal_txt = goal_entry.get()\n if goal_txt != \"\":\n goals_li.append(goal_txt.replace(\",\", \"\\comma\"))\n\n \"\"\"\n Plans List: {\"Description\": description entry box,\n \"Priority\": priority combo box, \"Steps\": list of step entry boxes }\n \n Step list: {\"Description\": new_step_box, \"Status\": check_var}\n \"\"\"\n plans_li = []\n\n for plan_entry in self.plans_entry_li:\n if plan_entry[\"Description\"].get() == \"\" and len(plan_entry[\"Steps\"]) == 0:\n pass\n else:\n plan = {}\n plan[\"Plan_Type\"] = plan_entry[\"Plan_Type\"].get()\n plan[\"Description\"] = plan_entry[\"Description\"].get().replace(\n \",\", \"\\comma\")\n plan[\"Status\"] = plan_entry[\"Status\"].get()\n plan[\"Priority\"] = plan_entry[\"Priority\"].get()\n plan[\"Steps\"] = []\n for step_entry in plan_entry[\"Steps\"]:\n step_txt = step_entry[\"Description\"].get()\n if step_txt == \"\":\n pass\n else:\n full_step = {\"Status\": step_entry[\"Status\"].get(),\n \"Description\": step_txt}\n plan[\"Steps\"].append(full_step)\n plans_li.append(plan)\n\n # Affirmation Entry\n affirmation = self.affirm_entry.get(\"1.0\", 'end-1c')\n\n # Additional Comment Entry\n additional_notes = self.notes_entry.get(\"1.0\", 'end-1c')\n\n add_new_entry(entry_type=entry_type,\n gratitude=gratitude_li,\n goals=goals_li,\n plans=plans_li,\n affirmation=affirmation,\n additional_notes=additional_notes,\n test=True)\n\n self.root.switch_page(self.root._HomePage)", "def create(\n self,\n __template_id,\n __payload,\n *,\n workflow_id=None,\n command_id=None,\n read_as=None,\n act_as=None,\n ):\n raise NotImplementedError", "def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")", "def add(self, update, context):\n\n telegram_user = update.message.from_user\n\n if len(context.args) != 2:\n message = \"Sorry! I could not add the entry! 
Please use the the command passing the following arguments:\\n\\n /add <url> <entryname> \\n\\n Here is a short example: \\n\\n /add http://www.feedforall.com/sample.xml ExampleEntry\"\n update.message.reply_text(message)\n return\n print(f'context.args: {context.args}')\n # arg_url = FeedHandler.format_url_string(string=context.args[0])\n arg_url = context.args[0]\n arg_entry = context.args[1]\n print(f'arg_entry: {arg_entry}')\n print(f'arg_url: {arg_url}')\n\n # Check if argument matches url format\n # if not FeedHandler.is_parsable(url=arg_url):\n # message = (\n # \"Sorry! It seems like '\"\n # + str(arg_url)\n # + \"' doesn't provide an RSS news feed.. Have you tried another URL from that provider?\"\n # )\n # update.message.reply_text(message)\n # return\n\n # Check if entry does not exists\n entries = self.db.get_urls_for_user(telegram_id=telegram_user.id)\n print(entries)\n\n if any(arg_url in entry for entry in entries):\n message = (\n \"Sorry, \"\n + telegram_user.first_name\n + \"! I already have that url with stored in your subscriptions.\"\n )\n update.message.reply_text(message)\n return\n\n if any(arg_entry in entry for entry in entries):\n message = (\n \"Sorry! I already have an entry with name \"\n + arg_entry\n + \" stored in your subscriptions.. Please choose another entry name or delete the entry using '/remove \"\n + arg_entry\n + \"'\"\n )\n update.message.reply_text(message)\n return\n\n self.db.add_user_bookmark(\n telegram_id=telegram_user.id, url=arg_url, alias=arg_entry\n )\n message = \"I successfully added \" + arg_entry + \" to your subscriptions!\"\n update.message.reply_text(message)", "def add_transaction(inbound_entry):\n try:\n data = inbound_entry[\"data\"]\n key_pair = Key()\n encrypted_private_key = encrypt_private_key(\n AES_KEY, key_pair.public_key, key_pair.private_key_bytes\n )\n inbound_entry[\"public_key\"] = key_pair.public_key\n inbound_entry[\"private_key\"] = encrypted_private_key\n\n if inbound_entry[\"data_type\"] == \"user\":\n next_user = get_next_id(\n \"user_mapping\", data[\"remote_id\"], inbound_entry[\"provider_id\"]\n )\n # Generate Ids\n if next_user:\n next_id = next_user[0][\"next_id\"]\n else:\n next_id = str(uuid4())\n\n object_id = User().hash(next_id)\n address = User().address(object_id=object_id)\n\n inbound_entry[\"next_id\"] = next_id\n inbound_entry[\"address\"] = bytes_from_hex(address)\n inbound_entry[\"object_id\"] = bytes_from_hex(object_id)\n inbound_entry[\"object_type\"] = addresser.ObjectType.USER.value\n message = User().imports.make(\n signer_keypair=key_pair, next_id=next_id, **data\n )\n batch = User().imports.batch(\n signer_keypair=key_pair,\n signer_user_id=key_pair.public_key,\n message=message,\n )\n inbound_entry[\"batch\"] = batch.SerializeToString()\n add_metadata(inbound_entry, message)\n\n elif inbound_entry[\"data_type\"] == \"group\":\n next_id = str(uuid4())\n object_id = Role().hash(next_id)\n address = Role().address(object_id=object_id)\n\n inbound_entry[\"address\"] = bytes_from_hex(address)\n inbound_entry[\"object_id\"] = bytes_from_hex(object_id)\n inbound_entry[\"object_type\"] = addresser.ObjectType.ROLE.value\n\n message = Role().imports.make(\n signer_keypair=key_pair, role_id=next_id, **data\n )\n batch = Role().imports.batch(\n signer_keypair=key_pair,\n signer_user_id=key_pair.public_key,\n message=message,\n )\n inbound_entry[\"batch\"] = batch.SerializeToString()\n add_metadata(inbound_entry, message)\n\n except Exception as err: # pylint: disable=broad-except\n 
LOGGER.exception(\n \"Unable to create transaction for inbound data:\\n%s\", inbound_entry\n )\n LOGGER.exception(err)", "def add_entry(\n self,\n the_id: str,\n the_name: str,\n the_parent: str = '') -> None:\n\n # validate inputs\n the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)\n\n # verify that the_id doesn't already exist\n if the_id in self.labels:\n raise KeyError('the_id = {} already exists'.format(the_id))\n\n # check if name is already being used, and warn if so\n for key, value in self.labels.items():\n if value == the_name:\n logger.warning(\n 'Note that id {} is already using name {}. Having repeated names is '\n 'permitted, but may lead to confusion.'.format(key, value))\n\n # add the entry into the labels and subtypes dicts and reset the values\n # perform copy in case of failure\n labels = self.labels.copy()\n subtypes = self.subtypes.copy()\n labels[the_id] = the_name\n if the_parent in subtypes:\n subtypes[the_parent].append(the_id)\n else:\n subtypes[the_parent] = [the_id, ]\n\n try:\n self.set_labels_and_subtypes(labels, subtypes)\n except (ValueError, KeyError) as e:\n logger.error(\n 'Setting new entry id {}, name {}, and parent {} failed with '\n 'exception {}'.format(the_id, the_name, the_parent, e))", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def test_add_entry_creates_db_entry_with_correct_details(self):\n example_inputs = [\n 'Example Employee',\n '2018-05-01',\n 'Example Task',\n 100,\n 'Example Note'\n ]\n with patch('builtins.input', side_effect=example_inputs):\n self.menu.add_entry()\n\n query = (db_manager\n .LogEntry\n .select()\n .join(db_manager.Employee)\n .where(\n db_manager.Employee.name == example_inputs[0],\n db_manager.LogEntry.date == example_inputs[1],\n db_manager.LogEntry.task_name == example_inputs[2],\n db_manager.LogEntry.duration == example_inputs[3],\n db_manager.LogEntry.notes == example_inputs[4]\n ))\n record = query[0]\n record_data = [\n record.employee.name,\n record.date.strftime(\"%Y-%m-%d\"),\n record.task_name,\n record.duration,\n record.notes\n ]\n self.assertEqual(example_inputs, record_data)", "def feed(self, entry):\r\n if entry.name not in self.names:\r\n self.names[entry.name] = list()\r\n self.names[entry.name].append(entry)", "def _new_entry(self, home_coordinates, feature, global_data):\n return NswRuralFireServiceFeedEntry(home_coordinates, feature)", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. 
Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return", "def create_and_exercise(\n self,\n __template_id,\n __payload,\n __choice_name,\n __argument=None,\n *,\n workflow_id=None,\n command_id=None,\n read_as=None,\n act_as=None,\n ):\n raise NotImplementedError", "def create_hit(client, hit_info):\n hit_id = \"\"\n hit_type = hit_info.type\n assert(hit_type in ['gen', 'val'])\n if hit_type == 'gen':\n hit = create_generation_hit(client, hit_info)\n else:\n hit = create_validation_hit(client, hit_info)\n hit_id = hit['HIT']['HITId']\n # for Sandbox HITs\n # hit_url = \"https://workersandbox.mturk.com/mturk/preview?groupId=\" + hit['HIT']['HITGroupId']\n # for live publishing\n hit_url = \"https://worker.mturk.com/mturk/preview?groupId=\" + hit['HIT']['HITGroupId']\n assert(hit_id != \"\")\n logger.info(f'* Created {hit_type} HIT for annotation {hit_info.annotation_id} of question {hit_info.question_id}, HIT id is: {hit_id}, HIT url: {hit_url}')\n return hit_id", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def create(*args):", "def create_tag_with_entry(title):\n tag = Tag.objects.create(title=title)\n tag.save()\n tag.entry.add(1)\n return tag" ]
[ "0.7608277", "0.6764989", "0.6709134", "0.6657233", "0.65733117", "0.64365363", "0.6302687", "0.6290588", "0.6275084", "0.6263564", "0.62014914", "0.62014914", "0.6190114", "0.615221", "0.614616", "0.6126723", "0.61106926", "0.60714495", "0.60638577", "0.6062632", "0.60251296", "0.59568596", "0.59353113", "0.5933559", "0.5914452", "0.5907611", "0.5895111", "0.5872048", "0.5870459", "0.5870164", "0.5818613", "0.58147717", "0.5814105", "0.5812094", "0.5798853", "0.5798853", "0.5793946", "0.5768605", "0.5762752", "0.5744585", "0.57376736", "0.57263744", "0.57001954", "0.56974113", "0.5686855", "0.568064", "0.568064", "0.568064", "0.56521386", "0.56368005", "0.5630794", "0.562325", "0.56228685", "0.5603844", "0.56028724", "0.55973625", "0.55696404", "0.5554749", "0.55500203", "0.553784", "0.5536505", "0.55301195", "0.5519555", "0.5519217", "0.5501961", "0.5495721", "0.5485506", "0.5485481", "0.54792523", "0.54757476", "0.54688674", "0.5463585", "0.5460362", "0.54598624", "0.54585636", "0.54532", "0.5452742", "0.5441337", "0.54411995", "0.5432569", "0.54325455", "0.5421531", "0.54194385", "0.54148483", "0.5407328", "0.53867364", "0.53756136", "0.53657943", "0.5364487", "0.5359154", "0.5344265", "0.5334983", "0.53283095", "0.53222567", "0.5315563", "0.5308018", "0.5304368", "0.53040487", "0.5302992", "0.529491" ]
0.65311766
5
Handle a flow initiated by the user.
async def async_step_user(self, user_input=None): # If there is a CLOUD entry already, abort a new LOCAL entry if self.is_cloud_device_already_added(): return self.async_abort(reason="already_configured_device") return await self.async_step_environment()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request(self, flow: mitmproxy.http.HTTPFlow):\n pass", "async def flow(self, session: ClientSession, data: Dict) -> None:", "def action(self):\n current_action = self.get_script_entry()\n if current_action[\"type\"] == \"request\":\n self._handle_request(current_action)\n elif current_action[\"type\"] == \"event\":\n self._handle_event(current_action)\n elif current_action[\"type\"] == \"response\":\n self._handle_response(current_action)\n else:\n raise AttributeError(\"Wrong action type!\" +\n \" Scenario: \" + str(self._loaded_sc[\"name\"]) +\n \" Action: \" + str(self._scenario_script_cur))", "def process_step(self, request, step, form):\n pass", "def flow(self, flow):\n\n self._flow = flow", "def handle_interaction(request):\n\n payload = json.loads(request.POST['payload'])\n interaction_type = payload.get('type', None)\n\n # Handle shortcut\n if interaction_type == \"shortcut\":\n callback_id = payload.get('callback_id', None)\n if callback_id == \"tfed\":\n blocks = views.tfed_modal()\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n if interaction_type == \"message_action\":\n callback_id = payload.get('callback_id', None)\n if callback_id == \"report\":\n channel = payload.get('channel', {'id': None})['id']\n sender = payload['message'].get('user', None)\n if not sender:\n sender = payload['message']['username']\n ts = payload['message']['ts']\n text = payload['message']['text']\n message, created = models.SlackMessage.objects.get_or_create(posted_to=channel, posted_by=sender, ts=ts,\n content=text)\n blocks = views.report_message_modal(message)\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n\n # Handle modal view submission\n if interaction_type == \"view_submission\":\n values = payload['view']['state']['values']\n callback_id = payload['view'].get('callback_id', None)\n\n # TFed ticket submission\n if callback_id == \"tfed-modal\":\n subject = values['subject']['subject-action']['value']\n description = values['description']['description-action']['value']\n topic = values['rt_topic']['rt_topic-action']['selected_option']['value']\n user_id = payload['user']['id']\n user = user_profile(user_id)\n if user['ok']:\n __create_ticket(user, subject, description, topic)\n return HttpResponse()\n return HttpResponseServerError(\"Failed to obtain user information\")\n\n # Update TFed ticket\n elif callback_id == \"ticket-update-modal\":\n ticket_info = payload['view']['blocks'][1]\n owner_id = None\n if ticket_info['type'] != \"divider\":\n ticket_info = payload['view']['blocks'][2]\n owner_id = values['ticket_assignee']['ticket_assignee-action']['selected_user']\n ticket_id = ticket_info['block_id'].split(\"#\")[0]\n channel = ticket_info['block_id'].split(\"#\")[1]\n ts = ticket_info['block_id'].split(\"#\")[2]\n status = values['ticket_status']['ticket_status-action']['selected_option']\n if status:\n status = status['value']\n comments = values['ticket_comment']['ticket_comment-action']['value']\n checkboxes = values['email_requestor']['email_requestor-action']['selected_options']\n notify_requestor = False\n if len(checkboxes) > 0:\n notify_requestor = True\n\n # Obtain user's RT token\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n\n __update_ticket(ticket_id, status, owner_id, comments, notify_requestor, token, user_id, 
channel, ts)\n return HttpResponse()\n elif callback_id == \"ticket-comment-modal\":\n ticket_id = payload['view']['blocks'][0]['block_id']\n comments = values[ticket_id]['comment-action']['value']\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n __post_ticket_comment(ticket_id, user_id, comments, token)\n return HttpResponse()\n elif callback_id == \"report-modal\":\n message_id = payload['view']['blocks'][0]['block_id']\n comments = values['report-comment']['comment-action']['value']\n reporter = payload['user']['id']\n __save_report(message_id, reporter, comments)\n return HttpResponse()\n return HttpResponseNotFound()\n\n # Handle block interaction event\n if interaction_type == \"block_actions\":\n action = payload['actions'][0]['action_id']\n channel = payload.get('channel', None)\n if channel:\n channel = channel['id']\n message = payload.get('message', None)\n view = payload.get('view', None)\n\n # TFed message\n if channel in [settings.SLACK_TARGET_TFED, settings.SLACK_TARGET_TFED_DB] and message and not view:\n ticket_id = message['blocks'][0]['block_id'].split('~')[0]\n blocks = views.ticket_update_modal(ticket_id, channel, message['ts'], action)\n\n # Get current ticket from RT\n __refresh_ticket_async(channel, message)\n\n # Check that user has token, if not display a warning\n user_id = payload['user']['id']\n token = __retrieve_rt_token(user_id)\n if not token:\n error_message = \"Hi there! Before you can update tickets, you'll need to set up access to your RT \" \\\n \"account. Visit https://lnl.wpi.edu\" + reverse(\"support:link-account\") + \\\n \" to get started.\"\n post_ephemeral(channel, error_message, user_id, 'Request Tracker')\n return HttpResponse()\n\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if modal_id:\n return HttpResponse()\n return HttpResponseServerError(\"Failed to open modal\")\n\n # Home tab menu options\n if action == \"home-ticket-update\":\n ticket_id = payload['actions'][0]['block_id']\n option = payload['actions'][0]['selected_option']['value']\n if option == 'Comment':\n blocks = views.ticket_comment_modal(ticket_id)\n modal_id = open_modal(payload.get('trigger_id', None), blocks)\n if not modal_id:\n return HttpResponseServerError(\"Failed to open modal\")\n return HttpResponse()\n return HttpResponseNotFound()", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def request(self, flow: mitmproxy.http.HTTPFlow):", "def update_flow(self, flow):\r\n self.flow = flow", "def _handle_attempt(self):\n pass", "def execute(self, flow: Flow):\n while True:\n autosteps = flow.next_autosteps()\n steps = flow.next_steps()\n\n if not steps:\n log.debug(\"Flow ended correctly.Nothing left to do.\")\n with self._lock:\n self.in_flight.remove(flow)\n break\n\n if not autosteps and flow.current_step.hints:\n possible_next_steps = [f'You are in the flow **{flow.name}**, you can continue with:\\n\\n']\n for step in steps:\n cmd = step.command\n cmd_fnc = self._bot.all_commands[cmd]\n reg_cmd = cmd_fnc._err_re_command\n syntax_args = cmd_fnc._err_command_syntax\n reg_prefixed = cmd_fnc._err_command_prefix_required if reg_cmd else True\n syntax = self._bot.prefix if reg_prefixed else ''\n if not reg_cmd:\n syntax += cmd.replace('_', ' ')\n if syntax_args:\n syntax += syntax_args\n possible_next_steps.append(f'- {syntax}')\n self._bot.send(flow.requestor, '\\n'.join(possible_next_steps))\n break\n\n log.debug('Steps triggered automatically %s.', ', '.join(str(node) for node in autosteps))\n log.debug('All possible 
next steps: %s.', ', '.join(str(node) for node in steps))\n\n for autostep in autosteps:\n log.debug(\"Proceeding automatically with step %s\", autostep)\n if autostep == FLOW_END:\n log.debug('This flow ENDED.')\n with self._lock:\n self.in_flight.remove(flow)\n return\n try:\n msg = Message(frm=flow.requestor, flow=flow)\n result = self._bot.commands[autostep.command](msg, None)\n log.debug('Step result %s: %s', flow.requestor, result)\n\n except Exception as e:\n log.exception('%s errored at %s', flow, autostep)\n self._bot.send(flow.requestor, f'{flow} errored at {autostep} with \"{e}\"')\n flow.advance(autostep) # TODO: this is only true for a single step, make it forkable.\n log.debug('Flow execution suspended/ended normally.')", "def proceed(self):\n pass", "def on_goal(self, goal_handle):\n\n\t\t# Get the goal corresponding to the current goal handle\n\t\tgoal = goal_handle.get_goal()\n\n\t\trospy.loginfo(\"[BRIDGE] Received a goal from client\")\n\n\t\t# Validate goal parameter before publishing data to MQTT\n\t\tif(goal.protocol == 'google_apps'):\n\n\t\t\t# Set the goal as accepted\n\t\t\tgoal_handle.set_accepted()\n\t\t\trospy.logwarn(\"State Accepted\")\n\n\t\t\t# Start processing the goal\n\t\t\tself.process_goal(goal_handle)\n\n\t\telse:\n\n\t\t\t# Set the Goal as rejected\n\t\t\tgoal_handle.set_rejected()\n\t\t\trospy.logwarn(\"State Rejected\")\n\t\t\treturn", "def post(self, request, *args, **kwargs):\n self.create_flow_file_db_entry()\n self.handle_chunk(request)\n return self.return_response(self.flow_file.identifier)", "def process(self, user_event: UserEvent) -> None:\n pass", "def take_action(self, *args, **kwargs):\r\n pass", "def handle(self, context: Context):\n raise NotImplementedError()", "def handle(self, request):\n\t\tCORE.info('Incoming request of type %s' % (request.command,))\n\t\tif not self.authenticated and request.command != 'AUTH':\n\t\t\tself.request(request)\n\t\telif request.command == 'AUTH':\n\t\t\tfrom univention.management.console.protocol.server import Server\n\t\t\tServer.reload()\n\t\t\ttry:\n\t\t\t\tself.__auth.authenticate(request)\n\t\t\texcept (TypeError, KeyError):\n\t\t\t\tresponse = Response(request)\n\t\t\t\tresponse.status = 400\n\t\t\t\tself._response(response)\n\t\telif request.command == 'GET' and 'newsession' in request.arguments:\n\t\t\tCORE.info('Renewing session')\n\t\t\tif self.processor:\n\t\t\t\tself.__locale = str(self.processor.locale)\n\t\t\tself.processor = None\n\t\t\tself.finished(request.id, None)\n\t\telse:\n\t\t\tself.initalize_processor(request)\n\t\t\tself.processor.request(request)", "async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if is_hassio(self.hass):\n return await self.async_step_on_supervisor()\n\n return await self.async_step_manual()", "def handle(self) -> None:", "def handle(self, handler_input):\n speech = \"I'm a sample Alexa Skill. Let me give you a random Chuck Norris Fact. \"\n speech += getChuckFact()\n speech += \". Do you want more awesome Chuck facts?\"\n \n \"\"\"\n Take note of the set_should_end_session. If set to 'True', the alexa\n skill will gracefully end execution.AbstractExceptionHandler\n \n The set_card method specifies what kind of cards do you want to use when\n interacting with the user via display. 
A 'SimpleCard' display's text.\n \n For more info about cards, see:\n https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html\n \"\"\"\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(speech)).set_should_end_session(False)\n return handler_input.response_builder.response", "def start_flow():\n if request.method == 'GET':\n tel = request.args.get('tel')\n flow = request.args.get('flow')\n to_rp = request.args.get('to')\n if to_rp == \"io\":\n client = io_client\n elif to_rp == \"datos\":\n client = mx_client\n else:\n return jsonify({}), 404\n contact = client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n client.create_flow_start(\n flow=flow,\n contacts=[contact[0].uuid],\n )\n return jsonify({\"Inicio_flow\": \"Si\"}), 201\n return jsonify({\"Inicio_flow\": \"No\"}), 404", "def handle(self, *args, **kwargs):\n raise NotImplementedError()", "def OnAttempt(self, event):\n pass", "def handle_event(request):\n\n payload = json.loads(request.body)\n if payload['type'] == \"url_verification\":\n return JsonResponse({\"challenge\": payload['challenge']})\n elif payload['type'] == \"event_callback\":\n event = payload['event']\n if event['type'] == \"team_join\":\n slack_post(event['user']['id'], text=\"Welcome to LNL!\", content=views.welcome_message())\n elif event['type'] == \"app_home_opened\":\n load_app_home(event['user'])\n elif event['type'] == \"channel_created\":\n if settings.SLACK_AUTO_JOIN:\n join_channel(event['channel']['id'])\n return HttpResponse()\n return HttpResponse(\"Not implemented\")", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def perform_step(self, action):\n pass", "def set_cur_flow(self, flow):\n self.cur_flow = flow", "def new_goal_callback(self, goal_handle):\n goal_handle = self._as.current_goal\n success = False\n goal = goal_handle.get_goal()\n \n # get the enumeration\n action_type = self._get_action_from_goal(goal)\n if action_type is not None and action_type.value in self._cbs.keys():\n \n rospy.logdebug('Executing action %s %s' % (self._action_name, action_type))\n # call the correct callback\n success, preempted = self._cbs[action_type.value](goal)\n\n # fill out feedback\n self._fill_feedback()\n goal_handle.publish_feedback(self._feedback)\n\n # return if preempted, takes precedence over 'success' state\n if preempted:\n self._preempted(goal_handle)\n else:\n # return if finished\n if success:\n self._finish(goal_handle)\n else:\n # specify failure\n self._aborted(goal_handle)", "async def async_step_user(self, user_input=None):\n flow = self.hass.data.get(DATA_FLOW_IMPL)\n\n if not flow:\n return self.async_abort(reason=\"no_flows\")\n\n if user_input:\n return await self.async_step_auth(user_input)\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {vol.Required(const.PROFILE): vol.In(flow.get(const.PROFILES))}\n ),\n )", "def handle_accept(self):\r\n pass", "def handle_event(self, event):\n # Get rid of all non keydown events\n if event.type != pygame.KEYDOWN:\n return\n\n if not self._initial_prompt.is_over():\n self._initial_prompt.handle_event(event)\n\n # If response is 2 then the user selected SEE YA!. Pass control to\n # seeya dialogue.\n elif self._response == 2 and not self._seeya_dialogue.is_over():\n self._seeya_dialogue.handle_event(event)\n\n # If response is 0 then the user selected BUY. 
Pass control to buy\n # menu.\n elif self._response == 0 and not self._buy_menu.is_over():\n self._buy_menu.handle_event(event)\n\n elif self._response == 1 and not self._sell_menu.is_over():\n self._sell_menu.handle_event(event)", "def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)", "def _goal_received_cb(self):\n rospy.loginfo(\"[Server] Goal received, passing it on.\")\n self._action_client.wait_for_server()\n self._success = True\n goal = self._action_server.accept_new_goal()\n self._action_client.send_goal(goal, self._result_received_cb,\n self._active_cb,\n self._feedback_received_cb)", "def oauth_start_flow():\n # Have to do authentication!\n rest.default_user_authentication()\n\n account_type = flask.request.args.get('type')\n if account_type is None:\n flask.abort(400)\n\n cls = ACCOUNT_TYPES.get(account_type, None)\n if cls is None:\n flask.about(400)\n\n key = str(uuid.uuid4())\n instance = cls(id=key)\n instance.put()\n\n return flask.redirect(instance.AUTH_URL %\n {'client_id': instance.CLIENT_ID,\n 'state': key})", "def handle_req(reqid):\n req = hl.retrieveRequest(reqid)\n \n if request.method == 'POST':\n if request.form['reqOption'] == 'Approve':\n hl.acceptRequest(req)\n elif request.form['reqOption'] == 'Decline':\n hl.declineRequest(req)\n \n return redirect('/users')", "def handle(self, parsed_args):\n raise NotImplementedError", "def execute(self, message: ACLMessage):\n super().execute(message)\n\n # Filter for protocol\n if not message.protocol == ACLMessage.FIPA_REQUEST_PROTOCOL:\n return\n\n # Filter for session_id (conversation_id)\n session_id = message.conversation_id\n if session_id not in self.open_sessions:\n return\n\n # Resume generator\n generator = self.open_sessions[session_id]\n handlers = {\n ACLMessage.INFORM: lambda: generator.send(message),\n ACLMessage.AGREE: lambda: generator.throw(FipaAgreeHandler, message),\n ACLMessage.REFUSE: lambda: generator.throw(FipaRefuseHandler, message),\n ACLMessage.FAILURE: lambda: generator.throw(\n FipaFailureHandler, message)\n }\n try:\n handlers[message.performative]()\n except StopIteration:\n pass\n except KeyError:\n return\n\n # Clear session if final message was received\n if message.performative in (ACLMessage.REFUSE, ACLMessage.INFORM, ACLMessage.FAILURE):\n self.delete_session(session_id)", "def __handler(self, context: CallbackContext, update: Update, trigger: str):\n eff_user = update.effective_user\n logger.info('Handling user with id: {}'.format(eff_user.id))\n self.user = self.db_adapter.get_user(eff_user=eff_user)\n self.context = context\n self.update = update\n self.user.state = Constructor.START_STATE_NAME if not self.user.state else self.user.state\n\n if self.machine:\n self.machine.set_state(state=self.user.state, model=self)\n else:\n self.machine = Machine(\n model=self,\n states=self.states,\n initial=self.user.state,\n transitions=self.transitions\n )\n\n triggers = self.machine.get_triggers(self.state)\n matched_triggers = []\n for possible_trigger in triggers:\n if re.match(possible_trigger, trigger):\n matched_triggers.append(possible_trigger)\n\n if 
len(matched_triggers) == 0:\n trigger = Constructor.FREE_TEXT_TRIGGER\n elif len(matched_triggers) == 1:\n trigger = matched_triggers[0]\n else:\n raise ValueError(\n f'Proposed trigger {trigger} has more then one possible model\\'s '\n f'matched triggers: {matched_triggers}'\n )\n\n self.machine.model.trigger(trigger, self)\n\n self.user.state = self.state\n self.db_adapter.commit_user(self.user)\n\n if Constructor.PASSING_TRIGGER in self.machine.get_triggers(self.state):\n self.__handler(self, update, Constructor.PASSING_TRIGGER)", "def dispatch(intent_request):\r\n\r\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\r\n\r\n intent_name = intent_request['currentIntent']['name']\r\n\r\n # Dispatch to your bot's intent handlers\r\n if intent_name == 'gethousepredict':\r\n return housepredict(intent_request)\r\n elif intent_name == 'availablehouses':\r\n housetype = intent_request['currentIntent']['slots']['housetypesavail']\r\n location = intent_request['currentIntent']['slots']['locationavail']\r\n item_dtl = house_price_dtl(location,housetype)\r\n #print (\"housetype\",housetype)\r\n #print (\"location\",location)\r\n #print (\"House Pirce\",price)\r\n response = {\r\n \"dialogAction\": {\r\n \"type\": \"Close\",\r\n \"fulfillmentState\": \"Fulfilled\",\r\n \"message\": {\r\n \"contentType\": \"SSML\",\r\n \"content\": \" Hosue Details \\n {item_dtls}\".format(item_dtls = item_dtl)\r\n },\r\n }\r\n }\r\n print('result = ' + str(response))\r\n return response\r\n\r\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def handle(self, controller):\n \n username = self.entries[self.fieldnames[0]].get()\n \n if not username: ## set user name to be a required field\n showinfo(title=\"Pop-up\", message=\"Please Enter Your Name.\")\n return\n \n address = self.entries[self.fieldnames[1]].get()\n \n if not self.connected:\n package = {'customer_id':-1, 'customer_name':username, 'customer_address':address}\n msg = controller.transmit(package) ## receive a new customer id\n controller.customer = Customer(username, address, msg['customer_id'])\n self.connected = not self.connected\n\n controller.show_frame(PageOne)", "def process(self, event):\n pass", "def step(self, action):", "def handle_decision(message):\n Base.metadata.create_all(engine)\n session = Session()\n argument = message.action\n func = switcher_protocol.get(argument, 'nothing')\n return func(message.information, session)", "def handle_event(self, event):\n pass", "def handle_event(self, event):\n if self.sub_event is not None:\n self.sub_event.handle_event(event)\n else:\n self.confirm_response.handle_event(event)", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def on_go(state):\n pass", "def handle_migration(self, message):\n if self.migrating:\n if (messages.get_message_type(message) == \"OFPT_FLOW_REMOVED\"\n and message.cookie == 1991):\n self.migrating = False\n self.controller.start_sending_to_switch()\n #print \"Switch migration successfully completed!\"\n else:\n 
self.buffer.append(messages.of_flow_add)\n self.migrating = True\n self.activate_controller()", "def action_done(self):", "def perform_step(self) -> None:\n pass", "def hit_handler(request, hit_id):\n LOGGER.info('Rendering task handler view for user \"{0}\".'.format(\n request.user.username or \"Anonymous\"))\n \n hit = get_object_or_404(HIT, hit_id=hit_id)\n if not hit.active:\n LOGGER.debug('Detected inactive User/HIT mapping {0}->{1}'.format(\n request.user, hit))\n # Try to find a new HIT for the current annotation project\n if hit.project_set.count() > 0:\n annotation_project = list(hit.project_set.all())[0]\n new_hit = _compute_next_task_for_user(request.user, annotation_project, hit.language_pair)\n if new_hit:\n return redirect('appraise.wmt16.views.hit_handler',\n hit_id=new_hit.hit_id)\n \n # If that fails, return to overview page\n return redirect('appraise.wmt16.views.overview')\n \n items = RankingTask.objects.filter(hit=hit)\n if not items:\n return redirect('appraise.wmt16.views.overview')\n \n return _handle_ranking(request, hit, items)", "def handle(self, *args, **options):\n raise NotImplementedError()", "def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()", "def async_handle_dispatch(self, *args) -> None:\n if not args:\n self.update_ha_state()\n return\n\n payload = args[0]\n if payload.get(UNIQUE_ID) != self.unique_id:\n return\n elif payload[SERVICE] == SVC_RESET_SYSTEM_MODE:\n self._call_client_api(self._device.reset_mode)\n elif payload[SERVICE] == SVC_SET_SYSTEM_MODE:\n kwargs = dict(payload[DATA])\n kwargs[\"system_mode\"] = kwargs.pop(\"mode\", None)\n until = kwargs.pop(\"duration\", None) or kwargs.pop(\"period\", None)\n kwargs[\"until\"] = (dt.now() + until) if until else None\n self._call_client_api(self._device.set_mode, **kwargs)", "def onFlowUpdate(self, event):", "def __handler(self, bot, update, trigger):\n eff_user = update.effective_user\n user_id = eff_user.id\n logger.info('Handling user with id: {}'.format(user_id))\n self.user = self.db_adapter.get_user(eff_user=eff_user)\n self.bot = bot\n self.update = update\n self.user.state = Constructor.START_STATE_NAME if not self.user.state else self.user.state\n\n if self.machine:\n self.machine.set_state(state=self.user.state, model=self)\n else:\n self.machine = Machine(model=self, states=self.states, initial=self.user.state,\n transitions=self.transitions)\n\n triggers = self.machine.get_triggers(self.state)\n matched_triggers = []\n for possible_trigger in triggers:\n if re.match(possible_trigger, trigger):\n matched_triggers.append(possible_trigger)\n\n if len(matched_triggers) == 0:\n trigger = Constructor.FREE_TEXT_TRIGGER\n elif len(matched_triggers) == 1:\n trigger = matched_triggers[0]\n else:\n raise ValueError(\n 'Proposed trigger {} has more then one possible model\\'s matched triggers: {}'.format(trigger,\n matched_triggers))\n\n self.machine.model.trigger(trigger, self)\n\n self.user.state = self.state\n self.db_adapter.commit_user(self.user)\n\n if Constructor.PASSING_TRIGGER in self.machine.get_triggers(self.state):\n self.__handler(self, update, Constructor.PASSING_TRIGGER)", "def step(self, action):\n raise NotImplementedError()", "def view(self):\n\t\tself.done(1)", "def handle(self, data):\n pass", "def 
process_IN_OPEN(self, event):", "async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if self._async_current_entries():\n return self.async_abort(reason=\"single_instance_allowed\")\n\n if user_input is None:\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA\n )\n\n errors = {}\n\n try:\n await validate_input(self.hass, user_input)\n except error.APIConnectionError:\n errors[\"base\"] = \"cannot_connect\"\n except error.AuthenticationError:\n errors[\"base\"] = \"invalid_auth\"\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception\")\n errors[\"base\"] = \"unknown\"\n else:\n return self.async_create_entry(title=\"OpenAI Conversation\", data=user_input)\n\n return self.async_show_form(\n step_id=\"user\", data_schema=STEP_USER_DATA_SCHEMA, errors=errors\n )", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def run_step(self):\n self.control_instance.run_step()", "def step(self, action):\n raise NotImplementedError", "def action_flow(self, action_flow):\n\n self._action_flow = action_flow", "def handle_event(self, event):\n self.confirmed_dialogue.handle_event(event)", "def handle(req):\n return logic(req)", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'bitbotSetNewAlert':\n return set_currency_alert(intent_request)\n # elif intent_name == 'Temp':\n # return set_currency_alert(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def handle(self):\n raise NotImplementedError", "async def test_flow_user(hass):\n mocked_device = _create_mocked_device()\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER},\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] is None\n _flow_next(hass, result[\"flow_id\"])\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], user_input={CONF_ENDPOINT: ENDPOINT},\n )\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == MODEL\n assert result[\"data\"] == {\n CONF_NAME: MODEL,\n CONF_ENDPOINT: ENDPOINT,\n }\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_called_once()", "def do_step(self) -> None:", "def post_flow_form(self, req, **_kwargs):\n if req.POST:\n res = Response()\n s = self.api.process_flow_message(req.json)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n return Response(status=400) # bad request", "def step_impl(context):\n pass", "def step_impl(context):\n pass", "def _handle_first_request(self):\n pass", "async def next_step(\n self, step_context: WaterfallStepContext\n ) -> DialogTurnResult:\n step_context.values[\"input\"] = step_context.result\n user_input = step_context.values[\"input\"]\n\n # TODO: remove this notification, it is for demo purposes only.\n await step_context.context.send_activity(\n MessageFactory.text(f\"[In this step, we will use Recognizers-Text to learn the user intention.]\")\n )\n # 
-------------------------------------------------------------\n results = parse_all(user_input, DEFAULT_CULTURE)\n # Flatten results\n results = [item for sublist in results for item in sublist]\n\n # ------------\n # parse results to find the data we need:\n has_time_stamp = False\n has_price = False\n has_quantity = False\n amount = None\n\n # temporary lists\n list_number = []\n list_currency = []\n list_datetime = []\n value_key = \"value\"\n\n for i in results:\n # in each pass, according to type_name, append to a list, or several.\n type_name = i.type_name\n if type_name == Constants.currency_type_name:\n has_price = True\n list_currency.append(i.resolution.get(value_key))\n if type_name == Constants.datetime_type_name or type_name == Constants.date_type_name:\n has_time_stamp = True\n list_datetime.append(i.resolution.get(\"values\", \"\")[0][value_key])\n if type_name == Constants.number_type_name:\n if i.resolution.get(value_key):\n has_quantity = True\n value = i.resolution.get(value_key)\n else:\n value = i.text\n has_quantity = False\n\n list_number.append(value)\n\n # this contains the whole collection of stocks of the user.\n # in the init method, it should populate the holdings using the data text file\n self.portfolio = Portfolio()\n\n # this represents a position taken with an investment instrument.\n # usually, there are many open at the same time.\n holding = Holding()\n\n # represents the intermediary broker\n self.broker = Broker()\n\n # for current operation (buy, sell)\n self.operation = Operation()\n\n self.operation.buy = True if ('buy' in user_input or 'Buy' in user_input) else False\n self.operation.sell = True if ('sell' in user_input or 'Sell' in user_input) else False\n\n if self.operation.buy:\n self.operation = BuyOperation()\n self.operation.buy = True\n self.operation.sell = False\n self.operation.type = 'buy'\n\n if self.operation.sell:\n self.operation = SellOperation()\n self.operation.buy = False\n self.operation.sell = True\n self.operation.type = 'sell'\n\n # TODO: we should have a dict or similar with [ticker, company_name]\n # refactor this for other companies\n holding.stock.ticker = 'MSFT' if (\n 'MSFT' in user_input.upper() or 'microsoft' in user_input.lower()) else 'x'\n\n if holding.stock.ticker == 'MSFT':\n holding.stock.company = \"Microsoft\"\n\n if has_time_stamp:\n self.operation.time_stamp = list_datetime[0]\n\n if len(Sets.intersection(list_currency, list_number)) == 1:\n self.operation.price = Sets.intersection(list_currency, list_number)[0]\n holding.quantity = Sets.diff(list_number, list_currency)[0]\n\n if has_quantity and has_price:\n print(\"Quantity: \" + str(holding.quantity))\n amount = int(holding.quantity) * float(self.operation.price)\n self.operation.amount = round(amount, Constants.max_decimals)\n\n print(\"Stock: \" + holding.to_string())\n print(\"Price: $ \" + str(self.operation.price))\n\n if has_time_stamp:\n print(\"TimeStamp: \" + str(self.operation.time_stamp))\n\n if has_quantity and amount:\n print(Constants.separator)\n print(\"OPERATION DETAILS\")\n print(Constants.separator)\n print(\"Operation type: \" + self.operation.type)\n print(\"Amount: $ \" + str(amount))\n self.operation.commission = round(amount * self.broker.commission, Constants.max_decimals)\n # tax, over the commission is 0.01 (10%)\n self.operation.tax = round(self.operation.commission * Constants.tax, Constants.max_decimals)\n print(\"Commission: $ \" + str(self.operation.commission))\n print(\"TAX: $ \" + str(self.operation.tax))\n 
print(Constants.separator)\n print(\"Total: $ \" + str(amount + self.operation.commission + self.operation.tax))\n print(Constants.separator)\n self.operation.quantity = holding.quantity\n self.operation.stock.ticker = holding.stock.ticker\n self.operation.stock.company = holding.stock.company\n self.operation.stock.market = holding.stock.market\n\n str_quantity = str(holding.quantity)\n str_price = \"$ \" + str(self.operation.price)\n str_time_stamp = \" on \" + str(self.operation.time_stamp) if has_time_stamp else \"\"\n\n # TODO: Check if the ticker is in use.\n find_result = any(elem.stock.ticker == holding.stock.ticker for elem in self.portfolio.stocks_owned)\n\n if find_result:\n updated_holding = next((i for i in self.portfolio.stocks_owned if i.stock.ticker == holding.stock.ticker), None)\n a = int(updated_holding.quantity)\n b = int(holding.quantity)\n # TODO: Check if is a buy or sell, the arithmetic logic\n if self.operation.type == 'buy':\n updated_holding.quantity = str(a + b)\n # cash should be decreased by the total cost of the operation\n elif self.operation.type == 'sell':\n # in fact, this should alter the compromised quantity, until the order is executed. Its ok for now.\n updated_holding.quantity = str(a - b)\n # also, the cash should be incremented when selling\n # self.portfolio.cash =\n else:\n self.portfolio.stocks_owned.append(holding)\n # -------------------------------------------------------------\n\n # TODO: Test write the portfolio with new values\n self.portfolio.write_json_data_to_file()\n\n operation_details = \"\"\n if has_quantity and amount:\n commission = round(amount * self.broker.commission, Constants.max_decimals)\n tax = round(commission * Constants.tax, Constants.max_decimals)\n\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"OPERATION DETAILS\" + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"Operation type: \" + self.operation.type + \"\\n\"\n operation_details += \"Amount: $ \" + str(amount) + \"\\n\"\n operation_details += \"Commission: $ \" + str(commission) + \"\\n\"\n operation_details += \"TAX: $ \" + str(tax) + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n operation_details += \"Total: $ \" + str(amount + commission + tax) + \"\\n\"\n operation_details += Constants.separator + \"\\n\"\n\n await step_context.context.send_activity(\n MessageFactory.text(operation_details)\n )\n\n # TODO: Here, we can show how much profit comes from the sale operation.\n query = \"Do you wish to \" + self.operation.type + \" \" + str_quantity + \" \" + holding.stock.ticker + \" stocks at \" + str_price + str_time_stamp + \"?\"\n return await step_context.prompt(\n ConfirmPrompt.__name__,\n PromptOptions(\n prompt=MessageFactory.text(query)\n ),\n )\n\n # if we don't ask for confirmation, we terminate it:\n # return await step_context.end_dialog()", "def post_create_flow(self, response: gcdc_flow.Flow) -> gcdc_flow.Flow:\n return response", "def action_done(self):\n pass", "def on_step_choice(self, event, placement):\n\n self.log.trace(\"Handle step selection by user.\")\n result = {'successful': False}\n\n try:\n # check if next step already exists\n n = len(self.procedure_steps)\n if str(n) == str(placement):\n self.log.trace(\"Next step does not exist. 
Creating ...\")\n self.create_new_input_step()\n\n else:\n self.log.trace(\"Next step exists.\")\n\n result['successful'] = True\n except BaseException, e:\n self.handle_exception(e, \"handle module selection by user\")\n\n # return\n return result", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n #gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n #game_state.suppress_warnings(True) #Uncomment this line to suppress warnings.\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()", "def handler(event, context):\n if event['Records'][0]['Sns']['Message'] is None:\n _print_info('Unrecognized event, function will not be executed. Enable debug to log the actual event.')\n _print_debug('event: {}'.format(event))\n return\n\n message = event['Records'][0]['Sns']['Message']\n _print_debug('message received: {}'.format(message))\n\n event = json.loads(message)\n _print_info('event: {}'.format(json.dumps(event)))\n\n if event[ACTION] in ALLOWED_ACTIONS:\n\n _print_info('Requested action: {}'.format(event[ACTION]))\n\n _print_info('Initializing.')\n _init_vars_()\n\n # create a hive cursor which can be passed around and then closed when done.\n cursor = _create_hive_cursor()\n\n if event[ACTION] == FULL_SYNC:\n _sync_all(cursor)\n if event[ACTION] == DELTA_SYNC:\n if event[USER] and event[NAMESPACE]:\n _sync_delta(cursor, event[USER], event[NAMESPACE])\n else:\n _print_error(\n 'Invalid request. Expecting both: a valid \\'{}\\' and a valid \\'{}\\''.format(\n USER, NAMESPACE))\n\n # close the hive cursor when done\n _close_hive_cursor(cursor)\n else:\n _print_error(\n 'Unknown action. Expecting one of: \\'{}\\', \\'{}\\''.format(FULL_SYNC,\n DELTA_SYNC))", "def on_intent(intent_request, session, state):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n \n \n # If new user, and intent is not setting up, prompt to set up first\n # If corrupted user, prompt to set up again\n userId = session[\"user\"][\"userId\"]\n query_user = get_info(userId)\n print(query_user)\n if (len(query_user) == 0 and intent_name != \"NewUserCollectInfoIntent\") or \\\n (len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS):\n if len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS:\n delete_info(userId)\n \n return new_user_intro(session, state)\n\n handlers = {\n \"GetMainFocusIntent\": get_main_focus_intent_response,\n \"CheckinKeepMainFocusIntent\": keep_main_focus_intent,\n \"CheckinReplaceMainFocusIntent\": replace_main_focus_intent,\n \"ExecuteMorningRoutineIntent\": execute_morning_routine_intent,\n \"ExecuteEveningRoutineIntent\": execute_evening_routine_intent,\n \"AMAZON.YesIntent\": handle_yes_intent,\n \"AMAZON.NoIntent\": handle_no_intent,\n \"AMAZON.CancelIntent\": handle_session_end_request,\n \"AMAZON.StopIntent\": handle_session_end_request,\n }\n \n # Handlers that need more arguments\n if intent_name not in handlers:\n if intent_name == \"SetMorningRoutineIntent\":\n return set_routine_intent(intent, session, state, MORNING)\n elif intent_name == \"SetEveningRoutineIntent\":\n return set_routine_intent(intent, session, state, EVENING)\n elif intent_name == \"GetMorningRoutineIntent\":\n return get_routine_intent(intent, session, state, MORNING)\n elif intent_name == \"GetEveningRoutineIntent\":\n return get_routine_intent(intent, 
session, state, EVENING)\n elif intent_name == \"NewUserCollectInfoIntent\":\n return new_user_collect_info_intent(intent_request, session, state)\n elif intent_name == \"SetNameIntent\":\n return set_name_intent(intent_request, session, state)\n \n try:\n return handlers[intent_name](intent, session, state)\n except Exception as e:\n # This exception probably came from inside a handler\n print(e)\n raise ValueError(\"Invalid intent: \"+intent_name)", "def process_show_form(self, request, step, form):\n pass", "async def process_with_session(self, event: dict, http_session: ClientSession) -> dict:\n self.myq = await pymyq.login(self.user_name, self.password, http_session)\n\n if self.has_one_door():\n self.move_msg = self.move_msg.replace(' edgewood or encinal', '')\n self.check_msg = self.check1_msg = self.check1_msg.replace(' edgewood or encinal', '')\n\n if event['session']['new']:\n logger.info(f\"New session: request_id={event['request']['requestId']}, \" \n f\"sessionId={event['session']['sessionId']}\")\n\n request_type = event['request']['type']\n if request_type == 'LaunchRequest':\n return self.on_launch()\n elif request_type == 'IntentRequest':\n return await self.on_intent(event['request']['intent'])\n elif request_type == 'SessionEndedRequest':\n return self.on_session_ended()\n else:\n logger.error(f'Unknown request type: {request_type}')\n raise InputException(request_type)", "def create_flow(self, conf, dpid, params):\n\t\tpass", "def action_run(self):\n pass", "def handle_frame(self, frame):\n\n # Take actions to enter the game\n\n if self.frame_counter <= max(self.setup_moves.keys()):\n self.do_setup()\n return\n\n # If we're dead, skip through a couple of frames until we can\n # become alive again. The agent does not act during this period.\n\n if self.dead_until:\n if self.frame_counter == self.dead_until - 40:\n self.controller.handle_keys(['space'])\n elif self.frame_counter == self.dead_until - 36:\n self.controller.handle_keys([])\n elif self.frame_counter > self.dead_until:\n self.dead_until = None\n return\n\n # Figure out which move to make and observe the appropriate reward\n\n sequence = self.frame_processor.get_sequence()\n if sequence is None:\n return\n\n move = self.agent.generate_action(sequence)\n self.controller.handle_keys(move)\n\n if np.allclose(frame, 255):\n self.agent.observe(-1, terminal=True)\n self.dead_until = self.frame_counter + 140\n else:\n self.agent.observe(1, terminal=False)\n\n # Run some debugging code each frame.\n # self.debug(frame)", "def on_turn(self, turn_state):\n game_state = gamelib.GameState(self.config, turn_state)\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n game_state.suppress_warnings(True) #Comment this line to show warnings.\n\n self.emp_line_strategy(game_state)\n\n game_state.submit_turn()", "def run(self):\n self.print_welcome()\n self.handle_inputs()", "def process_action(*args, **kwargs):\n raise NotImplementedError()", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'VCH_Policies':\n return respond(intent_request, 'Policies')\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def process(self, request):\n pass", "def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()", "def 
__call__(self, request, *args, **kwargs):\n\n self._init_wizard(request)\n\n slug = kwargs.get('slug', None)\n\n if not slug:\n raise Http404()\n\n step = self.get_step(request, slug)\n\n if not step:\n if slug == 'cancel':\n self.cancel(request)\n redirect = request.REQUEST.get('rd', '/')\n\n return HttpResponseRedirect(redirect)\n\n raise Http404()\n\n try:\n method_name = 'process_%s' % request.method\n method = getattr(self, method_name)\n\n return method(request, step)\n\n except AttributeError:\n raise Http404()", "def handle(self, body):\n event_type = body['event_type']\n method_name = event_type.replace('.', '_')\n try:\n method = getattr(self, method_name)\n method(body)\n except AttributeError:\n LOG.debug('%s needs a method called `%s` to handle %s' %\n (self.__class__.__name__, method_name, event_type))", "def action_handler(self):\n if self.state == data.DEAD:\n return\n\n x = 0\n for check in self.state_chart[self.state]:\n if not check:\n x += 1\n continue\n elif check():\n self.state = x\n\n # Some messages when state changes\n if self.state == data.CHASE:\n self.handler.message_box.add_msg(\"{} sees you!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n elif self.state == data.RUN:\n self.handler.message_box.add_msg(\"{} runs away!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n\n x += 1\n\n if self.state == data.HOLD:\n return\n elif self.state == data.CHASE:\n self.chase(self.handler.player)\n elif self.state == data.RUN:\n self.run(self.handler.player)", "def handle(self):", "def _post_dispatch(self, request, *args, **kwargs):\n pass", "def handle_io_event(self, data):\n getattr(\n self,\n 'control_{}'.format(self.model)\n )(data['action'])\n self.update_serverside_status({\n 'action': data['action'], 'event_id': data['event_id']\n })", "def handleEvent(self, event):\n pass" ]
[ "0.6017324", "0.59390163", "0.5888961", "0.5822091", "0.58176124", "0.5810652", "0.57601875", "0.57601875", "0.57484925", "0.57093275", "0.5647356", "0.5628661", "0.56186384", "0.55906844", "0.5579503", "0.55780405", "0.5541484", "0.5533322", "0.5515994", "0.5509516", "0.55082804", "0.5491568", "0.54624844", "0.545347", "0.54523575", "0.54035604", "0.53795934", "0.53668416", "0.5355", "0.534551", "0.5344157", "0.53210694", "0.531974", "0.5311353", "0.5307266", "0.5305663", "0.5301633", "0.5285267", "0.5282223", "0.5279716", "0.52709585", "0.5253919", "0.5253696", "0.5243839", "0.5227686", "0.5224712", "0.522355", "0.5223386", "0.5221684", "0.5218038", "0.5215863", "0.52142", "0.5211051", "0.5210973", "0.5208797", "0.52024394", "0.52014726", "0.51983917", "0.5188158", "0.5183592", "0.51827306", "0.5182646", "0.5174575", "0.51706755", "0.5170135", "0.51634264", "0.5148761", "0.5147319", "0.5142293", "0.5138783", "0.5133517", "0.5131167", "0.5129589", "0.5128589", "0.5128589", "0.5126194", "0.51243025", "0.51232725", "0.5120182", "0.5118234", "0.51174366", "0.51146674", "0.5112168", "0.51049066", "0.5100877", "0.5100298", "0.5095358", "0.5093086", "0.50879943", "0.5086813", "0.508558", "0.50852364", "0.5083343", "0.50825065", "0.50801957", "0.5073724", "0.5073553", "0.50735366", "0.50713056", "0.5071142", "0.507083" ]
0.0
-1
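Several of the negative snippets above route an intent by looking up its name in a handlers mapping inside a try/except. A minimal, self-contained sketch of that dispatch pattern (the handler signature and request field names are assumed for illustration, not taken from any single snippet):

def dispatch_intent(intent_request, session, state, handlers):
    # Resolve the intent name and route to the matching handler.
    intent_name = intent_request["currentIntent"]["name"]
    try:
        return handlers[intent_name](intent_request, session, state)
    except KeyError:
        # No handler registered for this intent name.
        raise ValueError("Invalid intent: " + intent_name)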
Decide environment, cloud or local.
async def async_step_environment(self, user_input=None): if user_input is None: return self.async_show_form( step_id="environment", data_schema=vol.Schema( { vol.Required("environment", default=ENV_CLOUD): vol.In( [ENV_CLOUD, ENV_LOCAL] ) } ), errors={}, ) # Environment chosen, request additional host information for LOCAL or OAuth2 flow for CLOUD # Ask for host detail if user_input["environment"] == ENV_LOCAL: return await self.async_step_local() # Abort cloud option if a LOCAL entry has already been added if user_input["environment"] == ENV_CLOUD and self._async_current_entries(): return self.async_abort(reason="already_configured_local_device") return await self.async_step_pick_implementation()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_environment():\n # Auto-set settings object based on App Engine dev environ\n if 'SERVER_SOFTWARE' in os.environ:\n if os.environ['SERVER_SOFTWARE'].startswith('Dev'):\n return Config.ENV_LOCAL\n elif os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/'):\n #For considering an environment staging we assume the version id\n # contains -staging and the URL\n current_version_id = str(os.environ['CURRENT_VERSION_ID']) if (\n 'CURRENT_VERSION_ID') in os.environ else ''\n if '-staging' in current_version_id:\n return Config.ENV_STAGING\n #If not local or staging then is production TODO: really?\n return Config.ENV_PRODUCTION\n return Config.ENV_LOCAL", "def get_site_env(self):\n return self.config['SITE_ENVIRONMENT'] == 'DEV'", "def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")", "def get_current_environment(self):\n for env in self.indicators:\n if self._is_env_indicator_in_url(self.indicators[env]):\n return env\n\n return Environment.PRODUCTION", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def PRODUCTION(cls):\n\n return DataCenter.Environment(\"https://www.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def getEnvironment(self):\n pass", "def prod(environment):\n return environment == 'live' or environment == 'debug' or environment == 'prod'", "def environment(self):\n return self._get_field(\"environment\")", "def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']", "def is_development():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')", "def get_environment() -> Environment:\n return Environment(\n media_url=get_endpoint(\"MEDIA\"),\n datastore_reader_url=get_endpoint(\"DATASTORE_READER\"),\n datastore_writer_url=get_endpoint(\"DATASTORE_WRITER\"),\n vote_url=get_endpoint(\"VOTE\"),\n )", "def production():\n env.run = run\n env.cd = cd\n env.deployment = 'remote'", "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "def test_is_production_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"production\"})\n is_develop = is_development_env()\n self.assertFalse(is_develop)", "def on_appengine():\n runtime = os.environ.get('SERVER_SOFTWARE', '')\n return (runtime.startswith('Development/') or\n runtime.startswith('Google App Engine/'))", "def _get_environment(cls):\n return cls.__name__.lower()", "def _is_google_env():\n tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')\n if not tf_config:\n logging.warn('TF_CONFIG should not be empty in distributed environment.')\n return tf_config.get(_ENVIRONMENT_KEY) == _ENVIRONMENT_GOOGLE_VALUE", "def localhost():\n env.run = local\n env.cd = lcd\n env.deployment = 'local'", "def env(self):\n return spack.schema.environment.parse(self.conf.get(\"environment\", {}))", "def ENVIRONMENT(self):\n return self._get_environment()", "def test_is_development_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"development\"})\n is_develop = 
is_development_env()\n self.assertTrue(is_develop)", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})", "def get_env_class(environment_type):\n if environment_type == \"vanilla\":\n return city.CityGridEnv\n elif environment_type == \"distraction\":\n return city.DistractionGridEnv\n elif environment_type == \"map\":\n return city.MapGridEnv\n elif environment_type == \"cooking\":\n return cooking.CookingGridEnv\n elif environment_type == \"miniworld_sign\":\n # Dependencies on OpenGL, so only load if absolutely necessary\n from envs.miniworld import sign\n return sign.MiniWorldSign\n else:\n raise ValueError(\n \"Unsupported environment type: {}\".format(environment_type))", "def get_deploy_mode():\n inventory = Inventory.load()\n remote_mode = inventory.remote_mode\n deploy_mode = 'remote' if remote_mode else 'local'\n return deploy_mode", "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def on(stage):\n localhosts = ['localhost', '127.0.0.1']\n env.stage = stage\n env.context = get_context()\n hosts = env.context['hosts']\n if stage == 'dev' and len(hosts) == 1 and hosts[0] in localhosts:\n env.hosts = []\n else:\n env.hosts = env.context['hosts']", "def _production():\n env.environment = 'production'\n env.server_name = 'project-production.dimagi.com'\n env.hosts = [settings.PRODUCTION_HOST]", "def _staging():\n env.environment = 'staging'\n env.server_name = 'project-staging.dimagi.com'\n env.hosts = [settings.STAGING_HOST]", "def _require_environment():\n require('environment', 'host', provided_by=ENVS.keys())", "def GetEnvironment(self):\n environ = super(ServiceHandlerTest, self).GetEnvironment()\n if self.remote_host:\n environ['REMOTE_HOST'] = self.remote_host\n if self.server_host:\n environ['SERVER_HOST'] = self.server_host\n return environ", "def env(self) -> Optional[Env]:\n raise NotImplementedError", "def environment(self, name):\n return self.environments[name]", "def get_env(self) -> str:\n return self.env or ENV", "def get_server_environment(self):\n return self.client.getServerEnvironment(self.creds, self.transaction, self.environment)", "def get_environment(basedir):\n for env in ('devel', 'staging', 'prod'):\n if os.path.exists(os.path.join(basedir, env)):\n return env\n return 'devel'", "def map_environment(env):\n if env in {'dev', 'develop', 'development'}:\n return 'dev'\n if env in {'prod', 'production'}:\n return 'prod'\n return env", "def development():\n env.branch = 'development'", "def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] = os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = 
os.environ.get(ENV_WEATHER_API_URL)", "def SANDBOX(cls):\n\n return DataCenter.Environment(\"https://sandbox.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(request):\n context = {\n 'COMPRESS_ENABLED': settings.COMPRESS_ENABLED,\n 'GOOGLE_ANALYTICS_CODE': getattr(settings, 'GOOGLE_ANALYTICS_CODE', None),\n 'GOOGLE_ANALYTICS_ADDRESS': getattr(settings, 'GOOGLE_ANALYTICS_ADDRESS', None),\n }\n return context", "def production():\n env.settings = 'production'\n env.hosts = ['db.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media.apps.chicagotribune.com'", "def get_os_env():\n env = os.environ\n# print(\"env \\n\" , env)\n return env", "def get_env(self, loop):\n env = getattr(self.app, 'env', None)\n if not env:\n env = self.environment(self.app, loop, self.host, self.port)\n self.app.env = env\n return env", "def cloud_platform(self):\n return self._cloud_platform", "def DEVELOPER(cls):\n\n return DataCenter.Environment(\"https://developer.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def get_system():\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n\n raise EnvironmentError('Unsupported environment')", "def get_environment(self):\r\n return self.mcas[0].get_environment()", "def _config(request):\n return request.environ['adminish']", "def read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def publisher_test_environment(self) -> Optional[str]:\n return pulumi.get(self, \"publisher_test_environment\")", "def is_production_environment(self):\n return self.get_current_environment() == Environment.PRODUCTION", "def isSciServerComputeEnvironment():\n if os.path.isfile(\"/home/idies/keystone.token\"):\n return True\n else:\n return False", "def current_sandbox():\n if get_setting_async('use_cabal_dev'):\n return get_setting_async('cabal_dev_sandbox')\n else:\n return None", "def SetupEnvironment(self):\n pass", "def environment(self):\n return self._environment", "def _get_environment(self):\n if self._cache.get(\"_environment\") is None:\n name = self.get(\"environmentname\", \"default\")\n if name:\n db = self.session\n try:\n env = db.query(models.Environment).filter(models.Environment.name==name).one()\n except config.NoResultFound as err:\n raise config.ConfigError(\"Bad environmentname %r: %s\" % (name, err))\n username = self.get(\"username\") # username should be set by test runner\n if username:\n if env.is_owned():\n if env.owner.username != username:\n raise config.ConfigError(\"Environment is currently owned by: %s\" % (env.owner,))\n env.set_owner_by_username(db, username)\n env = EnvironmentRuntime(db, env, self.logfile)\n self._cache[\"_environment\"] = 
env\n else:\n raise config.ConfigError, \"Bad environmentname %r.\" % (name,)\n return self._cache[\"_environment\"]", "def _env_switch(environment: str, prod_value: T, qa_value: T) -> T:\n if environment == PROD:\n return prod_value\n return qa_value", "def set_envdata():\n\n\t\tenv = ArgvParserObject.get_environment()\n\n\t\t# reads data for selected enviroment\n\t\tif env == 'DEV':\n\t\t\tenvdata = DevData\n\t\t# elif env == 'PREPROD':\n\t\t#\tenvdata = PreprodData\n\t\telse:\n\t\t\traise NameError('Wrong enviroment value. ')\n\n\t\treturn envdata", "def is_production(version=None):\n return is_host_google() and is_default_version(version)", "def env(self): # type: () -> t.List[str]\n return self.config['Env']", "def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'", "def getPlatform(self):\n self.platform=util.get_platform()\n if not(self.platform.find('linux')==-1): self.platform='Unix' # i suppose, that in all unix systems are paths similiar\n if self.platform=='win32': self.platform='Win32' # this should be done automatically", "def test_get_environment_string(self):\n pass", "def is_staging(version=None):\n return is_host_google() and not is_default_version(version)", "def isSciServerComputeEnvironment():\n if os.path.isfile(KeystoneTokenPath):\n return True\n else:\n return False", "def production():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]']\n env.host_type = 'production'\n env.user = 'ombu'\n env.host_webserver_user = 'nginx'\n env.host_site_path = '/home/ombu/webapps/ombuweb'", "def set_dev_environment(self):\n if 'RUSTUP_DEV_DIST_SERVER' in os.environ:\n self._download_url = os.environ['RUSTUP_DEV_DIST_SERVER']\n else:\n self._download_url = 'https://dev-static.rust-lang.org'", "def staging():\n env.settings = 'staging'\n env.hosts = ['db.beta.tribapps.com'] \n env.user = 'newsapps'\n env.s3_bucket = 'media-beta.tribapps.com'", "def validate_env(env):\n\n enviroments = [\"https://api.cloudcheckr.com\", \"https://eu.cloudcheckr.com\", \"https://au.cloudcheckr.com\", \"https://gov.cloudcheckr.com\", \"https://qa.cloudcheckr.com\"]\n\n if env not in enviroments:\n log_information(f\"The environment {env} is not valid. 
If this is a standalone environment, please add the url to the validate_env function.\")\n return", "def platform():\n return \"micaz\"", "def horizonhost():\n env.cd = cd\n env.run = run\n env.hosts = settings.HOSTS['horizon']\n env.exists = exists", "def is_production_mode(self):\n return getattr(self.env, 'mode', None) == 'production'", "def current_cabal():\n if get_setting_async('use_cabal_dev'):\n return get_setting_async('cabal_dev_sandbox')\n else:\n return 'cabal'", "def setUp(self) -> None:\n self.s3 = boto3.client('s3')\n\n try:\n self.prod_env = os.environ['TEST_ENV'] == \"prod\"\n except KeyError:\n self.prod_env = True", "def env(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"env\")", "def _get_environmentdef():\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef", "def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])", "def setup_local_site(self):\n raise NotImplementedError", "def staging():\n env.hosts = ['staging.example.com']", "def environment(self) -> pulumi.Output['outputs.EnvironmentResponse']:\n return pulumi.get(self, \"environment\")", "def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id", "def production():\n puts(green('>>> Running on Production!'))\n env.hosts = ['web1.precog.com']\n puts(green('Servers: %s' % \", \".join(env.hosts)))", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def __get_host(self) -> str:\n\t\treturn os.getenv('FLASK_DRIVER_HOST', '0.0.0.0')", "def cloud_type(self) -> str:\n return pulumi.get(self, \"cloud_type\")", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def prod():\n # Varnish proxies.\n # env.roledefs['varnish_servers'] = ['varnish1.example.org', 'varnish2.example.org']\n # The Django app servers.\n env.roledefs['webapp_servers'] = ['djangopatterns.com']\n # Static media servers\n # env.roledefs['media_servers'] = ['djangopatterns.com']\n # Postgres servers.\n env.roledefs['db_servers'] = ['djangopatterns.com']\n\n # Combine all of the roles into the env.hosts list.\n env.hosts = [host[0] for host in env.roledefs.values()]", "def common_environment(self, manifest):\n env = {}\n self._caches(manifest, env)\n self._databases(manifest, env)\n return env", "def dev():\n env.hosts = ['']\n env.user = ''\n env.virtualenv_dir = ''\n env.code_dir = ''\n env.var_dir = ''\n env.activate = 'source %s/bin/activate' % env.virtualenv_dir\n env.backup_on_deploy = False", "def is_sandbox_environment(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_sandbox_environment\")", "def test_is_local_dev(self):\n\n expected = True\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def test_no_D2_ENVIRONMENT(self):\n self.assertIsNone(os.environ.get('D2_ENVIRONMENT'))", "def test_no_D2_ENVIRONMENT(self):\n self.assertIsNone(os.environ.get('D2_ENVIRONMENT'))", "def get_environment(self):\n return self._environment" ]
[ "0.69293857", "0.69236493", "0.6797534", "0.67813027", "0.6735507", "0.66701186", "0.66018623", "0.6579463", "0.6422409", "0.6391686", "0.6374094", "0.6291594", "0.62128055", "0.6179286", "0.61659306", "0.61260915", "0.61217046", "0.61181223", "0.60868585", "0.6076432", "0.60169834", "0.6001872", "0.59933865", "0.5992692", "0.5987433", "0.59782946", "0.5976465", "0.5969308", "0.5969119", "0.5943099", "0.591999", "0.59083056", "0.5885899", "0.58847064", "0.5878463", "0.5876945", "0.5874043", "0.58469015", "0.58195424", "0.58147115", "0.58106625", "0.58058286", "0.58058286", "0.58058286", "0.58058286", "0.57801604", "0.575894", "0.57508075", "0.57405823", "0.57335836", "0.5725353", "0.5721049", "0.5700881", "0.56904584", "0.5679431", "0.56756425", "0.56676173", "0.56623304", "0.56595844", "0.5656132", "0.5650972", "0.5625524", "0.5616757", "0.5607917", "0.56007886", "0.5593147", "0.55915564", "0.5585636", "0.5585503", "0.5580125", "0.55721396", "0.55624735", "0.5558978", "0.5556909", "0.55561864", "0.55539197", "0.5537803", "0.5534886", "0.5519165", "0.55187684", "0.55108845", "0.5505514", "0.54995674", "0.54953116", "0.5490994", "0.5487502", "0.5485559", "0.548319", "0.54686207", "0.54630053", "0.546193", "0.5456662", "0.5454404", "0.5453214", "0.5452824", "0.5449604", "0.5428001", "0.5412305", "0.5412305", "0.54019886" ]
0.61509675
15
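The config-flow step in the record above builds its form from a voluptuous schema that restricts the "environment" field to two choices. A minimal runnable sketch of that selector, assuming the voluptuous package and illustrative values for the ENV_CLOUD / ENV_LOCAL constants:

import voluptuous as vol

ENV_CLOUD = "cloud"   # assumed value for illustration
ENV_LOCAL = "local"   # assumed value for illustration

# Restrict "environment" to the two allowed choices, defaulting to cloud.
ENVIRONMENT_SCHEMA = vol.Schema(
    {vol.Required("environment", default=ENV_CLOUD): vol.In([ENV_CLOUD, ENV_LOCAL])}
)

# Validation returns the coerced dict; an unknown value raises a voluptuous Invalid error.
assert ENVIRONMENT_SCHEMA({"environment": ENV_LOCAL}) == {"environment": ENV_LOCAL}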
Check if a CLOUD device has already been added.
def is_cloud_device_already_added(self): for entry in self._async_current_entries(): if entry.unique_id is not None and entry.unique_id == f"{DOMAIN}Cloud": return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def exists_device_node(self, device_node: Path) -> bool:\n try:\n self.get_by_path(device_node)\n except HardwareNotFound:\n return False\n return True", "def isExistingSameDevice(config_db, deviceName, table):\n settings = config_db.get_table(table)\n for key,values in settings.items():\n if \"remote_device\" in values and deviceName == values[\"remote_device\"]:\n return True\n\n return False", "def is_existing(self):\n return self.backend.is_existing", "def exists(self):\n return True", "def exists(self):\n return True", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def addUIdevice (self, deviceString):\n if deviceString.strip() not in self.UIdevices:\n self.UIdevices += [deviceString]\n else:\n print ( \"%s already in UI the device list\" % deviceString)", "def system_valid(self):\n return self.udev.devices_exist", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def exists(self):\n\n if self:\n pass", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)", "def isExist(data):\n return True/False", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def has_upnp_devices(self) -> bool:\n return self._has_upnp_devices", "def devices_exist(self):\n return all(r.sys_path_exists for r in self.rules)", "def check_device(self, class_id, vendor_id, product_id):\n if len(self.class_id) > 0 and class_id != self.class_id:\n return False\n\n if len(self.vendor_id) > 0 and vendor_id != self.vendor_id:\n return False\n\n if len(self.devices) > 0 and product_id not in self.devices:\n return False\n\n return True", "def is_create_vendor_present(self):\n return self.is_element_present(self.create_vendor_locator)", "def hasAddOrDelete(self):\n return self.__hasAddOrDelete", "def deviceConnected(self, deviceName):\n if not deviceName:\n return False\n\n for driver in 
self.drivers:\n if not self.scanValid(driver=driver, deviceName=deviceName):\n continue\n\n self.drivers[driver]['uiDropDown'].setStyleSheet(self.BACK_GREEN)\n self.deviceStat[driver] = True\n # self.app.message.emit(f'{driver} connected', 0)\n return True", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def exist(self):", "async def _async_has_devices(hass) -> bool:\n gree_discovery = Discovery(DISCOVERY_TIMEOUT)\n devices = await gree_discovery.scan(wait_for=DISCOVERY_TIMEOUT)\n return len(devices) > 0", "def is_profile_device(cls, device: UpnpDevice) -> bool:\n try:\n profile_device = find_device_of_type(device, cls.DEVICE_TYPES)\n except UpnpError:\n return False\n\n # Check that every service required by the subclass is declared by the device\n device_service_ids = {\n service.service_id for service in profile_device.services.values()\n }\n\n if not cls.SERVICE_IDS.issubset(device_service_ids):\n return False\n\n return True", "def _id_exists(self):\n return self.app_id in self.engine.app_list", "def is_valid(self):\n if not self.__usb_if:\n return False\n return self.__usb_if.is_valid()\n #end is_valid()", "def is_devices_file_empty(self) -> bool:\n return len(self._devices_names) == 0", "def _exists(self) -> bool:\n client = MlflowClient()\n all_metrics = client._tracking_client.store.get_all_metrics(\n run_uuid=self.run_id\n )\n return any(self._is_dataset_metric(x) for x in all_metrics)", "def complete(self):\n return (self.memberDevices <= len(self.members)) or not self.exists", "def isRegistered(self, cid):\n return (self.__getIDFromCID(cid) is not None)", "def is_added(self, channel=None):\n return self.get_state(channel) == 1", "def _check_validdevice(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdeviceids:\n\n return True\n else:\n return False", "def is_registered(self) -> bool:\n from arkouda.util import is_registered\n\n if self.registered_name is None:\n return False\n return is_registered(self.registered_name)", "def is_connected(self):\n try:\n if self.coordinator.data[self._system_id][\"devices\"][self._item_id].get(\n \"connected\"\n ):\n connected_ap = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"apId\")\n if connected_ap:\n connected_ap = self.coordinator.data[self._system_id][\n \"access_points\"\n ][connected_ap][\"accessPointSettings\"][\"accessPointOtherSettings\"][\n \"roomData\"\n ][\n \"name\"\n ]\n self._attrs[\"connected_ap\"] = connected_ap\n else:\n self._attrs[\"connected_ap\"] = \"NA\"\n\n self._attrs[\"ip_address\"] = self.coordinator.data[self._system_id][\n \"devices\"\n ][self._item_id].get(\"ipAddress\", \"NA\")\n\n self._mac = self.coordinator.data[self._system_id][\"devices\"][\n self._item_id\n ].get(\"macAddress\")\n\n self._attrs[\"mac\"] = self._mac if self._mac else \"NA\"\n\n self._is_connected = True\n else:\n self._is_connected = False\n except TypeError:\n pass\n except KeyError:\n pass\n # self.hass.async_create_task(\n # self.hass.config_entries.async_reload(self.coordinator.entry.entry_id)\n # )\n\n return self._is_connected", "def check_if_already_used(self, key):\n for switch in self.new_switches:\n if key == self.new_switches[switch]:\n return True\n return False", "async def _async_has_devices(opp: OpenPeerPower) -> bool:\n # TODO Check if there are any devices that can be discovered in the network.\n devices = await opp.async_add_executor_job(my_pypi_dependency.discover)\n return 
len(devices) > 0", "def exists(self):\n return os.path.exists(self.sensorpath)", "def _check_already_present(self, new_da):\n for da in self:\n self._id_of_DataArrays_equal(da, new_da)", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def load_devices(self, devices_dict, source=None):\n if source is not None:\n return False\n\n for dev in devices_dict:\n # Check that the name of the device is not in the dictionary of devices\n if not dev in self.devices:\n self.devices[dev] = {'dev': Device(devices_dict[dev]),\n 'actuators': {},\n 'sensors': {}}\n self.logger.debug('Added {} to the experiment'.format(dev))\n if 'driver' in devices_dict[dev]:\n self.devices[dev]['dev'].initialize_driver()\n\n else:\n self.logger.warning('Trying to load {}, but already exists'.format(dev))\n raise Warning('Loading a duplicated device')\n self.loaded_devices = True\n return True", "def has_add_on(self) -> bool:\n return self.add_on_tag != 0", "def is_registered(self):\n return self.faucet is not None", "def if_Add(self):\n if self.multiList is None:\n return True\n else:\n return False", "def bt_connect_and_check(self, bluetooth_device_name_to_connect):\n is_bt_connect_and_check = False\n try:\n logger.info(\"Check if bluetooth has already connected to DUT devices\")\n is_bt_already_connected = self.bt_is_connected_to(\n bluetooth_device_name_to_connect)\n\n if is_bt_already_connected:\n is_bt_connect_and_check = True\n else:\n logger.info(\"Start trying to connect to DUT devices\")\n self.bt_connect(bluetooth_device_name_to_connect)\n is_bt_connect_and_check = self.bt_is_connected_to(\n bluetooth_device_name_to_connect)\n except Exception as e:\n logger.error(\"Need to turn on bluetooth and DUT devices\")\n logger.error(repr(e))\n return is_bt_connect_and_check", "def IsCUDADriverSufficient():\n return IsCUDADriverSufficientCC()", "def check_registration(self, device_id):\n for item in self.ws.events['registrations']:\n # If asked device_id is found return its data. 
Otherwise return False\n if item['ep'] == device_id:\n return item\n return False", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def request_seen(self, request):\n fp = request_fingerprint(request)\n # This returns the number of values added, zero if already exists.\n added = self.server.sadd(self.key, fp)\n return added == 0", "def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False", "def has_muse_devices(self) -> bool:\n return self._has_muse_devices", "def client_exists(self, client=None):\n if type(client) is Client:\n return client.client_id in [c.client_id for c in self.client_list]\n else:\n return False", "async def exists(self, payload: TPayload) -> bool:", "def check_already_mounted(devpath, mountpoint):\n mounts = Mounter().read_mounts()\n for m in mounts:\n if devpath == m.device and mountpoint == m.mountpoint:\n return True\n return False", "def channels_last(self, device):\n return device not in self._gpu_devices", "def is_registered(self):\n return self._is_registered", "def exists(self):\n\n return self.ids[-1] is not None", "def _exists(isamAppliance, id):\n exists = False\n ret_obj = get_all(isamAppliance)\n\n for snmp in ret_obj['data']:\n if snmp['id'] == id:\n exists = True\n break\n\n return exists", "def _exists(isvgAppliance, uuid):\n exists = False\n ret_obj = get_all(isvgAppliance)\n\n for snmp in ret_obj['data']['snmpObjects']:\n if snmp['uuid'] == uuid:\n exists = True\n break\n\n return exists", "def check_device_state(self):", "def google_drive_connector_exists(self):\n return self.__google_drive_connector is not None", "def test_add_device(self):\n\n pass", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def check_chip_ble_devices_advertising(devCtrl, name, deviceDetails=None):\n ble_chip_device = scan_chip_ble_devices(devCtrl)\n if ble_chip_device is None or len(ble_chip_device) == 0:\n log.info(\"No BLE CHIP device found\")\n return False\n\n chip_device_found = False\n\n for ble_device in ble_chip_device:\n if deviceDetails is not None:\n if (ble_device[\"name\"] == name and\n int(ble_device[\"discriminator\"]) == int(deviceDetails[\"Discriminator\"]) and\n int(ble_device[\"vendorId\"]) == int(deviceDetails[\"VendorID\"]) and\n int(ble_device[\"productId\"]) == int(deviceDetails[\"ProductID\"])):\n chip_device_found = True\n break\n else:\n if (ble_device[\"name\"] == name):\n chip_device_found = True\n break\n\n return chip_device_found", "def check_existing(self):\n if self.btcd_container != None:\n self.btcd_container.reload()\n if self.btcd_container.status == \"running\":\n rpcconn, container = self.detect_bitcoind_container(\n self.rpcconn.rpcport\n )\n if container == self.btcd_container:\n return rpcconn\n raise Exception(\"Ambigious Container running\")\n return None", "def dupe_event_exists(stripe_id):\n return models.Event.objects.filter(stripe_id=stripe_id).exists()", "def exists(self, name):\n return self.backend.exists(name)", "def registered(id):\n return True", 
"def has_platform(self, platform_name):\n return platform_name in self.platform_list", "def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def exists(self):\n return bool(self.get())", "def exists(self, conn, key):\n return conn.exists(key)", "def _find_device(self):\n found_device = False\n nearby_devices = None\n try:\n nearby_devices = self._adapter.scan()\n except Exception:\n pass\n\n if nearby_devices is not None:\n for device in nearby_devices:\n name = device['name']\n if name is not None and name.startswith(self._search_name):\n self._address = device['address']\n print(f'Found device named: {name} at {self._address}')\n found_device = True\n break\n\n return found_device", "async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def addlocation(self, location):\n found = False\n for loc in self.__locations:\n if loc == location:\n found = True\n # print(\"Location already exists\")\n return False, 0\n if not found:\n self.__locations.append(location)\n return True, 0", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def check_registration_updates(self, device_id):\n for item in self.ws.events['reg-updates']:\n # If asked device_id is found return its data. 
Otherwise return False\n if item['ep'] == device_id:\n return item\n return False", "def is_in_use(self):\n\t\treturn bool(call_sdk_function('PrlBootDev_IsInUse', self.handle))", "def _log_exists(name):\n return name in logging.Logger.manager.loggerDict", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def check_item_in(self, url):\n item_hash = tools.url_hash(url)\n if item_hash not in self.__items:\n self.__item_lock.acquire()\n self.__items.add(item_hash)\n self.__item_lock.release()\n return False\n else:\n return True", "def device_is_configured(self):\n\n\t\ttry:\n\t\t\t_ = self._dev\n\t\texcept:\n\t\t\treturn False\n\n\t\treturn True", "def exists(self):\n if self.host.exists(self.remote_path):\n print 'Yes, config exists already.'\n return True\n else:\n print 'Config doesn\\'t exist yet'\n return False", "def have_cdc() -> bool:", "def contains(self, container: Container) -> bool:\n self.network.reload()\n return container in self.network.containers", "def available(self):\n return (\n hub.get_first(\n \"$.doorLockStatusList[?(@.deviceLabel=='%s')]\", self._device_label\n )\n is not None\n )", "def recognize_device(self, device):\n return False" ]
[ "0.6239254", "0.609345", "0.609345", "0.609345", "0.6073092", "0.6065576", "0.6047093", "0.5987568", "0.5987568", "0.5947811", "0.58741647", "0.58736914", "0.58404255", "0.58278227", "0.5810434", "0.5785578", "0.5772156", "0.57598025", "0.5751442", "0.5747727", "0.57093596", "0.56728476", "0.5671144", "0.56695276", "0.56648475", "0.5637624", "0.5636435", "0.5633333", "0.5633333", "0.5628807", "0.5615231", "0.56150365", "0.56068313", "0.56014186", "0.5594132", "0.55822283", "0.5574836", "0.5574576", "0.5562252", "0.5524803", "0.55224097", "0.55087745", "0.55031705", "0.55015254", "0.54900455", "0.5474999", "0.5474225", "0.54599315", "0.54562277", "0.5450306", "0.54495454", "0.5442306", "0.54398865", "0.5431124", "0.54212236", "0.54122174", "0.54109174", "0.5400806", "0.5392086", "0.5390876", "0.53868604", "0.53778565", "0.53772306", "0.5368479", "0.53627", "0.53569764", "0.53553694", "0.5351981", "0.5347438", "0.5346123", "0.534278", "0.5334659", "0.53337556", "0.5324253", "0.5322991", "0.5322503", "0.5321228", "0.5317084", "0.53068817", "0.529404", "0.5280928", "0.528", "0.528", "0.528", "0.528", "0.528", "0.528", "0.528", "0.5278862", "0.5270487", "0.5263522", "0.52632487", "0.5262196", "0.52618164", "0.5260579", "0.52539843", "0.5242712", "0.5238502", "0.52377707", "0.5224189" ]
0.8269784
0
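The document in the record above decides whether a cloud entry already exists by scanning the flow's current config entries for a unique_id equal to f"{DOMAIN}Cloud". A standalone sketch of the same check with the config-entry objects stubbed out (the DOMAIN value and the Entry stub are assumptions; the real flow reads entries from _async_current_entries()):

from dataclasses import dataclass
from typing import Optional

DOMAIN = "example"  # assumed for illustration

@dataclass
class Entry:
    unique_id: Optional[str] = None

def is_cloud_device_already_added(entries):
    # A cloud entry is identified by the "<DOMAIN>Cloud" unique_id.
    return any(e.unique_id is not None and e.unique_id == f"{DOMAIN}Cloud" for e in entries)

assert is_cloud_device_already_added([Entry(None), Entry(f"{DOMAIN}Cloud")])
assert not is_cloud_device_already_added([Entry("somethingElse")])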
Convert data types in a pandas dataframe. The purpose is to reduce the size of the dataframe.
def convert_types(df): # Iterate through each column for c in df: # Convert ids and booleans to integers if ('SK_ID' in c): df[c] = df[c].fillna(0).astype(np.int32) # Convert objects to category elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]): df[c] = df[c].astype('category') # Booleans mapped to integers elif list(df[c].unique()) == [1, 0]: df[c] = df[c].astype(bool) # Float64 to float32 elif df[c].dtype == float: df[c] = df[c].astype(np.float32) # Int64 to int32 elif df[c].dtype == int: df[c] = df[c].astype(np.int32) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })", "def pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def set_dtypes(df):\n # drop rows where a column names appear (happened while appending to csv)\n df = df.loc[df[df.columns[0]] != df.columns[0]]\n # convert numerics\n df = df.apply(pd.to_numeric, errors='ignore')\n # parse query_timestamp\n df.query_timestamp = df.query_timestamp.apply(pd.to_datetime)\n\n df.reset_index(inplace=True, drop=True)\n\n return df", "def astype(self, dtype: Union[Dict[str, str], str]) -> 'DataFrame':\n\n def change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm):\n missing_value_code = utils.get_missing_value_code(new_kind)\n if new_kind == 'S':\n if old_kind == 'b':\n arr = arr + 1\n cur_srm = [False, 'False', 'True']\n elif old_kind in 'i':\n cur_srm, arr = _va.convert_int_to_str(arr)\n elif old_kind == 'f':\n cur_srm, arr = _va.convert_float_to_str(arr)\n elif old_kind in 'mM':\n cur_srm, arr = _va.convert_datetime_str_to_str(arr.astype('str'))\n\n new_arr[:, new_loc] = arr\n new_srm[new_loc] = cur_srm\n else:\n if new_kind != old_kind:\n nas = utils.isna_array(arr, old_kind)\n if new_kind == 'b' and old_kind != 'b':\n arr = arr.astype('bool').astype('int8')\n new_arr[:, new_loc] = arr\n if new_kind != old_kind:\n new_arr[nas, new_loc] = missing_value_code\n\n if isinstance(dtype, str):\n new_dtype: str = utils.check_valid_dtype_convert(dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n utils.check_astype_compatible(new_kind, self._data.keys())\n\n new_column_info: ColInfoT = {}\n new_arr = utils.create_empty_arr(new_kind, self.shape)\n new_data = {new_kind: new_arr}\n new_srm = {}\n col_iter = enumerate(self._col_info_iter(with_order=True, with_arr=True))\n for i, (col, old_kind, loc, order, arr) in col_iter:\n new_column_info[col] = utils.Column(new_kind, i, order)\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(i, new_kind, old_kind, arr, new_arr, cur_srm)\n elif isinstance(dtype, dict):\n col_kind_convert = {}\n for col, new_dtype in dtype.items():\n self._validate_column_name(col)\n new_dtype: str = utils.check_valid_dtype_convert(new_dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n col_kind_convert[col] = new_kind\n old_kind = self._column_info[col].dtype\n utils.check_astype_compatible(new_kind, {old_kind})\n\n new_column_info: ColInfoT = {}\n cols_per_kind: Dict[str, int] = defaultdict(int)\n for col, old_kind, loc, order in self._col_info_iter(with_order=True):\n new_kind = col_kind_convert.get(col, old_kind)\n cur_loc = cols_per_kind[new_kind]\n new_column_info[col] = utils.Column(new_kind, cur_loc, order)\n cols_per_kind[new_kind] += 1\n\n # create empty arrays for each type\n new_data = {}\n for new_kind, num_cols in cols_per_kind.items():\n shape = len(self), num_cols\n new_data[new_kind] = utils.create_empty_arr(new_kind, shape)\n\n new_srm = {}\n for col, old_kind, loc, order, arr in self._col_info_iter(with_order=True, with_arr=True):\n new_kind = new_column_info[col].dtype\n new_loc = new_column_info[col].loc\n new_arr = new_data[new_kind]\n if 
old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm)\n else:\n raise TypeError('Argument dtype must be either a string or a dictionary')\n\n new_columns = self._columns.copy()\n return self._construct_from_new(new_data, new_column_info, new_columns, new_srm)", "def convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n dtype_backend: DtypeBackend = \"numpy_nullable\",\n ):\n return DataFrameDefault.register(pandas.DataFrame.convert_dtypes)(\n self,\n infer_objects=infer_objects,\n convert_string=convert_string,\n convert_integer=convert_integer,\n convert_boolean=convert_boolean,\n convert_floating=convert_floating,\n dtype_backend=dtype_backend,\n )", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def convert_type(data):\n# Categorical features\n columns = ['Browser', 'OperatingSystems', 'Region', 'TrafficType']\n for col in columns:\n data[col] = data[col].apply(lambda x: str(x))\n return data", "def astype(self, col_dtypes, errors: str = \"raise\"): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.astype)(\n self, dtype=col_dtypes, errors=errors\n )", "def convertColumn(df, names, newType) -> pyspark.sql.dataframe.DataFrame:\n for name in names: \n df = df.withColumn(name, df[name].cast(newType))\n return df", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def test_df():\n return pd.DataFrame({\n 'intcol': [1, 2, 3],\n 'strcol': ['four', 'five', 'six'],\n 'floatcol': [7.0, 8.0, 9.0]\n })", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def _coerce_and_store_data_types(tag_loop_dict):\n\n regex_format = re.compile(r\"\"\"\\d*\\.(?P<decimal>\\d+)(?:[Ee]?[+-]?(?P<exponent>\\d?))\"\"\")\n\n # Attempt to convert data columns from strings to integers or floats whenever possible\n # Skip any table with 'data_header' in its name because these contain mixed data\n for key in tag_loop_dict.keys():\n if u'data_header' 
not in key:\n tmp = tag_loop_dict[key].copy()\n tag_loop_dict[key] = tag_loop_dict[key].apply(lambda x: pd.to_numeric(x, errors=u'ignore'))\n \n # Preserve the formatting for all columns that were converted to floats\n float_cols = [x for x in tag_loop_dict[key].columns if tag_loop_dict[key][x].dtype == np.float]\n\n decimal_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('decimal'))).max())\n for col in float_cols])\n\n exponent_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('exponent'))).max())\n for col in float_cols])\n\n number_format = dict([(col,'f') if exponent_format[col] == 0 else (col,'E')\n for col in float_cols])\n\n formatter = dict([(col, '{:.' + str(decimal_format[col]) + number_format[col] + '}') \n for col in float_cols])\n \n # Save format instructions to dataframe\n tag_loop_dict[key]._print_format = formatter\n\n return tag_loop_dict", "def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def to_numeric_and_downcast_data(df: pd.DataFrame):\n fcols = df.select_dtypes('float').columns\n \n icols = df.select_dtypes('integer').columns\n\n df[fcols] = df[fcols].apply(pd.to_numeric, downcast='float')\n \n df[icols] = df[icols].apply(pd.to_numeric, downcast='integer')\n\n return df", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def apply_to(self, df: pd.DataFrame) -> pd.DataFrame:\n schema_names = self.names\n data_columns = df.columns\n\n assert len(schema_names) == len(\n data_columns\n ), \"schema column count does not match input data column count\"\n\n for column, dtype in zip(data_columns, self.types):\n pandas_dtype = dtype.to_pandas()\n\n col = df[column]\n col_dtype = col.dtype\n\n try:\n not_equal = pandas_dtype != col_dtype\n except TypeError:\n # ugh, we can't compare dtypes coming from pandas,\n # assume not equal\n not_equal = True\n\n if not_equal or not dtype.is_primitive():\n new_col = convert(col_dtype, dtype, col)\n else:\n new_col = col\n df[column] = new_col\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema_names\n return df", "def _get_data_as_df(self, data):\n if isinstance(data, pd.DataFrame):\n return data\n if isinstance(data, dict):\n data = [data]\n data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=data,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n self._original_df_dtypes = original_df_dtypes\n return data", "def _preprocess_temporal_columns(df: DataFrame) -> DataFrame:\n 
for col in df.select_dtypes(include=[\"datetime64[ns, UTC]\"]):\n df = df.astype({col: \"O\"})\n for col in df.select_dtypes(include=\"timedelta64[ns]\"):\n df = df.astype({col: \"O\"})\n return df", "def test_pandas_dtypes():\n assert pd.DataFrame([1, 2]).dtypes.values[0] == np.dtype('int64') == np.int64\n assert pd.DataFrame([1, 2, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([1.0, 2.0]).dtypes.values[0] == np.dtype('float64') == np.float64\n assert pd.DataFrame([1.0, 2.0, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([True, False]).dtypes.values[0] == np.dtype('bool') == np.bool\n assert pd.DataFrame([True, False, None]).dtypes.values[0] == np.dtype('object') == np.object\n\n assert pd.DataFrame([\"A\", \"B\"]).dtypes.values[0] == np.dtype('object') == np.object\n assert pd.DataFrame([\"A\", \"B\", None]).dtypes.values[0] == np.dtype('object') == np.object", "def from_pandas(cls, df: pd.DataFrame, dtypes: TYPE_DTYPE_INPUT = None) \\\n -> 'PlainFrame':\n\n converter = ConverterFromPandas(df)\n\n return converter(cls, dtypes=dtypes)", "def _maybe_pandas_data(self, data, feature_names, feature_types,\n meta=None, meta_type=None):\n if lazy_isinstance(data, 'pandas.core.series', 'Series'):\n dtype = meta_type if meta_type else 'float'\n return data.values.astype(dtype), feature_names, feature_types\n\n from pandas.api.types import is_sparse\n from pandas import MultiIndex, Int64Index\n\n data_dtypes = data.dtypes\n if not all(dtype.name in self.pandas_dtype_mapper or is_sparse(dtype)\n for dtype in data_dtypes):\n bad_fields = [\n str(data.columns[i]) for i, dtype in enumerate(data_dtypes)\n if dtype.name not in self.pandas_dtype_mapper\n ]\n\n msg = \"\"\"DataFrame.dtypes for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None and meta is None:\n if isinstance(data.columns, MultiIndex):\n feature_names = [\n ' '.join([str(x) for x in i]) for i in data.columns\n ]\n elif isinstance(data.columns, Int64Index):\n feature_names = list(map(str, data.columns))\n else:\n feature_names = data.columns.format()\n\n if feature_types is None and meta is None:\n feature_types = []\n for dtype in data_dtypes:\n if is_sparse(dtype):\n feature_types.append(self.pandas_dtype_mapper[\n dtype.subtype.name])\n else:\n feature_types.append(self.pandas_dtype_mapper[dtype.name])\n\n if meta and len(data.columns) > 1:\n raise ValueError(\n 'DataFrame for {meta} cannot have multiple columns'.format(\n meta=meta))\n\n dtype = meta_type if meta_type else 'float'\n data = data.values.astype(dtype)\n\n return data, feature_names, feature_types", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def from_pandas(cls, df, data_cls):\n pass", "def encode_dtypes(df):\n\n global catn, cato\n\n # Nominal categories\n for name in catn:\n df[name] = df[name].astype(\"category\")\n # Add a None category for missing values\n if \"None\" not in df[name].cat.categories:\n df[name].cat.add_categories(\"None\", inplace=True)\n # Ordinal categories\n for name, levels in cato.items():\n df[name] = df[name].astype(CategoricalDtype(levels,\n ordered=True))\n return df", "def pandas_data_frame_to_rpy2_data_frame(pDataframe):\n orderedDict = OrderedDict()\n\n for columnName in pDataframe.columns:\n columnValues = pDataframe[columnName].values\n filteredValues = \\\n [value if pandas.notnull(value) else robj.NA_Real 
\\\n for value in columnValues]\n try:\n orderedDict[columnName] = robj.FloatVector(filteredValues)\n except ValueError:\n orderedDict[columnName] = robj.StrVector(filteredValues)\n\n rDataFrame = robj.DataFrame(orderedDict)\n rDataFrame.rownames = robj.StrVector(pDataframe.index)\n\n return rDataFrame", "def convert_dtypes(rows):\n dtype_map = {pd.Timestamp: lambda x: x.to_pydatetime(),\n np.int8: lambda x: int(x),\n np.int16: lambda x: int(x),\n np.int32: lambda x: int(x),\n np.int64: lambda x: int(x),\n np.float16: lambda x: float(x),\n np.float32: lambda x: float(x),\n np.float64: lambda x: float(x),\n np.float128: lambda x: float(x)}\n for row in rows:\n yield [dtype_map.get(type(elem), lambda x: x)(elem) for elem in row]", "def _set_data_types(self):\n temp_df = self.raw_data\n cols = temp_df.drop('room_location', axis=1).columns\n temp_df[cols] = temp_df[cols].apply(pd.to_numeric)\n temp_df['room_location'] = temp_df['room_location'].astype(str)\n self.raw_data = temp_df", "def create_quanti_df(df: pd.DataFrame) -> pd.DataFrame:\n\n # create a dictionary that contains datatype of each column\n dtypeDict = dict(df.dtypes)\n # create a list of column names that contains only quantitative data\n quanti_cols = [\n key\n for key, value in dtypeDict.items()\n if value == \"float64\" or value == \"int64\" or value == \"uint8\"\n ]\n df = df[quanti_cols]\n return df", "def rep_dtypes(df):\n return \"(\" + re.sub(\", dtype.*\", \"\", re.sub(r\" +\", \": \", str(df.dtypes)).replace(\"\\n\", \", \")) + \")\"", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def dtypes(self) -> 'DataFrame':\n dtype_list: List[str] = [utils.convert_kind_to_dtype(self._column_info[col].dtype)\n for col in self._columns]\n arr: ndarray = np.array(dtype_list, dtype='O')\n columns: List[str] = ['Column Name', 'Data Type']\n data, str_reverse_map = _va.convert_str_to_cat_list_2d([self._columns, arr])\n new_data: Dict[str, ndarray] = {'S': data}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 0, 0),\n 'Data Type': utils.Column('S', 1, 1)}\n return self._construct_from_new(new_data, new_column_info, np.array(columns, dtype='O'),\n str_reverse_map)", "def reduce_memory_footprint(df):\n for col in df.columns:\n if df[col].dtypes == 'float64':\n df[col] = df[col].astype('float32')\n elif df[col].dtypes == 'int64':\n df[col] = df[col].astype('int32')\n \n return df", "def ibis_schema_apply_to(schema, df):\n\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n if isinstance(dtype, dt.Interval):\n df[column] = df[column].values.astype(pandas_dtype)\n else:\n df[column] = df[column].astype(pandas_dtype, errors='ignore')\n\n if PY2 and dtype == dt.string:\n df[column] = df[column].str.decode('utf-8', errors='ignore')\n\n return df", "def convert(data, to):\n converted = None\n if to == 'array':\n if isinstance(data, np.ndarray):\n converted = data\n elif isinstance(data, pd.Series):\n converted = data.values\n elif isinstance(data, list):\n converted = np.array(data)\n elif isinstance(data, pd.DataFrame):\n converted = data.as_matrix()\n elif to == 'list':\n if isinstance(data, list):\n converted = data\n elif isinstance(data, pd.Series):\n converted = data.values.tolist()\n elif isinstance(data, np.ndarray):\n converted = data.tolist()\n elif to == 'dataframe':\n if isinstance(data, pd.DataFrame):\n converted = data\n elif isinstance(data, np.ndarray):\n converted = pd.DataFrame(data)\n else:\n raise ValueError(\"Unknown data conversion: {}\".format(to))\n if converted is 
None:\n raise TypeError(\n 'cannot handle data conversion of type: {} to {}'.format(\n type(data), to))\n else:\n return converted", "def change_col_type(df,schema):\n d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df", "def geneset_to_pandas(geneset):\n items = []\n for n in geneset.dtype.names:\n v = geneset[n]\n # convert bytes columns to unicode (which pandas then converts to object)\n if v.dtype.kind == 'S':\n v = v.astype('U')\n items.append((n, v))\n return pandas.DataFrame.from_items(items)", "def qset_to_df(qset, datatype='object'):\n df = pd.DataFrame(list(qset.values()), dtype=datatype)\n return df", "def pandas_to_table(df):\n # type: (pd.DataFrame) -> Orange.data.Table\n index = df.index\n if not isinstance(index, pd.RangeIndex):\n df = df.reset_index()\n\n columns = [] # type: List[Tuple[Orange.data.Variable, np.ndarray]]\n\n for header, series in df.items(): # type: (Any, pd.Series)\n if pdtypes.is_categorical(series):\n coldata = series.values # type: pd.Categorical\n categories = [str(c) for c in coldata.categories]\n var = Orange.data.DiscreteVariable.make(\n str(header), values=categories, ordered=coldata.ordered\n )\n # Remap the coldata into the var.values order/set\n coldata = pd.Categorical(\n coldata, categories=var.values, ordered=coldata.ordered\n )\n codes = coldata.codes\n assert np.issubdtype(codes.dtype, np.integer)\n orangecol = np.array(codes, dtype=np.float)\n orangecol[codes < 0] = np.nan\n elif pdtypes.is_datetime64_any_dtype(series):\n # Check that this converts tz local to UTC\n series = series.astype(np.dtype(\"M8[ns]\"))\n coldata = series.values # type: np.ndarray\n assert coldata.dtype == \"M8[ns]\"\n mask = np.isnat(coldata)\n orangecol = coldata.astype(np.int64) / 10 ** 9\n orangecol[mask] = np.nan\n var = Orange.data.TimeVariable.make(str(header))\n var.have_date = var.have_time = 1\n elif pdtypes.is_object_dtype(series):\n coldata = series.fillna('').values\n assert isinstance(coldata, np.ndarray)\n orangecol = coldata\n var = Orange.data.StringVariable.make(str(header))\n elif pdtypes.is_integer_dtype(series):\n coldata = series.values\n var = Orange.data.ContinuousVariable.make(str(header))\n var.number_of_decimals = 0\n orangecol = coldata.astype(np.float64)\n elif pdtypes.is_numeric_dtype(series):\n orangecol = series.values.astype(np.float64)\n var = Orange.data.ContinuousVariable.make(str(header))\n var._out_format = \"%.15g\"\n else:\n warnings.warn(\n \"Column '{}' with dtype: {} skipped.\"\n .format(header, series.dtype),\n UserWarning\n )\n continue\n columns.append((var, orangecol))\n\n cols_x = [(var, col) for var, col in columns if var.is_primitive()]\n cols_m = [(var, col) for var, col in columns if not var.is_primitive()]\n\n variables = [v for v, _ in cols_x]\n if cols_x:\n X = np.column_stack([a for _, a in cols_x])\n else:\n X = np.empty((df.shape[0], 0), dtype=np.float)\n metas = [v for v, _ in cols_m]\n if cols_m:\n M = np.column_stack([a for _, a in cols_m])\n else:\n M = None\n\n domain = Orange.data.Domain(variables, metas=metas)\n return Orange.data.Table.from_numpy(domain, X, None, M)", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def set_vars_as_type(df, varNames, dtype):\n\n myVars = 
list(set(df.columns).intersection(set(varNames)))\n df[myVars] = df[myVars].astype(dtype)", "def convert_to_dict(data_frame: pd.DataFrame) -> List[Dict[str, Any]]:\n type_conversion = {\"date\": str} if \"date\" in data_frame.columns else {}\n return data_frame.replace({np.nan: None}).astype(type_conversion).to_dict(\"records\")", "def clean_dtypes(df):\n df['AgentLat'] = df['AgentLat'].astype(float)\n df['AgentLong'] = df['AgentLong'].astype(float)\n df['ContaMediaAccount'] = df['ContaMediaAccount'].astype(int)\n df['DistVIPHamming'] = df['DistVIPHamming'].astype(float)\n df['Distance'] = df['Distance'].astype(float)\n df['Final'] = df['Final'].astype(float)\n df['LeadID'] = df['LeadID'].astype(int)\n df['LeadLat'] = df['LeadLat'].astype(float)\n df['LeadLong'] = df['LeadLong'].astype(float)\n df['MLDecision'] = df['MLDecision'].astype(float)\n df['SemDistCorrel'] = df['SemDistCorrel'].astype(float)\n df['SemDistCosine'] = df['SemDistCosine'].astype(float)\n df['SemDistHamming'] = df['SemDistHamming'].astype(float)\n df['StarRating'] = df['StarRating'].astype(float)\n df['StoryAgent'] = df['StoryAgent'].tolist()\n df['StoryLead'] = df['StoryLead'].tolist()\n df['VIPAgentStory'] = df['VIPAgentStory'].tolist()\n df['VIPLeadStory'] = df['VIPLeadStory'].tolist()\n df['VIPAgentStory'] = df['VIPAgentStory'].astype(str)\n df['VIPLeadStory'] = df['VIPLeadStory'].astype(str)\n df['WeightSem'] = df['WeightSem'].astype(float)\n\n return df", "def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]", "def _wrap_result(data, columns, index_col=None, coerce_float=True,\n parse_dates=None):\n\n frame = DataFrame.from_records(data, columns=columns,\n coerce_float=coerce_float)\n\n _parse_date_columns(frame, parse_dates)\n\n if index_col is not None:\n frame.set_index(index_col, inplace=True)\n\n return frame", "def to_pandas(df):\n pd_df = pd.concat(ray.get(df._df))\n pd_df.index = df.index\n pd_df.columns = df.columns\n return pd_df", "def __convToTyped(index, value, dtypes):\n\t#print(index, value)\n\tdtype = dtypes[index]\n\ttvalue = value\n\tif dtype == \"int\":\n\t\ttvalue = int(value)\n\telif dtype == \"float\":\n\t\ttvalue = float(value)\n\treturn tvalue", "def to_pandas(self):\n # TODO Add type translation.\n # Skipping analyzing 'pandas': found module but no type hints or library stubs\n import pandas as pd # type: ignore\n\n map = {}\n for n, c in self._field_data.items():\n map[n] = c.to_pandas()\n return pd.DataFrame(map)", "def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data", "def create_quali_df(df: pd.DataFrame) -> pd.DataFrame:\n\n # create a dictionary that contains datatype of each column\n dtypeDict = dict(df.dtypes)\n # create a list of column names that contains only quantitative data\n quali_cols = [key for key, value in dtypeDict.items() if value == \"object\"]\n df = 
df[quali_cols]\n\n return df", "def _to_dataframe(self, dataset_name):\n values = self[dataset_name][:]\n columns = self.get_columns(dataset_name)\n timestamps = self.get_timestamps(dataset_name)[...]\n if len(columns) < values.shape[1]:\n columns.resize(values.shape[1])\n\n # transform missing data into NaNs\n mask = missing_values(values) != 0\n try:\n values[mask] = numpy.nan\n except ValueError: # ValueError: cannot convert float NaN to integer\n # don't bother converting non-float arrays' -0.0 into NaNs\n pass\n\n dataframe = pandas.DataFrame(data=values,\n index=[datetime.datetime.fromtimestamp(t) for t in timestamps],\n columns=columns)\n return dataframe", "def cast_columns(df: DataFrame, castType: [str, AtomicType], columns: List[str]) -> DataFrame:\n for column in columns:\n df = df.withColumn(column, col(column).cast(castType))\n\n return df", "def __convert_timestamps(data_frame: pd.DataFrame) -> pd.DataFrame:\n\n # get 2D array of data_frame as python builtin list,\n # get columns list to use for second pd.DataFrame constructor\n raw_array = data_frame.to_numpy(dtype=str).tolist()\n columns_list = data_frame.columns.tolist()\n\n # num_rows used to iterate through array,\n # num_cols used for naive error checking\n num_rows = len(raw_array)\n num_cols = len(raw_array[0])\n\n # here is why it is important to error check DataFrames\n # for correct dimensions before passing to this function\n # __convert_two and convert_tree_cols() both have\n # a different process and could hit errors or produce undefined behavior\n if num_cols == 3:\n __convert_three_cols(raw_array, num_rows)\n # convert_three_cols deletes a column, update our list of column names\n columns_list.remove(columns_list[1])\n elif num_cols == 2:\n __convert_two_cols(raw_array, num_rows)\n else:\n sys.stdout.write(f\"ERROR: GIVEN CSV FILE MUST CONTAIN TWO\"\n f\" OR THREE COLUMNS (NOT A TIME SERIES)\\n\"\n f\"(files with three columns are assumed to have date\"\n f\" in the first column and a time in the second)\\n\")\n raise NotImplementedError\n\n # once columns have been processed, use array and list of column names\n # to reassign data_frame to a new constructor\n data_frame = pd.DataFrame(data=raw_array, columns=columns_list)\n\n return data_frame", "def __call__(self, cls: PlainFrame,\n dtypes: Optional[TYPE_DTYPE_INPUT] = None) \\\n -> 'PlainFrame':\n\n dtypes_validated = self.get_forced_dtypes(dtypes)\n dtypes_validated.update(self.get_object_dtypes(dtypes_validated))\n dtypes_validated.update(self.get_inferred_dtypes(dtypes_validated))\n\n columns = self.df.columns.tolist()\n dtypes = [dtypes_validated[column] for column in columns]\n data = [self.convert_series(column, dtypes_validated[column])\n for column in columns]\n\n data = list(zip(*data))\n\n return cls.from_plain(data=data,\n columns=self.df.columns.tolist(),\n dtypes=dtypes)", "def convert_data_types(fields, src_db='mysql', dest_db='postgres'):\n\n data_type_map = {\n 'mysql': {\n 'postgres': {\n 'date': 'date',\n 'tinyint': 'smallint',\n 'smallint': 'smallint',\n 'mediumint': 'integer',\n 'int': 'bigint',\n 'bigint': 'numeric',\n 'float': 'real',\n 'double': 'double precision',\n 'tinytext': 'varchar',\n 'mediumtext': 'varchar',\n 'longtext': 'varchar',\n 'varchar': 'varchar',\n 'text': 'varchar',\n 'char': 'char',\n 'binary': 'bytea',\n 'varbinary': 'bytea',\n 'tinyblob': 'bytea',\n 'blob': 'bytea',\n 'mediumblob': 'bytea',\n 'longblob': 'bytea',\n 'datetime': 'timestamp',\n 'time': 'time',\n 'decimal': 'decimal',\n 'json': 'jsonb'\n }\n }\n 
}\n\n for elem in fields:\n elem['data_type'] = data_type_map[src_db][dest_db][elem['data_type']]\n\n if elem['data_type'] == 'decimal':\n elem['data_type'] += f'''{int(elem['numeric_precision']), int(elem['numeric_scale'])}'''\n\n fields = {e['column_name']: e['data_type'] for e in fields}\n\n return fields", "def coerce_numeric_values(df, annot_types):\n if \"numeric\" in annot_types:\n numeric_columns = df.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n try:\n # Round numeric columns to 3 decimal places\n df[numeric_columns] = df[numeric_columns].round(3).astype(float)\n except ValueError as e:\n log_exception(Annotations.dev_logger, Annotations.user_logger, e)\n raise ValueError(e)\n return df", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def transform(self, data: pd.DataFrame):\n raise NotImplementedError", "def castData(data, type='int64'):\n data = data.astype(type)\n return data", "def _convert_to_dummies_sklearn(df: pd.DataFrame) -> pd.DataFrame:\n raise NotImplementedError", "def encoding_df(df, cols):\n import pandas as pd\n df = df[cols]\n obj_df = df.select_dtypes(include=['object']).copy()\n num_var = df.select_dtypes(include=['int','float']).copy()\n cat_var = pd.get_dummies(obj_df, columns = obj_df.columns)\n encoded_df = pd.concat([num_var, cat_var], axis=1, sort=False)\n return encoded_df", "def _convert_to_dummies_pandas(df: pd.DataFrame) -> pd.DataFrame:\n df = df.copy()\n print(\"Raw dataset shape = {}\".format(df.shape))\n print(\"Creating one hot encoded categories...\")\n for col in df.columns:\n if col in CATEGORICAL_TRANS and col != \"isFraud\" and not col.startswith(\"card\"):\n print(\"Handling category: {}\".format(col))\n # Convert to categorical type, may not be necessary.\n df[col] = pd.Categorical(df[col])\n one_hot_encoded = pd.get_dummies(df[col], prefix=col)\n df = df.drop(columns=[col]) # remove the original categorical column.\n # Add the one-hot-encoded column.\n df = pd.concat([df, one_hot_encoded], axis=1)\n print(\"One-hot-encoded dataset shape = {}\".format(df.shape))\n return df", "def convert_variable_type_n(df):\n # available columns\n \"\"\"\n 'source_file', 'source_id', 'report_id', 'observation_id',\n 'record_timestamp', 'iday', 'station_id', 'lat@hdr', 'lon@hdr',\n 'vertco_reference_1@body', 'obsvalue@body', 'varno@body', 'units',\n 'number_of_pressure_levels'\n \"\"\"\n dic_var_type = { 'int32' : ['varno@body', 'number_of_pressure_levels' , 'units', 'z_coordinate_type' , 'vertco_type@body' ] ,\n 'float32' : ['lat@hdr', 'lon@hdr' , 'vertco_reference_1@body', 'obsvalue@body', 'iday' ] ,\n 'string' : ['source_id' , 'station_id' , 'source_file' , 'report_id', 'observation_id', ] ,\n 'int64' : ['report_timestamp' , 'date_time', 'record_timestamp'] } \n \n convert = { 'int32' : np.int32 , \n 'string' : np.bytes_ ,\n 'float32' : np.float32 ,\n 'float64' : np.float64\n \n }\n # creating a dictionary variable - nptype \n mapping = {}\n for k in dic_var_type.keys():\n for l in dic_var_type[k]:\n mapping[l] = k \n\n for c in df.columns:\n try:\n #print('converting ' , c , ' to type ' , mapping[c] )\n df[c] = df[c].astype( convert[mapping[c]] )\n #print('converted: ', c )\n \n except:\n #print('could not convert type column ' , c )\n pass \n \n return df", "def recognize_dates(dframe):\n for i, dtype in enumerate(dframe.dtypes):\n if dtype.type == np.object_:\n column = dframe.columns[i]\n new_column = 
_convert_column_to_date(dframe, column)\n\n if not new_column is None:\n dframe[column] = new_column\n\n return dframe", "def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)", "def from_dataframe(cls, df, data_cls):\n pass", "def test__convert_to_str_dtype(self):\n new_column_types = process_mutation._convert_to_str_dtype(\n self.column_types, [\"foo\"]\n )\n assert new_column_types == {\"foo\": \"object\", \"bar\": \"object\"}", "def _convert_categorical(from_frame: DataFrame) -> DataFrame:\n for col in from_frame:\n ser = from_frame[col]\n if isinstance(ser.dtype, CategoricalDtype):\n cat = ser._values.remove_unused_categories()\n if cat.categories.dtype == object:\n categories = pd.Index._with_infer(cat.categories._values)\n cat = cat.set_categories(categories)\n from_frame[col] = cat\n return from_frame", "def clean_df(dataframe: pd.DataFrame) -> pd.DataFrame:\n dataframe[\"Close Date\"] = pd.to_datetime(dataframe['Close Date']).dt.strftime('%Y-%m-%d')\n dataframe[\"Min_salary\"] = dataframe[\"Min_salary\"].astype(int)\n dataframe[\"Max_salary\"] = dataframe[\"Max_salary\"].astype(int)\n dataframe['HiringPath'] = dataframe['HiringPath'].astype(str)\n return dataframe", "def convert_str(data, columns=None):\n if columns is None:\n columns = [\n name\n for name, col in data.items()\n if hasattr(col, 'str')\n ]\n\n def is_numeric(col):\n return col.str.isnumeric().all()\n\n def is_float(col):\n try:\n col.astype(float)\n except ValueError:\n return False\n else:\n return True\n\n def is_bool(col):\n return col.str.match(BOOL_PATTERN).all()\n\n for name in columns:\n col = data[name]\n\n if is_numeric(col) or is_float(col):\n data[name] = pd.to_numeric(col)\n elif is_bool(col):\n data[name] = col.replace({\n 'True': True,\n 'False': False\n })\n\n return data", "def byte_to_literal_strings(dataframe):\n # Select the str columns:\n str_df = dataframe.select_dtypes([np.object])\n\n if not str_df.empty:\n # Convert all of them into unicode strings\n str_df = str_df.stack().str.decode('utf-8').unstack()\n # Swap out converted cols with the original df cols\n for col in str_df:\n dataframe[col] = str_df[col]\n\n return dataframe", "def _harmonize_columns(self, parse_dates=None):\n # handle non-list entries for parse_dates gracefully\n if parse_dates is True or parse_dates is None or parse_dates is False:\n parse_dates = []\n\n if not hasattr(parse_dates, '__iter__'):\n parse_dates = [parse_dates]\n\n for sql_col in self.table.columns:\n col_name = sql_col.name\n try:\n df_col = 
self.frame[col_name]\n # the type the dataframe column should have\n col_type = self._numpy_type(sql_col.type)\n\n if col_type is datetime or col_type is date:\n if not issubclass(df_col.dtype.type, np.datetime64):\n self.frame[col_name] = _handle_date_column(df_col)\n\n elif col_type is float:\n # floats support NA, can always convert!\n self.frame[col_name] = df_col.astype(col_type, copy=False)\n\n elif len(df_col) == df_col.count():\n # No NA values, can convert ints and bools\n if col_type is np.dtype('int64') or col_type is bool:\n self.frame[col_name] = df_col.astype(col_type, copy=False)\n\n # Handle date parsing\n if col_name in parse_dates:\n try:\n fmt = parse_dates[col_name]\n except TypeError:\n fmt = None\n self.frame[col_name] = _handle_date_column(\n df_col, format=fmt)\n\n except KeyError:\n pass # this column not in results", "def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')", "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def to_pandas(self) -> np.dtype:\n return self._pandas_type", "def convert_data(df):\n print(\"Converting history...\")\n return [ dict(row) for i, row in df.iterrows() ]", "def _dataframe_conversion(da, order):\n assert da.data.squeeze().ndim == 2, (\n \"Dataframe conversion only possible for connectivity arrays when \"\n \"time dimension is missing\")\n da = da.squeeze().to_dataframe('mi').reset_index()\n da = da.pivot('sources', 'targets', 'mi')\n if isinstance(order, (list, np.ndarray)):\n da = da.reindex(order, axis='index').reindex(order, axis='columns')\n\n return da", "def convert_cols_to_numeric(df_in: pd.DataFrame, columns=None, rows=None, decimal_pt='.',\n cast_type=float) -> pd.DataFrame:\n # converts inputted columns or rows to numeric format.\n # if none are given, it converts all elements to numeric types\n # converts to type given\n if columns is None and rows is None:\n columns = df_in.columns\n if columns is None:\n columns = []\n if rows is None:\n rows = []\n df = df_in.copy()\n\n def convert_val(val):\n return _convert_val_to_numeric(val, cast_type, f'[^0-9{decimal_pt}()-]')\n\n for column in columns:\n df[column] = df[column].apply(convert_val)\n for row in rows:\n df.loc[row] = df.loc[row].apply(convert_val)\n\n return df", "def element_type_from_dataframe(proxy, include_indexes=False):\n # type: (pd.DataFrame, bool) -> type\n return element_typehint_from_dataframe_proxy(proxy, include_indexes).user_type", "def inspect_dtype_object(self, column: str) -> str:\n\n series = self.df[column].dropna()\n\n # check for bool\n try:\n conv = pd.to_numeric(series)\n return self.inspect_dtype(conv)\n except ValueError:\n pass\n\n # check for mixed dtypes\n dtypes = {type(x) for x in series}\n if len(dtypes) > 1:\n raise TypeError(\"Column `{}` has mixed dtypes: {}. 
Currently, \"\n \"this is not supported.\"\n .format(column, dtypes))\n\n # check for string\n if isinstance(series[0], str):\n return \"str\"\n\n # raise if unsupported dtype is encountered\n raise TypeError(\"Column `{}` has dtype `{}` which is currently \"\n \"not supported.\"\n .format(column, type(series[0])))", "def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var", "def obj_df(df):\n mask = np.array(df.dtypes == 'object')\n df_obj = df.iloc[:, mask]\n return df_obj", "def change_to_object(column, data):\n data[column] = data[column].astype('object')", "def test_roundtrip_from_dataframe2(self):\n import pandas as pd\n df = pd.DataFrame(data={\n 'a': np.arange(3),\n 'b': np.arange(3)[::-1]\n })\n ca = carray(df, dtype=np.dtype(np.float))\n assert_array_equal(df, ca)\n self.assertEqual(ca.dtype, np.dtype(np.float),\n msg='carray has been created with invalid dtype')", "def clean_and_transpose(df):\n print(\"Cleaning and Transposing\")\n df = df.apply(pd.Series)\n df = df[sorted(df.columns)]\n df = df.T\n df.index.name = 'date'\n return df", "def seek_types(dataframe: pd.DataFrame) -> Dict[str, List[str]]:\r\n\r\n def _get_global_type(t):\r\n if \"obj\" in str(t):\r\n return \"cat\"\r\n elif \"float\" in str(t):\r\n return \"float\"\r\n elif \"int\" in str(t):\r\n return \"int\"\r\n elif \"date\" in str(t):\r\n return \"date\"\r\n else:\r\n return \"other\"\r\n\r\n found_types = (\r\n dataframe.dtypes.apply(_get_global_type)\r\n .reset_index()\r\n .groupby(0)\r\n .agg(lambda x: list(x))\r\n )\r\n found_types = {k: v for k, v in zip(found_types.index, found_types[\"index\"])}\r\n return found_types", "def as_frame(df_like: DataFrameLike) -> pd.DataFrame:\n try:\n return df_like.to_frame()\n except AttributeError:\n return df_like", "def make_numeric(\n df,\n vars_=[\n 'emp',\n 'empszfi',\n 'firmpdemp',\n 'payann',\n 'rcppdemp',\n 'eth_group',\n 'geotype',\n 'rcpszfi',\n 'sex',\n 'vet_group']):\n df[vars_] = df[vars_].apply(pd.to_numeric)\n return df", "def split_dataframe_datatypes(df, target_var):\n\tdf_num = df.select_dtypes(include=np.number)\n\tdf_cat = df.select_dtypes(include=object)\n\n\tif target_var in df_num.columns:\n\t\tdf_tar = df_num.copy() \n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_num.drop(columns=[target_var], axis=1, inplace=True) \n\telif target_var in df_cat.columns:\n\t\tdf_tar = df_cat.copy()\n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_cat.drop(columns=[target_var], axis=1, inplace=True) \n\n\treturn df_num,df_cat,df_tar", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def transform(self, X):\n return X.select_dtypes(self.dtypes)", "def pandas2R(df):\n with localconverter(robjects.default_converter + pandas2ri.converter):\n data = robjects.conversion.py2rpy(df)\n return data", "def to_pandas(self) -> pd.DataFrame:\n\n data = {column.name: column.to_pandas()\n for column in self.plaincolumns}\n\n return pd.DataFrame(data, columns=self.columns)", "def convert_str_lists_to_real_lists(df: pd.DataFrame, \n columns: Union[List, Tuple]) -> pd.DataFrame:\n\n if columns is None:\n columns = df.columns\n\n for column in columns:\n new_values = list()\n\n for value in df[column].values:\n\n if type(value) is not str:\n continue\n if value[0] != \"[\" or value[-1] != \"]\":\n continue\n\n try: \n new_value = ast.literal_eval(new_value)\n except ValueError:\n continue\n\n new_values.append(new_value)\n\n df[column] = new_values\n\n return df", "def test_datatype(self):\n with Pandas() as 
pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])", "def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):\n if not hasattr(data, \"dtype\"):\n # e.g. collections.deque\n return data, copy\n\n if is_float_dtype(data.dtype):\n # pre-2.0 we treated these as wall-times, inconsistent with ints\n # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.\n # Note: data.astype(np.int64) fails ARM tests, see\n # https://github.com/pandas-dev/pandas/issues/49468.\n data = data.astype(DT64NS_DTYPE).view(\"i8\")\n copy = False\n\n elif lib.is_np_dtype(data.dtype, \"m\") or is_bool_dtype(data.dtype):\n # GH#29794 enforcing deprecation introduced in GH#23539\n raise TypeError(f\"dtype {data.dtype} cannot be converted to datetime64[ns]\")\n elif isinstance(data.dtype, PeriodDtype):\n # Note: without explicitly raising here, PeriodIndex\n # test_setops.test_join_does_not_recur fails\n raise TypeError(\n \"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead\"\n )\n\n elif isinstance(data.dtype, ExtensionDtype) and not isinstance(\n data.dtype, DatetimeTZDtype\n ):\n # TODO: We have no tests for these\n data = np.array(data, dtype=np.object_)\n copy = False\n\n return data, copy", "def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')" ]
[ "0.74301016", "0.7253401", "0.71946836", "0.719138", "0.70960134", "0.7077248", "0.7054006", "0.6973285", "0.6967862", "0.69372344", "0.69322556", "0.69102204", "0.6864454", "0.6861238", "0.67856", "0.6762191", "0.67100865", "0.66672635", "0.65790963", "0.65746284", "0.65704054", "0.6556288", "0.6546347", "0.6488186", "0.64783674", "0.6439249", "0.6438686", "0.64277405", "0.6425212", "0.64037067", "0.6379642", "0.63581944", "0.63579893", "0.63090014", "0.630586", "0.6272856", "0.62713236", "0.62567794", "0.62444246", "0.6241867", "0.6228575", "0.6222346", "0.621869", "0.62096167", "0.61837465", "0.6164203", "0.6146334", "0.6140322", "0.6126824", "0.61104184", "0.610513", "0.6103148", "0.6101081", "0.6068614", "0.60355806", "0.60254765", "0.60105175", "0.5996903", "0.5989321", "0.5987619", "0.59801644", "0.59770024", "0.59704506", "0.59697413", "0.5964734", "0.5964304", "0.5958513", "0.595564", "0.59324384", "0.5912851", "0.59078884", "0.58983433", "0.5890912", "0.58701813", "0.5869384", "0.5868693", "0.58495486", "0.5843505", "0.5840918", "0.5818894", "0.5802769", "0.58016026", "0.57980657", "0.5794117", "0.5791616", "0.5779825", "0.57785565", "0.5772909", "0.5767635", "0.5761807", "0.5761513", "0.57543", "0.57515115", "0.57467693", "0.5745186", "0.5744561", "0.5726366", "0.572629", "0.572483", "0.570832" ]
0.7024564
7
Load the IMDB reviews dataset. Code adapted from the code for
def load_imdb_dataset():
  (x_train, y_train), (x_test, y_test) = imdb.load_data(
      path="./datasets", num_words=_IMDB_CONFIG["max_features"])
  num_train = _IMDB_CONFIG["num_train"]
  x_train, x_val = x_train[:num_train], x_train[num_train:]
  y_train, y_val = y_train[:num_train], y_train[num_train:]

  def preprocess(x, y, max_length):
    x = sequence.pad_sequences(x, maxlen=max_length)
    y = onp.array(y)
    x = onp.array(x)
    return x, y

  max_length = _IMDB_CONFIG["max_len"]
  x_train, y_train = preprocess(x_train, y_train, max_length=max_length)
  x_val, y_val = preprocess(x_val, y_val, max_length=max_length)
  x_test, y_test = preprocess(x_test, y_test, max_length=max_length)
  data_info = {"num_classes": 2}
  return (x_train, y_train), (x_test, y_test), (x_val, y_val), data_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def load_movies_reviews():\n data = pd.read_csv(CSV_PATH + MOVIES_REVIEWS_CSV_NAME).T.to_dict()\n for i in range(len(data)):\n movie_id = Movies.query.filter(Movies.title == data[i]['Title'].strip()).first().id\n review = data[i]['Reviews'].strip()\n rating = float(data[i]['Rating'])*100000\n review_exist = Reviews.query.filter(Reviews.review == review).first()\n if not review_exist:\n db.session.add(Reviews(movie_id=movie_id, review=review, rating=int(rating)))\n db.session.commit()\n db.session.close()\n db.session.close()", "def load_train_test_imdb_data(data_dir):\r\n\r\n print(\"... IMDB loading \\t\\n\")\r\n data = {}\r\n for split in [\"train\", \"test\"]:\r\n data[split] = []\r\n for sentiment in [\"neg\", \"pos\"]:\r\n score = 1 if sentiment == \"pos\" else 0\r\n\r\n path = os.path.join(data_dir, split, sentiment)\r\n file_names = os.listdir(path)\r\n for f_name in file_names:\r\n with open(os.path.join(path, f_name), encoding=\"latin-1\") as f:\r\n review = f.read()\r\n data[split].append([review, score])\r\n\r\n np.random.shuffle(data[\"train\"]) \r\n\r\n return data[\"train\"], data[\"test\"]", "def prepare_review_data():\n with open(REVIEW_FILE, 'r') as fread:\n reviews = fread.read()\n with open(LABEL_FILE, 'r') as fread:\n labels = fread.read()\n return reviews, labels", "def load_reviews(id_reviews=(), load_polarities=False, load_sentences=False, load_words=False, load_deptrees=False):\n from loacore.conf import DB_TIMEOUT\n reviews = []\n conn = sql.connect(DB_PATH, timeout=DB_TIMEOUT)\n c = conn.cursor()\n if len(id_reviews) > 0:\n for id_review in id_reviews:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review \"\n \"FROM Review WHERE ID_Review = \" + str(id_review) + \" ORDER BY File_Index\")\n result = c.fetchone()\n if result is not None:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n else:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review FROM Review\")\n results = c.fetchall()\n for result in results:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n\n conn.close()\n\n if load_polarities:\n # Load Polarities\n import loacore.load.polarity_load as polarity_load\n polarity_load.load_polarities_in_reviews(reviews)\n\n if load_sentences:\n # Load Sentences\n import loacore.load.sentence_load as sentence_load\n sentence_load.load_sentences_in_reviews(reviews, load_words=load_words, load_deptrees=load_deptrees)\n\n return reviews", "def Preprocess_IMDB(path=\"datasets/raw/aclImdb/\"):\n output_path = \"datasets/preprocessed/IMDB_Data\"\n\n neg = glob.glob(os.path.join(path, 'test', 'neg', '*'))\n neg += glob.glob(os.path.join(path, 'train', 'neg', '*'))\n neg_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in neg]\n neg_data = [sentence[0] for sentence in neg_data]\n\n\n pos = glob.glob(os.path.join(path, 'test', 'pos', '*'))\n pos += glob.glob(os.path.join(path, 'train', 'pos', '*'))\n pos_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in pos]\n pos_data = 
[sentence[0] for sentence in pos_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()", "def load_imdb(path, subset=\"all\", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):\n #analizer = vct.build_tokenizer()\n # C:\\Users\\mramire8\\Documents\\Research\\Oracle confidence and Interruption\\dataset\\aclImdb\\raw-data\n\n data = bunch.Bunch()\n\n if subset in ('train', 'test'):\n data[subset] = load_files(\"{0}/{1}\".format(IMDB_HOME, subset), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n elif subset == \"all\":\n data[\"train\"] = load_files(\"{0}/{1}\".format(IMDB_HOME, \"train\"), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n data[\"test\"] = load_files(\"{0}/{1}\".format(IMDB_HOME, \"test\"), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n if not raw:\n data = process_data(data, fix_k, min_size, vct)\n\n return data", "def load_data(glove_dict):\n print(\"loading data\")\n filename = check_file('reviews.tar.gz',14839260)\n extract_data(filename)\n dir= os.path.dirname(__file__)\n\n files= glob.glob(os.path.join(dir,\n 'data2/pos/*'))\n files.extend(glob.glob(os.path.join(dir,\n 'data2/neg/*')))\n\n data = np.empty([total_reviews, review_word_limit])\n\n file_idx = 0;\n for f in files:\n with open(f, 'r') as openf:\n s = openf.read()\n s = clean_line(s)\n words = s.split(\" \")\n # for word in words:\n word_count = 0\n while word_count < 
review_word_limit:\n if words:\n word = words.pop(0)\n if(word in string.punctuation or any(char.isdigit() for char in word)):\n continue\n data[file_idx][word_count] = glove_dict.get(word, 0)\n else:\n data[file_idx][word_count] = 0\n word_count += 1\n file_idx += 1\n print(\"file\", file_idx, \"done\")\n print(data[:5])\n # np.save(\"data\", data)\n return data", "def _get_review_data(path, num_samples, train_test_ratio=0.8):\n _download_dataset()\n print(\"Load Data at {}\".format(path))\n reviews, sentiments = [], []\n with open(path, \"r\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for line in reader:\n reviews.append(line[\"review\"])\n sentiments.append(int(line[\"sentiment\"]))\n\n # Data shuffle\n random.seed(42)\n zipped = list(zip(reviews, sentiments))\n random.shuffle(zipped)\n reviews, sentiments = zip(*(zipped[:num_samples]))\n reviews, sentiments = np.asarray(reviews), np.asarray(sentiments)\n\n # Train/test split\n num_data, num_train = len(sentiments), int(len(sentiments) * train_test_ratio)\n return (reviews[:num_train], sentiments[:num_train]), (reviews[num_train:], sentiments[num_train:])", "def __init__(self, dir):\n self.metadata_path = dir+\"reviews_metadata.csv\"\n self.word_to_docs_path = dir + \"words_to_file.bin\"\n self.doc_to_words_path = dir + \"file_to_words.bin\"\n self.vocabulary_path = dir+\"vocabulary.dat\"\n self.prod_index = 1\n self.helpfulness_index = 2\n self.score_index = 3\n self.review_id_index = 0\n\n try:\n with open(self.vocabulary_path, \"r\") as voc:\n jsondata = voc.read()\n data = json.loads(jsondata)\n self.vocabulary = data[\"words\"]\n self.word_indexes = data[\"indexes\"]\n except Exception:\n print(\"Cant load vocabulary from: \" + self.vocabulary_path)\n traceback.print_exc()\n exit(1)", "def extract_imdb_reviews(review_file):\n\n print(f'Decoding {review_file} ...')\n with open(review_file, encoding='utf-8') as f:\n raw = f.read()\n\n print('Extracting review text and labels ...')\n trash = {'<sssss>', '-rrb-', '-lrb-'}\n lines = raw.split('\\n')[:-1]\n reviews = []\n for line in lines:\n chunks = line.split('\\t\\t')\n label = chunks[2]\n review = ' '.join(w for w in chunks[3].split() if w not in trash)\n reviews.append((review, label))\n\n return reviews", "def load_data(reviews_path):\n df1 = pd.read_csv(reviews_path)\n #substituting 0 for negative reviews labeled '__label__1' and 1 for positive reviews labeled '__label__2'\n df1 = df1.replace('__label__1', 0)\n df1= df1.replace('__label__2', 1)\n \n return df1", "def load_ratings():\n res = {}\n with open(RATINGS_PATH, newline='', encoding=RATINGS_ENCRYPTION) as csvfile:\n spamreader = csv.reader(csvfile)\n for i, row in enumerate(spamreader):\n if i:\n title = row[3]\n res[title] = imdbData(row)\n return res", "def loadData():\n\tprint \"Loading POS vectorized reviews\"\n\twith open(DATA_PATH, \"rb\") as data_file:\n\t\tdata = cPickle.load(data_file)\n\treturn data", "def get_data():\n dataset = []\n y_labels = []\n # Extract categories\n for cat in movie_reviews.categories():\n # for files in each cateogry \n for fileid in movie_reviews.fileids(cat):\n # Get the words in that category\n words = list(movie_reviews.words(fileid))\n dataset.append((words,cat))\n y_labels.append(cat)\n return dataset,y_labels", "def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in 
open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_movielens1m(path):\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset(\n 'http://files.grouplens.org/datasets/movielens/ml-1m.zip', path)\n\n zp = zipfile.ZipFile(path, 'r')\n content = zp.read('ml-1m/ratings.dat')\n data_list = content.split('\\n')\n\n output1 = open('train', 'w')\n output2 = open('test', 'w')\n num_users = 0\n num_movies = 0\n corpus = []\n for item in data_list:\n term = item.split('::')\n if len(term) < 3:\n continue\n user_id = int(term[0]) - 1\n movie_id = int(term[1]) - 1\n rating = int(term[2])\n corpus.append((user_id, movie_id, rating))\n num_users = max(num_users, user_id + 1)\n num_movies = max(num_movies, movie_id + 1)\n\n corpus_data = np.array(corpus)\n np.random.shuffle(corpus_data)\n np.random.shuffle(corpus_data)\n N = np.shape(corpus_data)[0]\n Ndv = N // 20 * 17\n Ndv2 = N // 10 * 9\n train = corpus_data[:Ndv, :]\n valid = corpus_data[Ndv:Ndv2, :]\n test = corpus_data[Ndv2:, :]\n\n for i in range(np.shape(train)[0]):\n output1.write('%d\\t%d\\t%d\\n' % (train[i, 0], train[i, 1], train[i, 2]))\n output1.close()\n for i in range(np.shape(test)[0]):\n output2.write('%d\\t%d\\t%d\\n' % (test[i, 0], test[i, 1], test[i, 2]))\n output2.close() \n\n return num_movies, num_users, train, valid, test", "def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()", "def load_data2(reviews_path):\n df2 = pd.read_csv(reviews_path)\n # substituting 0 (negative) for all reviews rated 0 to 3 and 1 (positive) for all reviews rated 4-5\n # renaming columns to 'label' containing ratings and 'text' containing reviews to match df1\n df2['label'] = np.where(df2['review_rating'] < 4, 0, 1)\n df2['text'] = df2['review_text']\n df2 = df2 [['text', 'label']]\n return df2", "def load_movies():\n print \"Movies\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Movie.query.delete()\n\n # Read u.item file and insert data\n for row in open(\"seed_data/u.item\"):\n row =row.rstrip()\n\n movie_id, title_long, released_string, imdb_url = row.split(\"|\")[:4]\n #we modified the datetime format changed released_string into \n #new format by using datetim.strptime to convert it. \n print row\n if released_string: \n release_at = datetime.strptime(released_string, \"%d-%b-%Y\")\n else: \n release_at = None \n\n #here we stripped the title of the (xxxx) year and parenthesis\n #using the slice method. 
\n title = title_long[:-7]\n\n print movie_id, title_long, released_string, imdb_url\n\n #assign the return values from our for loop to a new variable\n movie = Movie(movie_id=movie_id, title=title, released_at=release_at,\n imdb_url=imdb_url)\n \n # We need to add to the session or it won't ever be stored\n db.session.add(movie)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_reveiws_dataset(filename):\n review_DataFrame = pd.read_json(filename, lines=True)\n return review_DataFrame", "def prepare_imdb_data(data, labels, should_shuffle=True):\n\n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n\n #Shuffle reviews and corresponding labels within training and test sets\n if should_shuffle:\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n\n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With 
Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats", "def load_data(max_len: int, vocab_size: int) -> Tuple[NumpyDataset, NumpyDataset]:\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.imdb.load_data(maxlen=max_len, num_words=vocab_size)\n # pad the sequences to max length\n x_train = np.array([pad(x, max_len, 0) for x in x_train])\n x_eval = np.array([pad(x, max_len, 0) for x in x_eval])\n\n train_data = NumpyDataset({\"x\": x_train, \"y\": y_train})\n eval_data = NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n return train_data, eval_data", "def 
get_data(args, load_extracted=True):\n path = args.data_path1\n tokenizer_en = tokener()\n table = str.maketrans(\"\", \"\", '\"#$%&\\'()*+-/:;<=>@[\\\\]^_`{|}~')\n if load_extracted:\n df = load_pickle(\"df_unencoded.pkl\")\n else:\n logger.info(\"Extracting CNN stories...\")\n df = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df.iloc[idx][\"body\"] = body\n df.iloc[idx][\"highlights\"] = highlights\n \n if len(args.data_path2) > 2:\n path = args.data_path2\n logger.info(\"Extracting dailymail stories...\")\n df1 = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df1.iloc[idx][\"body\"] = body\n df1.iloc[idx][\"highlights\"] = highlights\n df = pd.concat([df, df1], ignore_index=True)\n del df1\n \n save_as_pickle(\"df_unencoded.pkl\", df)\n logger.info(\"Dataset length: %d\" % len(df)) \n \n if (args.level == \"word\") or (args.level == \"char\"):\n logger.info(\"Tokenizing and cleaning extracted text...\")\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en), \\\n axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n \n logger.info(\"Limiting to max features length, building vocab and converting to id tokens...\")\n df = df[df[\"body_length\"] <= args.max_features_length]\n v = vocab(level=args.level)\n v.build_vocab(df[\"body\"])\n v.build_vocab(df[\"highlights\"])\n df.loc[:, \"body\"] = df.apply(lambda x: v.convert_w2idx(x[\"body\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: v.convert_w2idx(x[\"highlights\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], 0, 2), axis=1)\n save_as_pickle(\"df_encoded.pkl\", df)\n save_as_pickle(\"vocab.pkl\", v)\n \n elif args.level == \"bpe\":\n encoder = Encoder(vocab_size=args.bpe_vocab_size, pct_bpe=args.bpe_word_ratio, word_tokenizer=tokenizer_en.tokenize)\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en, clean_only=True), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en, clean_only=True), \\\n axis=1)\n logger.info(\"Training bpe, this might take a while...\")\n text_list = list(df[\"body\"])\n text_list.extend(list(df[\"highlights\"]))\n 
encoder.fit(text_list); del text_list\n \n logger.info(\"Tokenizing to ids and limiting to max features length...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform([x[\"body\"]])), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform([x[\"highlights\"]])), axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n df = df[df[\"body_length\"] <= args.max_features_length]\n \n '''\n logger.info(\"Converting tokens to ids...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"body\"])))),\\\n axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"highlights\"])))),\\\n axis=1)\n '''\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], encoder.word_vocab[\"__sos\"], encoder.word_vocab[\"__eos\"]),\\\n axis=1)\n \n save_as_pickle(\"df_encoded.pkl\", df)\n encoder.save(\"./data/vocab.pkl\")\n return df", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = 
np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, 
train_subcat, test_subcat, val_subcat", "def read_data(self):\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, self.ratings = ratings()\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\n self.sentiment = dict(reader)\n\n self.titlesOnly = []\n\n for entry in self.titles:\n titleOnly = entry[0].split(' (')[0]\n self.titlesOnly.append(titleOnly.lower())\n self.sentiment.update({self.p.stem(k): v for k, v in self.sentiment.items()})", "def train(self, dataset = \"Amazon\", top_words=10000):\n assert dataset in datasets, 'Dataset should be in that list ' + str(datasets)\n if dataset == 'Amazon':\n X_train, y_train = load_dataset('dataset/amazonreviews/data', self.nb_lines_amazon)\n else:\n (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n raise Exception('Dead code... This should be retest again')", "def importData(dataDir='',dataset= \"ml-100k\"):\r\n ratingData = pd.read_table(dataDir+dataset+\"/u.data\",sep=\"\\t\",header=None)\r\n ratingData.columns=['userID','movieID','rating','timestamp']\r\n ratingData['userID'] = ratingData['userID'].astype(\"category\") #converting into categorical variables\r\n ratingData['movieID'] = ratingData['movieID'].astype('category')\r\n\r\n movieData = pd.read_table(dataDir+dataset+\"/u.item\",sep=\"|\",header=None)\r\n movieData.columns = ['movieID', 'movie title' , 'release date' , 'video release date' ,\r\n 'IMDb URL' , 'unknown' , 'Action' , 'Adventure' , 'Animation' ,\r\n 'Childrens' , 'Comedy' , 'Crime' , 'Documentary' , 'Drama' , 'Fantasy' ,\r\n ' Film-Noir' , 'Horror' , 'Musical' , 'Mystery' , 'Romance' , 'Sci-Fi' ,\r\n 'Thriller' , 'War' , 'Western']\r\n \r\n userData = pd.read_table(dataDir+dataset+\"/u.user\",sep=\"|\",header=None)\r\n userData.columns=['userID','age','gender','occupation','zipcode']\r\n \r\n return (ratingData,movieData,userData)", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def import_data(filename):\r\n regex = re.compile(\"\"\"\"(?P<show_name>.*?)\"\\s+\\((?P<year>\\d+)(?:|/.*?)\\)\\s+\\{(?P<episode_name>.*?)\\s?\\(\\#(?P<season_no>\\d+)\\.(?P<episode_no>\\d+)\\)\\}\"\"\")\r\n\r\n with codecs.open(filename, \"r\", \"latin-1\") as ratings:\r\n # Generate all the lines that matched.\r\n matches = (match for match in (regex.search(line.strip()) for line in ratings) if match)\r\n counter = 0\r\n for match in matches:\r\n counter += 1\r\n if not counter % 100:\r\n print counter\r\n episode = {}\r\n for field in [\"show_name\", \"year\", \"episode_name\", \"episode_no\", \"season_no\"]:\r\n episode[field] = match.group(field)\r\n\r\n # If the episode has no name it is given the same name as on imdb.com for consistency.\r\n if not episode[\"episode_name\"]:\r\n episode[\"episode_name\"] = \"Episode #%s.%s\" % (episode[\"season_no\"], episode[\"episode_no\"])\r\n\r\n try:\r\n show = session.query(Show).filter_by(name=episode[\"show_name\"], year=episode[\"year\"]).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n show = Show(episode[\"show_name\"], episode[\"year\"])\r\n session.add(show)\r\n\r\n try:\r\n episode = session.query(Episode).filter_by(name=episode[\"episode_name\"], show=show).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n episode = Episode(show, episode[\"episode_name\"], episode[\"season_no\"], episode[\"episode_no\"])\r\n session.add(episode)\r\n\r\n #session.commit()\r", "def load_dataset(fname, 
nb_lines):\n import os.path\n if os.path.isfile('safe/Amazon-'+str(nb_lines)+'.p'):\n return util.load('safe/Amazon-'+str(nb_lines)+'.p')\n count = 1\n X = []\n y = []\n with open(fname) as f:\n for line in f:\n text, label = read_line(line)\n #print((label, text))\n X.append(text)\n y.append(label)\n if count >= nb_lines:\n break\n count+=1\n\n #load pretrained dictonary\n dico = util.load('safe/vocab_gensim.p')\n preprocessor = text_preprocessing.Preprocessor(dico=dico)\n X = preprocessor.preprocess(X)\n #save the loaded dataset in a pickle for speeding up next run\n util.save((X,y), 'safe/Amazon-'+str(nb_lines)+'.p')\n return (X, y)", "def loadData(catalog):\n loadVideos(catalog)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "def load_data():\r\n #读取User数据\r\n users_title = ['UserID', 'Gender', 'Age', 'JobID', 'Zip-code']\r\n users = pd.read_csv('./ml-1m/users.dat', sep='::', header=None, names=users_title, engine = 'python')\r\n users = users.filter(regex='UserID|Gender|Age|JobID')\r\n users_orig = users.values\r\n #改变User数据中性别和年龄\r\n gender_map = {'F':0, 'M':1}\r\n users['Gender'] = users['Gender'].map(gender_map)\r\n\r\n age_map = {val:ii for ii,val in enumerate(set(users['Age']))}\r\n users['Age'] = users['Age'].map(age_map)\r\n\r\n #读取Movie数据集\r\n movies_title = ['MovieID', 'Title', 'Genres']\r\n movies = pd.read_csv('./ml-1m/movies.dat', sep='::', header=None, names=movies_title, engine = 'python')\r\n movies_orig = movies.values\r\n #将Title中的年份去掉\r\n pattern = re.compile(r'^(.*)\\((\\d+)\\)$')\r\n\r\n title_map = {val:pattern.match(val).group(1) for ii,val in enumerate(set(movies['Title']))}\r\n movies['Title'] = movies['Title'].map(title_map)\r\n\r\n #电影类型转数字字典\r\n genres_set = set()\r\n for val in movies['Genres'].str.split('|'):\r\n genres_set.update(val)\r\n\r\n genres_set.add('<PAD>')\r\n genres2int = {val:ii for ii, val in enumerate(genres_set)}\r\n\r\n #将电影类型转成等长数字列表,长度是18\r\n genres_map = {val:[genres2int[row] for row in val.split('|')] for ii,val in enumerate(set(movies['Genres']))}\r\n\r\n for key in genres_map:\r\n for cnt in range(max(genres2int.values()) - len(genres_map[key])):\r\n genres_map[key].insert(len(genres_map[key]) + cnt,genres2int['<PAD>'])\r\n \r\n movies['Genres'] = movies['Genres'].map(genres_map)\r\n\r\n #电影Title转数字字典\r\n title_set = set()\r\n for val in movies['Title'].str.split():\r\n title_set.update(val)\r\n \r\n title_set.add('<PAD>')\r\n title2int = {val:ii for ii, val in enumerate(title_set)}\r\n\r\n #将电影Title转成等长数字列表,长度是15\r\n title_count = 15\r\n title_map = {val:[title2int[row] for row in val.split()] for ii,val in enumerate(set(movies['Title']))}\r\n \r\n for key in title_map:\r\n for cnt in range(title_count - len(title_map[key])):\r\n title_map[key].insert(len(title_map[key]) + cnt,title2int['<PAD>'])\r\n \r\n movies['Title'] = movies['Title'].map(title_map)\r\n\r\n #读取评分数据集\r\n ratings_title = ['UserID','MovieID', 'ratings', 'timestamps']\r\n ratings = pd.read_csv('./ml-1m/ratings.dat', sep='::', header=None, names=ratings_title, engine = 'python')\r\n ratings = ratings.filter(regex='UserID|MovieID|ratings')\r\n\r\n #合并三个表\r\n data = pd.merge(pd.merge(ratings, users), movies)\r\n \r\n 
#将数据分成X和y两张表\r\n target_fields = ['ratings']\r\n features_pd, targets_pd = data.drop(target_fields, axis=1), data[target_fields]\r\n \r\n features = features_pd.values\r\n targets_values = targets_pd.values\r\n \r\n return title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig", "def download_imdb_ratings():\n print(\"Option not implemented\")\n sys.exit(1)\n return {}", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def _get_omdb_data(self):\n url = \"http://www.omdbapi.com/?i=\" + self.imdb_id + \"&plot=short&r=json\"\n try:\n json_data = urllib2.urlopen(url).read()\n except urllib2.HTTPError as e:\n print('The server couldn\\'t fulfill the request.')\n print 'Error code:', e.code\n exit()\n except urllib2.URLError as e:\n print('We failed to reach a server.')\n print 'Reason:', e.reason\n exit()\n else:\n data = json.loads(json_data)\n self._omdb_data[\"title\"] = data[\"Title\"].encode('utf-8', 'ignore') # encode to prevent encoding errors\n self._omdb_data[\"storyline\"] = data[\"Plot\"].encode('utf-8', 'ignore')\n self._omdb_data[\"poster_image_url\"] = data[\"Poster\"].encode('utf-8', 'ignore')\n self._omdb_data[\"age_rating\"] = data[\"Rated\"].encode('utf-8', 'ignore')\n self._omdb_data[\"imdb_rating\"] = float(data[\"imdbRating\"])\n self._omdb_data[\"genre\"] = data[\"Genre\"].encode('utf-8', 'ignore')\n self._omdb_data[\"directors\"] = data[\"Director\"].encode('utf-8', 'ignore').split(\", \")\n self._omdb_data[\"actors\"] = data[\"Actors\"].encode('utf-8', 'ignore').split(\", \")\n self._omdb_data[\"awards\"] = data[\"Awards\"].encode('utf-8', 'ignore')\n self._omdb_data[\"release_date\"] = data[\"Released\"].encode('utf-8', 'ignore')", "def train(self):\n lFileList = []\n for fFileObj in os.walk(\"movies_reviews/\"):\n lFileList = fFileObj[2]\n break\n for rev in lFileList:\n if int(rev[7])== 1:\n contents = self.loadFile(\"movies_reviews/\" + rev)\n listOfWords = self.tokenize(contents)\n for word in listOfWords:\n self.negRev[word] = self.negRev.get(word, 0) + 1\n if int(rev[7])== 5:\n contents = self.loadFile(\"movies_reviews/\" + rev)\n listOfWords = self.tokenize(contents)\n for word in listOfWords:\n self.posRev[word] = self.posRev.get(word, 0) + 1\n self.save(self.posRev, \"posRev\")\n self.save(self.negRev, \"negRev\")", "def load_data(file_name=None):\n pos_review = pickle.load(open(file_name + 'pos_review.pkl', 'r'))\n neg_review = pickle.load(open(file_name + 'neg_review.pkl', 'r'))\n\n return pos_review, neg_review", "def load_reviews_by_id_files(id_files, load_polarities=False, load_sentences=False, load_words=False, load_deptrees=False):\n from loacore.conf import DB_TIMEOUT\n reviews = []\n conn = sql.connect(DB_PATH, timeout=DB_TIMEOUT)\n c = conn.cursor()\n\n for id_file in id_files:\n c.execute(\"SELECT ID_Review, ID_File, File_Index, Review \"\n \"FROM Review WHERE ID_File = \" + str(id_file) + \" ORDER BY File_Index\")\n results = c.fetchall()\n for result in results:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n\n conn.close()\n\n if load_polarities:\n # Load Polarities\n import loacore.load.polarity_load as polarity_load\n polarity_load.load_polarities_in_reviews(reviews)\n\n if load_sentences:\n # Load Sentences\n import loacore.load.sentence_load as sentence_load\n sentence_load.load_sentences_in_reviews(reviews, load_words=load_words, load_deptrees=load_deptrees)\n\n return reviews", "def imdb_load_file(file_name):\n imdb_saved = 
open(file_name)\n imdb_save = json.loads(imdb_saved.read())\n Movie.__movies.append(\n Movie(\n imdb_save['title'],\n imdb_save['description'],\n imdb_save['image'],\n \"https://www.youtube.com/watch?v=\" + imdb_save['youtube_id'],\n imdb_save['genres'],\n imdb_save['released'] \n )\n )", "def imdb_dataset(directory='data/',\n train=False,\n test=False,\n train_directory='train',\n test_directory='test',\n extracted_name='aclImdb',\n check_files=['aclImdb/README'],\n url='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz',\n sentiments=['pos', 'neg']):\n download_file_maybe_extract(url=url, directory=directory, check_files=check_files)\n\n ret = []\n splits = [\n dir_ for (requested, dir_) in [(train, train_directory), (test, test_directory)]\n if requested\n ]\n for split_directory in splits:\n full_path = os.path.join(directory, extracted_name, split_directory)\n examples = []\n for sentiment in sentiments:\n for filename in glob.iglob(os.path.join(full_path, sentiment, '*.txt')):\n with open(filename, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append({\n 'text': text,\n 'sentiment': sentiment,\n })\n ret.append(examples)\n\n if len(ret) == 1:\n return ret[0]\n else:\n return tuple(ret)", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def __init__(self):\n try:\n negative_ids = movie_reviews.fileids('neg')\n positive_ids = movie_reviews.fileids('pos')\n except LookupError:\n import nltk\n nltk.download('movie_reviews')\n negative_ids = movie_reviews.fileids('neg')\n positive_ids = movie_reviews.fileids('pos')\n\n \"\"\" \n Separate positive features from negative\n \"\"\"\n negative_features = [(extract(movie_reviews.words(fileids=[f])), 'neg') for f in negative_ids]\n positive_features = [(extract(movie_reviews.words(fileids=[f])), 'pos') for f in positive_ids]\n\n \"\"\" \n Trains of 3/4 off the database\n and test off 1/4\n \"\"\"\n negative_cutoff = int(len(negative_features) * 3 / 4)\n positive_cutoff = int(len(positive_features) * 3 / 4)\n\n train_features = negative_features[:negative_cutoff] + positive_features[:positive_cutoff]\n test_features = negative_features[negative_cutoff:] + positive_features[positive_cutoff:]\n\n print('Training on %d instances, testing on %d instances' % (len(train_features), len(test_features)))\n self.classifier = NaiveBayesClassifier.train(train_features)\n print('Training complete')\n #print('accuracy:', nltk.classify.util.accuracy(self.classifier, test_features))\n #self.classifier.show_most_informative_features()\n\n \"\"\" Save classifier \"\"\"\n f = open('classifier.pickle', 'wb')\n pickle.dump(self.classifier, f)\n f.close()", "def read_data(self):\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, self.ratings = ratings()\n self.base_rating = 3.0\n self.binarize()\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\n sentiment = dict(reader)\n self.sentiment = {}\n # added stemming for sentiment 
keywords\n for key, val in sentiment.items():\n stemmed_key = self.stemmer.stem(key)\n self.sentiment[stemmed_key] = val", "def load_blogpan(path, subset=\"all\", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False, percent=.5):\n PAN13_HOME = \"C:/Users/mramire8/Documents/Datasets/textclassification/raw data/author-profiling-gender/gender-profiling/blogs/blogs\"\n\n data = bunch.Bunch()\n\n if subset in ('train', 'test'):\n # data[subset] = load_files(\"{0}/{1}\".format(AVI_HOME, subset), encoding=\"latin1\", load_content=True,\n # random_state=rnd)\n raise Exception(\"We are not ready for train test aviation data yet\")\n elif subset == \"all\":\n data = load_files(PAN13_HOME, encoding=\"latin1\", load_content=True,\n random_state=rnd)\n # parser = XMLParser(encoding=\"latin-1\", recover=True)\n parser = etree.XMLParser(recover=True)\n data.data = [clean_xml_pan(text, parser=parser) for text in data.data]\n # data[\"test\"] = load_files(\"{0}/{1}\".format(AVI_HOME, \"test\"), encoding=\"latin1\", load_content=True,\n # random_state=rnd)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n\n # train_x, test_x, train_y, test_y = train_test_split(data.data, data.target, test_size=0.25,\n # random_state=rnd)\n\n indices = ShuffleSplit(len(data.data), n_iter=1, test_size=percent, random_state=rnd)\n for train_ind, test_ind in indices:\n\n data = bunch.Bunch(train=bunch.Bunch(data=[data.data[i] for i in train_ind], target=data.target[train_ind]),\n test=bunch.Bunch(data=[data.data[i] for i in test_ind], target=data.target[test_ind]))\n # if shuffle:\n # random_state = np.random.RandomState(rnd)\n # indices = np.arange(data.train.target.shape[0])\n # random_state.shuffle(indices)\n # data.train.filenames = data.train.filenames[indices]\n # data.train.target = data.train.target[indices]\n # # Use an object array to shuffle: avoids memory copy\n # data_lst = np.array(data.train.data, dtype=object)\n # data_lst = data_lst[indices]\n # data.train.data = data_lst.tolist()\n\n if not raw:\n data = process_data(data, fix_k, min_size, vct)\n\n return data", "def load_data():\n\n if 'data' not in os.listdir('.'):\n os.mkdir('data') \n \n if 'id_to_word.pkl' not in os.listdir('data'):\n print('Loading data...')\n (x_train, y_train), (x_val, y_val) = imdb.load_data(num_words=max_features, skip_top=20, index_from=3)\n word_to_id = imdb.get_word_index()\n word_to_id ={k:(v+3) for k,v in word_to_id.items()}\n word_to_id[\"<PAD>\"] = 0\n word_to_id[\"<START>\"] = 1\n word_to_id[\"<UNK>\"] = 2\n id_to_word = {value:key for key,value in word_to_id.items()}\n\n print(len(x_train), 'train sequences')\n print(len(x_val), 'test sequences')\n\n print('Pad sequences (samples x time)')\n x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n x_val = sequence.pad_sequences(x_val, maxlen=maxlen)\n y_train = np.eye(2)[y_train]\n y_val = np.eye(2)[y_val] \n\n np.save('./data/x_train.npy', x_train)\n np.save('./data/y_train.npy', y_train)\n np.save('./data/x_val.npy', x_val)\n np.save('./data/y_val.npy', y_val)\n with open('data/id_to_word.pkl','wb') as f:\n pickle.dump(id_to_word, f) \n\n else:\n x_train, y_train, x_val, y_val = np.load('data/x_train.npy'),np.load('data/y_train.npy'),np.load('data/x_val.npy'),np.load('data/y_val.npy')\n with open('data/id_to_word.pkl','rb') as f:\n id_to_word = pickle.load(f)\n\n return x_train, y_train, x_val, y_val, id_to_word", "def load_data(self):", "def loadData(dataset_path):\n with 
open(dataset_path, 'rb') as handle:\n data = pickle.load(handle)\n word2id = data['word2id']\n id2word = data['id2word']\n training_data = data['trainingSamples']\n return word2id, id2word, training_data", "def create_dataset(base_path, batch_size, is_train):\n columns_list = [\"feature\", \"label\"]\n num_consumer = 4\n\n if is_train:\n path = os.path.join(base_path, 'aclImdb_train.mindrecord0')\n else:\n path = os.path.join(base_path, 'aclImdb_test.mindrecord0')\n\n data_set = ds.MindDataset(path, columns_list, num_consumer)\n ds.config.set_seed(0)\n data_set = data_set.shuffle(buffer_size=data_set.get_dataset_size())\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set", "def __init__(self):\n self.movie_reviews = []", "def load_train_data():\n # X has dim (USER_COUNT x ITEM_COUNT)\n USER_COUNT = 10000\n ITEM_COUNT = 1000\n\n ratings = load_ratings_from_file_path(get_train_file_path())\n\n X = np.zeros([USER_COUNT, ITEM_COUNT], dtype=np.float32)\n for (row, col, rating) in ratings:\n X[row, col] = rating\n return X", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load_rating_all(self) -> pd.DataFrame:\n with BytesIO(self.zf.read(\"ml-1m/ratings.dat\")) as ifs:\n import pandas as pd\n\n df = pd.read_csv(\n ifs,\n sep=\"\\:\\:\",\n header=None,\n names=[\"user_id\", \"movie_id\", \"rating\", \"timestamp\"],\n engine=\"python\",\n )\n df[\"timestamp\"] = pd.to_datetime(df.timestamp, unit=\"s\")\n return df", "def load_ratings(self):\n logging.debug(\"Loading ratings data...\")\n\n # loading ratings\n data=requests.get(self.__URL_RATINGS)\n self.__dataframe_ratings=pd.DataFrame(data.json())\n # calculate implicit and explicit ratings\n # XXX use a function to calculate implicit rating considering the video lead time\n self.__dataframe_ratings['rating_implicit'] = (self.__dataframe_ratings['video_watch_time']/100) * 0.3\n self.__dataframe_ratings['rating_explicit'] = (self.__dataframe_ratings['rating_value']) * 0.7\n\n # create a new column to put implicit or explicit rating value\n self.__dataframe_ratings['overall_rating_value'] = self.__dataframe_ratings['rating_implicit'] + self.__dataframe_ratings['rating_explicit']\n\n logging.debug(\"Ratings data loaded! 
n=%s\" % self.__dataframe_ratings.shape[0])\n\n return self.__dataframe_ratings", "def load_data(self) -> None:", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def load_imbd_dataset(path=\"imdb.pkl\", nb_words=None, skip_top=0,\n maxlen=None, test_split=0.2, seed=113,\n start_char=1, oov_char=2, index_from=3):\n from six.moves import cPickle\n import gzip\n # from ..utils.data_utils import get_file\n from six.moves import zip\n import numpy as np\n from six.moves import urllib\n\n url = 'https://s3.amazonaws.com/text-datasets/'\n def download_imbd(filename):\n if not os.path.exists(filename):\n print('Downloading ...')\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n return filename\n\n filename = download_imbd(path)\n # path = get_file(path, origin=\"https://s3.amazonaws.com/text-datasets/imdb.pkl\")\n\n if filename.endswith(\".gz\"):\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'rb')\n\n X, labels = cPickle.load(f)\n f.close()\n\n np.random.seed(seed)\n np.random.shuffle(X)\n np.random.seed(seed)\n np.random.shuffle(labels)\n\n if start_char is not None:\n X = [[start_char] + [w + index_from for w in x] for x in X]\n elif index_from:\n X = [[w + index_from for w in x] for x in X]\n\n if maxlen:\n new_X = []\n new_labels = []\n for x, y in zip(X, labels):\n if len(x) < maxlen:\n new_X.append(x)\n new_labels.append(y)\n X = new_X\n labels = new_labels\n if not X:\n raise Exception('After filtering for sequences shorter than maxlen=' +\n str(maxlen) + ', no sequence was kept. '\n 'Increase maxlen.')\n if not nb_words:\n nb_words = max([max(x) for x in X])\n\n # by convention, use 2 as OOV word\n # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)\n if oov_char is not None:\n X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]\n else:\n nX = []\n for x in X:\n nx = []\n for w in x:\n if (w >= nb_words or w < skip_top):\n nx.append(w)\n nX.append(nx)\n X = nX\n\n X_train = np.array(X[:int(len(X) * (1 - test_split))])\n y_train = np.array(labels[:int(len(X) * (1 - test_split))])\n\n X_test = np.array(X[int(len(X) * (1 - test_split)):])\n y_test = np.array(labels[int(len(X) * (1 - test_split)):])\n\n return X_train, y_train, X_test, y_test", "def popularity_dataset(data_dir):\n def generator_fn():\n \"\"\"\n Generate tagger data points.\n \"\"\"\n for video_id, timestamp, metadata in data_dir.shuffled_thumbnails():\n total_votes = metadata['votes_up'] + metadata['votes_down']\n if total_votes == 0:\n like_frac = 0.5\n else:\n like_frac = metadata['votes_up'] / total_votes\n yield (video_id, timestamp), like_frac, metadata['views']\n dataset = tf.data.Dataset.from_generator(generator_fn, (tf.int32, tf.float32, tf.float32),\n output_shapes=((2,), (), ()))\n return dataset.map(_thumbnail_reader(data_dir))", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = 
open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_test_data():\n # X has dim (USER_COUNT x ITEM_COUNT)\n USER_COUNT = 10000\n ITEM_COUNT = 1000\n\n ratings = load_ratings_from_file_path(get_test_file_path())\n\n X = np.zeros([USER_COUNT, ITEM_COUNT])\n for (row, col, rating) in ratings:\n X[row, col] = rating\n return X", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def load_radiography_data():\n # Load all Covid Images\n images = []\n labels = []\n for filename in os.listdir(\n os.path.join(\"COVID-19 Radiography Database\", \"COVID-19\")):\n img = cv2.imread(\n os.path.join(\"COVID-19 Radiography Database\", \"COVID-19\", filename), cv2.IMREAD_GRAYSCALE)\n if img is not None:\n images.append(img)\n labels.append(\"covid\")\n\n count_covid_images = len(images)\n\n # Load all Normal (non-covid) Images\n for filename in os.listdir(\n os.path.join(\"COVID-19 Radiography Database\", \"NORMAL\")):\n img = cv2.imread(\n os.path.join(\"COVID-19 Radiography Database\", \"NORMAL\", filename), cv2.IMREAD_GRAYSCALE)\n if img is not None and count_covid_images > 0:\n images.append(img)\n labels.append(\"normal\")\n count_covid_images = count_covid_images - 1\n\n X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.25, shuffle=True)\n return (np.array(X_train), np.array(y_train)), (np.array(X_test), np.array(y_test))", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def load_data(database_filepath):\n\n #Load Messages Dataset\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table('DisasterResponse', engine)\n\n # split into features and target\n X = df[\"message\"]\n \n # Making DataFrame with relevant categories\n Y = df.drop(['id', 'message', 
'original', 'genre'], axis=1)\n Y['related']= Y['related'].map(lambda x: 1 if x == 2 else x)\n category_names = Y.columns\n print(Y.shape)\n\n return X, Y, category_names", "def get_rating_data(rating_file='ml-1m/ratings.dat', max_rows=1e6):\n userID_ls = []\n movieID_ls = []\n rating_ls = []\n\n for i, line in enumerate(ZIPFILE.open(rating_file).readlines()):\n if i >= max_rows:\n break\n try:\n x = line.decode('utf-8').split('::')\n except Exception:\n continue\n userID_ls.append(int(x[0]))\n movieID_ls.append(int(x[1]))\n rating_ls.append(int(x[2]))\n\n rating_dict = {'userID': np.array(userID_ls),\n 'movieID': np.array(movieID_ls),\n 'rating': np.array(rating_ls)}\n\n return pd.DataFrame(rating_dict)", "def get_df(path):\n i = 0\n df = {}\n for dict_item in parse(path):\n if i < 2000:\n df[i] = dict_item\n i += 1\n else:\n break\n # generate a DataFrame that has reviews and ratings\n desired = pd.DataFrame.from_dict(df, orient='columns').T # transform the matrix since the data is \"sideways\" in the gzip\n desired = desired.drop(['asin', 'helpful', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime'], axis=1) # strip unused data\n return desired", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and 
get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n 
download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = 
images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n 
gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = 
load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n 
process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = 
TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def load_ratings():\n\n print \"Importing ratings...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Rating.query.delete()\n\n # Read CSV file\n with open(\"seed_data/ratings.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n rating = Rating(meeting_id=list_item[1],\n score=list_item[2])\n\n # Add the current retailer to the session\n db.session.add(rating)\n\n # Commit the db.session changes to the database\n db.session.commit()", "def load_data():\n # Dictionary mapping image names to labels\n image_name_to_label = dict()\n\n # Store labels associated with image names\n notifier.send(\" Reading metadata...\")\n with open(\"data/metadata.csv\") as file: # Original dataset\n # Use images for normal, virus (unknown type), COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"Label\"].lower() == \"normal\":\n label = 2\n elif row[\"Label_2_Virus_category\"].lower() == \"covid-19\":\n label = 0\n elif 
row[\"Label_1_Virus_category\"].lower() == \"virus\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"X_ray_image_name\"]] = label\n with open(\"data/metadata2.csv\") as file: # GitHub dataset\n # Use COVID-19, SARS\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n if row[\"filename\"] in image_name_to_label: # Image already added\n continue\n if \"covid-19\" in row[\"finding\"].lower():\n label = 0\n elif row[\"finding\"].lower() == \"sars\":\n label = 1\n else:\n continue\n image_name_to_label[row[\"filename\"]] = label\n with open(\"data/metadata_COVID-19.csv\") as file: # Additional COVID-19 images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"COVID-19/\" + row[\"FILE NAME\"] + \".\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 0\n with open(\"data/metadata_ViralPneumonia.csv\") as file: # Additional virus images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"ViralPneumonia/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 1\n with open(\"data/metadata_Normal.csv\") as file: # Additional normal images\n metadata_contents = csv.DictReader(file)\n for row in metadata_contents:\n name = \"Normal/\" + row[\"FILE NAME\"].replace(\"-\", \"(\") + \").\" + row[\"FORMAT\"]\n image_name_to_label[name.lower().replace(\" \", \"\")] = 2\n\n notifier.send(\" Loading images...\")\n images, labels = load_images(image_name_to_label)\n\n notifier.send(\" Splitting data...\")\n return split_data(images, labels)", "def load_test_dataset():\n\n def gen_image(resolution, x1, y1, x2, y2):\n width, height = resolution\n image = np.full([height, width, 3], fill_value=255, dtype=np.uint8)\n image[int(y1 * height) : int(y2 * height), int(x1 * width) : int(x2 * width), :] = np.array(\n [0, 128, 128], dtype=np.uint8\n )[None, None, :]\n return image, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2)\n\n images = [\n gen_image((640, 480), 0.0, 0.0, 0.5, 0.5),\n gen_image((640, 480), 0.5, 0.0, 1.0, 0.5),\n gen_image((640, 480), 0.0, 0.5, 0.5, 1.0),\n gen_image((640, 480), 0.5, 0.5, 1.0, 1.0),\n ]\n labels = [LabelEntity(name=\"rect\", domain=Domain.DETECTION, id=ID(\"0\"))]\n\n def get_image(i, subset):\n image, bbox = images[i]\n return DatasetItemEntity(\n media=Image(data=image),\n annotation_scene=AnnotationSceneEntity(\n annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[0])])],\n kind=AnnotationSceneKind.ANNOTATION,\n ),\n subset=subset,\n )\n\n items = [\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(2, Subset.TRAINING),\n get_image(3, Subset.TRAINING),\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(2, Subset.TRAINING),\n get_image(3, Subset.TRAINING),\n get_image(0, Subset.TRAINING),\n get_image(1, Subset.TRAINING),\n get_image(0, Subset.VALIDATION),\n get_image(1, Subset.VALIDATION),\n get_image(2, Subset.VALIDATION),\n get_image(3, Subset.VALIDATION),\n get_image(0, Subset.TESTING),\n get_image(1, Subset.TESTING),\n get_image(2, Subset.TESTING),\n get_image(3, Subset.TESTING),\n ]\n return DatasetEntity(items), labels", "def load_gt_roidb(dataset_name, image_set_name, root_path, dataset_path,\n flip=False, verbose=False):\n imdb = retinaface(image_set_name, root_path, dataset_path)\n roidb = imdb.gt_roidb()\n if verbose:\n print('roidb size', len(roidb))\n if flip:\n roidb = imdb.append_flipped_images(roidb)\n if 
verbose:\n print('flipped roidb size', len(roidb))\n return roidb", "def _load_restored(self, dataset_path):\n for group in ['knowledge', 'source', 'target']:\n if getattr(self, group + '_format') != 'none':\n text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]\n setattr(self, group + '_text_data', text_data)\n idx2token, token2idx = load_restored(dataset_path, ignore_file='data')\n setattr(self, 'idx2token', idx2token)\n setattr(self, 'token2idx', token2idx)\n self.max_vocab_size = len(self.idx2token)\n self.logger.info(\"Restore finished!\")", "def load_data(data_path=DATA_PATH):\n with open (os.path.join(DATA_PATH, \"imdb_extrait.pkl\"),\"rb\") as file:\n \n [data , id2titles , fields ]= pk.load(file)\n \n \n datax = data [: ,:33]\n datay = np.array([1 if x [33] >6.5 else -1 for x in data ])\n \n return datax, datay, id2titles, fields", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audienceRating = utils.cast(float, data.attrib.get('audienceRating'))\n self.audienceRatingImage = data.attrib.get('audienceRatingImage')\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.autoDeletionItemPolicyUnwatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyUnwatchedLibrary', '0'))\n self.autoDeletionItemPolicyWatchedLibrary = utils.cast(\n int, data.attrib.get('autoDeletionItemPolicyWatchedLibrary', '0'))\n self.childCount = utils.cast(int, data.attrib.get('childCount'))\n self.collections = self.findItems(data, media.Collection)\n self.contentRating = data.attrib.get('contentRating')\n self.duration = utils.cast(int, data.attrib.get('duration'))\n self.enableCreditsMarkerGeneration = utils.cast(int, data.attrib.get('enableCreditsMarkerGeneration', '-1'))\n self.episodeSort = utils.cast(int, data.attrib.get('episodeSort', '-1'))\n self.flattenSeasons = utils.cast(int, data.attrib.get('flattenSeasons', '-1'))\n self.genres = self.findItems(data, media.Genre)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.languageOverride = data.attrib.get('languageOverride')\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.locations = self.listAttrs(data, 'path', etag='Location')\n self.network = data.attrib.get('network')\n self.originallyAvailableAt = utils.toDatetime(data.attrib.get('originallyAvailableAt'), '%Y-%m-%d')\n self.originalTitle = data.attrib.get('originalTitle')\n self.rating = utils.cast(float, data.attrib.get('rating'))\n self.ratings = self.findItems(data, media.Rating)\n self.roles = self.findItems(data, media.Role)\n self.seasonCount = utils.cast(int, data.attrib.get('seasonCount', self.childCount))\n self.showOrdering = data.attrib.get('showOrdering')\n self.similar = self.findItems(data, media.Similar)\n self.studio = data.attrib.get('studio')\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.tagline = data.attrib.get('tagline')\n self.theme = data.attrib.get('theme')\n self.useOriginalTitle = utils.cast(int, data.attrib.get('useOriginalTitle', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def __init__(self, cfg, data_dir, train_files):\n self.cfg = cfg\n self.imgs, self.ids, self.anns = None, None, None\n self.data_dir = 
data_dir\n self.product_labels = {}\n print('loading annotations into memory...')\n tic = time.time()\n self.datasets = []\n if type(train_files) != list:\n train_files = [train_files]\n for train_file in train_files:\n labels_file = os.path.dirname(train_file)\n labels_file = os.path.join(labels_file, 'labels.txt')\n with open(labels_file, 'r') as f:\n self.product_names = {}\n for line in f:\n label, prod_name = line.split()\n self.product_labels[prod_name] = int(label)\n with open(train_file, 'r') as f:\n dataset = {}\n train_file_dir = os.path.dirname(train_file)\n for line in f:\n img, ann_file = line.split()\n img = os.path.join(train_file_dir, 'images',\n os.path.basename(img))\n ann_file = os.path.join(train_file_dir, 'annotations',\n os.path.basename(ann_file))\n dataset[img] = ann_file\n self.datasets.append(dataset)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n self.create_index()", "def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];", "def load_data_and_labels(filename, dataset_name,is_train):\n label_count={}\n parameter_file = \"./parameters.json\"\n params = json.loads(open(parameter_file).read())\n if dataset_name == 'ag_news' or dataset_name == 'dbpedia' or dataset_name == 'sogou_news' or dataset_name == 'amazon_review_full' or dataset_name == 'amazon_review_polarity' :\n df = pd.read_csv(filename, names=['label', 'title', 'text'], dtype={'title': object,'text': object})\n selected = ['label', 'title','text','too_short','to_drop']\n\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[2]].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN)\n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df[selected[2]].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df[selected[2]].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n \n elif dataset_name == 'yelp_review_full' or dataset_name == 'yelp_review_polarity':\n df = pd.read_csv(filename, names=['label','text'], dtype={'text': object})\n selected = ['label','text','too_short','to_drop']\n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['too_short']= df[selected[1]].apply(lambda x: (remove_short(x,params['min_length'])))\n 
df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n # Map the actual labels to one hot labels\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['text'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['text'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n elif dataset_name == 'yahoo_answers':\n df = pd.read_csv(filename, names=['label', 'title', 'content','answer'], dtype={'title': object,'answer': object,'content': object})\n selected = ['label', 'title','content','answer','too_short','to_drop'] \n non_selected = list(set(df.columns) - set(selected))\n df = df.drop(non_selected, axis=1) # Drop non selected columns \n df['temp'] = df[['content','answer']].apply(lambda x: ' '.join(str(v) for v in x), axis=1)\n df['too_short']= df['temp'].apply(lambda x: (remove_short(x,params['min_length'])))\n df['too_short']=df['too_short'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any') # Drop null rows \n df['to_drop']= df[selected[0]].apply(lambda y: (shrink_df(y,label_count,params['data_per_class'])))\n df['to_drop']=df['to_drop'].replace('N/A',np.NaN) \n if is_train:\n df = df.dropna(axis=0, how='any', subset=selected) # Drop null rows\n df = df.reindex(np.random.permutation(df.index)) # Shuffle the dataframe\n for key,value in label_count.items():\n print(\"{} : {}\".format(key,value))\n labels = sorted(list(set(df[selected[0]].tolist())))\n one_hot = np.zeros((len(labels), len(labels)), int)\n np.fill_diagonal(one_hot, 1)\n label_dict = dict(zip(labels, one_hot))\n if params['use_summary']==1:\n x_raw = df['temp'].apply(lambda x: gen_summary(x,params['max_length'])).tolist()\n else:\n x_raw = df['temp'].apply(lambda x: clean_str(x,params['max_length'])).tolist()\n\n y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()\n\n return x_raw, y_raw, df, labels", "def loadRatings(ratingstablename, ratingsfilepath, openconnection):\n\n cur = openconnection.cursor()\n cur.execute(\"DROP TABLE IF EXISTS \" + ratingstablename)\n cur.execute(\"CREATE TABLE \" + ratingstablename + \"(\" + \"\\\n UserId INT, \\\n TMP1 CHAR, \\\n MovieId INT, \\\n TMP2 CHAR, \\\n Rating FLOAT, \\\n TMP3 CHAR, \\\n TMP4 BIGINT \\\n );\")\n with open(ratingsfilepath, 'r') as fpath:\n cur.copy_from(fpath, ratingstablename, sep = ':')\n cur.execute(\"ALTER TABLE \" + ratingstablename + \"\\\n DROP COLUMN TMP1, \\\n DROP COLUMN TMP2, \\\n DROP COLUMN TMP3, \\\n DROP COLUMN TMP4\")\n \n openconnection.commit()\n cur.close()", "def load_dataset_and_make_vectorizer(cls, review_csv):\n df = pd.read_csv(review_csv)\n train_df = df[df.split=='train']\n return cls(df, ReviewVectorizer.from_dataframe(train_df))", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, 
included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def load_data():\n t = time()\n print 'loading tweets, please wait...'\n trained_tweets = load_tweets('training_dataset')\n eval_tweets = load_tweets('evaluation_dataset')\n print 'Time taken {}'.format(time() - t)\n t = time()\n print 'loading w2v model, please wait...'\n model = w2v_load_model('GoogleNews-vectors-negative300.bin')\n print 'Time taken {}'.format(time() - t)\n return trained_tweets, eval_tweets, model", "def load_data(nlp, cue_verbs, poly):\n train_dicts, _ = load_quote_authors(nlp)\n author_prediction_dataset = AuthorPredictionDataset(train_dicts, cue_verbs, poly)\n return np.array(train_dicts), author_prediction_dataset", "def load_data(database_filepath):\n #X, Y, category_names = load_data(database_filepath)\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql('SELECT * FROM categorized_messages_tbl', engine)\n \n X = df.filter(items=['id', 'message', 'original', 'genre'])\n\n #Drop Columns that Aren't Relevant for Predictions\n # - 'child_alone' has no responses\n y = df.drop(['id', 'message', 'original', 'genre', 'child_alone'], axis=1)\n\n #'Related' Column has several '2' values; updating these to 1\n y['related'] = y['related'].map(lambda x: 1 if x == 2 else x)\n \n return X['message'], y, y.columns.values", "def load_gt_roidb(dataset_name, image_set_name, root_path, dataset_path, result_path=None,\n flip=False):\n imdb = eval(dataset_name)(image_set_name, root_path, dataset_path, result_path)\n roidb = imdb.gt_roidb()\n if flip:\n roidb = imdb.append_flipped_images(roidb)\n return roidb", "def load_gt_roidb(dataset_name, image_set_name, root_path, dataset_path, result_path=None,\n flip=False):\n imdb = eval(dataset_name)(image_set_name, root_path, dataset_path, result_path)\n roidb = imdb.gt_roidb()\n if flip:\n roidb = imdb.append_flipped_images(roidb)\n return roidb", "def prep_data(ratings_df, watched_df=None, watchlist_df=None,\n good_threshold=4, bad_threshold=3):\n id_book = pd.read_csv('title_basics_small.csv')\n try:\n # try to read Letterboxd user data\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Rating', 'Name', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Rating'] >= good_threshold]\n bad_df = ratings_df[ratings_df['Rating'] <= bad_threshold]\n neutral_df = ratings_df[(ratings_df['Rating'] > bad_threshold) & (ratings_df['Rating'] < good_threshold)]\n # convert dataframes to lists\n good_list, good_dict = df_to_id_list(good_df, id_book)\n bad_list, bad_dict = df_to_id_list(bad_df, id_book)\n neutral_list, neutral_dict = df_to_id_list(neutral_df, id_book)\n except 
KeyError:\n # Try to read IMDb user data\n # strip ids of \"tt\" prefix\n ratings_df['movie_id'] = ratings_df['Const'].apply(lambda x: str(x).lstrip(\"tt\"))\n # drop rows with nulls in the columns we use\n ratings_df = ratings_df.dropna(axis=0, subset=['Your Rating', 'Year'])\n # split according to user rating\n good_df = ratings_df[ratings_df['Your Rating'] >= good_threshold*2]\n bad_df = ratings_df[ratings_df['Your Rating'] <= bad_threshold*2]\n neutral_df = ratings_df[(ratings_df['Your Rating'] > bad_threshold*2) & (ratings_df['Your Rating'] < good_threshold*2)]\n # convert dataframes to lists\n good_list = good_df['movie_id'].to_list()\n bad_list = bad_df['movie_id'].to_list()\n neutral_list = neutral_df['movie_id'].to_list()\n # make ratings dictionaries\n good_dict = dict(zip(good_list, good_df['Your Rating'].tolist()))\n bad_dict = dict(zip(bad_list, bad_df['Your Rating'].tolist()))\n neutral_dict = dict(zip(neutral_list, neutral_df['Your Rating'].tolist()))\n except Exception as e:\n # can't read the dataframe as Letterboxd or IMDb user data\n print(\"This dataframe has columns:\", ratings_df.columns)\n raise Exception(e)\n\n ratings_dict = dict(list(good_dict.items()) + list(bad_dict.items()) + list(neutral_dict.items()))\n\n if (watched_df is not None) and (not watched_df.empty):\n # Construct list of watched movies that aren't rated \"good\" or \"bad\"\n # First, get a set of identified IDs.\n rated_names = set(good_df.Name.tolist() + bad_df.Name.tolist() + neutral_list)\n # drop nulls from watched dataframe\n full_history = watched_df.dropna(axis=0, subset=['Name', 'Year'])\n # get list of watched movies that haven't been rated\n hist_list = df_to_id_list(full_history[~full_history['Name'].isin(rated_names)], id_book)[0]\n # add back list of \"neutral\" movies (whose IDs we already found before)\n hist_list = hist_list + neutral_list\n else: hist_list = neutral_list\n\n if (watchlist_df is not None) and (not watchlist_df.empty):\n try:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Name', 'Year'])\n val_list = df_to_id_list(watchlist_df, id_book)[0]\n except KeyError:\n watchlist_df = watchlist_df.dropna(axis=0, subset=['Const', 'Year'])\n watchlist_df['movie_id'] = watchlist_df['Const'].str.lstrip(\"tt\")\n val_list = watchlist_df['movie_id'].tolist()\n else: val_list = []\n\n return (good_list, bad_list, hist_list, val_list, ratings_dict)", "def get_basic_data(self):\n\n db = DataBase().clear_table()\n\n data = self.scraper.scrape_top_250()\n for d in data:\n title = d.find(\"td\", class_=\"titleColumn\")\n title = title.find(\"a\")\n title = re.sub(\"<.*?>\", \"\", str(title))\n\n film_id = d.find(\"td\", class_=\"watchlistColumn\")\n film_id = film_id.find(\"div\")\n film_id = film_id[\"data-tconst\"]\n\n year = d.find(\"span\", class_=\"secondaryInfo\")\n year = re.sub(\"<.*?>\", \"\", str(year)).replace(\"(\", \"\").replace(\")\", \"\")\n\n director = d.find(\"td\", class_=\"titleColumn\")\n director = director.find(\"a\")\n director = director[\"title\"]\n director, *cast = director.split(\", \")\n director = director.replace(\" (dir.)\", \"\")\n\n rating = d.find(\"td\", class_=\"ratingColumn imdbRating\")\n rating = rating.find(\"strong\")\n rating = re.sub(\"<.*?>\", \"\", str(rating))\n\n poster = d.find(\"td\", class_=\"posterColumn\")\n poster = poster.find(\"img\")[\"src\"]\n poster = re.sub(\"@.+\", \"@._V1_FMjpg_UY474_.jpg\", poster)\n\n DataBase().populate_table(\n (title, film_id, year, director, \", \".join(cast), rating, poster)\n )", "def 
get_reviews(titles):\n\n neg_files = glob.glob(\"txt_sentoken/neg/*.txt\")\n pos_files = glob.glob(\"txt_sentoken/pos/*.txt\")\n review_dict = {'sentiment' : list(), 'review' : list(), 'title' : list()}\n\n for file in neg_files:\n with open(file, 'r') as f:\n review_text = f.read()\n review_dict['sentiment'].append(\"neg\")\n review_dict['review'].append(review_text)\n review_dict['title'].append(random.choice(titles)['title'])\n for file in pos_files:\n with open(file, 'r') as f:\n review_text = f.read()\n review_dict['sentiment'].append(\"pos\")\n review_dict['review'].append(review_text)\n review_dict['title'].append(random.choice(titles)['title'])\n reviews = pd.DataFrame(review_dict)\n return reviews", "def load_data(limit=0, split=0.8):\n # Partition off part of the train data for evaluation\n train_data, _ = thinc.extra.datasets.imdb()\n random.shuffle(train_data)\n train_data = train_data[-limit:]\n texts, labels = zip(*train_data)\n cats = [{'POSITIVE': bool(y)} for y in labels]\n split = int(len(train_data) * split)\n return (texts[:split], cats[:split]), (texts[split:], cats[split:])", "def load_data(database_filepath):\n engine = create_engine('sqlite:///'+ database_filepath)\n df = pd.read_sql(\"SELECT * FROM DisasterResponse\", engine)\n #exclude colums that are not needed in model\n col=[i for i in df.columns if i not in ['id','original', 'genre']]\n X = df[\"message\"]\n Y = df.iloc[:,4:]\n #global category_names\n category_names = Y.columns\n return X, Y, category_names", "def load_tmdb_details(self):\n details = tmdb.tmdb_client().get_movie_details(self.tmdb_id)\n if {\"title\", \"poster_path\"} <= set(details): # ensure both keys are in the results dict.\n self.title = details[\"title\"]\n self.poster_image_url = details[\"poster_path\"]", "def find_reviews(imdb_id):\n film = __get_session().query(Movie).filter_by(imdb_id=imdb_id).first()\n reviews = []\n\n if film:\n for r in film.reviews:\n review = {\"reviewer\": r.reviewer, \"review_txt\": r.review_txt,\n \"timestamp\": r.timestamp}\n reviews.append(review)\n \n return reviews" ]
[ "0.75287616", "0.66760135", "0.65806144", "0.6317917", "0.6313057", "0.6215495", "0.61960655", "0.61669785", "0.6146301", "0.6112559", "0.60396117", "0.5993801", "0.59847677", "0.59646434", "0.59450567", "0.5916073", "0.59076846", "0.5888437", "0.5850578", "0.5838023", "0.5837334", "0.58049804", "0.5794639", "0.5735462", "0.5723742", "0.5720875", "0.5719168", "0.57129264", "0.57035667", "0.56937844", "0.5672918", "0.5646037", "0.5643363", "0.56350684", "0.56241524", "0.5607232", "0.5575846", "0.5575713", "0.5573966", "0.55609846", "0.5549562", "0.55477864", "0.5532248", "0.55301756", "0.5518136", "0.5513379", "0.5499785", "0.5480363", "0.5479987", "0.54504275", "0.5441403", "0.5412482", "0.5407622", "0.540544", "0.54001933", "0.5390241", "0.5388178", "0.53871024", "0.5383745", "0.5382041", "0.53808635", "0.5353781", "0.5350282", "0.5346734", "0.5333629", "0.532961", "0.5319645", "0.5319645", "0.5317983", "0.53134257", "0.5305324", "0.5304551", "0.52950376", "0.5293591", "0.528702", "0.5281267", "0.5279909", "0.527411", "0.52656895", "0.52616763", "0.5257618", "0.52510667", "0.5244676", "0.5236316", "0.52329594", "0.5222941", "0.5218612", "0.521104", "0.52067506", "0.52052283", "0.52039653", "0.520182", "0.520182", "0.5196226", "0.519583", "0.5194439", "0.51864856", "0.5182895", "0.5179239", "0.5178054" ]
0.6858757
1
Loads the dataset as a generator of batches.
def load_image_dataset(split, batch_size, name="cifar10", repeat=False, shuffle=False, shuffle_seed=None):
  # Do no data augmentation.
  ds, dataset_info = tfds.load(
      name, split=split, as_supervised=True, with_info=True)
  num_classes = dataset_info.features["label"].num_classes
  num_examples = dataset_info.splits[split].num_examples
  num_channels = dataset_info.features["image"].shape[-1]

  def img_to_float32(image, label):
    return tf.image.convert_image_dtype(image, tf.float32), label

  ds = ds.map(img_to_float32).cache()
  ds_stats = _ALL_IMG_DS_STATS[ImgDatasets(name)]

  def img_normalize(image, label):
    """Normalize the image to zero mean and unit variance."""
    mean, std = ds_stats
    image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)
    image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)
    return image, label

  ds = ds.map(img_normalize)
  if batch_size == -1:
    batch_size = num_examples
  if repeat:
    ds = ds.repeat()
  if shuffle:
    ds = ds.shuffle(buffer_size=10 * batch_size, seed=shuffle_seed)
  ds = ds.batch(batch_size)
  return tfds.as_numpy(ds), num_classes, num_examples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch(data_path):\n train, _, _ = get_datasets(\n data_path=data_path,\n nb_nodes=7,\n task_type=\"classification\",\n nb_classes=2,\n split=None,\n k_fold=None,\n seed=1234,\n )\n for batch in torch.utils.data.DataLoader(\n train, shuffle=False, batch_size=25, drop_last=False\n ):\n return batch", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def batch_generator(self, num_epochs=1, shuffle=False):\n def parse_fn(tfrecord):\n return parse_mnist_tfrec(\n tfrecord, self.name, self.features_shape, True\n )\n dataset = tf.data.TFRecordDataset(\n self.filenames_list, compression_type=self.compression_type\n )\n if shuffle:\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.map(parse_fn).prefetch(self.batch_size)\n dataset = dataset.batch(self.batch_size)\n iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels", "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, batch_size=1):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. 
Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_class_ids = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, gt_class_ids] = 1\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_class_ids]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def load(self, handler, name, size, \n batch_size=None, shuffle=False, \n sample_transform=None, batch_transform=None):\n if sample_transform is None:\n sample_transform = self.sample_transform\n if batch_transform is None:\n batch_transform = self.batch_transform\n dataset = DatasetIterator(name, size, handler, \n shuffle=shuffle,\n transform=sample_transform)\n if batch_size is None:\n return dataset\n batches = BatchIterator(dataset, \n batch_size=batch_size, \n transform=batch_transform)\n return batches", "def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def data_generator(self, data):\n X, y = [], []\n while 1:\n np.random.shuffle(data)\n for line in data:\n img = Image.open(line[0])\n img = img.resize((32, 16))\n img = np.asarray(img, dtype=np.float32)\n img = img / 128. 
- 1.\n img = np.transpose(img, (2, 0, 1)) \n X.append(img)\n y.append(line[1])\n if len(X) == self.config.batch_size:\n batch = (np.asarray(X), np.asarray(y))\n X = []\n y = []\n yield batch", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def load_batch(self):\r\n\r\n #if we've seen all the data, start again with them in a new random order\r\n if self.batchcounter+self.batchsize > self.num_data:\r\n self.batchcounter = 0\r\n self.epochs += 1\r\n self._permutation = np.random.permutation(self.num_data)\r\n\r\n this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize]\r\n\r\n self.X_batch = self.X[this_perm]\r\n self.likelihood.set_data(self.Y[this_perm])\r\n if self.has_uncertain_inputs:\r\n self.X_variance_batch = self.X_variance[this_perm]\r\n\r\n self.batchcounter += self.batchsize\r\n\r\n self.data_prop = float(self.batchsize)/self.num_data\r\n\r\n self._compute_kernel_matrices()\r\n self._computations()", "def gen_batch(img_dir, id_label_dict, batch_size, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n num_images = len(img_file_path)\n while True:\n for i in range(0, num_images-batch_size, batch_size):\n X, y = gen_data_file(img_file_path[i:i+batch_size], id_label_dict, num_class)\n yield X, y", "def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n\n if os.path.isdir(filenames):\n num_records = len(os.listdir(filenames))\n print(\"Loading from directory. 
\" + str(num_records) + \" tfRecords found.\")\n files = tf.data.Dataset.list_files(filenames + \"/\" + \"*.tfrecord\").shuffle(num_records)\n dataset = files.apply(\n tf.contrib.data.parallel_interleave(\n lambda x: tf.data.TFRecordDataset(x, num_parallel_reads=256, buffer_size=8*1024*1024),\n cycle_length=32, sloppy=True)\n )\n else:\n print(\"Loading from single tfRecord...\")\n dataset = tf.data.TFRecordDataset(filenames + \".tfrecord\").repeat()\n \n dataset = dataset.map(self.parser, num_parallel_calls=128)\n \n if self.subset == 'train':\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n \n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(10)\n \n iterator = dataset.make_one_shot_iterator()\n seq_batch, input_batch, map_batch, transformation_batch = iterator.get_next()\n\n return seq_batch, input_batch, map_batch, transformation_batch", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def generateBatches(data, batch_size):\n random.shuffle(data)\n batches = []\n size = len(data)\n def loadBatches(data, total_size, batch_size_):\n for i in range(0, total_size, batch_size_):\n yield data[i:min(total_size, i + batch_size_)]\n\n for unprocessed_batch in loadBatches(data, size, batch_size):\n processed_batch = processBatch(unprocessed_batch)\n batches.append(processed_batch)\n return batches", "def Pylearn2DatasetGenerator(dataset,\n batch_size,\n which_batches=range(0,1),\n mode='shuffled_sequential',\n num_batches=-1):\n\n for b in dataset.iterator(\n batch_size,\n dataset.get_num_examples()/batch_size,\n mode=mode,\n data_specs=dataset.get_data_specs(),\n return_tuple=True\n ):\n b = [b[i] for i in which_batches]\n yield b", "def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)", "def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data", "def get_dataset_batches(self):\n\n coco_api = COCO(annotation_file=self._args.annotation_path)\n image_ids = coco_api.getImgIds()\n\n image_paths = []\n for image_id in image_ids:\n coco_img = coco_api.imgs[image_id]\n image_paths.append(\n os.path.join(self._args.data_dir, coco_img['file_name'])\n )\n\n dataset = tf.data.Dataset.from_tensor_slices(image_paths)\n\n def load_image_op(path):\n image = tf.io.read_file(path)\n image = tf.image.decode_jpeg(image, channels=3)\n\n return tf.data.Dataset.from_tensor_slices([image])\n\n dataset = dataset.interleave(\n load_image_op,\n cycle_length=tf.data.AUTOTUNE,\n block_length=8,\n num_parallel_calls=tf.data.AUTOTUNE\n )\n\n def preprocess_fn(image):\n if self._args.input_size is not None:\n image = tf.image.resize(\n image, 
size=(self._args.input_size, self._args.input_size)\n )\n image = tf.cast(image, tf.uint8)\n return image\n\n dataset = dataset.map(\n map_func=preprocess_fn,\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n\n dataset = dataset.batch(self._args.batch_size, drop_remainder=False)\n\n dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)\n\n return dataset, None", "def load_batch(self, batch_index):\n if self.num_samples < self.batch_size:\n raise ValueError('The number of samples is smaller than the batch size')\n \n if self.mode != 'testing':\n if len(self.identities) < self.batch_size:\n raise ValueError('The number of identities is smaller than the batch size')\n\n if self.mode != 'testing':\n return self._yield_training_validation(batch_index)\n else:\n return self._yield_testing(batch_index)", "def get_batches(path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):\n return gen.flow_from_directory(path,\n target_size=(ROWS, COLS),\n class_mode=class_mode,\n shuffle=shuffle,\n batch_size=batch_size)", "def open_dataset(dataset_path, batch_size, img_shape, infinite=True):\n dataset = generate_paths()\n\n dataset_gen = dataset_generator(\n dataset,\n batch_size=batch_size, infinite=infinite,\n img_shape=img_shape\n )\n steps = len(dataset) // batch_size\n return dataset_gen, steps", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n # Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj", "def NumpyDatasetGenerator(dataset,\n batch_size,\n shuffle=True,\n num_batches=-1):\n # top.Optimizer is expecting for tuples\n if isinstance(dataset, tuple):\n dataset = tuple(dataset)\n\n if shuffle==True:\n perm = np.random.permutation(dataset[0].shape[0])\n dataset = [d[perm] for d in dataset]\n if num_batches == -1:\n num_batches = dataset[0].shape[0]/batch_size\n for i in range(num_batches):\n start = i*batch_size\n finish = (i+1)*batch_size\n batch = [d[start:finish] for d 
in dataset]\n yield tuple(batch)", "def generator_input(filenames, chunk_size, batch_size=64):\n\n feature_cols = None\n while True:\n input_reader = pd.read_csv(\n tf.gfile.Open(filenames[0]),\n names=CSV_COLUMNS,\n chunksize=chunk_size,\n na_values=' ?')\n\n for input_data in input_reader:\n input_data = input_data.dropna()\n # Pop off all of the columns we want to predict and concatenate them\n labels = pd.concat([input_data.pop(x) for x in LABEL_COLUMNS], 1)\n\n input_data = to_numeric_features(input_data, feature_cols)\n\n # Retains schema for next chunk processing.\n if feature_cols is None:\n feature_cols = input_data.columns\n\n idx_len = input_data.shape[0]\n for index in range(0, idx_len, batch_size):\n yield (input_data.iloc[index:min(idx_len, index + batch_size)],\n labels.iloc[index:min(idx_len, index + batch_size)])", "def data(self, train=True, batch_size=2):\n if train:\n elements = self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def batch_iter(input_data,batch_size):\r\n batch_ids,batch_mask,batch_segment,batch_label=[],[],[],[]\r\n for features in input_data:\r\n if len(batch_ids) == batch_size:\r\n yield batch_ids,batch_mask,batch_segment,batch_label\r\n batch_ids, batch_mask, batch_segment, batch_label = [], [], [], []\r\n\r\n batch_ids.append(features['input_ids'])\r\n batch_mask.append(features['input_mask'])\r\n batch_segment.append(features['segment_ids'])\r\n batch_label.append(features['label_ids'])\r\n\r\n if len(batch_ids) != 0:\r\n yield batch_ids, batch_mask, batch_segment, batch_label", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def load_batch(dataset, batch_size=32, height=224, width=224, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, label = data_provider.get(['image', 'label'])\n\n # Preprocess image for usage by Inception.\n # image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n image = no_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n\n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n\n return images, images_raw, labels", "def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, label = data_provider.get(['image', 'label'])\n \n # Preprocess image for usage by 
Inception.\n image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n \n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n \n return images, images_raw, labels", "def generate_batch(self, batch_size, rand=None, *args, **kwargs):\n return [\n self.generate_datasets(rand, *args, **kwargs) for _ in range(batch_size)\n ]", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):\n data_provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset, common_queue_capacity=32,\n common_queue_min=8)\n image_raw, label = data_provider.get(['image', 'label'])\n\n # Preprocess image for usage by Inception.\n image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)\n\n # Preprocess the image for display purposes.\n image_raw = tf.expand_dims(image_raw, 0)\n image_raw = tf.image.resize_images(image_raw, [height, width])\n image_raw = tf.squeeze(image_raw)\n\n # Batch it up.\n images, images_raw, labels = tf.train.batch(\n [image, image_raw, label],\n batch_size=batch_size,\n num_threads=1,\n capacity=2 * batch_size)\n\n return images, images_raw, labels", "def load_test_data(batch_size=32):\n log = logging.getLogger(__name__)\n\n log.info('Reading TEST csv file...')\n # read csv data file\n\n samples = []\n with open('./test_driving_log.csv') as f:\n csv_reader = csv.reader(f)\n next(csv_reader) #just skip the header line for test data set provided by Udacity\n\n for row in csv_reader:\n samples.append(row)\n\n generator = input_generator(samples, batch_size, is_for_validation=True, drop_zero_samples=True)\n return generator, math.ceil(len(samples) / batch_size)", "def default_generator(self,\n dataset,\n epochs=1,\n predict=False,\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n if not predict:\n print('Starting epoch %i' % epoch)\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n\n feed_dict = dict()\n if y_b is not None and not predict:\n for index, label in enumerate(self.labels_fd):\n if self.mode == \"classification\":\n feed_dict[label] = to_one_hot(y_b[:, index])\n if self.mode == \"regression\":\n feed_dict[label] = y_b[:, index:index + 1]\n if w_b is not None:\n feed_dict[self.weights] = w_b\n # Transform SMILES string to integer vectors\n smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]\n feed_dict[self.smiles_seqs] = np.stack(smiles_seqs, axis=0)\n yield feed_dict", "def load_training_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))", "def data_generator(batch_size, preprocessor, x, y):\n num_examples = len(x)\n examples = zip(x, y)\n examples = sorted(examples, key = lambda x: x[0].shape[0])\n end = num_examples - batch_size + 1\n batches = [examples[i:i+batch_size]\n for i in range(0, end, batch_size)]\n random.shuffle(batches)\n while 
True:\n for batch in batches:\n x, y = zip(*batch)\n yield preprocessor.process(x, y)", "def gen_batches(data, batch_size=2048):\n indices = torch.randperm(len(data))\n indices = indices.cuda()\n\n for idx in range(0, len(data) - batch_size + 1, batch_size):\n sample = indices[idx:idx + batch_size]\n l_words, r_words = data.L_words[sample], data.R_words[sample]\n l_vecs = data.l_vecs[l_words]\n r_vecs = data.r_vecs[r_words]\n l_bias = data.l_biases[l_words]\n r_bias = data.r_biases[r_words]\n weight = data.weights[sample]\n y = data.y[sample]\n yield weight, l_vecs, r_vecs, y, l_bias, r_bias", "def get_dataset(dataset_dir, split_name, batch_size, workers):\n folder = os.path.join(dataset_dir, '{}_*.tfrecord'.format(split_name))\n filenames = tf.data.Dataset.list_files(folder)\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.shuffle(1000)\n dataset = dataset.repeat()\n dataset = dataset.map(preprocess, num_parallel_calls=workers)\n dataset = dataset.apply(\n tf.contrib.data.batch_and_drop_remainder(batch_size))\n dataset = dataset.prefetch(2)\n\n filename = '{}.txt'.format(split_name)\n with open(os.path.join(dataset_dir, filename), 'r') as f:\n examples = int(f.read().strip())\n\n return dataset.make_one_shot_iterator(), examples", "def batch_loader(data_set: Union[IterableDataset, Dataset],\n batch_size: bool,\n shuffle=False) -> DataLoader:\n return DataLoader(\n data_set,\n batch_size=batch_size,\n collate_fn=lambda x: x,\n shuffle=shuffle\n )", "def generate_validation_batch(self):\n assert self.validation_dataset is not None\n assert self.data_tags is not None\n \n # Sample indices and get data\n index_array = np.random.choice(self.num_validation_samples, self.p.trainer.batch_size)\n return self.get_data_from_indices(self.validation_dataset, index_array)", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=10000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def generate(\n self,\n dataset: Tensor,\n labels: Tensor,\n chunk_size: int) -> Tuple[\n int, Iterator[Tuple[Tensor, Tensor]]]:", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def batch_train_generator(self, X, batch_size, seq_len):\n startidx = np.random.randint(0, len(X) - seq_len, 
batch_size)\n while True:\n batch_X = np.array([X[start:start + seq_len]\n for start in startidx])\n batch_y = np.array(\n [X[start:start + seq_len + self.config.shift] for start in startidx])\n batch_y = batch_y[:, -1]\n startidx = (startidx + seq_len) % (len(X) - seq_len)\n yield batch_X.reshape(batch_size, seq_len, 1), batch_y.reshape(batch_size, 1)", "def get_batches(dirname,\n gen=keras.preprocessing.image.ImageDataGenerator(),\n shuffle=True,\n batch_size=1,\n target_size=(224, 224),\n class_mode=\"categorical\"):\n return gen.flow_from_directory(dirname,\n shuffle=shuffle,\n batch_size=batch_size,\n target_size=target_size,\n class_mode=class_mode)", "def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels", "def get_test_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def batch_generator(Dataset, batch_size, shuffle=True, repeat = 1, ignore_class = 255):\n\n \"\"\"\n Args : \n Dataset (class) : dataset class defined in cityscapes.py. \n batch_size (int) : batch size \n shuffle (bool) : shuffle dataset order \n ignore_class (int) : class number to be ignored \n\n Return : \n images (np.array) : images \n labels (np.array) : labels array in 2d \n \n \"\"\"\n \n idx_dataset = list(range(len(Dataset)))\n idx_dataset = idx_dataset*repeat\n \n\n if shuffle :\n from random import shuffle\n shuffle(idx_dataset)\n\n for idx in range(len(idx_dataset)//batch_size):\n \n imgs_to_stack = []\n labels_to_stack = []\n\n for _data_idx in range(idx*batch_size, (idx+1)*batch_size):\n data_idx = idx_dataset[_data_idx]\n image, label = load_image_train(Dataset[data_idx])\n imgs_to_stack.append(image)\n labels_to_stack.append(label)\n \n images = tf.stack(imgs_to_stack)\n labels = tf.stack(labels_to_stack)\n\n if ignore_class : \n idx_to_ignore = labels!=ignore_class\n labels = tf.where(idx_to_ignore, labels, 0)\n\n yield (images, labels)", "def trainDataGenerator(num_epochs):\r\n samples, all_files = get_filenames()\r\n for num in range(num_epochs):\r\n for i in range(len(samples)):\r\n sample = samples[i]\r\n for file in all_files[i]:\r\n ohvs, Y = prepData(sample, file)\r\n if (ohvs == []):\r\n continue\r\n X = np.array([ohvs[:800]])\r\n yield X, Y\r\n # for i in range(0, len(ohvs), 400):\r\n # X = np.array([ohvs[i : i+400]])\r\n # print(\"\\tX shape =\", X.shape)\r\n # yield X, Y\r", "def batches(set_name):\n global num_batches, args, ds_sizes \n # num_batches = how many batches in each dataset(train, valid, test)\n # ds_sizes = dataset_sizes \n for b in range(num_batches[set_name]):\n bi = b * args.batch_size # one batch mul batch_size \n bj = (b + 1) * args.batch_size \n if b == num_batches[set_name] - 1:\n bj = ds_sizes[set_name] # maybe only remainer set\n yield bi, bj", "def get_evaluate_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as 
validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 54\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator", "def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def load_array(data_arrays, batch_size, is_train=True): #@save\n dataset = tf.data.Dataset.from_tensor_slices(data_arrays)\n if is_train:\n dataset = dataset.shuffle(buffer_size=1000)\n dataset = dataset.batch(batch_size)\n return dataset", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def load_eval_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))", "def get_loader(self, batch_size=1, num_threads=3):\n\n gen_func, gen_types, gen_shapes = self.get_batch_gen(\n self, self.steps_per_epoch, batch_size)\n\n loader = tf.data.Dataset.from_generator(gen_func, gen_types, gen_shapes)\n\n loader = loader.map(map_func=self.transform,\n num_parallel_calls=num_threads)\n\n if ('batcher' not in self.model_cfg.keys() or\n self.model_cfg.batcher == 'DefaultBatcher'):\n loader = loader.batch(batch_size)\n\n length = len(self.dataset) / batch_size + 1 if len(\n self.dataset) % batch_size else len(self.dataset) / batch_size\n length = length if self.steps_per_epoch is None else self.steps_per_epoch\n\n return loader, int(length)", "def prepare_epoch(dataset):\n print(\"[-] Epoch Start\")\n\n i = 0\n for sample in range(len(dataset)):\n if sample <= i + BATCH_SIZE-1:\n continue\n\n batch = []\n for i in range(i, i+BATCH_SIZE):\n batch.append(get_image(dataset[i], OUT_SIZE, CHANNELS))\n\n i += BATCH_SIZE + 1\n\n batch_images = np.array(batch).astype(np.float32)\n yield (batch_images, batch_images)\n print(\"i: {}, s: {}\".format(i, sample))\n\n print(\"[+] Epoch complete\")", "def nn_batch_generator(self, x_train):\n # Shuffle the batch\n np.random.seed(self.seed)\n shuffle_index = np.arange(np.shape(x_train)[0])\n np.random.shuffle(shuffle_index)\n x = x_train[shuffle_index, :]\n y = x_train[shuffle_index, :]\n\n # Iterate until making a full epoch\n counter = 0\n while 1:\n index_batch = shuffle_index[\n 
self.batch_size * counter : self.batch_size * (counter + 1)\n ]\n # Decompress batch\n x_batch = x[index_batch, :]\n y_batch = y[index_batch, :]\n counter += 1\n yield (np.array(x_batch), np.array(y_batch))\n\n # Stopping rule\n if counter >= self.number_of_batches:\n counter = 0", "def loadInMemoryOneBatch(fileName,batchSize):\n\n inputFile = open(fileName)\n\n while True:\n objects = []\n allDone = False\n while True:\n line = inputFile.readline()\n if line:\n objects.append(line)\n if len(objects) == batchSize:\n break\n else:\n allDone = True\n break\n yield objects\n if allDone == True:\n break", "def _make_dataset_iterator(self, dataset):\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib_v1.DatasetIterator(dataset, self._input_workers,\n self._container_strategy())", "def Batch(dataset, batch_size, drop_last=False):\n\n return dataset.batch(batch_size=batch_size, drop_remainder=drop_last)", "def Pylearn2OldGenerator(dataset,\n batch_size,\n mode='shuffled_sequential',\n num_batches=-1):\n if num_batches == -1:\n num_batches = dataset.get_num_examples()/batch_size\n for b in dataset.iterator(\n mode,\n batch_size,\n num_batches):\n yield b", "def batch_generator(batch_size, file, dataset, indices, labels=\"labels\"):\n sample_size = len(indices)\n n_batches = int(sample_size/batch_size)\n h5f = h5py.File(file,'r')\n instarget = Target('AAAAAA')\n aa_to_int = instarget.predefining_dict()\n while True: \n for i in range(n_batches):\n if i == n_batches:\n batch_samples = h5f[dataset][i*batch_size:sample_size]\n seqs_onehot = instarget.int_to_onehot(list(batch_samples), len(aa_to_int))\n batch_y = h5f[labels][i*batch_size:sample_size]\n else:\n batch_samples = h5f[dataset][i*batch_size:i*batch_size+batch_size]\n seqs_onehot = instarget.int_to_onehot(list(batch_samples), len(aa_to_int))\n batch_y = h5f[labels][i*batch_size:i*batch_size+batch_size]\n yield (seqs_onehot, batch_y)", "def gen_batches(data, batch_size=8, randomize=False):\n indices = list(range(len(data)))\n targets = [randint(0, N_CLASSES - 1) for _ in indices] # random labels\n if randomize:\n shuffle(indices)\n\n for start in range(0, len(data), batch_size):\n labels = np.array(targets[start:start + batch_size])\n yield (pad_sequences(data[indices[start:start + batch_size]]),\n labels, labels)", "def batch_generator(data_frame_encoded):\n labels = data_frame_encoded[-1]\n # data = np.delete(data_frame_encoded, -1, axis=0)\n data = data_frame_encoded[:-1]\n\n num_features = len(data)\n num_batches = len(data[0])\n for i in range(num_batches):\n batch_compiled = []\n for j in range(num_features):\n if type(data[j][i]) is np.ndarray:\n batch_compiled.extend(data[j][i])\n else:\n batch_compiled.extend([data[j][i]])\n yield batch_compiled, labels[i]", "def load_test_batch(self, image_sequence_names):\n def _parse_test_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n return image_decoded\n\n image_dataset = tf.data.Dataset.from_tensor_slices(image_sequence_names).map(\n _parse_test_img).batch(self.batch_size).prefetch(self.batch_size*4)\n iterator = image_dataset.make_initializable_iterator()\n return iterator", "def data_loader(origin_data, batch_size, num_epochs=1):\n data = {}\n for key, value in origin_data.items():\n data[key] = np.copy(value)\n\n data_size = len(data['text_len'])\n num_batches_per_epoch = 
int((data_size-1)/batch_size) + 1\n\n for epoch in range(num_epochs):\n # shuffle the dataset at the begging of each epoch\n shuffle_indices = np.random.permutation(np.arange(data_size))\n for key, value in data.items():\n data[key] = value[shuffle_indices]\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n\n max_text_len = max(data['text_len'][start_index:end_index])\n\n yield (data['text'][start_index:end_index, :max_text_len],\n data['text_len'][start_index:end_index],\n data['label'][start_index:end_index],\n data['raw'][start_index:end_index])", "def train(self, num_batches: int):", "def batch_generator(batch_size, data, labels=None):\n n_batches = int(np.ceil(len(data) / float(batch_size)))\n idx = np.random.permutation(len(data))\n data_shuffled = data[idx]\n if labels is not None:\n labels_shuffled = labels[idx]\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n if labels is not None:\n yield data_shuffled[start:end, :], labels_shuffled[start:end]\n else:\n yield data_shuffled[start:end, :]", "def data_generation(imgs, labs, batch, validataion):\n\n # Initialization\n batch_images = np.empty((batch, imgs[0].shape[0], imgs[0].shape[1], imgs[0].shape[2]))\n batch_labels = np.empty((batch, 1))\n # Generate data\n while True: # loop forever\n for x in range(batch):\n rand = random.randint(0, len(labs)-1)\n if validataion:\n # Store un-altered image and measurement\n batch_images[x] = imgs[rand]\n batch_labels[x] = labs[rand]\n else:\n # Store new image and adjusted measurement\n batch_images[x], batch_labels[x] = transform_image(imgs[rand], labs[rand])\n yield batch_images, batch_labels", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def create_dataset_iterator(pathes, batch_size=64):\n path_ds = tf.data.Dataset.from_tensor_slices(pathes)\n image_ds = path_ds.map(\n load_and_preprocess_image, num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n dataset = image_ds.cache() # Especially performant if the data fits in memory.\n dataset = dataset.shuffle(buffer_size=len(pathes))\n dataset = dataset.repeat()\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return tf.compat.v1.data.make_one_shot_iterator(dataset)", "def get_one_shot_iterator(self):\n\n files = self._get_all_files()\n\n dataset = (\n tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers)\n .map(self._parse_function, num_parallel_calls=self.num_readers)\n .map(self._preprocess_image, num_parallel_calls=self.num_readers))\n\n if self.should_shuffle:\n dataset = dataset.shuffle(buffer_size=100)\n\n if self.should_repeat:\n dataset = dataset.repeat() # Repeat forever for training.\n else:\n dataset = dataset.repeat(1)\n\n dataset = dataset.batch(self.batch_size).prefetch(self.batch_size)\n return dataset.make_one_shot_iterator()", "def generate(self, labels, list_IDs, n_classes):\n # Infinite loop\n while 1:\n # Generate order of exploration of dataset\n indexes = self.__get_exploration_order(list_IDs)\n\n # Generate batches\n imax = int(len(indexes)/self.batch_size)\n for i in range(imax):\n # Find list of IDs\n list_IDs_temp = [list_IDs[k] for k in indexes[i*self.batch_size:(i+1)*self.batch_size]]\n print(\"Producing\")\n #print(list_IDs_temp)\n # Generate data\n X, y = 
self.__data_generation(labels, list_IDs_temp, n_classes)\n # print(X.shape)\n # print(y.shape)\n #print(\"Target Label\")\n #print(y)\n gc.collect()\n yield X, y", "def generator(data_dir, samples, batch_size=32):\n num_samples = len(samples)\n while 1:\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n for batch_sample in batch_samples:\n filename = csv_log_to_image_filename(data_dir,\n batch_sample[0])\n image = cv2.imread(filename)\n if image is not None:\n images.append(image)\n measurements.append(batch_sample[1])\n else:\n print(\"File \" + filename + \" is missing.\")\n\n X_data = np.array(images)\n y_data = np.array(measurements)\n yield sklearn.utils.shuffle(X_data, y_data)", "def get_data(self):\n if self.with_encoder:\n for i in count():\n batchdata = pd.read_csv(SEQUENTIAL_TRAIN_PATH,\n nrows=GAN_BATCH_SIZE,\n skiprows=i * GAN_BATCH_SIZE + 1,\n names=SEQUENTIAL_COLUMN_NAMES.keys(),\n dtype=SEQUENTIAL_COLUMN_NAMES)\n if len(batchdata) < GAN_BATCH_SIZE:\n yield None\n batchdata = batchdata['seq_contents'].values\n yield get_data_for_lstm_ae(batchdata)\n else:\n # shuffles data\n self.encoded_data = self.encoded_data[np.random.permutation(self.encoded_data.shape[0])]\n for i in count():\n result = self.encoded_data[i*GAN_BATCH_SIZE:(i+1)*GAN_BATCH_SIZE,:]\n if result.shape[0] < GAN_BATCH_SIZE:\n yield None\n yield result", "def get_generator(self, dataset, batchsize, shuffle = False):\n\t\tself.load_select_if_necessary((\"X\", \"Y\"))\n\t\trandom_state = np.random.RandomState(0)\n\n\t\twhile True:\n\t\t\tindices = list(range(len(self.X[dataset])))\n\t\t\tif shuffle:\n\t\t\t\trandom_state.shuffle(indices)\n\n\t\t\tX = [self.X[dataset][idx] for idx in indices]\n\t\t\tY = [self.Y[dataset][idx] for idx in indices]\n\n\t\t\tfor idx in range(0, len(X), batchsize):\n\t\t\t\tbatch_X = X[idx:min(idx + batchsize, len(X))]\n\t\t\t\tbatch_Y = Y[idx:min(idx + batchsize, len(X))]\n\t\t\t\tbatch_X = np.array(self.trim_and_pad_batch(batch_X))\n\n\t\t\t\tyield(batch_X, np.array(batch_Y))", "def _defineBatches(self):\n # extract all ids\n all_keys = list(self.data_dict.unique_ids)\n\n # randomly shuffle keys\n if self.random_shuffle_batches:\n random.shuffle(all_keys)\n\n # create batches based on number of batches\n if self.n_big_batches is not None:\n self.n_big_batches += 1\n # define cuts for batches\n cuts = np.linspace(0, self.n_observations,\n self.n_big_batches).round()\n # create batches based on batch size\n elif self.batch_size is not None:\n cuts = [x for x in range(0, self.n_observations,\n int(self.batch_size))]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n # save batches into dictionary\n batches = dict()\n for i in range(0, (len(cuts) - 1)):\n # create DataBatch object\n current_batch = DataBatch(ids=all_keys[cuts[i]:cuts[i+1]],\n batch_id=i)\n current_batch.setDiskStoragePath(self.disk_scratch)\n batches[i] = current_batch\n\n # save batches\n self.n_batches = len(batches.keys())\n self.batches = batches", "def RandomDataloader(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = torch.from_numpy(seq)\n\n # The input includes an additional channel 
used for the delimiter\n inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield inp.float(), outp.float()", "def build_train_generator(X: numpy.array, y: numpy.array,\n batch_size: int = 500) -> Iterable[Tuple[numpy.array]]:\n assert X.shape[0] == y.shape[0], \"Number of samples mismatch in X and y.\"\n\n def xy_generator():\n while True:\n n_batches = X.shape[0] // batch_size\n if n_batches * batch_size < X.shape[0]:\n n_batches += 1 # to yield last samples\n for i in range(n_batches):\n start = i * batch_size\n end = min((i + 1) * batch_size, X.shape[0])\n yield X[start:end], y[start:end]\n return xy_generator()", "def generator(self, batch = 1):\n data = self.create_df(self.file_path)\n video_files = self.get_video_files(self.file_path, self.directory)\n return self._generator(data, directory = self.directory, video_files = video_files, BATCH_SIZE = batch)", "def batches(self):\n return [self.get_batch(i) for i in range(self.num_batches)]", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass", "def get_train_batch_generator(self, size):\n self.shuffle_train()\n while self.train_position + size < len(self.train):\n yield self.unzip_batch(self.train[self.train_position:self.train_position + size])\n self.train_position = self.train_position + size", "def generate_batch(self, batch_size=8, shuffle=True):\n if self._contour_dicom_folder:\n contour_files = glob(os.path.join(self._contour_dicom_folder, \"*.h5\"))\n if shuffle:\n contour_files = np.random.permutation(contour_files)\n contours_generator = self._contour_folder_gen(contour_files)\n else:\n contours_generator = self._contour_dicom_generator\n\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 0\n for idx, (dataset, sources) in enumerate(contours_generator):\n if batch_idx > 0 and batch_idx % batch_size == 0:\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)\n x_batch, y_batch, sources_batch = [], [], []\n batch_idx = 0\n try:\n x_data = self._parse_channels(dataset, self.x_channels)\n y_data = self._parse_channels(dataset, self.y_channels)\n x_batch.append(x_data)\n y_batch.append(y_data)\n sources_batch.append(sources)\n batch_idx += 1\n except ValueError:\n # Log Error\n err_msg = \"Missing all channels in {}\".format(sources[\"filename\"])\n self._log_error(err_msg)\n\n if self._include_sources:\n yield sources_batch, np.array(x_batch), np.array(y_batch)\n else:\n yield np.array(x_batch), np.array(y_batch)", "def as_generator(self, shuffle=False, n_workers=0):\n\n data_loader = DataLoader(\n dataset=self, shuffle=shuffle, num_workers=n_workers\n )\n for sample in cycle(data_loader):\n sample_batch_dim_removed = {}\n for key, val in sample.items():\n sample_batch_dim_removed[key] = val[0]\n yield sample_batch_dim_removed", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def gen_batches(self, batch_size: int, data_type: str = 'train',\n shuffle: bool = None, reset: bool = None) -> Generator:\n if shuffle is None:\n shuffle = self.shuffle\n\n data = self.data[data_type]\n cursors = 
self._cursors[data_type]\n last_batches = self._last_batches[data_type]\n data_len = len(data)\n\n if data_len == 0:\n return\n if batch_size != self._old_batch_size:\n pass\n\n order = list(range(data_len))\n if shuffle:\n self.random.shuffle(order)\n\n if batch_size < 0:\n batch_size = data_len\n\n for i in range((data_len - 1) // batch_size + 1):\n yield tuple(zip(*[data[o] for o in order[i * batch_size:(i + 1) * batch_size]]))", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def get_batch(self, all_samples, all_labels, batch_size):\n\n # Create a Tensor dataset object for the samples and labels\n samples_dataset = tf.data.Dataset.from_tensor_slices(all_samples)\n labels_dataset = tf.data.Dataset.from_tensor_slices(all_labels)\n\n # Combine the samples dataset with the labels dataset\n combined_dataset = tf.data.Dataset.zip((samples_dataset, labels_dataset))\n\n # Prevent that you run out of samples by repeating the dataset once\n combined_dataset = combined_dataset.repeat()\n\n # Shuffle the data\n combined_dataset = combined_dataset.shuffle(batch_size)\n\n # Create batches of your dataset\n combined_dataset = combined_dataset.batch(batch_size)\n\n # Initialize the dataset for TensorFlow\n iterator = combined_dataset.make_initializable_iterator()\n\n # Get the batch samples and labels operations\n batch_samples, batch_labels = iterator.get_next()\n\n # Convert the samples and labels to type float32 to use them in the convolutional layer\n batch_samples = tf.cast(batch_samples, tf.float32)\n batch_labels = tf.cast(batch_labels, tf.float32)\n\n # Make the iterator object global to initialize it from another function\n self.iter_initializer = iterator.initializer\n\n return batch_samples, batch_labels" ]
[ "0.74145716", "0.72911185", "0.7193387", "0.70773464", "0.70638293", "0.7023753", "0.6965977", "0.69633365", "0.6931853", "0.69255435", "0.6894077", "0.6890785", "0.6880416", "0.6874165", "0.68381536", "0.6833069", "0.6754379", "0.6751766", "0.6733288", "0.6700763", "0.66704863", "0.66574925", "0.66537786", "0.66481775", "0.66441476", "0.66417843", "0.66359687", "0.6619923", "0.6613091", "0.65967", "0.658392", "0.65794414", "0.65683335", "0.6561383", "0.6561037", "0.65585345", "0.655376", "0.6543893", "0.65434515", "0.65378904", "0.653322", "0.65151346", "0.65040886", "0.64957047", "0.64853126", "0.6482821", "0.64631253", "0.64620495", "0.6452501", "0.6447949", "0.6445401", "0.6443651", "0.64336246", "0.64260435", "0.64156455", "0.641289", "0.64036995", "0.63962203", "0.639507", "0.6393098", "0.63830113", "0.6376943", "0.6353344", "0.6353344", "0.6350988", "0.6348943", "0.63454396", "0.6338203", "0.6332665", "0.6331951", "0.6327284", "0.6326342", "0.6324964", "0.6321072", "0.63170886", "0.6306368", "0.63019854", "0.62976277", "0.6286119", "0.6284171", "0.6278133", "0.62716585", "0.6270114", "0.62685496", "0.62659264", "0.6262985", "0.6257867", "0.62512577", "0.62461483", "0.62365425", "0.623635", "0.6235493", "0.62259424", "0.62208843", "0.6219561", "0.6210949", "0.6202717", "0.6202665", "0.61984503", "0.61912125", "0.6190074" ]
0.0
-1
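For reference, a minimal NumPy sketch of the shuffled mini-batch pattern that most of the loader snippets above implement (the names X, y, batch_size and the seed are illustrative assumptions, not taken from any one snippet):

import numpy as np

def iterate_minibatches(X, y, batch_size, shuffle=True, seed=0):
    # Yield (X_batch, y_batch) pairs that together cover the data once.
    indices = np.arange(len(X))
    if shuffle:
        np.random.default_rng(seed).shuffle(indices)
    for start in range(0, len(X), batch_size):
        batch_idx = indices[start:start + batch_size]
        yield X[batch_idx], y[batch_idx]

# usage sketch:
# for xb, yb in iterate_minibatches(X_train, y_train, batch_size=32):
#     train_step(xb, yb)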
Normalize the image to zero mean and unit variance.
def img_normalize(image, label):
    mean, std = ds_stats
    image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)
    image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)
    return image, label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def normalize(self):\n self._data /= self.norm()", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)", 
"def standardize(image, mean=[0.48462227599918, 0.45624044862054, 0.40588363755159], std=[0.22889466674951, 0.22446679341259, 0.22495548344775]):\n image = image.astype(np.float32) / 255.0\n image = np.divide(np.subtract(image, mean), std)\n return image", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def normalize(self):\n self.image = rescale_intensity(self.image, out_range=(0, 255))", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def normalize(self, image, transpose=False, data_type=None):\n return normalize(image, self.mean, self.std, transpose)", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return image.transpose(1, 2, 0)", "def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')", "def norm_img(img):\n img_arr = np.array(img).astype(float)\n max_val = np.amax(img_arr)\n if max_val > 0:\n img_arr /= max_val\n return img_arr", "def normalization_brain(img, mask):\n zone1 = img[mask != 0]\n imge = img.copy()\n imge[mask 
!= 0] = (zone1 - zone1.min()) / (zone1.max() - zone1.min())\n imge[mask == 0] = 0\n return imge", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def normalize_image(image, mean=(0.485, 0.456, 0.406), var=(0.229, 0.224, 0.225)):\n with tf.name_scope('NormalizeImage', values=[image]):\n image = tf.to_float(image)\n image /= 255.0\n\n image -= mean\n image /= var\n\n return image", "def imnormalize_tensor(self, img, mean, std, to_rgb=True):\n mean = np.float32(mean.reshape(1, -1))\n stdinv = 1 / np.float32(std.reshape(1, -1))\n if to_rgb:\n img = img[:, :, [2, 1, 0]]\n img = torch.sub(img, torch.tensor(mean).cuda())\n img = torch.mul(img, torch.tensor(stdinv).cuda())\n return img", "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01", "def normalize_image(img):\n min_, max_ = float(np.min(img)), float(np.max(img))\n return (img - min_) / (max_ - min_)", "def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def normalize(img):\n # TODO: implement this function.\n min_img = min([min(i) for i in img])\n max_img = max([max(i) for i in img])\n\n for i in range(len(img)):\n \tfor j in range(len(img[0])):\n \t\timg[i][j] = ((img[i][j] - min_img) / (max_img - min_img))\n #raise NotImplementedError\n return img", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - 
ds_mean) / ds_std\n return array", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n # random normal too slow\n #out_random = np.random.normal(0, 1, size = volume.shape)\n out_random = np.zeros(volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def unnormalize(self, image, transpose=False):\n return unnormalize(image, self.mean, self.std, transpose)", "def standardize(image):\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # initialize to array of zeros, with same shape as the image\n standardized_image = np.zeros(image.shape)\n\n # iterate over channels\n for c in range(image.shape[0]):\n # iterate over the `z` dimension\n for z in range(image.shape[3]):\n # get a slice of the image \n # at channel c and z-th dimension `z`\n image_slice = image[c,:,:,z]\n\n # subtract the mean from image_slice\n centered = image_slice - np.mean(image_slice)\n \n # divide by the standard deviation (only if it is different from zero)\n centered_scaled = centered / np.std(centered)\n\n # update the slice of standardized image\n # with the scaled centered and scaled image\n standardized_image[c, :, :, z] = centered_scaled\n\n ### END CODE HERE ###\n\n return standardized_image", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def _compute_normalization(self, normalize=True):\n self._normalization_constant = 1.0 / self._normalization_correction\n\n if normalize:\n # compute normalization constant so that\n # N*C*sum(data) = 1:\n if self._img_norm is None:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._normalization_constant /= self._img_norm\n self._normalization_status = 0\n\n else:\n self._normalization_constant = 1.0\n self._normalization_status = 1\n warnings.warn(\"Overflow encountered while computing \"\n \"normalization constant. Normalization \"\n \"constant will be set to 1.\", NonNormalizable)\n\n else:\n self._normalization_status = 2", "def un_normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n mean = torch.FloatTensor(mean).view(1,3,1,1)\n std = torch.FloatTensor(std).view(1,3,1,1)\n \n image = tensor.cpu().detach()\n image = image*std+mean\n image = image.numpy()\n \n image = np.transpose(image, (0,2,3,1))\n \n #print(np.max(image))\n #print(np.min(image))\n return image", "def normalize(self, X):\n return X - X.mean()", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. 
!\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalize(tensor, mean, std):\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n # TODO: make efficient\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor", "def normalize(tensor, mean, std):\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n # TODO: make efficient\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor", "def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use", "def normalize(img):\r\n min = img.min()\r\n max = img.max()\r\n x = 2.0 * (img - min) / (max - min) - 1.0\r\n return x", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def itensity_normalize_one_volume(volume):\n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)", "def normalize_std(img, eps=1e-10):\n with tf.name_scope('normalize'):\n std = tf.sqrt(tf.reduce_mean(tf.square(img)))\n return img/tf.maximum(std, eps)", "def normalise(image):", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def normalize(X, Y=None):\r\n # # It would be possible to normalize with last rather than mean, such as:\r\n # lasts = np.expand_dims(X[:, -1, :], axis=1)\r\n # assert (lasts[:, :] == X[:, -1, :]).all(), \"{}, {}, {}. 
{}\".format(lasts[:, :].shape, X[:, -1, :].shape, lasts[:, :], X[:, -1, :])\r\n mean = np.expand_dims(np.average(X, axis=1) + 0.00001, axis=1)\r\n stddev = np.expand_dims(np.std(X, axis=1) + 0.00001, axis=1)\r\n # print (mean.shape, stddev.shape)\r\n # print (X.shape, Y.shape)\r\n X = X - mean\r\n X = X / (2.5 * stddev)\r\n if Y is not None:\r\n #assert Y.shape == X.shape, (Y.shape, X.shape)\r\n Y = Y - mean\r\n Y = Y / (2.5 * stddev)\r\n return X, Y\r\n return X", "def normalization_test(x_test, meanV, stdV): \n eps = np.finfo(float).eps \n x_test_post = (x_test - meanV)/(stdV + eps) \n \n return x_test_post", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(img):\n norm = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)\n cvCopy(img, norm)\n cvNormalize(norm, norm, 1, 0, CV_MINMAX)\n norm_u = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)\n cvConvertScale(norm, norm_u, 255)\n return norm_u", "def normalize(self, mean=None, std=None):\n if mean is None:\n mean = self.mean\n if std is None:\n std = self.std\n\n new = self.copy()\n new.data = (new.data - mean) / std\n return new", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def center_normalize(x):\n return (x - K.mean(x)) / K.std(x)", "def normalize(self) -> \"CharacterizationPixel\":\n return replace(\n self,\n data=self.data/self.norm,\n mean=self.mean/self.norm,\n norm=np.ones_like(self.norm),\n )", "def normalize_mean(dataset):\n normalized_dataset = np.array(dataset)\n return normalized_dataset - np.mean(normalized_dataset)", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def _normalize_image(self, img: np.ndarray) -> np.ndarray:\n i2 = img.astype(float) - self.bg\n i2 /= i2.max()\n return i2", "def unnormalize(tensor, mean, std, inplace: bool = False) :\n if not isinstance(tensor, torch.Tensor):\n raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))\n\n if tensor.ndim < 3:\n raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). 
Got tensor.size() = '\n '{}.'.format(tensor.size()))\n\n if not inplace:\n tensor = tensor.clone()\n\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if (std == 0).any():\n raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))\n if mean.ndim == 1:\n mean = mean.view(-1, 1, 1)\n if std.ndim == 1:\n std = std.view(-1, 1, 1)\n tensor.mul_(std).add_(mean)\n return tensor", "def turn_intensity_normalization_on(self):\n self.intensity_normalize_image = True", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def turn_intensity_normalization_off(self):\n self.intensity_normalize_image = False", "def normalize(self):\n if self.normed:\n return\n self._normalize()", "def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))", "def normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n m.weight.data.normal_(mean, std)\n m.bias.data.zero_()", "def preprocess(self, img):\n return img - np.mean(img)", "def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore):\n T = normalizedData.shape[0]\n D = data_mean.shape[0]\n\n origData = np.zeros((T, D), dtype=np.float32)\n dimensions_to_use = []\n for i in range(D):\n if i in dimensions_to_ignore:\n continue\n dimensions_to_use.append(i)\n dimensions_to_use = np.array(dimensions_to_use)\n\n origData[:, dimensions_to_use] = normalizedData\n\n # potentially ineficient, but only done once per experimentdata_conversions\n stdMat = data_std.reshape((1, D))\n stdMat = np.repeat(stdMat, T, axis=0)\n meanMat = data_mean.reshape((1, D))\n meanMat = np.repeat(meanMat, T, axis=0)\n origData = np.multiply(origData, stdMat) + meanMat\n return origData" ]
[ "0.85018736", "0.77996475", "0.77996475", "0.7690731", "0.76031065", "0.7495071", "0.74933225", "0.7450734", "0.742322", "0.742322", "0.7292144", "0.72221386", "0.71879476", "0.717902", "0.71174324", "0.71018165", "0.709455", "0.709455", "0.7082818", "0.7072676", "0.70667064", "0.70541835", "0.7053812", "0.70454556", "0.70426357", "0.6993511", "0.6957666", "0.69117194", "0.69049627", "0.6892117", "0.6885972", "0.6870124", "0.6860137", "0.68559885", "0.68551356", "0.68473995", "0.6841504", "0.6840989", "0.6825759", "0.68123347", "0.6807944", "0.6805895", "0.67972416", "0.6786311", "0.67685115", "0.6752539", "0.6752514", "0.6751734", "0.6751582", "0.6749783", "0.6748619", "0.6724984", "0.67054814", "0.67039657", "0.6698907", "0.66935873", "0.6691806", "0.6687432", "0.6685552", "0.6684338", "0.6673765", "0.667355", "0.66553867", "0.66419244", "0.66392213", "0.66392213", "0.6623516", "0.66147655", "0.6602378", "0.65976065", "0.65776145", "0.65683556", "0.656803", "0.65620273", "0.6561842", "0.65607536", "0.65525997", "0.65512407", "0.6546082", "0.6542514", "0.6534144", "0.65323144", "0.6520997", "0.6520997", "0.6509194", "0.6503891", "0.6501746", "0.64900696", "0.64883435", "0.64806414", "0.64691", "0.6468318", "0.6464816", "0.64571035", "0.64564383", "0.6456037", "0.6451426", "0.6440006", "0.6430807", "0.64161456" ]
0.7024483
25
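For reference, a minimal NumPy sketch of the same per-channel zero-mean, unit-variance normalization that the img_normalize document above performs with TensorFlow constants (the function name and the example statistics are illustrative assumptions):

import numpy as np

def normalize_image(image, channel_mean, channel_std):
    # image: H x W x C array; channel_mean / channel_std: per-channel statistics.
    mean = np.asarray(channel_mean, dtype=image.dtype).reshape(1, 1, -1)
    std = np.asarray(channel_std, dtype=image.dtype).reshape(1, 1, -1)
    return (image - mean) / std

# usage sketch with ImageNet-style placeholder statistics:
# img = normalize_image(img, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])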
Load a UCI dataset from an npz file. Ported from
def load_uci_regression_dataset(name, split_seed, train_fraction=0.9, data_dir="uci_datasets"):
    path = os.path.join(data_dir, _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])
    data_arr = onp.load(path)
    x, y = data_arr["x"], data_arr["y"]

    indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))
    indices = onp.asarray(indices)
    x, y = x[indices], y[indices]

    n_train = int(train_fraction * len(x))
    x_train, y_train = x[:n_train], y[:n_train]
    x_test, y_test = x[n_train:], y[n_train:]

    def normalize_with_stats(arr, arr_mean=None, arr_std=None):
        return (arr - arr_mean) / arr_std

    def normalize(arr):
        eps = 1e-6
        arr_mean = arr.mean(axis=0, keepdims=True)
        arr_std = arr.std(axis=0, keepdims=True) + eps
        return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std

    x_train, x_mean, x_std = normalize(x_train)
    y_train, y_mean, y_std = normalize(y_train)
    x_test = normalize_with_stats(x_test, x_mean, x_std)
    y_test = normalize_with_stats(y_test, y_mean, y_std)

    data_info = {"y_scale": float(y_std)}

    return (x_train, y_train), (x_test, y_test), data_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data_from_npz(filename):\n with np.load(filename) as f:\n data = np.zeros(f[\"shape\"], np.bool_)\n data[[x for x in f[\"nonzero\"]]] = True\n return data", "def load_npz(fd_or_filename: Union[str, io.IOBase], **kwargs):\n return np.load(fd_or_filename, **kwargs)", "def load_data(filename):\n return InferenceData.from_netcdf(filename)", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def load_rd_uv(in_file):\n uv_data = np.load(in_file)\n return uv_data['ucs'], uv_data['vcs']", "def loadz(file):\n y = np.load(file)\n return coo_matrix((y['data'],(y['row'],y['col'])),shape=y['shape'])", "def load_ismrmrd_ifft3d_reconstruction(filename):\n\n if not os.path.isfile(filename):\n print(\"%s is not a valid file\" % filename)\n raise SystemExit\n dset = ismrmrd.Dataset(filename, 'dataset', create_if_needed=False)\n\n #Read some fields from the XML header\n hdr = ismrmrd.xsd.CreateFromDocument(dset.read_xml_header())\n #get encoding and reconstruction information\n enc = hdr.encoding[0]\n # Matrix size\n eNx = enc.encodedSpace.matrixSize.x\n eNy = enc.encodedSpace.matrixSize.y\n eNz = enc.encodedSpace.matrixSize.z\n rNx = enc.reconSpace.matrixSize.x\n rNy = enc.reconSpace.matrixSize.y\n\n # Number of Slices, Reps, Contrasts, etc.\n #We have to wrap the following in a if/else because a valid xml header may\n #not have an entry for some of the parameters\n ncoils = hdr.acquisitionSystemInformation.receiverChannels\n if enc.encodingLimits.slice != None:\n nslices = enc.encodingLimits.slice.maximum + 1\n else:\n nslices = 1\n\n if enc.encodingLimits.repetition != None:\n nreps = enc.encodingLimits.repetition.maximum + 1\n else:\n nreps = 1\n\n if enc.encodingLimits.contrast != None:\n ncontrasts = enc.encodingLimits.contrast.maximum + 1\n else:\n ncontrasts = 1\n\n\n # Loop through the acquisitions looking for noise scans\n firstacq = 0\n for acqnum in range(dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n\n # TODO: Currently ignoring noise scans\n if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT):\n print(\"Found noise scan at acq \", acqnum)\n continue\n else:\n firstacq = acqnum\n print(\"Imaging acquisition starts acq \", acqnum)\n break\n\n # Initialiaze a storage array\n all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64)\n\n # 
Loop through the rest of the acquisitions and stuff\n for acqnum in range(firstacq, dset.number_of_acquisitions()):\n acq = dset.read_acquisition(acqnum)\n head = acq.getHead()\n\n # TODO: this is where we would apply noise pre-whitening\n\n #padd if acquisition data is not complete (padding)\n if acq.data.shape[1]<eNx :\n x0=int((eNx - acq.data.shape[1]) / 2)\n zeros = np.zeros((acq.data.shape[0], x0))\n padded_acq_data = np.append(np.append(zeros, acq.data, axis=1), zeros, axis=1)\n acq.resize(eNx, acq.active_channels, acq.trajectory_dimensions)\n acq.data[:]=padded_acq_data\n\n # Remove oversampling if needed\n if eNx != rNx:\n #xline = transform.transform_kspace_to_image(acq.data, [1])\n xline = transform.transform_kspace_to_image(acq.data, dim=(1,), img_shape=(eNx,))\n x0 = int((eNx - rNx) / 2)\n x1 = int((eNx - rNx) / 2 + rNx)\n xline = xline[:, x0:x1]\n acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions)\n acq.center_sample = int(rNx / 2)\n # need to use the [:] notation here to fill the data\n acq.data[:] = transform.transform_image_to_kspace(xline, dim=(1,), k_shape=(rNx,))\n\n # Stuff into the buffer\n rep = acq.idx.repetition\n contrast = acq.idx.contrast\n slice = acq.idx.slice\n y = acq.idx.kspace_encode_step_1\n z = acq.idx.kspace_encode_step_2\n all_data[rep, contrast, slice, :, z, y, :] = acq.data\n\n # Reconstruct images\n images = np.zeros((nreps, ncontrasts, nslices, eNz, rNy, rNx), dtype=np.float32)\n img_scaled = []\n for rep in range(nreps):\n for contrast in range(ncontrasts):\n for slice in range(nslices):\n # FFT\n if eNz > 1:\n # 3D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, :, :, :], [1, 2, 3])\n else:\n # 2D\n im = transform.transform_kspace_to_image(all_data[rep, contrast, slice, :, 0, :, :], [2, 3])\n\n if eNy != rNy:\n x0 = int((eNy - rNy) / 2)\n x1 = int((eNy - rNy) / 2 + rNy)\n im = im[:,:,x0:x1, :]\n\n # Sum of squares\n im = np.sqrt(np.sum(np.abs(im) ** 2, 0))\n\n # Stuff into the output\n if eNz > 1:\n # 3D\n images[rep, contrast, slice, :, :, :] = im\n else:\n # 2D\n images[rep, contrast, slice, 0, :, :] = im\n\n img_scaled.append(im)\n\n dset.close()\n\n return [head, hdr, img_scaled]", "def load_dat_file(fi):\n\n if type(fi) == np.lib.npyio.NpzFile:\n dat = fi[\"class_sig_list\"]\n else:\n dat = fi\n\n cls_num = dat.shape[0]\n sig_num = dat.shape[1]\n dims = dat.shape[2]\n t_steps = dat.shape[3]\n\n return dat, cls_num, sig_num, dims, t_steps", "def load_data_from_npy(filename):\n return np.load(filename)", "def load_UCR_dataset(path, dataset):\n train_file = os.path.join(path, dataset, dataset + \"_TRAIN.tsv\")\n test_file = os.path.join(path, dataset, dataset + \"_TEST.tsv\")\n train_df = pandas.read_csv(train_file, sep='\\t', header=None)\n test_df = pandas.read_csv(test_file, sep='\\t', header=None)\n train_array = numpy.array(train_df)\n test_array = numpy.array(test_df)\n\n # Move the labels to {0, ..., L-1}\n labels = numpy.unique(train_array[:, 0])\n transform = {}\n for i, l in enumerate(labels):\n transform[l] = i\n\n train = numpy.expand_dims(train_array[:, 1:], 1).astype(numpy.float64)\n train_labels = numpy.vectorize(transform.get)(train_array[:, 0])\n test = numpy.expand_dims(test_array[:, 1:], 1).astype(numpy.float64)\n test_labels = numpy.vectorize(transform.get)(test_array[:, 0])\n\n # Normalization for non-normalized datasets\n # To keep the amplitude information, we do not normalize values over\n # individual time series, but on the whole dataset\n if dataset not in [\n 
'AllGestureWiimoteX',\n 'AllGestureWiimoteY',\n 'AllGestureWiimoteZ',\n 'BME',\n 'Chinatown',\n 'Crop',\n 'EOGHorizontalSignal',\n 'EOGVerticalSignal',\n 'Fungi',\n 'GestureMidAirD1',\n 'GestureMidAirD2',\n 'GestureMidAirD3',\n 'GesturePebbleZ1',\n 'GesturePebbleZ2',\n 'GunPointAgeSpan',\n 'GunPointMaleVersusFemale',\n 'GunPointOldVersusYoung',\n 'HouseTwenty',\n 'InsectEPGRegularTrain',\n 'InsectEPGSmallTrain',\n 'MelbournePedestrian',\n 'PickupGestureWiimoteZ',\n 'PigAirwayPressure',\n 'PigArtPressure',\n 'PigCVP',\n 'PLAID',\n 'PowerCons',\n 'Rock',\n 'SemgHandGenderCh2',\n 'SemgHandMovementCh2',\n 'SemgHandSubjectCh2',\n 'ShakeGestureWiimoteZ',\n 'SmoothSubspace',\n 'UMD'\n ]:\n return train, train_labels, test, test_labels\n mean = numpy.nanmean(numpy.concatenate([train, test]))\n var = numpy.nanvar(numpy.concatenate([train, test]))\n train = (train - mean) / math.sqrt(var)\n test = (test - mean) / math.sqrt(var)\n return train, train_labels, test, test_labels", "def load_npy_nii(filename):\n import numpy as np\n import nibabel\n\n if filename_type(filename) == 'nii':\n return nibabel.load(filename)\n\n elif filename_type(filename) == 'npy':\n return np.load(filename)\n\n return None", "def load(filename):\n return np.load(filename)", "def load_nifti(file_path, dtype=np.float32, incl_header=False, z_factor=None, mask=None):\n \n img = nib.load(file_path)\n struct_arr = img.get_data().astype(dtype)\n \n # replace infinite values with 0\n if np.inf in struct_arr:\n struct_arr[struct_arr == np.inf] = 0.\n \n # replace NaN values with 0 \n if np.isnan(struct_arr).any() == True:\n struct_arr[np.isnan(struct_arr)] = 0.\n \n if mask is not None:\n struct_arr *= mask\n \n if z_factor is not None:\n struct_arr = zoom(struct_arr, z_factor)\n \n if incl_header:\n return struct_arr, img\n else:\n return struct_arr", "def _read_netCDF(filename):\n if any(fn in os.path.basename(filename) for fn in L1B_MATCHES):\n with h5py.File(filename, \"r\") as afile:\n data = afile[\"RAD\"][:]\n\n blank = afile[\"RAD\"].attrs[\"_FillValue\"][0]\n bzero = afile[\"RAD\"].attrs[\"add_offset\"][0]\n bscale = afile[\"RAD\"].attrs[\"scale_factor\"][0]\n bunit = afile[\"RAD\"].attrs[\"units\"].tobytes().decode(\"utf-8\").rstrip(\"\\x00\")\n\n data = data * bscale + bzero\n dqf = afile[\"DQF\"][:]\n\n header_info = dict((key, afile[key][...]) for key in afile.keys())\n header = _make_cdf_header(header_info)\n # Deal with this here as we require the file.\n for att, val in afile.attrs.items():\n if att in TAG_MAPPING:\n header[TAG_MAPPING[att]] = (\n val.tobytes().decode(\"utf-8\").rstrip(\"\\x00\")\n )\n header[\"NAXIS1\"] = data.shape[0]\n header[\"NAXIS2\"] = data.shape[1]\n header[\"BLANK\"] = blank\n header[\"BSCALE\"] = bscale\n header[\"BZERO\"] = bzero\n header[\"BUNIT\"] = bunit\n else:\n raise ValueError(f\"File {filename} does not look like a SUVI L1b netCDF file.\")\n return header, data, dqf", "def load_encoded(filename):\n return np.fromfile(filename, dtype='uint8')", "def load_file(input_file_path_str, var_name=None):\r\n\r\n if not exists(dirname(input_file_path_str)):\r\n print('*** ERROR, input file path does not exist: ' + dirname(input_file_path_str))\r\n exit()\r\n \r\n ## Adjust for disparr files (which should be understood as .npz files):\r\n if (\"disparr\" in input_file_path_str or \"UV_vec\" in input_file_path_str) and input_file_path_str[-4:]==\".npy\":\r\n input_file_path_str = input_file_path_str[:-4]+\".npz\"\r\n \r\n ## Analyse file ending:\r\n if input_file_path_str[-3:]==\".nc\":\r\n #if 
var_name==[\"Dx\",\"Dy\",\"Vx\",\"Vy\"] or var_name==[\"UV_vec\",\"UV_vec_sp\"]:\r\n # data_arr = read_nc(input_file_path_str,var_name)\r\n # return data_arr\r\n #if var_name is None:\r\n # raise ValueError(\"variable name necessary to read NetCDF file.\")\r\n if var_name is None:\r\n print(\" *** Warning: Returning opened NetCDF file without closing ***\")\r\n nc_file = read_nc(input_file_path_str,var_name)\r\n return nc_file\r\n elif type(var_name) is not list:\r\n data_arr = read_nc(input_file_path_str,var_name)\r\n return data_arr\r\n else:\r\n data_arr_ls = []\r\n for var in var_name:\r\n data_arr_ls.append(read_nc(input_file_path_str,var))\r\n return data_arr_ls\r\n elif input_file_path_str[-4:]==\".npy\":\r\n if type(var_name) is not list:\r\n data_arr = np.load(input_file_path_str)\r\n return(data_arr)\r\n else:\r\n raise ValueError(\"only one variable saved in .npy file.\")\r\n elif input_file_path_str[-4:]==\".npz\":\r\n if var_name is None:\r\n #raise ValueError(\"several variable names needed to extract arrays from .npz file.\")\r\n data_arr = np.load(input_file_path_str)\r\n return data_arr\r\n elif type(var_name) is list:\r\n data_arr = np.load(input_file_path_str)\r\n data_arr_ls = []\r\n for var in var_name:\r\n data_arr_ls.append(data_arr[var])\r\n return data_arr_ls\r\n else:\r\n data_arr = np.load(input_file_path_str)[var_name]\r\n return data_arr", "def readMAXIPOLdataLuis(filename):\n\n ia=[]; ja=[]\n i=[]; j=[]; beam=[]; sig=[]; cts=[]\n for line in open(filename, \"r\"):\n line = line.strip().split()\n i1, j1, b1, s1, c1 = (int(line[0]), int(line[1]), \n float(line[2]), float(line[3]), int(line[4]))\n ia.append(i1); ja.append(j1)\n if b1 != 0 and s1 !=0:\n ## only keep pixels with data\n i.append(i1); j.append(j1); beam.append(b1)\n sig.append(s1); cts.append(c1)\n\n beam = asarray(beam, float64)\n sig = asarray(sig, float64)\n ## map i and j (before deletion) onto (-1,1)\n x = array([2*(ii-min(ia))/(max(ia)-min(ia))-1 for ii in i], float64)\n y = array([2*(jj-min(ja))/(max(ja)-min(ja))-1 for jj in j], float64)\n\n return BeamData(x, y, beam, sig, cts=cts)", "def read_ultrasound_file(ult_file):\n\n return np.fromfile(open(ult_file, \"rb\"), dtype=np.uint8)", "def load_nii_in_ras(fname):\n nii = nib.load(fname)\n nii = nib.as_closest_canonical(nii)\n vol = nii.get_fdata()\n\n return vol, nii.affine", "def __init__(self, file_name, load_uncertainty=False):\n if file_name[-3:] == 'npz':\n self._load_npz(file_name)\n else:\n self._load_3ddose(file_name, load_uncertainty)", "def load_npz_model(filename) -> tuple:\n # load_row_cols_json\n js = load_json2py(filename)\n # print(js)\n # print(type(js['rows']))\n rows = js['rows']\n # print(type(js['cols']))\n documents = js['cols']\n\n loaded_matrix = pd.DataFrame(sparse.load_npz(resources_folder(filename) + \".npz\").toarray(),\n index=rows,\n columns=documents)\n\n return loaded_matrix, rows, documents, js['args']", "def load_npz(path='', name='model.npz'):\n d = np.load( path+name )\n params = []\n print('Load Model')\n for key, val in sorted( d.items() ):\n params.append(val)\n print('Loading %s, %s' % (key, str(val.shape)))\n return params", "def read_suvi(filename):\n if filename.lower().endswith(FITS_FILE_EXTENSIONS):\n header, data, dqf = _read_fits(filename)\n elif filename.lower().endswith(NETCDF_FILE_EXTENSIONS):\n header, data, dqf = _read_netCDF(filename)\n else:\n raise ValueError(\n f\"File {filename} does not look like a valid FITS or netCDF file.\"\n )\n return header, data, dqf", "def 
load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def load_nifty_volume_as_array(filename, with_header = False):\n img = nibabel.load(filename)\n data = img.get_data()\n data = np.transpose(data, [2,1,0])\n if(with_header):\n return data, img.affine, img.header\n else:\n return data", "def LoadICPData(filename, path=\"\", friendly_name=\"\", auto_PolState=False, PolState='', flip=True, transpose=True, **kw):\n lookup = {\"a\":\"_down_down\", \"b\":\"_up_down\", \"c\":\"_down_up\", \"d\":\"_up_up\", \"g\": \"\"}\n file_obj = load(os.path.join(path, filename), format='NCNR NG-1')\n dims = file_obj.detector.counts.shape\n ndims = len(dims)\n #if not (len(file_obj.detector.counts.shape) == 2):\n # not a 2D object!\n # return\n if auto_PolState:\n key = friendly_name[-2:-1] # na1, ca1 etc. are --, nc1, cc1 are -+...\n PolState = lookup.get(key, \"\")\n # force PolState to a regularized version:\n if not PolState in lookup.values():\n PolState = ''\n #datalen = file_obj.detector.counts.shape[0]\n if ndims == 2:\n if DEBUG: print(\"2d\")\n ypixels = file_obj.detector.counts.shape[0]\n xpixels = file_obj.detector.counts.shape[1]\n elif ndims >= 3:\n if DEBUG: print(\"3d\")\n frames = file_obj.detector.counts.shape[0]\n ypixels = file_obj.detector.counts.shape[1]\n xpixels = file_obj.detector.counts.shape[2]\n\n creation_story = \"LoadICPData('{fn}', path='{p}', auto_PolState={aPS}, PolState='{PS}')\".format(fn=filename, p=path, aPS=auto_PolState, PS=PolState)\n\n # doesn't really matter; changing so that each keyword (whether it took the default value\n # provided or not) will be defined\n # if not PolState == '':\n # creation_story += \", PolState='{0}'\".format(PolState)\n # creation_story += \")\"\n\n\n if ndims == 2: # one of the dimensions has been collapsed.\n info = []\n info.append({\"name\": \"xpixel\", \"units\": \"pixels\", \"values\": arange(xpixels) }) # reverse order\n info.append({\"name\": \"theta\", \"units\": \"degrees\", \"values\": file_obj.sample.angle_x })\n info.extend([\n {\"name\": \"Measurements\", \"cols\": [\n {\"name\": \"counts\"},\n {\"name\": \"pixels\"},\n {\"name\": \"monitor\"},\n {\"name\": \"count_time\"}]},\n {\"PolState\": PolState, \"filename\": filename, \"start_datetime\": file_obj.date, \"friendly_name\": friendly_name,\n \"CreationStory\":creation_story, \"path\":path, \"det_angle\":file_obj.detector.angle_x}]\n )\n data_array = zeros((xpixels, ypixels, 4))\n mon = file_obj.monitor.counts\n count_time = file_obj.monitor.count_time\n if ndims == 2:\n mon.shape = (1,) + mon.shape # broadcast the monitor over the other dimension\n count_time.shape = (1,) + count_time.shape\n counts = file_obj.detector.counts\n if transpose == True: counts = counts.swapaxes(0,1)\n if flip == True: counts = flipud(counts)\n data_array[..., 0] = counts\n #data_array[..., 0] = file_obj.detector.counts\n data_array[..., 1] = 1\n data_array[..., 2] = mon\n data_array[..., 3] = count_time\n # data_array[:,:,4]... I wish!!! Have to do by hand.\n data = MetaArray(data_array, dtype='float', info=info)\n data.friendly_name = friendly_name # goes away on dumps/loads... just for initial object.\n\n elif ndims == 3: # then it's an unsummed collection of detector shots. 
Should be one sample and detector angle per frame\n infos = []\n data = []\n for i in range(frames):\n samp_angle = file_obj.sample.angle_x[i]\n det_angle = file_obj.detector.angle_x[i]\n info = []\n info.append({\"name\": \"xpixel\", \"units\": \"pixels\", \"values\": range(xpixels) })\n info.append({\"name\": \"ypixel\", \"units\": \"pixels\", \"values\": range(ypixels) })\n info.extend([\n {\"name\": \"Measurements\", \"cols\": [\n {\"name\": \"counts\"},\n {\"name\": \"pixels\"},\n {\"name\": \"monitor\"},\n {\"name\": \"count_time\"}]},\n {\"PolState\": PolState, \"filename\": filename, \"start_datetime\": file_obj.date, \"friendly_name\": friendly_name,\n \"CreationStory\":creation_story, \"path\":path, \"samp_angle\": samp_angle, \"det_angle\": det_angle}]\n )\n data_array = zeros((xpixels, ypixels, 4))\n mon = file_obj.monitor.counts[i]\n count_time = file_obj.monitor.count_time[i]\n counts = file_obj.detector.counts[i]\n if flip == True: counts = flipud(counts)\n data_array[..., 0] = counts\n data_array[..., 1] = 1\n data_array[..., 2] = mon\n data_array[..., 3] = count_time\n # data_array[:,:,4]... I wish!!! Have to do by hand.\n subdata = MetaArray(data_array, dtype='float', info=info)\n subdata.friendly_name = friendly_name + (\"_%d\" % i) # goes away on dumps/loads... just for initial object.\n data.append(subdata)\n return data", "def load_data() -> np.ndarray:\n \n # Create a data directory if it doesn't exist.\n data_dir_path = find_or_create_dir(\"data\")\n \n # Download the data file if it doesn't exist.\n data_file_path = os.path.join(data_dir_path, \"Testdata.mat\")\n if not os.path.exists(data_file_path):\n print(\"Downloading data file...\")\n data_url = \"https://bea-portfolio.s3-us-west-2.amazonaws.com/denoising-3D-scans/Testdata.mat\"\n with urlopen(data_url) as response:\n with open(data_file_path, \"wb\") as data_file:\n shutil.copyfileobj(response, data_file)\n print(\"Done downloading data file.\")\n\n # Load data into memory.\n data_file = loadmat(data_file_path, struct_as_record=False)\n data = data_file['Undata']\n # data.shape is 20 x 262144\n\n return data", "def load_nii(img_path):\n nimg = nib.load(img_path)\n return np.asanyarray(nimg.dataobj), nimg.affine, nimg.header", "def load_z(self):\n self.z = self.read_var(self.zvar)\n self.test_shape(self.zvar, self.z.shape, 2)", "def load(npz):\n e = np.load(npz, allow_pickle=True)\n return EOMap(\n e['dataStore'],\n e['etas'],\n e['etaEdges'],\n e['omegas'],\n e['omeEdges'],\n e['iHKLList'],\n plane_data(e)\n )", "def load_data(npz_dir):\n files = glob.glob('%s/*.npz' % npz_dir)\n data_list = []\n for f in files:\n data_list += load_npz_to_data_list(f)\n return data_list", "def read_netcdf(self,filename):", "def load_nifti_image(filename):\n img = nib.load(filename)\n data = img.get_data()\n return data", "def load_tmp_atlas(filename):\n fbase, ext = osp.splitext(filename)\n fimg = None\n if osp.isfile(fbase+\".nii\"): fimg = fbase+\".nii\"\n if osp.isfile(fbase+\".nii.gz\"): fimg = fbase+\".nii.gz\" \n\n try:\n img = nib.load(fimg)\n except ValueError as e:\n print(\"error {0}, cannot find file {1} .nii or .nii.gz \".format(fbase, e.errno))\n\n fjson = None\n if osp.isfile(fbase+\".txt\"): fjson= fbase+\".txt\"\n if osp.isfile(fbase+\".json\"): fjson= fbase+\".json\"\n\n if fjson == None:\n warn(\"cannot find file %s .txt or .json\" % fbase)\n return None\n\n with open(fjson) as f:\n j_labels = json.load(f)\n\n a_labels = [label[1] for label in j_labels]\n \n return (img.get_data(), img.get_affine(), 
a_labels)", "def read_file(netcdf_file_name):\n\n if netcdf_file_name.endswith(GZIP_FILE_EXTENSION):\n with gzip.open(netcdf_file_name) as gzip_handle:\n with netCDF4.Dataset(\n 'dummy', mode='r', memory=gzip_handle.read()\n ) as dataset_object:\n prediction_dict = {\n TARGET_MATRIX_KEY:\n dataset_object.variables[TARGET_MATRIX_KEY][:],\n PROBABILITY_MATRIX_KEY:\n dataset_object.variables[PROBABILITY_MATRIX_KEY][:],\n VALID_TIMES_KEY:\n dataset_object.variables[VALID_TIMES_KEY][:],\n LATITUDES_KEY: dataset_object.variables[LATITUDES_KEY][:],\n LONGITUDES_KEY: dataset_object.variables[LONGITUDES_KEY][:],\n MODEL_FILE_KEY:\n str(getattr(dataset_object, MODEL_FILE_KEY)),\n QUANTILE_LEVELS_KEY: None\n }\n\n if len(prediction_dict[PROBABILITY_MATRIX_KEY].shape) == 3:\n prediction_dict[PROBABILITY_MATRIX_KEY] = numpy.expand_dims(\n prediction_dict[PROBABILITY_MATRIX_KEY], axis=-1\n )\n\n if QUANTILE_LEVELS_KEY in dataset_object.variables:\n prediction_dict[QUANTILE_LEVELS_KEY] = (\n dataset_object.variables[QUANTILE_LEVELS_KEY][:]\n )\n\n return prediction_dict\n\n dataset_object = netCDF4.Dataset(netcdf_file_name)\n\n prediction_dict = {\n TARGET_MATRIX_KEY: dataset_object.variables[TARGET_MATRIX_KEY][:],\n PROBABILITY_MATRIX_KEY:\n dataset_object.variables[PROBABILITY_MATRIX_KEY][:],\n VALID_TIMES_KEY: dataset_object.variables[VALID_TIMES_KEY][:],\n LATITUDES_KEY: dataset_object.variables[LATITUDES_KEY][:],\n LONGITUDES_KEY: dataset_object.variables[LONGITUDES_KEY][:],\n MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),\n QUANTILE_LEVELS_KEY: None\n }\n\n if QUANTILE_LEVELS_KEY in dataset_object.variables:\n prediction_dict[QUANTILE_LEVELS_KEY] = (\n dataset_object.variables[QUANTILE_LEVELS_KEY][:]\n )\n\n dataset_object.close()\n\n if len(prediction_dict[PROBABILITY_MATRIX_KEY].shape) == 3:\n prediction_dict[PROBABILITY_MATRIX_KEY] = numpy.expand_dims(\n prediction_dict[PROBABILITY_MATRIX_KEY], axis=-1\n )\n\n return prediction_dict", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 
to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def loadFromNpy(filename):\n return np.load(filename, allow_pickle = True)[()]", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_spe(filename):\n def read_at(data, pos, size, ntype):\n raw.seek(pos)\n return np.fromfile(raw, ntype, size)\n raw = open(filename, 'rb')\n xdim = np.int64(read_at(raw, 42, 1, np.int16)[0])\n ydim = np.int64(read_at(raw, 656, 1, np.int16)[0])\n arr = read_at(raw, 4100, xdim*ydim, np.uint16)\n arr = arr.reshape((ydim, xdim))\n print('data shape: {}'.format(np.shape(arr)))\n if np.shape(arr)[0] == 1:\n arr = arr[0]\n print('data shape: {}'.format(np.shape(arr)))\n return arr", "def load(self, filename):\n data = np.load(temp_dir + '/' + filename + '.npz')\n return data['chip_ids'], data['core_ids'], data['cx_ids']", "def read_npz(chro,HiCData):\r\n data = {}\r\n inter = HiCData[chro]\r\n max_bin = max(inter['bin1'].max(), inter['bin2'].max())\r\n for i in range(max_bin+1):\r\n data[i] = []\r\n for i in inter:\r\n data[i['bin1']].append((i['bin1'],i['bin2'],i['IF']))\r\n\r\n dtype = np.dtype({'names':['bin1','bin2','IF'],\r\n 'formats':[np.int, np.int, np.float]})\r\n for k,v in data.items():\r\n v = np.array(v,dtype = dtype)\r\n data[k] = v\r\n\r\n return data", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def load_info():\n data = np.loadtxt(\"u_sol_meta.txt\", dtype=int)\n return data", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def make_npz_file(data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\toutput_file = os.path.join(dataset_params.data_path, \"synthetic_\" + data_type + \"_data\")\n\tline_reader = csv.DictReader(open(label_file,\"r\"))\n\n\tdata = []\n\tlabels = []\n\tdata_points = 0\n\tfor row in line_reader:\n\t\timage_name = os.path.join(dataset_params.data_path,data_folder,row[\"figNum\"] + \".png\")\n\t\timage_data = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\t\timage_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n\t\timage_label = [int(dataset_params.shapes[row[\"shape\"]]), int(dataset_params.colors[row[\"color\"]]), int(dataset_params.sizes[row[\"size\"]]), int(row[\"quadrant\"]), int(dataset_params.backgrounds[row[\"background\"]]) ]\n\t\tdata.append(image_data)\n\t\tlabels.append(image_label)\n\t\tdata_points += 1\n\n\t# Converting list to data to np array\n\tdata = np.asarray(data)\n\tlabels = 
np.asarray(labels)\n\n\t# Printing log information\n\tprint(data_type, \"statistics being saved: \")\n\tprint(data_type, \"data shape\", data.shape)\n\tprint(data_type, \"label shape\", labels.shape)\n\n\t# saveing the file as npz file\n\tnp.savez_compressed(output_file, data=data, lables=labels)", "def nircam_image_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='NIRCam Image')\n data.header = hdulist[0].header\n wcs = WCS(hdulist[0].header)\n\n # drop the last axis since the cube will be split\n data.coords = coordinates_from_wcs(wcs.sub(2))\n data.add_component(hdulist[0].data[0], 'Flux')\n data.add_component(hdulist[0].data[1], 'Uncertainty')\n\n return data", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def load_data(filename):\n assert os.path.exists(filename)==True\n dat = scipy.io.loadmat(filename)\n inputs = dat['inputs']\n #print len(inputs)\n targets = dat['targets']\n #print len(targets)\n assert len(inputs)==len(targets)\n\n global alldata\n global indim \n global outdim\n\n indim = len(inputs[0])\n outdim = 1\n #print indim\n alldata = ClassificationDataSet(indim, outdim, nb_classes = 8)\n alldata.setField('input',inputs)\n alldata.setField('target',targets)\n\n assert len(alldata['input'])==len(alldata['target'])\n print type(alldata)", "def loadNPY_R(self, f = \"UI.npy\"):\n self.UI = np.load(f)\n \n return", "def load_hdr(filename):\n\n img = nib.load(filename)\n np_arr = img.get_data()\n \n return np_arr", "def load_hdr(filename):\n\n img = nib.load(filename)\n np_arr = img.get_data()\n \n return np_arr", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def test_load_numpy_file(save_npz) -> None:\n filename, data = save_npz\n result = loader.load_numpy_file(filename)\n\n for k, v in data.items():\n assert np.array_equal(v, result[k])", "def load_npz(file_path: str):\n if not os.path.exists(file_path):\n raise FileNotFoundError(\"File {} does not exist\".format(file_path))\n\n not_npz = False\n if not os.path.isfile(file_path):\n not_npz = True\n else:\n try:\n npz = np.load(file_path, allow_pickle=True)\n except OSError:\n not_npz = True\n\n if not_npz:\n raise ValueError(\"Failed to load file - file {} was saved incorrectly or is not an .npz file\".format(file_path))\n\n return {i: npz[i] for i in npz.files}", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = data[k]\n return dataset", "def load_from_netcdf(filename):\n filename = os.path.join(datadir, filename + '.nc')\n return xr.open_dataarray(filename)", "def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data", "def 
load_nii(img_path):\n nimg = nib.load(img_path)\n return nimg.get_data(), nimg.affine, nimg.header", "def read_scil_b0():\n dipy_home = os.path.join(os.path.expanduser('~'), '.dipy')\n file = pjoin(dipy_home,\n 'datasets_multi-site_all_companies',\n '3T',\n 'GE',\n 'b0.nii.gz')\n\n return nib.load(file)", "def read_CU_model(self, infname='CU_SDT1.0.mod.h5'):\n indset = h5py.File(infname)\n lons = np.mgrid[0.:359.:2.]\n lats = np.mgrid[-88.:89.:2.]\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo < 0.:\n stlo += 360.\n try:\n ind_lon = np.where(lons>=stlo)[0][0]\n except:\n ind_lon = lons.size - 1\n try:\n ind_lat = np.where(lats>=stla)[0][0]\n except:\n ind_lat = lats.size - 1\n pind = 0\n while(True):\n if pind == 0:\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat]\n break\n pind += 1\n continue\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat]\n break\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat+pind]\n break\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat+pind]\n break\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat+pind]\n break\n pind += 1\n if pind >= 5:\n print 'WARNING: Large differences in the finalized points: lon = '+str(outlon)+', lat = '+str(outlat)\\\n + ', station: '+staid+' stlo = '+str(stlo) + ', stla = '+str(stla)\n # print outlon, outlat, stlo, stla, pind\n header = {'data_source': 'CU_SDT',\\\n 'depth': 0, 'vs': 1, 'vsv': 2, 'vsh': 3, 'vsmin': 4, 'vsvmin': 5, 'vshmin': 6, \\\n 'vsmax': 7, 'vsvmax': 8, 'vshmax': 9}\n self.add_auxiliary_data(data=data, data_type='ReferenceModel', path=staid_aux, parameters=header)\n return", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def ubcModel3D(FileName):\n # Check if recurssion needed\n if type(FileName) is list:\n out = {}\n for f in FileName:\n out[os.path.basename(f)] = ubcMeshReaderBase.ubcModel3D(f)\n return out\n # Perform IO\n try:\n data = np.genfromtxt(FileName, dtype=np.float, comments='!')\n except (IOError, OSError) as fe:\n raise 
_helpers.PVGeoError(str(fe))\n return data", "def load_nifti(fname, reorient=True):\n img = nib.load(fname)\n if reorient:\n img = nib.as_closest_canonical(img)\n return(img.get_data())", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_unicef_data():\n fname = 'SOWC_combined_simple.csv'\n \n \n # Uses pandas to help with string-NaN-numeric data.\n data = pd.read_csv(fname, na_values='_', encoding='latin1')\n \n \n # Strip countries title from feature names.\n features = data.axes[1][1:]\n # Separate country names from feature values.\n countries = data.values[:,0]\n values = data.values[:,1:]\n # Convert to numpy matrix for real.\n values = np.asmatrix(values,dtype='float64')\n # Modify NaN values (missing values).\n mean_vals = nanmean(values, axis=0)\n inds = np.where(np.isnan(values))\n values[inds] = np.take(mean_vals, inds[1])\n return (countries, features, values)", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load_velodyne_points(filename):\n points = np.fromfile(filename, dtype=np.float32).reshape(-1, 4)\n points[:, 3] = 1.0 # homogeneous\n return points", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def load(filename):\n return sio.loadmat(filename, appendmat=False, squeeze_me=True)['data']", "def uncompress_nparr(byte_string):\n return np.load(io.BytesIO(zlib.decompress(byte_string)))", "def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise", "def readascii(file_name):\n data = np.loadtxt(file_name)\n z = data[0,1:]\n nuInu = data[1:,1:]\n lmu = data[1:,0]\n return EBL(z, lmu, nuInu)", "def load_nist(ion):\n import glob\n # Find file\n srch_file = os.path.join(data.Paths.nist, f'{ion}_vacuum.ascii')\n nist_file = glob.glob(srch_file)\n if len(nist_file) == 0:\n raise IOError(f\"Cannot find NIST file {srch_file}\")\n # Read\n nist_tbl = Table.read(nist_file[0], format='ascii.fixed_width')\n gdrow = nist_tbl['Observed'] > 0. 
# Eliminate dummy lines\n nist_tbl = nist_tbl[gdrow]\n # Now unique values only (no duplicates)\n uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)\n nist_tbl = nist_tbl[indices]\n # Deal with Rel\n agdrel = []\n for row in nist_tbl:\n try:\n gdrel = int(row['Rel.'])\n except:\n try:\n gdrel = int(row['Rel.'][:-1])\n except:\n gdrel = 0\n agdrel.append(gdrel)\n agdrel = np.array(agdrel)\n # Remove and add\n nist_tbl.remove_column('Rel.')\n nist_tbl.remove_column('Ritz')\n nist_tbl['RelInt'] = agdrel\n #nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='S5'))\n nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='U5'))\n nist_tbl.rename_column('Observed','wave')\n # Return\n return nist_tbl", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return _data", "def load_data(path='alex_mnist_data.npz'):\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f['alex_train_data'], f['alex_train_label']\n x_test, y_test = f['alex_test_data'], f['alex_test_label']\n return (x_train, y_train),(x_test, y_test)", "def loadData( path, strDataset, strName, nSamples ):\r\n # Size of the image:\r\n xSize = 176\r\n ySize = 208\r\n zSize = 176\r\n\r\n # Limits of the regions of interest of the data:\r\n xLimMin = 14\r\n xLimMax = 18\r\n yLimMin = 12\r\n yLimMax = 15\r\n zLimMin = 3\r\n zLimMax = 20\r\n\r\n # Creation of the dictionary which will contain our dataset:\r\n datasetDic = {}\r\n\r\n for i in range(nSamples):\r\n # Complete path of the i-th file of the dataset:\r\n imageName = strName + str(i + 1)\r\n imagePath = path + \"/\" + strDataset + \"/\" + imageName + \".nii\"\r\n \r\n # Loading of the 3D images using a function from the nibabel library\r\n imageRaw = nib.load(imagePath)\r\n \r\n # Tranforming the images into data (3d np.array):\r\n datasetDic[i] = imageRaw.get_data()[xLimMin:xSize-xLimMax, \\\r\n yLimMin:ySize-yLimMax, zLimMin:zSize-zLimMax, 0]\r\n \r\n return datasetDic", "def loadPulseData(filename, suffix = ''):\n data = np.genfromtxt(filename+'.txt', skip_header=3, names=True,\n dtype='i8,f8,S5,f8,f8,f8,f8,f8,f8')\n print \"Importing...\\n\"\n for key in data.dtype.fields.keys():\n name = key + suffix\n print name\n globals()[name] = data[key]", "def load_nib_data(filename, sample_size=None):\n\timg = nib.load(filename)\n\tdata = img.get_data()\n\tif sample_size is not None:\n\t\tnp.random.shuffle(data)\n\t\treturn data[0:sample_size, :, :, :]\n\treturn 
data", "def load_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def read(filename: str) -> orm.Data:\n return from_bands_inspect(load(hdf5_file=filename))", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def load(f, model, ext_unit_dict=None):\n\n if model.verbose:\n sys.stdout.write('loading swt package file...\\n')\n\n if not hasattr(f, 'read'):\n filename = f\n f = open(filename, 'r')\n # dataset 0 -- header\n while True:\n line = f.readline()\n if line[0] != '#':\n break\n # determine problem dimensions\n nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()\n\n # read dataset 1\n if model.verbose:\n sys.stdout.write(' loading swt dataset 1\\n')\n t = line.strip().split()\n ipakcb, iswtoc, nsystm, ithk, ivoid, istpcs, icrcc = int(t[0]), \\\n int(t[1]), \\\n int(t[2]), \\\n int(t[3]), \\\n int(t[4]), \\\n int(t[5]), \\\n int(t[6])\n\n # if ipakcb > 0:\n # ipakcb = 53\n\n # read dataset 2\n lnwt = None\n if nsystm > 0:\n if model.verbose:\n sys.stdout.write(' loading swt dataset 2\\n')\n lnwt = np.empty((nsystm), dtype=np.int32)\n lnwt = read1d(f, lnwt) - 1\n\n # read dataset 3\n if model.verbose:\n sys.stdout.write(' loading swt dataset 3\\n')\n line = f.readline()\n t = line.strip().split()\n iizcfl, izcfm, iglfl, iglfm, iestfl, \\\n iestfm, 
ipcsfl, ipcsfm, istfl, istfm = int(t[0]), int(t[1]), \\\n int(t[2]), int(t[3]), \\\n int(t[4]), int(t[5]), \\\n int(t[6]), int(t[7]), \\\n int(t[8]), int(t[9])\n\n # read dataset 4\n if model.verbose:\n sys.stdout.write(' loading swt dataset 4')\n gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, 'gl0',\n ext_unit_dict)\n\n # read dataset 5\n if model.verbose:\n sys.stdout.write(' loading swt dataset 5')\n sgm = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgm',\n ext_unit_dict)\n\n # read dataset 6\n if model.verbose:\n sys.stdout.write(' loading swt dataset 6')\n sgs = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgs',\n ext_unit_dict)\n\n # read datasets 7 to 13\n thick = [0] * nsystm\n void = [0] * nsystm\n sub = [0] * nsystm\n if icrcc == 0:\n sse = None\n ssv = None\n cr = [0] * nsystm\n cc = [0] * nsystm\n else:\n sse = [0] * nsystm\n ssv = [0] * nsystm\n cr = None\n cc = None\n\n for k in range(nsystm):\n kk = lnwt[k] + 1\n # thick\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 7 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'thick layer {}'.format(kk),\n ext_unit_dict)\n thick[k] = t\n if icrcc != 0:\n # sse\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 8 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sse layer {}'.format(kk), ext_unit_dict)\n sse[k] = t\n # ssv\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 9 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sse layer {}'.format(kk), ext_unit_dict)\n ssv[k] = t\n else:\n # cr\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 10 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'cr layer {}'.format(kk), ext_unit_dict)\n cr[k] = t\n # cc\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 11 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'cc layer {}'.format(kk), ext_unit_dict)\n cc[k] = t\n # void\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 12 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'void layer {}'.format(kk), ext_unit_dict)\n void[k] = t\n # sub\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 13 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'sub layer {}'.format(kk), ext_unit_dict)\n sub[k] = t\n\n # dataset 14 and 15\n if istpcs != 0:\n pcsoff = [0] * nlay\n pcs = None\n else:\n pcsoff = None\n pcs = [0] * nlay\n for k in range(nlay):\n if istpcs != 0:\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 14 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'pcsoff layer {}'.format(k + 1), ext_unit_dict)\n pcsoff[k] = t\n else:\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\n t = Util2d.load(f, model, (nrow, ncol), np.float32,\n 'pcs layer {}'.format(k + 1), ext_unit_dict)\n pcs[k] = t\n\n ids16 = None\n ids17 = None\n if iswtoc > 0:\n # dataset 16\n if model.verbose:\n sys.stdout.write(\n ' loading swt dataset 15 for layer {}\\n'.format(kk))\n ids16 = np.empty(26, dtype=np.int32)\n ids16 = read1d(f, ids16)\n #for k in range(1, 26, 2):\n # model.add_pop_key_list(ids16[k])\n # ids16[k] = 2054 # all sub-wt data sent to unit 2054\n # dataset 17\n ids17 = [0] * iswtoc\n for k in range(iswtoc):\n if model.verbose:\n msg = 2 * ' ' + 'loading swt dataset 
17 for ' + \\\n 'iswtoc {}\\n'.format(k + 1)\n sys.stdout.write(msg)\n t = np.empty(30, dtype=np.int32)\n t = read1d(f, t)\n t[0:4] -= 1\n ids17[k] = t\n\n # close file\n f.close()\n\n # determine specified unit number\n unitnumber = None\n filenames = [None for x in range(15)]\n if ext_unit_dict is not None:\n unitnumber, filenames[0] = \\\n model.get_ext_dict_attr(ext_unit_dict,\n filetype=ModflowSwt.ftype())\n if ipakcb > 0:\n iu, filenames[1] = \\\n model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)\n\n if iswtoc > 0:\n ipos = 2\n for k in range(1, 26, 2):\n unit = ids16[k]\n if unit > 0:\n iu, filenames[ipos] = \\\n model.get_ext_dict_attr(ext_unit_dict,\n unit=unit)\n model.add_pop_key_list(unit)\n ipos += 1\n\n # create sub-wt instance\n swt = ModflowSwt(model, ipakcb=ipakcb, iswtoc=iswtoc, nsystm=nsystm,\n ithk=ithk, ivoid=ivoid, istpcs=istpcs,\n icrcc=icrcc, lnwt=lnwt, izcfl=iizcfl, izcfm=izcfm,\n iglfl=iglfl, iglfm=iglfm, iestfl=iestfl,\n iestfm=iestfm, ipcsfl=ipcsfl, ipcsfm=ipcsfm,\n istfl=istfl, istfm=istfm, gl0=gl0, sgm=sgm,\n sgs=sgs, thick=thick, sse=sse, ssv=ssv, cr=cr, cc=cc,\n void=void, sub=sub, pcsoff=pcsoff,\n pcs=pcs, ids16=ids16, ids17=ids17,\n unitnumber=unitnumber, filenames=filenames)\n\n # return sut-wt instance\n return swt", "def load(self):\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, \"Set the exr_f first\"\n npz_f = '/tmp/%s_t%s.npz' % \\\n (basename(self.exr_f).replace('.exr', ''), time())\n # Convert to .npz\n # cv2.imread() can't load more than three channels from .exr even with IMREAD_UNCHANGED\n # Has to go through IO. Maybe there's a better way?\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n # Load this .npz\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info(\"Loaded %s\", self.exr_f)\n return data", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies", "def load(filename):\n lines = [l.strip('\\r\\n ') for l in open(filename, 'r').readlines()]\n lines = [l for l in lines if l != '']\n dims = [re.split(r'\\s+', l) for l in lines]\n f = np.array([[float(f) for f in d] for d in dims])\n return f", "def load(cls, f, model, ext_unit_dict=None):\n msg = (\n \"Model object must be of type flopy.mfusg.MfUsg\\n\"\n f\"but received type: {type(model)}.\"\n )\n assert isinstance(model, MfUsg), msg\n\n if model.verbose:\n print(\"loading bcf package file...\")\n\n f_obj = get_open_file_object(f, \"r\")\n\n # dataset 0 -- header\n while True:\n line = f_obj.readline()\n if line[0] != \"#\":\n break\n\n # determine problem dimensions\n nlay = model.nlay\n dis = model.get_package(\"DIS\")\n if dis is None:\n dis = model.get_package(\"DISU\")\n njag = dis.njag\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above\n if model.verbose:\n print(\" loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...\")\n text_list = line_parse(line)\n ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = (\n int(text_list[0]),\n float(text_list[1]),\n int(text_list[2]),\n float(text_list[3]),\n int(text_list[4]),\n int(text_list[5]),\n )\n\n ikvflag = type_from_iterable(\n text_list, index=6, _type=int, default_val=0\n )\n ikcflag = type_from_iterable(\n text_list, index=7, _type=int, default_val=0\n )\n\n # LAYCON array\n laycon, intercellt = cls._load_laycon(f_obj, model)\n\n # TRPY array\n if model.verbose:\n print(\" loading TRPY...\")\n trpy = Util2d.load(\n f_obj, model, (nlay,), np.float32, \"trpy\", ext_unit_dict\n )\n\n # property data for each layer based on options\n transient = not dis.steady.all()\n anis = any(t != 1 for t in trpy)\n anglex = 0\n if (not model.structured) and anis:\n if model.verbose:\n print(\"loading ANGLEX...\")\n anglex = Util2d.load(\n f_obj, model, (njag,), np.float32, \"anglex\", ext_unit_dict\n )\n\n # hy, kv, storage\n (sf1, tran, hy, vcont, sf2, wetdry, kv) = cls._load_layer_arrays(\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n )\n\n # Ksat mfusg\n ksat = 0\n if (not model.structured) and abs(ikcflag == 1):\n if model.verbose:\n print(\" loading ksat (njag)...\")\n ksat = Util2d.load(\n f_obj, model, (njag,), np.float32, \"ksat\", ext_unit_dict\n )\n\n f_obj.close()\n\n # set package unit number\n unitnumber, filenames = get_unitnumber_from_ext_unit_dict(\n model, cls, ext_unit_dict, ipakcb\n )\n\n # create instance of bcf object\n bcf = cls(\n model,\n ipakcb=ipakcb,\n intercellt=intercellt,\n laycon=laycon,\n trpy=trpy,\n hdry=hdry,\n iwdflg=iwdflg,\n wetfct=wetfct,\n iwetit=iwetit,\n ihdwet=ihdwet,\n ikvflag=ikvflag,\n ikcflag=ikcflag,\n 
tran=tran,\n hy=hy,\n vcont=vcont,\n kv=kv,\n anglex=anglex,\n ksat=ksat,\n sf1=sf1,\n sf2=sf2,\n wetdry=wetdry,\n unitnumber=unitnumber,\n filenames=filenames,\n )\n\n # return bcf object\n return bcf", "def read_KNN_dataFile(file):\n A = np.genfromtxt(file)\n return A", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def read_file(netcdf_file_name):\n\n dataset_object = netCDF4.Dataset(netcdf_file_name)\n\n saliency_dict = {\n MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),\n IS_LAYER_OUTPUT_KEY: bool(getattr(dataset_object, IS_LAYER_OUTPUT_KEY)),\n LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)),\n NEURON_INDICES_KEY: numpy.array(\n getattr(dataset_object, NEURON_INDICES_KEY), dtype=int\n ),\n IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY),\n MULTIPLY_BY_INPUT_KEY:\n bool(getattr(dataset_object, MULTIPLY_BY_INPUT_KEY)),\n VALID_TIMES_KEY: numpy.array(\n dataset_object.variables[VALID_TIMES_KEY][:], dtype=int\n ),\n LATITUDES_KEY: numpy.array(\n dataset_object.variables[LATITUDES_KEY][:], dtype=float\n ),\n LONGITUDES_KEY: numpy.array(\n dataset_object.variables[LONGITUDES_KEY][:], dtype=float\n ),\n SALIENCY_MATRIX_KEY: numpy.array(\n dataset_object.variables[SALIENCY_MATRIX_KEY][:], dtype=float\n )\n }\n\n dataset_object.close()\n return saliency_dict", "def load(f: Union[str, os.PathLike], model):\n from ..utils.flopy_io import multi_line_strip\n\n pkg_ws = os.path.split(f)[0]\n with open(f) as foo:\n t = [0]\n while t[0] != \"ncells\":\n t = multi_line_strip(foo).split()\n\n ncells = int(t[1])\n\n t = [0]\n while t[0] != \"izone\":\n t = multi_line_strip(foo).split()\n\n method = multi_line_strip(foo).split()[0]\n\n if method in (\"internal\", \"open/close\"):\n izone = np.zeros((ncells,), dtype=int)\n i = 0\n fobj = foo\n if method == \"open/close\":\n fobj = open(os.path.join(pkg_ws, t[1]))\n while i < ncells:\n t = multi_line_strip(fobj)\n if t[0] == \"open/close\":\n if fobj != foo:\n fobj.close()\n fobj = open(os.path.join(pkg_ws, t[1]))\n for zn in t:\n izone[i] = zn\n i += 1\n else:\n izone = np.array([t[1]] * ncells, dtype=int)\n\n zon = ZoneFile6(model, izone)\n return zon", "def load_nc(file,var):\n\tf = netCDF4.Dataset(file,'r+')\n\tdara = f.variables[var][:]\n\tf.close()\n\treturn data", "def _npz_file_lazy_dataset(file_path, fields, feature_names, types, shapes, filesystem=None):\n\n def _generator():\n if filesystem is None:\n data = np.load(file_path)\n else:\n data = np.load(filesystem.openbin(file_path))\n\n np_arrays = [data[f] for f in fields]\n \n perc99, meanstd_mean, meanstd_std = _construct_norm_arrays(file_path)\n\n np_arrays.append(perc99)\n np_arrays.append(meanstd_mean)\n np_arrays.append(meanstd_std)\n \n # Check that arrays match in the first dimension\n 
n_samples = np_arrays[0].shape[0]\n assert all(n_samples == arr.shape[0] for arr in np_arrays)\n # Iterate through the first dimension of arrays\n for slices in zip(*np_arrays):\n yield slices\n\n ds = tf.data.Dataset.from_generator(_generator, types, shapes)\n\n # Converts a database of tuples to database of dicts\n def _to_dict(*features):\n return {'features': features[0], \n 'labels': [features[1], features[2], features[3]], \n 'norm_perc99': features[4], \n 'norm_meanstd_mean': [features[5]], \n 'norm_meanstd_std': features[6]}\n\n ds = ds.map(_to_dict)\n\n return ds", "def loadDeth(ficXYZ):\n data=np.array(Image.open(ficXYZ))\n cloud=data.reshape((-1)).view(dtype=np.uint16).reshape((data.shape[0],data.shape[1]/2,data.shape[2]))-0x7FFF\n # change of the type of the data to correspond to the encoding of the data in the imageXYZ + data refocusing\n cloud=cloud.astype('int16')\n cloud=cloud.astype('float32')/1000.0\n return cloud", "def load_from_planetoid_files(dataset_name, path):\n\n def _sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\n def _parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n def _load_file(name):\n \"\"\"Load from data file.\"\"\"\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)\n\n x = _load_file('x')\n y = _load_file('y')\n tx = _load_file('tx')\n ty = _load_file('ty')\n allx = _load_file('allx')\n ally = _load_file('ally')\n graph = _load_file('graph')\n\n filename = 'ind.{}.test.index'.format(dataset_name)\n filename = os.path.join(path, filename)\n test_idx_reorder = _parse_index_file(filename)\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_name == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph).\n # Find isolated nodes, add them as zero-vecs into the right position.\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y) + 500)\n\n train_mask = _sample_mask(idx_train, labels.shape[0])\n val_mask = _sample_mask(idx_val, labels.shape[0])\n test_mask = _sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n\n return (adj, features, y_train, y_val, y_test, train_mask, val_mask,\n test_mask, labels)", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)" ]
[ "0.6841584", "0.62716067", "0.6261759", "0.6199561", "0.61455333", "0.613199", "0.6091358", "0.6050293", "0.60497475", "0.60486746", "0.6030378", "0.6012157", "0.5956382", "0.59548086", "0.59342986", "0.5906623", "0.58845", "0.5869178", "0.58608663", "0.5851471", "0.5849214", "0.58378255", "0.5835821", "0.5821109", "0.5821109", "0.5821093", "0.58055323", "0.5804221", "0.5774293", "0.5770284", "0.57606894", "0.574183", "0.57408255", "0.5733098", "0.5730841", "0.5724622", "0.5722589", "0.5718628", "0.57179004", "0.57155675", "0.57126856", "0.57087636", "0.57009727", "0.56954455", "0.56923246", "0.5687738", "0.5685549", "0.5683412", "0.56786984", "0.5657476", "0.56490135", "0.56490135", "0.56419206", "0.56394833", "0.5623741", "0.56154686", "0.5613082", "0.561016", "0.55881834", "0.5575002", "0.5556818", "0.5551147", "0.5534555", "0.5533945", "0.5531504", "0.55294967", "0.5526246", "0.55238837", "0.55171305", "0.5506925", "0.5494542", "0.54926926", "0.54851395", "0.54771465", "0.547423", "0.54685503", "0.5452405", "0.54518557", "0.54516554", "0.5450962", "0.5448573", "0.54462606", "0.5438954", "0.5438007", "0.5436321", "0.54282045", "0.5421905", "0.54217625", "0.54207885", "0.54147595", "0.5413899", "0.54058564", "0.53955024", "0.53946054", "0.5392031", "0.53907883", "0.53830534", "0.53829664", "0.5381834", "0.5377461" ]
0.54285187
85
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2.
def _parse_uci_regression_dataset(name_str):
  pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)"
  pattern = re.compile(pattern_string)
  matched = pattern.match(name_str)
  if matched:
    name = matched.group("name")
    seed = matched.group("seed")
    return name, seed
  return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n data_arr = onp.load(path)\n x, y = data_arr[\"x\"], data_arr[\"y\"]\n\n indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))\n indices = onp.asarray(indices)\n x, y = x[indices], y[indices]\n\n n_train = int(train_fraction * len(x))\n x_train, y_train = x[:n_train], y[:n_train]\n x_test, y_test = x[n_train:], y[n_train:]\n\n def normalize_with_stats(arr, arr_mean=None, arr_std=None):\n return (arr - arr_mean) / arr_std\n\n def normalize(arr):\n eps = 1e-6\n arr_mean = arr.mean(axis=0, keepdims=True)\n arr_std = arr.std(axis=0, keepdims=True) + eps\n return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std\n\n x_train, x_mean, x_std = normalize(x_train)\n y_train, y_mean, y_std = normalize(y_train)\n x_test = normalize_with_stats(x_test, x_mean, x_std)\n y_test = normalize_with_stats(y_test, y_mean, y_std)\n\n data_info = {\"y_scale\": float(y_std)}\n\n return (x_train, y_train), (x_test, y_test), data_info", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification (178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n 
return X,y,t", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + 
\".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test", "def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data", "def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]", "def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest", "def esm1_t6_43M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t6_43M_UR50S\")", "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = 
array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")", "def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature", "def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered", "def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]", "def reading_data(fname,goal):\n \n #Reading of the EEG data\n data = pd.read_csv(fname)\n events_fname = fname.replace('_data','_events')\n labels= pd.read_csv(events_fname)\n\n if goal==\"training\":\n 
data=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n elif goal==\"testing\":\n labels=labels.drop(['id' ], axis=1)\n else:\n raise SystemExit(\"The goal variable is unknown for the function\")\n\n return data, labels", "def parse_IAU_name(name):\n # First see if there is a source type acronym\n if diag:\n print \"parse_IAU_name: received\",name\n parts = name.split()\n if len(parts) == 1:\n designation = parts[0]\n elif len(parts) == 2:\n acronym, designation = parts\n else:\n raise(\"Invalid format: \"+name)\n # Now process the designation\n flag = designation[0].upper()\n if flag == \"G\":\n # Galactic coordinates\n longitude,latitude,sign = split_on_sign(name[1:])\n X = parse_decimal_angle(longitude)\n Y = parse_decimal_angle(latitude)\n elif flag == \"J\":\n # Julian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif flag == \"B\":\n # Besselian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif designation[0].isdigit():\n # This should be Besselian but who knows?\n # If it is Besselian there should be at least four digits in RA\n # otherwise it could be galactic\n x,y,sign = split_on_sign(name)\n if len(x) > 3:\n X = parse_sexagesimal_angle(x)\n Y = parse_sexagesimal_angle(y)\n flag = \"B\"\n else:\n X = parse_decimal_angle(x)\n Y = parse_decimal_angle(y)\n flag = \"G\"\n else:\n return \"?\",None,None\n if sign == \"-\":\n Y = -Y\n return flag,X,Y", "def _process_input_seed(self):\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 70 * u.K\n ufir = 0.2 * u.eV / u.cm ** 3\n Tnir = 5000 * u.K\n unir = 0.2 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or 'CMB'\n if type(self.seed_photon_fields) != list:\n self.seed_photon_fields = self.seed_photon_fields.split('-')\n\n self.seeduf = {}\n self.seedT = {}\n self.seedisotropic = {}\n self.seedtheta = {}\n for idx, inseed in enumerate(self.seed_photon_fields):\n if isinstance(inseed, six.string_types):\n if inseed == 'CMB':\n self.seedT[inseed] = Tcmb\n self.seeduf[inseed] = 1.0\n self.seedisotropic[inseed] = True\n elif inseed == 'FIR':\n self.seedT[inseed] = Tfir\n self.seeduf[inseed] = (ufir / (ar * Tfir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n elif inseed == 'NIR':\n self.seedT[inseed] = Tnir\n self.seeduf[inseed] = (unir / (ar * Tnir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n else:\n log.warning('Will not use seed {0} because it is not '\n 'CMB, FIR or NIR'.format(inseed))\n raise TypeError\n elif type(inseed) == list and (len(inseed) == 3 or len(inseed) == 4):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n self.seedisotropic[name] = True\n else:\n name, T, uu, theta = inseed\n self.seedisotropic[name] = False\n self.seedtheta[name] = validate_scalar('{0}-theta'.format(name),\n theta, physical_type='angle')\n\n validate_scalar('{0}-T'.format(name), T, domain='positive',\n physical_type='temperature')\n self.seed_photon_fields[idx] = name\n self.seedT[name] = T\n if uu == 0:\n self.seeduf[name] = 1.0\n else:\n # pressure has same physical type as energy density\n validate_scalar('{0}-u'.format(name), uu,\n domain='positive', physical_type='pressure')\n self.seeduf[name] = (uu / (ar * T ** 4)).decompose()\n else:\n log.warning(\n 'Unable to process seed photon field: {0}'.format(inseed))\n raise TypeError", "def 
mk_test(input_data):\r\n\r\n\ttrend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(input_data)\r\n\r\n\treturn trend, h, p, z, Tau, s, var_s, slope, intercept", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. 
\n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"", "def get_uci_datasets(\n name, split_seed=0, test_fraction=0.10, train_frac=1.0, combine_val_train=False\n):\n # load full dataset\n load_funs = {\n \"naval\": _load_naval,\n \"protein\": _load_protein,\n \"crime\": _load_crime,\n \"energy\": _load_app_energy,\n }\n print(\"Loading dataset {}....\".format(name))\n if name == \"depth\":\n (X_train, y_train), (X_test, y_test) = load_funs[name]()\n y_scale = np.array([[1.0]])\n return (X_train, y_train), (X_test, y_test), y_scale\n\n X, y = load_funs[name]()\n X = X.astype(np.float32)\n y = y.astype(np.float32)\n\n # We create the train and test sets with 90% and 10% of the data\n\n if split_seed == -1: # Do not shuffle!\n permutation = range(X.shape[0])\n else:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X.shape[0])\n\n size_train = int(np.round(X.shape[0] * (1 - test_fraction)))\n index_train = permutation[0:size_train]\n index_test = permutation[size_train:]\n\n X_train = X[index_train, :]\n X_test = X[index_test, :]\n if name == \"depth\":\n y_train = y[index_train]\n y_test = y[index_test]\n else:\n y_train = y[index_train, None]\n y_test = y[index_test, None]\n\n if train_frac != 1.0:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X_train.shape[0])\n n_train = int(train_frac * len(X_train))\n X_train = X_train[:n_train]\n y_train = y_train[:n_train]\n\n if split_seed == -1: # Do not shuffle!\n permutation = range(X_train.shape[0])\n else:\n rs = np.random.RandomState(split_seed)\n permutation = rs.permutation(X_train.shape[0])\n\n if combine_val_train:\n val_fraction = 0.0\n else:\n val_fraction = 0.10\n size_train = int(np.round(X_train.shape[0] * (1 - val_fraction)))\n index_train = permutation[0:size_train]\n index_val = permutation[size_train:]\n\n X_new_train = X_train[index_train, :]\n X_val = X_train[index_val, :]\n\n y_new_train = y_train[index_train]\n y_val = y_train[index_val]\n\n print(\"Done loading dataset {}\".format(name))\n\n def standardize(data):\n mu = data.mean(axis=0, keepdims=1)\n scale = data.std(axis=0, keepdims=1)\n scale[scale < 1e-10] = 1.0\n\n data = (data - mu) / scale\n return data, mu, scale\n\n # Standardize\n X_new_train, x_train_mu, x_train_scale = standardize(X_new_train)\n X_test = (X_test - x_train_mu) / x_train_scale\n y_new_train, y_train_mu, y_train_scale = standardize(y_new_train)\n y_test = (y_test - y_train_mu) / y_train_scale\n X_val = (X_val - x_train_mu) / x_train_scale\n y_val = (y_val - y_train_mu) / y_train_scale\n\n train = TensorDataset(\n torch.Tensor(X_new_train).type(torch.float64),\n torch.Tensor(y_new_train).type(torch.float64),\n )\n\n val = TensorDataset(\n torch.Tensor(X_val).type(torch.float64),\n torch.Tensor(y_val).type(torch.float64),\n )\n\n test = TensorDataset(\n torch.Tensor(X_test).type(torch.float64),\n torch.Tensor(y_test).type(torch.float64),\n )\n in_size = X_train[0].shape\n target_size = y_train[0].shape\n\n return train, val, test, 
in_size, target_size, y_train_scale", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"label_ranking\")", "def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')", "def load_UCR_dataset(path, dataset):\n train_file = os.path.join(path, dataset, dataset + \"_TRAIN.tsv\")\n test_file = os.path.join(path, dataset, dataset + \"_TEST.tsv\")\n train_df = pandas.read_csv(train_file, sep='\\t', header=None)\n test_df = pandas.read_csv(test_file, sep='\\t', header=None)\n train_array = numpy.array(train_df)\n test_array = numpy.array(test_df)\n\n # Move the labels to {0, ..., L-1}\n labels = numpy.unique(train_array[:, 0])\n transform = {}\n for i, l in enumerate(labels):\n transform[l] = i\n\n train = numpy.expand_dims(train_array[:, 1:], 1).astype(numpy.float64)\n train_labels = numpy.vectorize(transform.get)(train_array[:, 0])\n test = numpy.expand_dims(test_array[:, 1:], 1).astype(numpy.float64)\n test_labels = numpy.vectorize(transform.get)(test_array[:, 0])\n\n # Normalization for non-normalized datasets\n # To keep the amplitude information, we do not normalize values over\n # individual time series, but on the whole dataset\n if dataset not in [\n 'AllGestureWiimoteX',\n 'AllGestureWiimoteY',\n 'AllGestureWiimoteZ',\n 'BME',\n 'Chinatown',\n 'Crop',\n 'EOGHorizontalSignal',\n 'EOGVerticalSignal',\n 'Fungi',\n 'GestureMidAirD1',\n 'GestureMidAirD2',\n 'GestureMidAirD3',\n 'GesturePebbleZ1',\n 'GesturePebbleZ2',\n 'GunPointAgeSpan',\n 'GunPointMaleVersusFemale',\n 'GunPointOldVersusYoung',\n 'HouseTwenty',\n 'InsectEPGRegularTrain',\n 'InsectEPGSmallTrain',\n 'MelbournePedestrian',\n 'PickupGestureWiimoteZ',\n 'PigAirwayPressure',\n 'PigArtPressure',\n 'PigCVP',\n 'PLAID',\n 'PowerCons',\n 'Rock',\n 'SemgHandGenderCh2',\n 'SemgHandMovementCh2',\n 'SemgHandSubjectCh2',\n 'ShakeGestureWiimoteZ',\n 'SmoothSubspace',\n 'UMD'\n ]:\n return train, train_labels, test, test_labels\n mean = numpy.nanmean(numpy.concatenate([train, test]))\n var = numpy.nanvar(numpy.concatenate([train, test]))\n train = (train - mean) / math.sqrt(var)\n test = (test - mean) / math.sqrt(var)\n return train, train_labels, test, test_labels", "def load_unicef_data():\n fname = 'SOWC_combined_simple.csv'\n \n \n # Uses pandas to help with string-NaN-numeric data.\n data = pd.read_csv(fname, na_values='_', encoding='latin1')\n \n \n # Strip countries title from feature names.\n features = data.axes[1][1:]\n # Separate country names from feature values.\n countries = data.values[:,0]\n values = data.values[:,1:]\n # Convert to numpy matrix for real.\n values = np.asmatrix(values,dtype='float64')\n # Modify NaN values (missing values).\n mean_vals = nanmean(values, axis=0)\n inds = np.where(np.isnan(values))\n values[inds] = np.take(mean_vals, inds[1])\n return (countries, features, values)", "def parse_user_selections(self):\n if \"model2\" in sys.argv:\n self.model_choice = \"model2\"\n else:\n self.model_choice = \"model1\"\n\n if \"Virginia\" in sys.argv:\n self.region = \"Virginia\"\n self.region_name = 'us-east-1'\n elif \"California\" in sys.argv:\n self.region = \"California\"\n self.region_name = 'us-west-1'\n else:\n self.region = \"Oregon\"\n self.region_name = 'us-west-2'\n\n if self.verbose_mode:\n print \"** will run the Machine Learning %s\" % self.model_choice\n print \"\\n** Running on %s Elastic Map Reduce server\" % self.region", "def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", 
do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob", "def data_prep(data, y, dropna=False):\n\n rand_state = 10 # Setting random state for later cv\n df = pd.read_pickle(data) # Reading in data\n if dropna is True:\n df.dropna(axis=0, inplace = True)\n else:\n pass\n X = df.drop(y, axis=1) # Assigning the feature space to X\n y = df[y] # Class labels to predict\n\n return X, y, rand_state", "def __init__(self, seed = None):\n self.data_dir = pkg_resources.resource_filename('logistic_control_variate', 'data/')\n self.generate_data(seed)\n # Holds logistic regression object for this example\n self.lr = None", "def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis", "def esm1_t34_670M_UR50D():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50D\")", "def easydatagen():\n\n # Reading in the training file\n data = pd.read_json('train.json')\n\n # The set of different cuisines\n cuisines = data.cuisine.unique()\n\n # To find the different ingredients, we need to clean them up a little.\n def clean(string) :\n s = string.replace('-',' ') # read low-fat the same as low fat\n s = string.replace('&', 'and') # read & and and as the same\n s = re.sub('\\((.*?)\\)', '', s) # remove everythin g in brackets\n s = re.sub('\\d{1,2}\\%', '', s) # remove things of the form d% or dd%, where d is a digit\n s = ' '.join(s.split()) # remove extra white spaces\n\n return s\n\n ing_list = data.ingredients.values.tolist()\n raw_ingredients = [clean(x) for ing in ing_list for x in ing]\n\n ingredients = sorted(set(raw_ingredients))\n\n # build a dictionary that to each ingredient assigns its index\n ingredient_index = {}\n for i in range(0,len(ingredients)) :\n ingredient_index[ingredients[i]] = i\n\n # the same for cuisines\n cuisine_index = {}\n for i in range(0, len(cuisines)) :\n cuisine_index[cuisines[i]] = i\n\n def ingredients_to_vector(ings) :\n vect = np.zeros(len(ingredients))\n for ing in ings :\n vect[ingredient_index[clean(ing)]] = 1\n\n return vect\n\n def cuisine_to_vector(cus) :\n vect = np.zeros(20)\n vect[cuisine_index[cus]] = 1\n return vect\n\n vect_list = [ingredients_to_vector(ing) for ing in ing_list]\n target_list = [cuisine_to_vector(cus) for cus in data.cuisine.values.tolist()]\n\n # Define training data\n X = np.c_[vect_list]\n Y = np.c_[target_list]\n\n Y_num = np.zeros((Y.shape[0]))\n for i in range(Y.shape[0]):\n Y_num[i] = np.argmax(Y[i])\n\n x_train, x_test, y_train, y_test = train_test_split(X, Y_num, test_size = 0.2)\n\n return x_train, x_test, y_train, y_test", "def esm1_t34_670M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR50S\")", "def get_emulator_training_info(filename):\n seed_found, ntr_found = None, None\n ret_str = \"\"\n # search for seed\n match = search(r\"seed_[0-9]*\", filename)\n if match:\n seed_found = match.group()\n ret_str += seed_found\n # search for number of training points\n match = search(r\"ntr_[0-9]*\", filename)\n if match:\n match_found = match.group()\n ret_str += \"_\" + match_found\n\n return 
ret_str", "def __prepareDataSet(fileName):\n\n labels = []\n utterances = []\n\n with open(fileName) as f:\n lines = f.readlines()\n\n for line in lines:\n try:\n act = line[:line.index(\" \")]\n utterance = line[line.index(\" \"):line.index(\"\\n\")]\n\n try:\n labels.append(act.strip())\n utterances.append(utterance.strip())\n\n except KeyError:\n pass\n\n except ValueError:\n pass\n\n return labels, utterances", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def nnRegression(data):", "def learn(filePath):\n filename = filePath.stem\n processedJAFFE = load(str(filePath))\n processedDF = pd.DataFrame(processedJAFFE)\n processedDF.columns = ['name', 'data', 'emotion']\n processedDF = processedDF.sort_values(by=['name', 'emotion'])\n grouped = processedDF.groupby(['name', 'emotion'])\n train = grouped.nth([0, 1])\n test = grouped.nth([2, 3, 4])\n\n yTrain = train.index.get_level_values(1).tolist()\n xTrain = train.values.ravel().tolist()\n yTest = test.index.get_level_values(1).tolist()\n xTest = test.values.ravel().tolist()\n\n parameters = {\n 'C': [\n 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08, 1.00E-07, 1.00E-06,\n 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02, 1.00E-01, 1.00,\n 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05\n ],\n 'gamma': [\n 1.00E00,\n 1.00E-01,\n 1.00E-02,\n 1.00E-03,\n 5.00E-04, 2.00E-04, 1.50E-04, 1.10E-04, 1.05E-04, 1.00E-04,\n 9.50E-05, 9.00E-05, 7.00E-05, 5.00E-05, 1.90E-05, 1.00E-05,\n 1.00E-06,\n 1.00E-07,\n ],\n }\n\n models = []\n models.append(['gamma \\\\ C', 1.00E-12, 1.00E-11, 1.00E-10, 1.00E-09, 1.00E-08,\n 1.00E-07, 1.00E-06, 1.00E-05, 1.00E-04, 1.00E-03, 2.00E-03, 1.00E-02,\n 1.00E-01, 1.00, 1.00E+01, 1.00E+02, 1.00E+03, 1.00E+04, 1.00E+05 ])\n gridTimeStart = time()\n numIteration = len(parameters['gamma']) * len(parameters['C'])\n iteration = 0\n meanTime = 0\n for gamma in parameters['gamma']:\n row = [gamma]\n for C in parameters['C']:\n print('C = %s \\t gamma = %s'%(C, gamma))\n timeStart = time()\n svc = OneVsRestClassifier(SVC(random_state=0, decision_function_shape='ovr',\n C=C, kernel='rbf', gamma=gamma), n_jobs=4)\n svc.fit(xTrain, yTrain)\n yTrue, yPred = yTest, svc.predict(xTest)\n yTrue = np.array(yTrue, dtype=np.unicode_)\n yPred = np.array(yPred, dtype=np.unicode_)\n correct = np.sum(yTrue == yPred)\n \n print(\"accuracy: %d/%d = \"%(correct, len(yTrue)),\n D('%.2f'%(correct/len(yTrue)*100)))\n row.append(D('%.2f'%(correct/len(yTrue)*100)))\n \n iterTime = time()-timeStart\n iteration = iteration + 1\n meanTime = meanTime * (iteration-1)/iteration + iterTime/iteration\n remainingTime = (numIteration-iteration)*meanTime\n print('--------------------------(%d sec)--remaining: %s'%\n (iterTime, str(timedelta(seconds=int(remainingTime)))))\n models.append(row)\n gridTime = time() - gridTimeStart\n gridTime = timedelta(seconds=int(gridTime))\n print('time: %s'%str(gridTime))\n print('saving file: %s.csv'%filename)\n with open('../csv/%s.csv'%filename, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerows(models)", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"partial_label_ranking\")", "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = 
[\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time karate instructor: Mr. Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def generate_data(groups):\n # get path list for the intended classification problem\n input_paths = generate_input_list(groups) \n X_lst = []\n y = []\n for p in input_paths:\n dp = pd.read_csv(p, sep = '\\t') #datapoint\n # Normalization \n # norm = lambda x: (x - x.mean()) / x.std()\n # dp = dp.apply(norm)\n # Min-Max scaling \n #dp_norm = (dp - dp.min()) / (dp.max() - dp.min())\n #dp = dp_norm.values\n if dp.isnull().sum().sum()>0:\n# print(p, dp.isnull().sum().sum())\n continue\n dp = dp.drop(['time'], axis = 1) \n dp = dp.iloc[:1600:4]\n\n if dp.isnull().sum().sum()>0:\n# print('after norm',p, dp.isnull().sum().sum())\n continue\n dp = dp.values\n\n X_lst.append(dp)\n sample_y = get_target(p, text= True)\n y.append(sample_y)\n X = np.stack(X_lst, axis=0)\n \n # convert y into int 0 and 1\n encoder = LabelEncoder()\n encoder.fit(y)\n y = encoder.transform(y)\n y_dummy = y\n # convert y into one-hot encoding\n if len(groups)>2:\n y_dummy = pd.get_dummies(y)\n y_dummy = y_dummy.values\n return X, y , y_dummy", "def read_random_data_from_csv(\n file_name, training_set_size, unlabeled_set_size, holdout_set_size, validation_set_size):\n data = samp_file_to_arr(\n file_name, training_set_size + unlabeled_set_size + holdout_set_size + validation_set_size)\n y_raw = np.array([x[0] for x in data])\n x_all = np.array([x[1:] for x in data])\n # Now transform so that the lower label is -1, always. \n uq = np.unique(y_raw) # Assumed to be only two unique labels!\n y_all = np.zeros(len(y_raw))\n y_all[np.where(y_raw == uq[0])[0]] = -1\n y_all[np.where(y_raw == uq[1])[0]] = 1\n xtrhoval, x_unl, ytrhoval, y_unl = sklearn.model_selection.train_test_split(\n x_all, y_all, test_size=unlabeled_set_size)\n x_trho, x_validate, y_trte, y_validate = sklearn.model_selection.train_test_split(\n xtrhoval, ytrhoval, test_size=validation_set_size)\n x_train, x_out, y_train, y_out = sklearn.model_selection.train_test_split(\n x_trho, y_trte, test_size=holdout_set_size)\n return (x_train, y_train, x_unl, y_unl, x_out, y_out, x_validate, y_validate)", "def test_dataset_from_file(train_dataset):\n dummy = \"justo. Praesent luctus. Curabitur egestas nunc sed libero. 
Proin sed\"\n assert train_dataset[0][0] == dummy\n assert train_dataset[0][1] == '6'", "def load_yield_data():\n data = pd.read_csv(\"soybean_model_data_2017.csv\",dtype={'FIPS':str})\n \n data['soybean_percent'] = data['area']/data['land_area']\n \n # Add logical filter to the yield Data\n area_con = data['area'].notnull()\n data = data[area_con]\n \n # Add Rainfed yield\n # rainfed_con: counties without irrigation, the yield is rainfed\n rainfed_con = ~data['FIPS'].isin(data.loc[data['yield_irr'].notnull(),'FIPS'].unique())\n data['yield_rainfed'] = data['yield_noirr']\n data['area_rainfed'] = data['area_noirr']\n \n \n # For counties with irrigation, only the rainfed yield is added to irrigated yield\n data.loc[rainfed_con, 'yield_rainfed'] = data.loc[rainfed_con, 'yield']\n data.loc[rainfed_con, 'area_rainfed'] = data.loc[rainfed_con, 'area']\n\n # add growing season\n data['tave56789']= data.loc[:,'tave5':'tave9'].mean(axis=1)\n data['vpdave56789']= data.loc[:,'vpdave5':'vpdave8'].mean(axis=1)\n data['precip56789']= data.loc[:,'precip5':'precip9'].sum(axis=1)\n \n \n # Add z-score\n county_std = data.groupby('FIPS').std()['precip56789'].to_frame('precip_gs_std').reset_index()\n county_mean = data.groupby('FIPS').mean()['precip56789'].to_frame('precip_gs_mean').reset_index()\n \n data = data.merge(county_mean, on='FIPS').merge(county_std, on='FIPS')\n \n data['precip_gs_z'] = (data['precip56789'] - data['precip_gs_mean'])/data['precip_gs_std']\n\n # The 12 core states \n data_12 = data[data['State'].isin(data.loc[data['evi6'].notnull(),'State'].unique())]\n\n # Detrend yield\n global trend_rainfed, trend_irrigated, trend_all\n trend_rainfed = yield_trend(data_12, yield_type='rainfed')\n trend_irrigated = yield_trend(data_12, yield_type='irrigated')\n trend_all = yield_trend(data_12, yield_type='all')\n \n data_12.loc[:,'yield_ana'] = (data_12['yield'] - trend_all.predict(data_12[['year','yield']]))\n data_12.loc[:,'yield_rainfed_ana'] = (data_12['yield_rainfed'] - trend_rainfed.predict(data_12[['year','yield_rainfed']])) \n data_12.loc[:,'yield_irr_ana'] = (data_12['yield_irr'] - trend_irrigated.predict(data_12[['year','yield_irr']])) \n \n return data_12", "def parseDataUniform(line):\n if b'(' in line:\n return np.array([float(x) for x in line.split(b'(')[1].split(b')')[0].split()])\n return float(line.split(b'uniform')[1].split(b';')[0])", "def setup(measurement_uncertainty):\n\tdata = import_data('dataset.txt', measurement_uncertainty)\n\tdesign = get_design_matrix(data['x'])\n\tA = design / measurement_uncertainty\n\tlikelihood_fisher = get_likelihood_fisher_matrix(A)\n\tprior_fisher = get_prior_fisher_matrix()\n\tposterior_fisher = get_posterior_fisher_matrix(likelihood_fisher, prior_fisher)\n\tb = data['y'] / measurement_uncertainty\n\tmle = get_mle(likelihood_fisher, A, b)\n\tposterior_mean = get_posterior_mean(likelihood_fisher, posterior_fisher, mle)\n\n\tcovariance = np.linalg.inv(posterior_fisher)\n\n\tposterior_stats = {'fisher': posterior_fisher, 'mean': posterior_mean, 'covar': covariance}\n\n\treturn data, posterior_stats", "def random_forest_test_Data(strat_test_set):\n logging.info(\"Random forest.....\")\n X_test = strat_test_set.drop(\"median_house_value\", axis=1)\n y_test = strat_test_set[\"median_house_value\"].copy()\n X_test_num = X_test.drop(\"ocean_proximity\", axis=1)\n imputer = SimpleImputer(strategy=\"median\")\n imputer.fit(X_test_num)\n X_test_prepared = imputer.transform(X_test_num)\n X_test_prepared = pd.DataFrame(\n X_test_prepared, 
columns=X_test_num.columns, index=X_test.index\n )\n X_test_prepared = feature_eng2(X_test_prepared, X_test)\n return X_test_prepared, y_test", "def define_gender(name_input):\n if not os.path.isfile('train_set.txt') and not os.path.isfile('test_set'):\n \"\"\"\n We take a sample of male and female names and mix\n them in order to create a training set and testing set\n \"\"\"\n labeled_names = ([(name, 'male') for name in names.words('male.txt')] +\n [(name, 'female') for name in names.words(\n 'female.txt')])\n random.shuffle(labeled_names)\n\n \"\"\"\n We train the classifier and return the gender of the name\n \"\"\"\n featuresets = [(gender_features(n), gender) for (n, gender)\n in labeled_names]\n train_set, test_set = featuresets[-500:], featuresets[:500]\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n with open('train_set.txt', 'wb') as handle:\n pickle.dump(train_set, handle)\n with open('test_set.txt', 'wb') as handle:\n pickle.dump(test_set, handle)\n with open('classifier.txt', 'wb') as handle:\n pickle.dump(classifier, handle)\n\n with open('train_set.txt', 'rb') as handle:\n train_set = pickle.load(handle)\n with open('test_set.txt', 'rb') as handle:\n test_set = pickle.load(handle)\n with open('classifier.txt', 'rb') as handle:\n classifier = pickle.load(handle)\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n# accuracy = nltk.classify.accuracy(classifier, test_set)\n# classifier.show_most_informative_features(10)\n# print accuracy\n\n \"\"\"\n Accuracy: .804\n Most Informative Features\n last_letter = u'a' female : male = 44.0 : 1.0\n last_letter = u'd' male : female = 23.7 : 1.0\n last_two_letters = u'on' male : female = 11.0 : 1.0\n first_two_letters = u'ha' male : female = 7.8 : 1.0\n last_two_letters = u'ta' female : male = 7.0 : 1.0\n last_letter = u't' male : female = 6.7 : 1.0\n last_letter = u'o' male : female = 6.0 : 1.0\n last_two_letters = u'll' male : female = 4.7 : 1.0\n first_two_letters = u'te' male : female = 4.7 : 1.0\n last_two_letters = u'an' male : female = 4.1 : 1.0\n \"\"\"\n\n return classifier.classify(gender_features(name_input))", "def load_data(url: str, target_var: str) -> tuple:\n # link = \"http://archive.ics.uci.edu/ml/machine-learning-databases/00519/heart_failure_clinical_records_dataset.csv\"\n data = pd.read_csv(url)\n\n X = data.copy()\n y = X.pop(target_var)\n columns = X.columns\n return X.values, y.values, columns", "def test_from_inchi_name(self):\n mol = Molecule.from_inchi(\"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3\")\n assert mol.name == \"\"\n mol = Molecule.from_inchi(\"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3\", name=\"bob\")\n assert mol.name == \"bob\"", "def feature_engineering(data):\n ft, nt, pruefung, training, version, vt, zt = get_testposition(data[\"Testposition\"])\n HA, Self, HA_nt, HA_vt, HA_zt = get_HA(data[\"HA\"])\n wochentag, ist_schulzeit = get_datetime_fields()\n sex_m, sex_w = get_sex(data[\"Sex\"])\n jahredabei = get_jahre_dabei(data[\"UserID\"])\n beendet = get_beendet(data[\"beendet\"])\n klassenstufe = get_klassenstufe(data[\"Klassenstufe\"])\n\n dataset = [\n [\n data[\"UserID\"],\n data[\"UebungsID\"],\n data[\"satzID\"],\n data[\"Erstloesung\"],\n data[\"Schwierigkeit\"],\n data[\"Art\"],\n data[\"AufgabenID\"],\n wochentag,\n ist_schulzeit,\n data[\"MehrfachFalsch\"],\n ft,\n nt,\n pruefung,\n training,\n version,\n vt,\n zt,\n beendet,\n data[\"Fehler\"],\n HA,\n Self,\n HA_nt,\n HA_vt,\n HA_zt,\n klassenstufe,\n jahredabei,\n sex_m,\n sex_w,\n ]\n ]\n\n df = pd.DataFrame(\n dataset,\n 
columns=[\n \"UserID\",\n \"UebungsID\",\n \"satzID\",\n \"Erstloesung\",\n \"Schwierigkeit\",\n \"Art\",\n \"AufgabenID\",\n \"Wochentag\",\n \"ist_Schulzeit\",\n \"MehrfachFalsch\",\n \"Testposition__FT\",\n \"Testposition__nt\",\n \"Testposition__pruefung\",\n \"Testposition__training\",\n \"Testposition__version\",\n \"Testposition__vt\",\n \"Testposition__zt\",\n \"beendet\",\n \"Fehler\",\n \"HA__HA\",\n \"HA__Self\",\n \"HA__nt\",\n \"HA__vt\",\n \"HA__zt\",\n \"Klassenstufe\",\n \"Jahredabei\",\n \"Sex__m\",\n \"Sex__w\",\n ],\n )\n\n # merge data with historical data\n global df_hisotorical\n result = pd.merge(df, df_hisotorical, on=\"UserID\")\n result = result.drop(columns=[\"UserID\", \"UebungsID\", \"satzID\", \"AufgabenID\", \"Art\"])\n return result", "def extract_critic_input(self, data):\n return data[1]", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n\n X = data.data\n y = data.target\n return X, y", "def get_name():\n return \"SVMd+ - simplified approach\"", "def initializing():\n data = np.array(pd.read_csv('data.csv'))[:,1:]\n\n X = data[:,1:-1].astype(int)\n y = data[:,-1].astype(int)\n y_binary = (y == 1).astype(int)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, \n y_binary, \n test_size=0.25, \n )\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n return (X_train, X_test, y_train, y_test, X, y_binary)", "def parse_dataset(self, data):\n pass", "def read_datasets(data_string):\n if type(data_string) is dict:\n features_file = data_string[\"features\"]\n target_file = data_string[\"meta\"]\n if data_string.get(\"target_col\"):\n target_col = data_string.get(\"target_col\")\n else:\n target_col = \"target\"\n if data_string.get(\"train_test_col\"):\n train_test_col = data_string.get(\"train_test_col\")\n else:\n train_test_col = \"group\"\n elif type(data_string) is tuple:\n features_file = data_string[0]\n target_file = data_string[1]\n target_col = \"target\"\n train_test_col = \"group\"\n\n else:\n raise Exception(\n \"Data has to be expressed in either a tuple (features,target) or dictionary {\\\"features\\\":\\\"your_features\\\",\" +\n \"\\\"target\\\":\\\"your_target\\\"\")\n # opening data\n data_directory = os.path.join(project_dir,\"data/processed/\")\n try:\n X = pd.read_csv(data_directory + features_file, index_col=0)\n y = pd.read_csv(data_directory + target_file, index_col=0, encoding=\"ISO-8859-1\")\n except FileNotFoundError:\n print(\"Files not in data/preprocessed, searching for them in the application's directory. You should run the\" +\n \" program from its directory: python program.py instead of python /somewhere/else/program.py\")\n X = pd.read_csv(features_file, index_col=0)\n y = pd.read_csv(target_file, index_col=0, encoding=\"ISO-8859-1\")\n except pd.errors.ParserError as e:\n print(\"Pandas seams to be unable to read this file. 
Make sure it's a csv\")\n raise e\n except UnicodeDecodeError as e:\n print(\"The encoding of either the features or the targets is not encoded using UTF-8 or ISO-8859-1\")\n raise e\n # Check to see if columns exist and return them\n target_col = checking_columns(y, target_col, x=target_col)\n\n # Get group column\n train_test_col = checking_columns(y, train_test_col, x=train_test_col, handle=lambda x: target_col)\n\n return features_file, target_file, X, y, target_col, train_test_col", "def load_unicef_data():\n fname = 'SOWC_combined_simple.csv'\n\n # Uses pandas to help with string-NaN-numeric data.\n data = pd.read_csv(fname, na_values='_')\n # Strip countries title from feature names.\n features = data.axes[1][1:]\n # Separate country names from feature values.\n countries = data.values[:,0]\n values = data.values[:,1:]\n # Convert to numpy matrix for real.\n values = np.asmatrix(values,dtype='float64')\n\n # Modify NaN values (missing values).\n mean_vals = np.nanmean(values, axis=0)\n inds = np.where(np.isnan(values))\n values[inds] = np.take(mean_vals, inds[1])\n return (countries, features, values)", "def main():\n df_titanic = pd.read_csv('train.csv', header=None)\n print df_titanic.describe()", "def baseline(x_data, y_data, stra = \"uniform\"):\r\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\r\n dummy = DummyClassifier(strategy= stra)\r\n dummy.fit(x_train, y_train)\r\n y_pred = dummy.predict(x_test)\r\n accu = accuracy_score(y_test, y_pred)\r\n return accu", "def iris_data():\n X, y = load_iris()['data'], load_iris()['target']\n y[y == 2.] = 0 # N.B. make binary, TODO simulate a competition dataset\n return BasicExamplesProvider(X, y)", "def _read_txt(self, expected_col_names):\n\n try:\n # Read data\n data = pd.read_csv(self.source)\n\n # Check number of columns\n if data.shape[1] != len(expected_col_names):\n raise ValueError(\n \"Unexpected number of columns. Expected {}.\".format(\n len(expected_col_names)))\n # Check column names\n for item in data.columns:\n if item not in expected_col_names:\n raise ValueError(\"Unexpected column name. 
Expected:{}\"\\\n .format(expected_col_names))\n\n # Convert data\n for column in data.columns:\n data[column] = pd.to_numeric(data[column])\n\n # Generate output\n if self.coordinate_system == CoordinateSystem.GEOGRAPHIC:\n def generate_utm(row):\n return UtmCoordinate.create_from_geographic(\n row['latitude'],\n row['longitude'],\n row['elevation'])\n data['UTM'] = data.apply(generate_utm, axis=1)\n data['easting'] = data.apply(lambda row: row['UTM'].easting,\n axis=1)\n data['northing'] = data.apply(lambda row: row['UTM'].northing,\n axis=1)\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.UTM:\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.CARTESIAN:\n data['elevation'] = data['z'] # keeping return values consitent\n data['z'] = data['elevation'] - data['elevation'].min()\n\n else:\n raise ValueError('Unknown coordinate system.')\n\n selection = ['x', 'y', 'z', 'elevation']\n return data[selection]\n except Exception as exception:\n raise exception", "def test_excalibur_name():\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def split_data(name, is_train = True):\r\n data = pd.read_csv(name, header = 0, encoding = 'ISO-8859-1')\r\n 
X = data['text']\r\n if is_train:\r\n Y = data['polarity']\r\n return X, Y\r\n return X", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def setUp(self):\r\n # Single sample, 6 observations, one of which isn't observed in sample.\r\n self.biom_table1 = parse_biom_table(biom_table_str1)\r\n self.estimator1 = ObservationRichnessEstimator(self.biom_table1,\r\n Chao1MultinomialPointEstimator)", "def read_iris_data():\n\n # Tomamos los datos del dataset\n # Esta es la parte en la que copio codigo de la fuente mencionada\n iris_dataset = datasets.load_iris()\n\n # Separamos caracteristicas de las clases\n data = iris_dataset.data\n classes = iris_dataset.target\n feature_names = iris_dataset.feature_names # Para saber el nombre de las caracteristicas\n target_names = iris_dataset.target_names # Los nombres de las flores que consideramos:\n # Son los nombres de las clases\n\n # Nos quedamos solo con la primera y tercera caracteristica que corresponden\n # a los indices 0 y 2\n data = [data[indx][0:3:2] for indx in range(len(data))]\n\n # Del mismo modo solo me quedo con los nombres de las caracteristicas con\n # las que me quedo en el paso anterior\n feature_names = [feature_names[0], feature_names[1]]\n\n return data, classes, feature_names, target_names", "def load_crime():\n\n # LOAD DATA FROM FILE.\n # filename = \"resources\\CommViolPredUnnormalizedData.csv\"\n filename = os.path.join('resources', 'CommViolPredUnnormalizedData.csv')\n data = pd.read_csv(filename, header=0, sep=';', na_values='?', skipinitialspace=True)\n data 
= data.sample(frac=1, random_state=42)\n\n targets = ['violentPerPop']\n pfeatures = ['race']\n\n # Drop rows with no associated attribute to be predicted.\n dataset = data.dropna(subset=targets, axis=0).reset_index(drop=True)\n\n # Keep only features that have more than 95% of points with associated value.\n features_to_drop = list()\n n_points = len(dataset)\n acc_rate = 0.95\n\n for c in dataset.columns:\n tot_values = np.sum(dataset[c].isna())\n if tot_values >= (1 - acc_rate) * n_points:\n features_to_drop.append(c)\n\n dataset = dataset.drop(features_to_drop, axis=1)\n\n # Remove features that are either correlated with the target or useless.\n feat_to_remove = [\n 'fold',\n 'communityname',\n 'state',\n 'murders',\n 'murdPerPop',\n 'rapes',\n 'rapesPerPop',\n 'robberies',\n 'robbbPerPop',\n 'assaults',\n 'assaultPerPop',\n 'burglaries',\n 'burglPerPop',\n 'larcenies',\n 'larcPerPop',\n 'autoTheft',\n 'autoTheftPerPop',\n 'arsons',\n 'arsonsPerPop',\n 'nonViolPerPop'\n ]\n\n feat_to_remove += targets + pfeatures\n\n # Prepare the feature dataset.\n features = [f for f in dataset.columns if f not in feat_to_remove]\n dataset = dataset[features + pfeatures + targets]\n\n # Last check on Nan values.\n dataset = dataset.dropna(axis=0).reset_index(drop=True)\n\n # Force all types to float.\n for c in dataset.columns:\n dataset[c] = dataset[c].astype(float)\n\n # Features selection.\n top_features = utils.get_top_features(dataset[features], dataset[targets], n=15)\n\n for pfeat in pfeatures:\n if pfeat in top_features:\n print(\"Protected feature \" + pfeat + \" in top features!\")\n\n x, xp, y = dataset[top_features].values, dataset[pfeatures].values, dataset[targets].values\n\n return x, xp, y", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'data2text'\n self.dimensions = ['naturalness', 'informativeness']", "def fetchAndCleanDataframe(self):\n\n df = pd.read_csv('/Users/apple4u/Desktop/goksel tez/results_with_scenarios.csv')\n df.insider_label.fillna(0, inplace=True) # replaces null fields with 0\n df = df.drop(columns=['employee_name', 'scenario', 'role'])\n df = df.rename(columns={'insider_label':'label'})\n #df['label'] = df['insider_label'].astype('int64')\n #df.drop(columns='insider_label', inplace=True)\n df.set_index('user_id', inplace=True)\n X = df.iloc[:, :5].values #fetch all records first 5 columns\n y = df.label.values\n print(df.head())\n return X, y", "def get_test_data():\n\n # test set\n test = pd.read_csv(\"test.csv\")\n\n return test", "def fixture_name(self):\n return \"coding_dna_substitution\"", "def test_intro_model_n_amd():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n n=100\n prep.prepare(n_components=n, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n features = [\n\n u'days_since_start',\n u'vote_required',\n u'nterms', u'success_rate',\n u'n_amd', u'session_type',\n u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM',\n u'urgency_No', u'urgency_Yes',\n u'appropriation_No', u'appropriation_Yes',\n u'taxlevy_No', u'taxlevy_Yes',\n u'fiscal_committee_No', u'fiscal_committee_Yes']\n topic_features = [\"topic_\"+str(k) for k in range(n)]\n features += topic_features\n X_train, y_train = 
prep.subset(features, dep_var='n_amd')\n\n baseline = DummyRegressor()\n\n gb = GradientBoostingRegressor()\n\n mc = ModelChooser([baseline, gb])\n mc.fit_predict(X_train, y_train, regressor=True)\n mc.print_results(regressor=True)", "def get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n\n elif dataset_name == \"Breast Cancer\":\n data = datasets.load_breast_cancer()\n\n elif dataset_name == \"Wine Dataset\":\n data = datasets.load_wine()\n\n elif dataset_name == \"MNIST\":\n data = datasets.load_digits()\n\n #elif dataset_name == \"Boston Housing Price\":\n # data = datasets.load_boston()\n\n X = data.data\n y = data.target\n\n return X, y", "def parse_data_uniform(line):\n if b'(' in line:\n return np.array([float(x) for x in line.split(b'(')[1].split(b')')[0].split()])\n return float(line.split(b'uniform')[1].split(b';')[0])", "def create_dataset():\n x_old, y_old = clean_scores_version1()\n\n # delete duplicates\n x_old = np.unique(x_old, axis=0)\n\n file = open('/Users/kira/Desktop/uni/Connect4/agents/agent_supervised_ml/unlabeled2.txt', \"a\")\n\n for row in x_old:\n string = ''\n move_seq = row[row != 0]\n for move in move_seq:\n string = string + str(move)\n for i in range(1, 8):\n file.write(string + str(i) + '\\n')\n\n file.close()", "def esm1v_t33_650M_UR90S_2():\n return load_model_and_alphabet_hub(\"esm1v_t33_650M_UR90S_2\")", "def prepare_data():\n #data, label = load_ta_data(), load_ta_target()\n data, label = load_own_data(), load_own_target()\n tra_x, tst_x = split_samples(data)\n tra_y, tst_y = split_samples(label)\n return (tra_x, tst_x, tra_y, tst_y)", "def load_dataset_test():\n df_test = load_csv_file(\"31_test.csv\")\n return df_test.values", "def prepare_data_test(fname):\n # Read data\n data = pd.read_csv(fname)\n return data", "def load_cup_data(train=True):\n type = \"TR\" if train else \"TS\"\n csv_file = path_data / Path(f\"ML_CUP/ML-CUP20-{type}.csv\")\n return pd.read_csv(csv_file, skiprows=7, header=None, index_col=0)", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def prep_data(df):\n y = df.target\n X = df.drop([\"target\"], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n return X_train, X_test, y_train, y_test", "def load_seed(self) -> np.ndarray:\n return np.loadtxt(CONFIG_DIR / self.name_seed).view(complex).reshape(-1, 1)", "def get_train_data():\n # train set\n train = pd.read_csv(\"train.csv\")\n\n return train", "def create_random_human_identity_from_dataset(self):\n # Set the identity seed. this is used to sample the indentity that generates\n # the human gender, texture, and body shape\n identity_rng = np.random.RandomState(randint(1, 1000))\n # Collecting Humanav dataset\n dataset = HumanAppearance.dataset\n if(dataset is None):\n print('\\033[31m', \"ERROR: can't find Surreal Dataset\", '\\033[0m')\n exit(1) # Failure condition\n # Using the SBPD dataset to generate a random gender, texture, and body shape\n human_gender, human_texture, body_shape = \\\n dataset.get_random_human_gender_texture_and_body_shape(\n identity_rng)\n return human_gender, human_texture, body_shape", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20" ]
[ "0.6082497", "0.5352037", "0.5263292", "0.5021193", "0.49601898", "0.48798177", "0.4859456", "0.48388806", "0.48280886", "0.48230565", "0.48205665", "0.4811698", "0.4808677", "0.4778711", "0.477406", "0.4759555", "0.47592923", "0.47398236", "0.47390524", "0.4733845", "0.47287226", "0.46976715", "0.468585", "0.46762756", "0.46732804", "0.46732804", "0.46732804", "0.46675482", "0.46493044", "0.46306705", "0.46248496", "0.4623197", "0.4617823", "0.46141237", "0.46132454", "0.46007624", "0.45884776", "0.4587554", "0.4586943", "0.45773402", "0.4571271", "0.45499396", "0.45422903", "0.4539875", "0.45366368", "0.45321104", "0.45316443", "0.45254108", "0.45239326", "0.45219713", "0.45142153", "0.4514057", "0.4510401", "0.45056033", "0.4503797", "0.44974226", "0.44911733", "0.44893008", "0.44784573", "0.44744673", "0.44706452", "0.44705704", "0.44581494", "0.44528955", "0.4444527", "0.4444217", "0.44430792", "0.4442398", "0.44420296", "0.44414136", "0.44335654", "0.44325536", "0.44291747", "0.44254175", "0.44197193", "0.44181392", "0.441609", "0.44153017", "0.44140187", "0.44007158", "0.43995535", "0.4396111", "0.4394548", "0.43938193", "0.4389231", "0.43891245", "0.43877634", "0.43874934", "0.43805286", "0.43797562", "0.43764937", "0.4372705", "0.43679857", "0.4366206", "0.43641385", "0.43610975", "0.43592668", "0.43526947", "0.4352457", "0.4352281" ]
0.730633
0
Reshapes batch to have first axis size equal to n_split.
def batch_split_axis(batch, n_split): x, y = batch n = x.shape[0] n_new = n / n_split assert n_new == int(n_new), ( "First axis cannot be split: batch dimension was {} when " "n_split was {}.".format(x.shape[0], n_split)) n_new = int(n_new) return tuple(arr.reshape([n_split, n_new, *arr.shape[1:]]) for arr in (x, y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output", "def split_last_dimension(x, n):\n x_shape = shape_list(x)\n m = x_shape[-1]\n if isinstance(m, int) and isinstance(n, int):\n assert m % n == 0\n return tf.reshape(x, x_shape[:-1] + [n, m // n])", "def split_last_dimension(x, n):\r\n old_shape = x.get_shape().dims\r\n last = old_shape[-1]\r\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\r\n ret.set_shape(new_shape)\r\n return tf.transpose(ret,[0,2,1,3])", "def split_last_dimension(x, n):\n old_shape = x.get_shape().dims\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\n ret.set_shape(new_shape)\n return tf.transpose(ret, [0, 2, 1, 3])", "def reshape_to_batch(array):\n if len(array.shape) == 2:\n array = numpy.expand_dims(array, axis=2)\n array = numpy.expand_dims(array, axis=0)\n return array", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def reshape(x, shape):\n return Reshape(shape)(x)", "def split(x, axis, split_size):\n assert axis < x.ndim, 'Dimension out of range!'\n\n if isinstance(split_size, int):\n _split_size = [x.shape[axis] // split_size] * split_size\n\n elif isinstance(split_size, (list, tuple)):\n _split_size = split_size\n else:\n raise TypeError\n\n if x.ndim == 0:\n\n return [x for _ in range(len(_split_size))]\n\n return T.split(x, splits_size=_split_size, n_splits=len(_split_size), axis=axis)", "def batch_split(self) -> np.array:\n pass", "def 
batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def reshape_d(sequence, batch_size, num_steps):\n batch_length = batch_size * num_steps\n num_batches = sequence // batch_size\n if num_batches * batch_length > (len(sequence) - 1):\n num_batches -= 1\n # Round up batch\n X = sequence[: num_batches * batch_length]\n y = sequence[1: num_batches * batch_length + 1]\n X_splits = np.split(X, batch_size)\n y_splits = np.split(y, batch_size)\n # Stack batches\n X = np.stack(X_splits)\n y = np.stack(y_splits)\n return X, y", "def _reshape_feature(self, X, size):\n new_shape = (X.shape[0],) + size + (X.shape[-1],)\n return X.reshape(new_shape)", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def windows_partition(x, window_size):\n\n B, H, W, C = x.shape\n x = x.reshape([B, H//window_size, window_size, W//window_size, window_size, C])\n x = x.transpose([0, 1, 3, 2, 4, 5])\n x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)\n return x", "def squeeze_batch_dims(inp, op, inner_rank, name=None):\n with ops.name_scope(name, \"squeeze_batch_dims\", [inp]):\n inp = ops.convert_to_tensor(inp, name=\"input\")\n shape = inp.shape\n\n inner_shape = shape[-inner_rank:]\n if not inner_shape.is_fully_defined():\n inner_shape = array_ops.shape(inp)[-inner_rank:]\n\n batch_shape = shape[:-inner_rank]\n if not batch_shape.is_fully_defined():\n batch_shape = array_ops.shape(inp)[:-inner_rank]\n\n if isinstance(inner_shape, tensor_shape.TensorShape):\n inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n else:\n inp_reshaped = array_ops.reshape(\n inp, array_ops.concat(([-1], inner_shape), axis=-1))\n\n out_reshaped = op(inp_reshaped)\n\n out_inner_shape = out_reshaped.shape[-inner_rank:]\n if not out_inner_shape.is_fully_defined():\n out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n\n out = array_ops.reshape(\n out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n\n out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n return out", "def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))", "def split_into_n_states(inp, n):\n *start, m = shapes_list(inp)\n out = tf.reshape(inp, start + [n, m // n])\n return out", "def split_into_n_states(inp, n):\n *start, m = shapes_list(inp)\n out = tf.reshape(inp, start + [n, m // n])\n return out", "def reshape(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y", "def reshape(self, *shape):\n return F.Reshape.apply(self, shape)", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.h, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def blockshaped(arr, nrows, ncols):\r\n\t h, w = arr.shape\r\n\t return (arr.reshape(h//nrows, nrows, -1, ncols)\r\n\t .swapaxes(1,2)\r\n\t .reshape(-1, nrows, ncols))", "def split_heads_2d(inputs, Nh):\n B, H, W, d = shape_list(inputs)\n ret_shape = [B, H, W, Nh, d // Nh]\n split = tf.reshape(inputs, ret_shape)\n return tf.transpose(split, [0, 3, 1, 2, 4])", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, 
tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def split_heads(self, x, batch_size): # noqa\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def numpyReshape(array):\n return np.array(array, dtype = float).reshape(1, len(array))", "def flatten_image(x):\n *batch_shape, h, w, c = x.shape\n return x.reshape((*batch_shape, h * w * c))", "def split_heads(x, batch_size, num_heads, depth):\n x = tf.reshape(x, (batch_size, -1, num_heads, depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def _eager_reshape(tensor, shape, ctx):\n attr_t = tensor._datatype_enum() # pylint: disable=protected-access\n attr_tshape, (shape,) = execute.args_to_matching_eager(\n [shape], ctx, [dtypes.int32, dtypes.int64], dtypes.int32)\n inputs_flat = [tensor, shape]\n attrs = (\"T\", attr_t, \"Tshape\", attr_tshape)\n [result] = execute.execute(\n b\"Reshape\", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)\n return result", "def reshape(self, shape, ndim=None):\r\n\r\n if ndim is not None:\r\n if not isinstance(ndim, int):\r\n raise ValueError(\"Expected ndim to be an integer, is \" +\r\n str(type(ndim)))\r\n\r\n return 
theano.tensor.basic.reshape(self, shape, ndim=ndim)", "def _reshape_like(mat: Tensor, shape: Tuple[int]) -> Tensor:\n return mat.reshape(-1, *shape)", "def _reshape(self, data):\n batch_size, height, width, n_channels = data.shape\n if self._grid_height:\n grid_height = self._grid_height\n else:\n grid_height = int(math.floor(math.sqrt(batch_size)))\n\n grid_width = int(math.ceil(batch_size/grid_height))\n\n if n_channels == 1:\n data = np.tile(data, (1, 1, 1, 3))\n n_channels = 3\n\n if n_channels != 3:\n raise ValueError('Image batch must have either 1 or 3 channels, but '\n 'was {}'.format(n_channels))\n\n shape = (height * grid_height, width * grid_width, n_channels)\n buf = np.full(shape, 255, dtype=np.uint8)\n multiplier = 1 if data.dtype in (np.int32, np.int64) else 255\n\n for k in range(batch_size):\n i = k // grid_width\n j = k % grid_width\n arr = data[k]\n x, y = i * height, j * width\n buf[x:x + height, y:y + width, :] = np.clip(\n multiplier * arr, 0, 255).astype(np.uint8)\n\n if self._zoom > 1:\n buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)\n return buf", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def _split_heads(x, num_heads):\n\tshape_lst = bert_utils.get_shape_list(x)\n\tdepth = shape_lst[-1]\n\tbatch = shape_lst[0]\n\tseq = shape_lst[1]\n\t# print(x.get_shape(), \"===splitheads===\")\n\tsplitted_x = tf.reshape(x, [tf.shape(x)[0], tf.shape(x)[1], \\\n\t\tnum_heads, depth // num_heads])\n\treturn tf.transpose(splitted_x, [0, 2, 1, 3])", "def img_reshape(self, img):\n # reshape image to have a leading 1 dimension\n img = numpy.asarray(img, dtype='float32') / 256.\n img_shape = img.shape\n img_reshaped = img.reshape(1, img_shape[0], img_shape[1], 3)\n return img_reshaped", "def output_reshape(ct):\n return np.moveaxis(ct, 1, -1)", "def _split_and_reshape_event(x, model):\n splits = [\n ps.maximum(1, ps.reduce_prod(s))\n for s in tf.nest.flatten(model.event_shape)\n ]\n x = tf.nest.pack_sequence_as(model.event_shape, tf.split(x, splits, axis=-1))\n\n def 
_reshape_part(part, dtype, event_shape):\n part = tf.cast(part, dtype)\n new_shape = ps.concat([ps.shape(part)[:-1], event_shape], axis=-1)\n return tf.reshape(part, ps.cast(new_shape, tf.int32))\n\n x = tf.nest.map_structure(_reshape_part, x, model.dtype, model.event_shape)\n return x", "def img_reshape(self, input_img):\n _img = np.transpose(input_img, (1, 2, 0)) \n _img = np.flipud(_img)\n _img = np.reshape(_img, (1, img_dim[0], img_dim[1], img_dim[2]))\n return _img", "def reshape_dataset(self, dataset, params):\n assert hasattr(params, \"vectorize_data\"), (\n \"Model params must set vectorize_data.\")\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n dataset[key].images = dp.reshape_data(dataset[key].images, params.vectorize_data)[0]\n dataset[key].shape = dataset[key].images.shape\n return dataset", "def split_multi_scale(y, y_shape):\n yw, yh = y_shape\n\n # Index of original image\n split_index = [yw * yh]\n # Index of large image\n split_index.append(split_index[-1] + (yw - 1) * yh)\n # Index of tall image\n split_index.append(split_index[-1] + yw * (yh - 1))\n # Index of big image\n split_index.append(split_index[-1] + (yw - 1) * (yh - 1))\n\n # We split according to computed indices\n y_preds = np.split(y, split_index, axis=1)\n\n # y_pred is the original image\n y_pred = y_preds[0]\n\n # y_pred_tall is the image with 1x2 patch application. We have to make\n # some calculus to get it back in original shape\n height_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=-1))[\n :, : y_cols - 1\n ] * 0.5\n height_tf_i.flat[0] = 1\n height_tf_i.flat[-1] = 1\n y_pred_tall = [\n np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()\n for m in y_preds[1]\n ]\n y_pred_tall = np.asarray(y_pred_tall)\n\n # y_pred_large is the image with 2x1 patch application. We have to make\n # some calculus to get it back in original shape\n width_tf_i = (np.eye(y_cols) + np.eye(y_cols, k=1))[: y_cols - 1] * 0.5\n width_tf_i.flat[0] = 1\n width_tf_i.flat[-1] = 1\n y_pred_large = [\n np.dot(np.reshape(m, (yw, yh - 1)), width_tf_i).flatten()\n for m in y_preds[2]\n ]\n y_pred_large = np.asarray(y_pred_large)\n\n # y_pred_big is the image with 2x2 patch application. 
We use previous\n # matrices to get it back in original shape\n y_pred_big = [\n np.dot(np.reshape(m, (yw - 1, yh - 1)), width_tf_i) for m in y_preds[3]\n ]\n y_pred_big = [\n np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()\n for m in y_pred_big\n ]\n y_pred_big = np.asarray(y_pred_big)\n\n return (y_pred, y_pred_tall, y_pred_large, y_pred_big)", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def make_unsupervised_batches(data, nbatches, batch_size=None):\n print '---->\\n.....Putting data into vector-shaped batches'\n if batch_size==None:\n batch_size = int(data.sahpe[0]/nbatches)\n else:\n assert nbatches * batch_size <= data.shape\n permut = permutation(data.shape[0])\n xdata = []\n for i in xrange(nbatches):\n xs = data[permut[i * batch_size:(i + 1) * batch_size], :, :, :]\n xdata.append(reshape(xs, (batch_size, prod(xs.shape) / batch_size)))\n return np.reshape(np.asarray(xdata), (nbatches, batch_size, -1))", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def unstack_and_split(self, x, batch_size, num_channels=3):\n unstacked = torch.reshape(x, [batch_size, -1] + list(x.shape)[1:])\n channels, masks = torch.split(unstacked, [num_channels, 1], dim=2)\n return channels, masks", "def reshape(tensor, newshape):\n raise NotImplementedError", "def reshape_datasets(dataset_train, dataset_test, predict_window):\n\n x_train, y_train = create_my_dataset(dataset_train, predict_window)\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n x_test, y_test = create_my_dataset(dataset_test, predict_window)\n x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\n return x_train, y_train, x_test, y_test", "def split_heads(x, num_heads):\n return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3])", "def reshape(tensor):\n row = tf.shape(tensor)[0]\n shape_list = [row, -1]\n out = tf.reshape(tensor, shape_list)\n return out", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def split_array(arr, num_of_splits):\n # TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7\n size = arr.shape[0]\n if size < num_of_splits:\n return [arr[i:i + 1] for i in range(size)]\n slice_len, rest = divmod(size, num_of_splits)\n div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))\n for index in range(num_of_splits)]\n slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]\n return slices", "def 
flatten_reshape(variable):\n dim = 1\n for d in variable.get_shape()[1:].as_list():\n dim *= d\n return tf.reshape(variable, shape=[-1, dim])", "def reshape_0(tensor):\n row = tf.shape(tensor)[0]\n og_shape = tensor.get_shape().as_list()\n shape_list = [row, og_shape[1], og_shape[2], 1]\n out = tf.reshape(tensor, shape_list)\n return out", "def reshape_input(input_data, input_size, single=True, warning=False):\n with suppress(Exception):\n input_data = torch.from_numpy(input_data)\n\n if input_size is None:\n if warning is True:\n print(\"No size was given and no reshaping can occur\")\n return input_data\n\n # Reshape the data regardless of batch size\n start = len(input_data)\n\n alternate = list(input_size)\n alternate[0] = start\n alternate = tuple(alternate)\n\n try:\n if single:\n input_data = input_data.reshape(alternate)\n else:\n input_data = input_data.reshape(input_size)\n except Exception:\n if warning is True:\n print(\"Warning: Data loss is possible during resizing.\")\n if single:\n input_data = input_data.resize_(alternate)\n else:\n input_data = input_data.resize_(input_size)\n return input_data", "def split_tensor(input, n):\n batch_size = tf.shape(input)[0]\n ls = tf.cast(tf.lin_space(0.0, tf.cast(batch_size, FLOAT_TYPE), n + 1), INT_TYPE)\n return [input[ls[i]:ls[i+1]] for i in range(n)]", "def _TransposeStageBatch(x):\n # [num_stages, t, b, ...]\n shape = py_utils.GetShape(x)\n if p.num_pipeline_microbatches is not None:\n assert shape[2] % p.num_pipeline_microbatches == 0\n mb = p.num_pipeline_microbatches\n else:\n assert shape[2] % p.pipeline_microbatch_size == 0\n mb = shape[2] // p.pipeline_microbatch_size\n # [num_stages, t, mb_size, mb, ...]\n x = tf.reshape(x, shape[:2] + [shape[2] // mb, mb] + shape[3:])\n # [mb, num_stages, t, mb_size, ...]\n perm = [3, 0, 1, 2] + [i + 4 for i in range(len(shape) - 3)]\n x = tf.transpose(x, perm)\n return self.pipeline.PadMicrobatches(x)", "def reshape_as_blocks(data, block_size):\n data, block_size = _process_block_inputs(data, block_size)\n\n if np.any(np.mod(data.shape, block_size) != 0):\n raise ValueError(\n \"Each dimension of block_size must divide evenly \"\n \"into the corresponding dimension of data\"\n )\n\n nblocks = np.array(data.shape) // block_size\n new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)\n nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices\n block_idx = tuple(range(1, len(new_shape), 2)) # odd indices\n\n return data.reshape(new_shape).transpose(nblocks_idx + block_idx)", "def device_reshape(self, x: JaxArray) -> JaxArray:\n assert hasattr(x, 'ndim'), f'Expected JaxArray, got {type(x)}. 
If you are trying to pass a scalar to ' \\\n f'parallel, first convert it to a JaxArray, for example np.float(0.5)'\n if x.ndim == 0:\n return np.broadcast_to(x, [self.ndevices])\n assert x.shape[0] % self.ndevices == 0, f'Must be able to equally divide batch {x.shape} among ' \\\n f'{self.ndevices} devices, but does not go equally.'\n return x.reshape((self.ndevices, x.shape[0] // self.ndevices) + x.shape[1:])", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))", "def unbatch_stack(S, grid_shape):\n\tI, J = grid_shape\n\tC, M = S.shape[1], S.shape[2]\n\treturn S.reshape(-1, I, J, C, M, M)", "def generate_batch(self, batch_size, split=0):\n data_size = self.split_sizes[split]\n data_offset = self.data_index[split] + self.split_offset[split]\n\n # Variable batch size - ensure model can handle this\n batch_size = min(batch_size, data_size - self.data_index[split])\n\n batch = np.empty(batch_size, dtype=np.int32)\n labels = np.empty(batch_size, dtype=np.int32)\n\n batch[:] = self.data[data_offset: data_offset + batch_size, 0]\n labels[:] = self.data[data_offset: data_offset + batch_size, 1]\n\n self.data_index[split] += batch_size\n\n return batch, labels", "def get_shape_for_tile_split(\n arr_height: int, arr_width: int, nchannels: int, tile_height: int, tile_width: int\n) -> list[int]:\n shape = [\n arr_height // tile_height,\n tile_height,\n arr_width // tile_width,\n tile_width,\n ]\n if nchannels > 1:\n shape.append(nchannels)\n return shape", "def get_batches_new(split,n_batches,channels,data):\n data_len = data.shape[0]\n max_int = data_len-split\n #Only the selected channels\n #ch_data = \n batches = [] \n for i in range(len(channels)):\n random_ints = np.random.randint(0,max_int,size=(n_batches,1))\n batches.append(data[:,channels][:,i][random_ints+np.arange(split)])\n return np.swapaxes(np.array(batches),0,1)[:,:,:,np.newaxis]", "def batchify(data, batch_size, args):\n # Work out how cleanly we can divide the dataset into batch_size parts (i.e. 
continuous seqs).\n nbatch = data.size(0) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * batch_size)\n # Evenly divide the data across the batch_size batches.\n data = data.view(batch_size, -1)\n if args.cuda:\n data = data.cuda()\n return data", "def init_reshape(cube, nside):\n half_nside = 2**nside / 2\n \n dim1 = cube.shape[1]/2 - half_nside\n dim2 = cube.shape[1]/2 + half_nside\n dim3 = cube.shape[2]/2 - half_nside\n dim4 = cube.shape[2]/2 + half_nside\n\n return cube[:, dim1:dim2, dim3:dim4]", "def Split(batch, replicas_per_host, axis=0):\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]", "def resize_batch(images : List[np.ndarray], size : Tuple[int,int,int,int], resize_kind='stretch') :\n assert resize_kind in ['stretch'] and len(size) == 4\n n, w, h, c = size if size[-1]==3 else tuple(size[i] for i in [0,3,1,2])\n resize = lambda x: BaseRuntime.resize_stretch(x, (h,w))\n dtype = images[0].dtype\n n_pad = n - len(images)\n batch_pad = [np.zeros((h,w,c),dtype=dtype)] * n_pad\n batch_image = list(map(resize, images))\n batch_image = batch_image + batch_pad\n return np.stack(batch_image)", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def test_integer_split_2D_default(self):\n a = np.array([np.arange(10), np.arange(10)])\n res = array_split(a, 3)\n tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),\n np.zeros((0, 10))]\n compare_results(res, tgt)\n assert_(a.dtype.type is res[-1].dtype.type)\n # perhaps should check higher dimensions", "def _create_reshape(cls, onnx_node, inputs, opset_version):\n shape = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(shape)", "def convert_split(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.input(\"AxisTensor\")\n if axis:\n axis = g.get_node(axis[0])\n axis, infered = try_infer_value(axis, g.get_params())\n if infered:\n axis = axis.tolist()[0]\n else:\n axis = op.attr(\"axis\")\n\n sections = op.input(\"SectionsTensorList\")\n if sections:\n tmp_section = []\n for i in sections:\n i = g.get_node(i)\n i, infered = try_infer_value(i, g.get_params())\n if infered:\n i = i.tolist()\n else:\n raise ValueError(\"Dynamic Split not yet supported.\")\n tmp_section.extend(i)\n sections = tmp_section\n else:\n sections = op.attr(\"sections\")\n if sections:\n indices = []\n split_index = 0\n for i in sections[:-1]:\n if i == -1:\n input_shape = infer_shape(x)[axis]\n i = input_shape - np.sum(sections) - 1\n split_index += i\n indices.append(split_index)\n else:\n indices = op.attr(\"num\")\n\n out = _op.split(x, indices, axis)\n for i, out_i in enumerate(out):\n g.add_node(op.output(\"Out\")[i], out_i)", "def 
convert_reshape(g, op, block):\n\n input_shape = op.input(\"Shape\")\n input_shape_tensor = op.input(\"ShapeTensor\")\n data = g.get_node(op.input(\"X\")[0])\n if input_shape:\n new_shape = g.get_node(input_shape[0])\n elif input_shape_tensor:\n new_shape = []\n for shape_name in input_shape_tensor:\n shape = g.get_node(shape_name)\n if len(infer_shape(shape)) == 0:\n shape = _op.reshape(shape, [-1])\n new_shape.append(shape)\n new_shape = _op.concatenate(new_shape, axis=0)\n new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())\n if infered:\n new_shape = new_shape.tolist()\n else:\n new_shape = op.attr(\"shape\")\n out = _op.reshape(data, new_shape)\n g.add_node(op.output(\"Out\")[0], out)", "def repeat_to_batch(x, batch_size):\n shape = tf.shape(x)\n rnk = tf.rank(x)\n tileshp = tf.ones([rnk - 1], dtype=tf.int32)\n tileshpfinal = tf.concat([[batch_size], tileshp], 0)\n return tf.tile(x, tileshpfinal)", "def reshape(input):\n\n input = input / 255\n input = trans.resize(input, (args.size, args.size))\n input = np.reshape(input, input.shape + (1,))\n input = np.reshape(input, (1,) + input.shape)\n return input", "def flatten_layers(data):\n return data.reshape((data.shape[0], data.shape[1], -1))", "def make_vector_batches(data, nbatches, batch_size=None):\n print \"---->\\n.....Putting into vector-shaped batches\"\n if batch_size==None:\n batch_size = int(data['images'].shape[0]/nbatches)\n else:\n assert nbatches * batch_size <= data['images'].shape[0]\n permut = permutation(data['images'].shape[0])\n xdata = []\n ydata = []\n for i in range(nbatches):\n xs = data['images'][permut[i * batch_size:(i + 1) * batch_size],\n :, :, :]\n xdata.append(reshape(xs, (batch_size, prod(xs.shape) / batch_size)))\n ydata.append(data['labels'][permut[i * batch_size:(i + 1)\n * batch_size]])\n print \"---->\\n.....Done!\"\n return [np.reshape(np.asarray(xdata), (nbatches, batch_size, -1)),\n np.asarray(ydata)]" ]
[ "0.69617146", "0.68236685", "0.66688055", "0.6664411", "0.65866363", "0.65084076", "0.65084076", "0.65084076", "0.6464381", "0.6412519", "0.6412519", "0.6395728", "0.6334293", "0.630652", "0.6277924", "0.62474674", "0.61763036", "0.61714095", "0.6166665", "0.61203986", "0.6088742", "0.6058957", "0.6057854", "0.6024649", "0.6013037", "0.6013037", "0.59323484", "0.5861982", "0.5827883", "0.5818059", "0.58179855", "0.58153355", "0.58153355", "0.58044755", "0.5799797", "0.57497275", "0.57497275", "0.57487595", "0.5738105", "0.5738105", "0.5738105", "0.5738105", "0.5738105", "0.5738105", "0.5738105", "0.571956", "0.57117957", "0.5710915", "0.5694648", "0.569134", "0.5680084", "0.56777465", "0.5671846", "0.5671846", "0.5671846", "0.56531006", "0.5648046", "0.56136024", "0.5608312", "0.55827785", "0.55667883", "0.55559105", "0.5530411", "0.55234176", "0.5486612", "0.5477394", "0.5463987", "0.5463638", "0.54598534", "0.54504263", "0.54437166", "0.5433052", "0.5427659", "0.5422036", "0.5408396", "0.5407969", "0.5405937", "0.5397071", "0.5396359", "0.53942245", "0.53883785", "0.5363088", "0.5349693", "0.5347569", "0.53466356", "0.53422904", "0.5323746", "0.5323666", "0.53224915", "0.5299057", "0.5291478", "0.52782553", "0.52752185", "0.527421", "0.5268825", "0.5245335", "0.5243672", "0.5230739", "0.5226342", "0.52244186" ]
0.72161376
0
Shard the dataset to devices.
def pmap_dataset(ds, n_devices): n_data = len(ds[0]) if n_data % n_devices: new_len = n_devices * (n_data // n_devices) warning_str = ("Dataset of length {} can not be split onto {} devices." "Truncating to {} data points.".format( n_data, n_devices, new_len)) warnings.warn(warning_str, UserWarning) ds = (arr[:new_len] for arr in ds) return jax.pmap(lambda x: x)(batch_split_axis(ds, n_devices))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shard(self, dataset_iter):\n return dataset_iter", "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def set_rng_device_and_dtype(\n self, device: torch.device = torch.device('cpu'), dtype: torch.dtype = torch.float32\n ) -> None:\n if self.device != device or self.dtype != dtype:\n self.make_samplers(device, dtype)\n self.device = device\n self.dtype = dtype", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def partition(data, device, train_size=0.8):\n # Start with a copy, will be training\n train = copy.deepcopy(data)\n test = []\n\n # Get distribution of message indices, keep ordering\n test_len = int((1 - train_size) * len(data))\n test_indices = sorted(random.sample(range(len(data)), test_len), reverse=True)\n\n # For each index, remove from train and append to test\n for i in test_indices:\n test.append(train.pop(i))\n\n # Need to reverse test now\n test.reverse()\n\n # Now label each set individually (performed in place)\n Labeler(train)\n Labeler(test)\n\n # Rescale data as well\n train = DataRescaler(train).scaled_data\n test = DataRescaler(test).scaled_data\n\n # Convert to tensors\n # Inputs\n Xtrain = torch.tensor([[s.get('time')] + list(s.get('composite').values()) for s in train], dtype=torch.double).to(device)\n Xtest = torch.tensor([[s.get('time')] + list(s.get('composite').values()) for s in test], dtype=torch.double).to(device)\n\n # Targets\n Ttrain = torch.tensor([[s.get('distinct')] for s in train], dtype=torch.long).to(device)\n Ttest = torch.tensor([[s.get('distinct')] for s in test], dtype=torch.long).to(device)\n return(Xtrain, Ttrain, Xtest, Ttest)", "def wrap_dataset(self, dataset: Any, shard_dataset: bool = True) -> Any:\n if not self.env.managed_training:\n return dataset\n\n hvd.require_horovod_type(\"tensorflow\", \"EstimatorTrialContext.wrap_dataset was called.\")\n\n self.dataset_initialized = True\n if self.distributed.size == 1 or not shard_dataset:\n if self.distributed.size > 1 and not shard_dataset:\n logging.info(\"Dataset sharding skipped.\")\n return dataset\n\n dataset = dataset.shard(hvd.size(), hvd.rank())\n logging.debug(f\"Sharded dataset to index {hvd.rank()} of {hvd.size()}.\")\n return dataset", "def to(self, device):\n for item in self.data:\n if torch.is_tensor(item):\n item.to(item)\n else:\n for subitem in item:\n subitem.to(device)\n return self", "def _import_devices(self) -> None:\n self._devices.clear()\n\n # Exctract all devices\n for device in self._udev.list_devices():\n # Skip devices without mapping\n if not device.device_node or self.helper.hide_virtual_device(device):\n continue\n self._devices[device.sys_name] = Device.import_udev(device)", "def setup_devices(self, devices):\n \n self.devices = devices\n \n barrier = ReusableBarrier(len(devices))\n lock = Lock()\n aux_dict = {}\n\n for device in devices:\n device.barrier = barrier\n device.global_lock = lock\n for location in device.sensor_data: \n if location not in aux_dict:\n aux_dict[location] = Semaphore() \n \n for device in devices:\n device.device_semaphores = aux_dict\n\n self.setup_master_thread()", "def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation\n return 
super(OneDeviceStrategy, self).experimental_distribute_dataset(\n dataset, options)", "def to_device(data, device):\n if isinstance(data, (list,tuple)): # allows to apply function to lists or tuples of tensors\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def connect_all_devices_to_daq(self):\n for d in self.devices:\n dev = self.devices[d] # Get the device from the devices list\n if 'connection' in dev.properties:\n if 'device' in dev.properties['connection']:\n connected_to = dev.properties['connection']['device']\n mode = dev.properties['mode']\n self.daqs[connected_to][mode].append(dev)\n print('Appended %s to %s' % (dev, connected_to))", "def preprocess(self, dataset_iter, single_device=False):\n dataset_iter = map(self.as_example, dataset_iter)\n if not single_device:\n dataset_iter = self.shard(dataset_iter)\n return dataset_iter", "def initialize_devices(self):\n for k in self.devices:\n dev = self.devices[k]\n print('Starting %s' % dev.properties['name'])\n dev.initialize_driver()\n # print('Error initializing %s' % dev.properties['name'])\n if 'defaults' in dev.properties:\n defaults_file = dev.properties['defaults']\n defaults = from_yaml_to_dict(defaults_file)[dev.properties['name']]\n dev.apply_values(defaults)\n if dev.properties['type'] == 'daq':\n self.daqs[dev.properties['name']] = {'input': [],\n 'output': [],\n 'monitor': [], } # Creates an entry for every different DAQ.", "def mount(mapping, loaded_data):\n for drive_size, partition_infos in mapping:\n mount_single(partition_infos, loaded_data[drive_size])", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_device(devices[0])\n else:\n devices = [torch.device('cpu')]", "def to_device(data, device):\r\n if isinstance(data, (list,tuple)):\r\n return [to_device(x, device) for x in data]\r\n return data.to(device, non_blocking=True)", "def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def to_device(data, device):\n if isinstance(data, (list,tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def create_shard(dataset, num_shards):\n input_chips, label_chips = [], []\n for item in tqdm(dataset):\n # not using chip_id and chip_for_display fields\n input_chips.append(item['chip'])\n label_chips.append(item['chip_label'])\n\n # debugging\n # if len(input_chips) > 200:\n # break\n num_chips = len(input_chips)\n print(f'Created {num_chips} chips.')\n\n items_per_shards = math.ceil(num_chips / num_shards)\n shard_idx = []\n for i in range(num_shards):\n shard_idx.append(\n (i * items_per_shards, (1 + i) * items_per_shards)\n )\n # print(f'Debug - shard_end_idx is {shard_idx}')\n\n print('Stacking imagery and label chips into shards')\n input_chip_shards, label_chip_shards = [], []\n for begin_idx, end_idx in shard_idx:\n if begin_idx < num_chips:\n input_chip_shard = input_chips[begin_idx:end_idx]\n input_chip_shard = np.stack(input_chip_shard, axis=0)\n print(f'dim of input chip shard is {input_chip_shard.shape}, 
dtype is {input_chip_shard.dtype}')\n input_chip_shards.append(input_chip_shard)\n\n label_chip_shard = label_chips[begin_idx:end_idx]\n label_chip_shard = np.stack(label_chip_shard, axis=0)\n print(f'dim of label chip shard is {label_chip_shard.shape}, dtype is {label_chip_shard.dtype}')\n label_chip_shards.append(label_chip_shard)\n\n return (input_chip_shards, label_chip_shards)", "def to_device(data, device):\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)", "def device_placement(self):\n if is_tf_available():\n import tensorflow as tf\n with tf.device('/CPU:0' if self.device == -1 else '/device:GPU:{}'.format(self.device)):\n yield\n else:\n import torch\n if self.device >= 0:\n torch.cuda.set_device(self.device)\n\n yield", "def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError", "def set_devices_for_ml(sys_device_ids):\n import os\n\n all_ids = []\n for ids in sys_device_ids:\n all_ids += ids\n unique_sys_device_ids = list(set(all_ids))\n unique_sys_device_ids.sort()\n if -1 in unique_sys_device_ids:\n unique_sys_device_ids.remove(-1)\n\n # Set the CUDA_VISIBLE_DEVICES environment variable\n\n visible_devices = ''\n for i in unique_sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n\n # Return wrappers\n\n relative_device_ids = []\n TVTs, TMOs = [], []\n for ids in sys_device_ids:\n relative_ids = []\n for id in ids:\n if id != -1:\n id = find_index(unique_sys_device_ids, id)\n relative_ids.append(id)\n relative_device_ids.append(relative_ids)\n\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n TVTs.append(TransferVarTensor(relative_ids[0]))\n TMOs.append(TransferModulesOptims(relative_ids[0]))\n return TVTs, TMOs, relative_device_ids", "def move_devices(self, data):\n data = clean(data, self.move_parameters)\n return self.put(\"/devices/move\", data)", "def partition_dataset_train():\n dataset = datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n train_set = torch.utils.data.DataLoader(\n partition, batch_size=bsz, shuffle=True)\n return train_set, bsz", "def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch", "def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch", "def load_devices():", "def make_output_shard_1d(\n output: DTensor, device_mesh: Optional[DeviceMesh] = None, dim: int = 0\n) -> DTensor:\n\n return output.redistribute(device_mesh, [Shard(dim)])", "def _write_dataset(name, dataset, num_shards, output_dir):\n borders = np.int32(np.linspace(0, len(dataset), num_shards + 1))\n indices = list(range(len(dataset)))\n\n for i in range(num_shards):\n filename = os.path.join(\n output_dir, '%s-%.5d-of-%.5d' % (name, i, num_shards))\n shard_indices = indices[borders[i]:borders[i + 1]]\n _write_shard(filename, dataset, shard_indices)\n logging.info('Wrote dataset indices [%d, %d) to output shard %s',\n 
borders[i], borders[i + 1], filename)", "def process_mount_dataset(dataset, mount_path):\n entry = repository.get_entry(dataset)\n if entry:\n username = entry.username\n user_pkey = entry.user_pkey\n if username.strip() == \"\" or user_pkey.strip() == \"\":\n # use local settings\n syndicate_users = config.list_syndicate_users_by_ms_host(entry.ms_host)\n for suser in syndicate_users:\n username = suser.username\n user_pkey = suser.user_pkey\n break\n\n if username.strip() == \"\" or user_pkey.strip() == \"\":\n sdm_util.print_message(\"Cannot find user accounts to access the dataset - %s\" % (dataset))\n return 1\n\n try:\n bimpl = sdm_backends.Backends.get_backend_instance(backend, config.get_backend_config(backend))\n if not bimpl.is_legal_mount_path(mount_path):\n sdm_util.print_message(\"Cannot mount dataset to the given mount path for wrong mount path - %s\" % (mount_path))\n return 1\n\n # check existance\n records = mount_table.get_records_by_mount_path(mount_path)\n for rec in records:\n if rec.dataset == dataset and rec.status == sdm_mount_table.MountRecordStatus.UNMOUNTED:\n # same dataset but unmounted\n # delete and overwrite\n mount_table.delete_record(rec.record_id)\n\n mount_record = mount_table.add_record(dataset, mount_path, backend, sdm_mount_table.MountRecordStatus.UNMOUNTED)\n mount_table.save_table(MOUNT_TABLE_PATH)\n\n bimpl.mount(\n mount_record.record_id,\n entry.ms_host,\n entry.dataset,\n username,\n user_pkey,\n entry.gateway,\n mount_path\n )\n mount_record.status = sdm_mount_table.MountRecordStatus.MOUNTED\n mount_table.save_table(MOUNT_TABLE_PATH)\n return 0\n except sdm_mount_table.MountTableException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s to %s\" % (dataset, mount_path), True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n except sdm_absbackends.AbstractBackendException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s to %s\" % (dataset, mount_path), True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n else:\n sdm_util.print_message(\"Dataset not found - %s\" % dataset)\n return 1", "def to(self, device):\n self.device = device\n self.model.to(self.device)", "def cuda(self, device=None) -> MultiIndicesEmbedding:\n super().cuda(device=device)\n\n self.offsets = self.offsets.cuda(device)\n\n return self", "def test_partition_with_more_sdram_than_default(self):\n self.setup()\n flops = 1000\n (e, ne, n, w, sw, s) = range(6)\n\n processors = list()\n for i in range(18):\n processors.append(Processor(i, flops))\n\n links = list()\n links.append(Link(0, 0, 0, 0, 1, s, s))\n\n _sdram = SDRAM(128 * (2**21))\n\n links = list()\n\n links.append(Link(0, 0, 0, 1, 1, n, n))\n links.append(Link(0, 1, 1, 1, 0, s, s))\n links.append(Link(1, 1, 2, 0, 0, e, e))\n links.append(Link(1, 0, 3, 0, 1, w, w))\n r = Router(links, False, 100, 1024)\n\n ip = \"192.162.240.253\"\n chips = list()\n for x in range(5):\n for y in range(5):\n chips.append(Chip(x, y, processors, r, _sdram, 0, 0, ip))\n\n self.machine = Machine(chips)\n graph, mapper = self.bp.partition(self.graph,self.machine)", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def launch_devices(self):\n self.data[0], temp = alghoritm.temperature(self.data[0], self.set_thermostat, 0) # get value\n HC35_3S.launch(self.data_path, self.samples_size, 
temp) # set it via device\n\n self.data[1], humidi = alghoritm.humidity(self.data[1], self.set_humidifier, 0)\n humidifier.launch(self.data_path, self.samples_size, humidi)\n\n self.data[2], moistu = alghoritm.moisture(self.data[2], self.set_sprinklers, 0)\n HUNTER.launch(self.data_path, self.samples_size, moistu)\n\n self.data[3], o2 = alghoritm.o2(self.data[3], self.set_ventilation, 0)\n ventilation.launch_o2(self.data_path, self.samples_size, o2)\n\n self.data[4], co2 = alghoritm.co2(self.data[4], self.set_ventilation, 0)\n ventilation.launch_co2(self.data_path, self.samples_size, co2)", "def mount_multi_dataset(argv):\n if len(argv) >= 1:\n try:\n bimpl = sdm_backends.Backends.get_backend_instance(backend, config.get_backend_config(backend))\n\n for d in argv:\n dataset = d.strip().lower()\n mount_path = bimpl.make_default_mount_path(dataset, config.get_backend_config(backend).default_mount_path)\n abs_mount_path = sdm_util.get_abs_path(mount_path)\n res = process_mount_dataset(dataset, abs_mount_path)\n if res > 0:\n return res\n return 0\n except sdm_absbackends.AbstractBackendException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s\" % dataset, True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n else:\n show_help([\"mmount\"])\n return 1", "def shard(self, shard):\n\n self._shard = shard", "def shard(self, shard):\n\n self._shard = shard", "def make_ds_pmap_fullbatch(name, dtype, n_devices=None, truncate_to=None):\n name = name.lower()\n n_devices = n_devices or len(jax.local_devices())\n if name in ImgDatasets._value2member_map_:\n train_set, test_set, data_info = get_image_dataset(name)\n loaded = True\n task = Task.CLASSIFICATION\n elif name == \"imdb\":\n train_set, test_set, _, data_info = load_imdb_dataset()\n dtype = jnp.int32\n loaded = True\n task = Task.CLASSIFICATION\n elif name[-4:] == \".npz\":\n train_set, test_set, data_info = load_npz_array(name)\n loaded = True\n task = Task.CLASSIFICATION\n else:\n name, seed = _parse_uci_regression_dataset(name)\n loaded = name is not None\n if name is not None:\n train_set, test_set, data_info = load_uci_regression_dataset(\n name, int(seed))\n loaded = True\n task = Task.REGRESSION\n\n if not loaded:\n raise ValueError(\"Unknown dataset name: {}\".format(name))\n\n if truncate_to:\n assert truncate_to % n_devices == 0, (\n \"truncate_to should be devisible by n_devices, but got values \"\n \"truncate_to={}, n_devices={}\".format(truncate_to, n_devices))\n train_set = tuple(arr[:truncate_to] for arr in train_set)\n\n train_set, test_set = tuple(\n pmap_dataset(ds, n_devices) for ds in (train_set, test_set))\n\n train_set, test_set = map(lambda ds: (ds[0].astype(dtype), ds[1]),\n (train_set, test_set))\n\n return train_set, test_set, task, data_info", "def init_devices(self):\n self.hp_nb = int(self.rs_nb* self.hp_proportion/(1- self.hp_proportion))\n self.defense_cost = self.hp_nb * self.hp_unit_cost\n rs_devices = [True for i in range(self.rs_nb)] #rs --> True\n hp_devices = [False for i in range(self.hp_nb)] #hp --> False\n self.devices = rs_devices + hp_devices\n shuffle(self.devices)", "def create_datasets(config, data_rng):\n # Compute batch size per device from global batch size.\n if config.batch_size % jax.device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.device_count()}).')\n per_device_batch_size = config.batch_size // jax.device_count()\n\n dataset_builder = 
tfds.builder(config.dataset)\n\n def cast_int32(batch):\n img = tf.cast(batch['image'], tf.int32)\n out = batch.copy()\n out['image'] = img\n return out\n\n def drop_info(batch):\n \"\"\"Removes unwanted keys from batch.\"\"\"\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch\n\n if config.data_augmentation:\n should_augment = True\n should_randflip = True\n should_rotate = True\n else:\n should_augment = False\n should_randflip = False\n should_rotate = False\n\n def augment(batch):\n img = tf.cast(batch['image'], tf.float32)\n aug = None\n if should_augment:\n if should_randflip:\n img_flipped = tf.image.flip_left_right(img)\n aug = tf.random.uniform(shape=[]) > 0.5\n img = tf.where(aug, img_flipped, img)\n if should_rotate:\n u = tf.random.uniform(shape=[])\n k = tf.cast(tf.floor(4. * u), tf.int32)\n img = tf.image.rot90(img, k=k)\n aug = aug | (k > 0)\n if aug is None:\n aug = tf.convert_to_tensor(False, dtype=tf.bool)\n\n out = batch.copy()\n out['image'] = img\n return out\n\n def preprocess_train(batch):\n return cast_int32(augment(drop_info(batch)))\n\n def preprocess_eval(batch):\n return cast_int32(drop_info(batch))\n\n # Read instructions to shard the dataset!\n print('train', dataset_builder.info.splits['train'].num_examples)\n # TODO(emielh) use dataset_info instead of num_examples.\n train_split = deterministic_data.get_read_instruction_for_host(\n 'train', num_examples=dataset_builder.info.splits['train'].num_examples)\n train_ds = deterministic_data.create_dataset(\n dataset_builder,\n split=train_split,\n num_epochs=1,\n shuffle=True,\n batch_dims=[jax.local_device_count(), per_device_batch_size],\n preprocess_fn=preprocess_train,\n rng=data_rng,\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=True\n )\n\n # TODO(emielh) check if this is necessary?\n\n # Test batches are _not_ sharded. In the worst case, this simply leads to some\n # duplicated information. 
In our case, since the elbo is stochastic we get\n # multiple passes over the test data.\n if config.test_batch_size % jax.local_device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.local_device_count()}).')\n test_device_batch_size = config.test_batch_size // jax.local_device_count()\n\n eval_ds = deterministic_data.create_dataset(\n dataset_builder,\n split='test',\n # Repeated epochs for lower variance ELBO estimate.\n num_epochs=config.num_eval_passes,\n shuffle=False,\n batch_dims=[jax.local_device_count(), test_device_batch_size],\n preprocess_fn=preprocess_eval,\n # TODO(emielh) Fix this with batch padding instead of dropping.\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=False)\n\n return dataset_builder.info, train_ds, eval_ds", "def batch_to_device(batch, target_device: device):\n moved_batch = {}\n for key, val in batch.items():\n val = val.to(target_device)\n moved_batch[key] = val\n return moved_batch", "def convert_to(data_set, name: str, data_directory: str, num_shards: int=1):\n\n num_examples, rows, cols, depth = data_set.images.shape\n\n data_set = list(zip(data_set.images, data_set.labels))\n\n def _process_examples(example_dataset, filename: str):\n print('Processing {} data'.format(filename))\n dataset_length = len(example_dataset)\n with tf.python_io.TFRecordWriter(filename) as writer:\n for index, (image, label) in enumerate(example_dataset):\n sys.stdout.write('\\rProcessing sample {} of {}'.format(\n index + 1, dataset_length))\n sys.stdout.flush()\n\n image_raw = image.tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(rows),\n 'width': _int64_feature(cols),\n 'depth': _int64_feature(depth),\n 'label': _int64_feature(int(label)),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n print()\n\n if num_shards == 1:\n _process_examples(data_set, _data_path(data_directory, name))\n else:\n sharded_dataset = np.array_split(data_set, num_shards)\n for shard, dataset in enumerate(sharded_dataset):\n _process_examples(dataset, _data_path(\n data_directory, '{}-{}'.format(name, shard + 1)))", "def test_auto_transfer_correct_device(ray_start_4_cpus_2_gpus):\n import nvidia_smi\n\n nvidia_smi.nvmlInit()\n\n def get_gpu_used_mem(i):\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n return info.used\n\n start_gpu_memory = get_gpu_used_mem(1)\n\n device = torch.device(\"cuda:1\")\n small_dataloader = [(torch.randn((1024 * 4, 1024 * 4)),) for _ in range(10)]\n wrapped_dataloader = ( # noqa: F841\n ray.train.torch.train_loop_utils._WrappedDataLoader(\n small_dataloader, device, True\n )\n )\n\n end_gpu_memory = get_gpu_used_mem(1)\n\n # Verify GPU memory usage increases on the right cuda device\n assert end_gpu_memory > start_gpu_memory", "def magma_device_sync():\n\n _libmagma.magma_device_sync()", "def setup(self, ds):\n pass", "def _distribute_data_to_cluster(self):\n\n for data in self.data:\n _distances = self._calculate_distances(data)\n _cluster = self._get_closest_cluster(_distances)\n self.clusters[_cluster].append(data)", "def split_dev(self):\n\t\tprint(\"Splitting test set into dev and test set\")\n\n\t\told_length = len(self.X[\"test\"])\n\t\tindices = list(range(old_length))\n\n\t\tnp.random.seed(0)\n\t\tnp.random.shuffle(indices)\n\t\t\n\t\tsplit = int(len(indices) * 0.5)\n\n\t\tsplit_indices = {\"test\": indices[:split], \"dev\": 
indices[split:]}\n\t\n\t\tfor dataset in (\"dev\", \"test\"):\n\t\t\tself.X[dataset] = [self.X[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.Y[dataset] = [self.Y[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.raw_documents[dataset] = [self.raw_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.tokenized_documents[dataset] = [self.tokenized_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\n\t\tprint(\"Split test set with\", old_length, \"samples into\", len(self.X[\"test\"]), \"/\", len(self.X[\"dev\"]), \"samples\")", "def device_reshape(self, x: JaxArray) -> JaxArray:\n assert hasattr(x, 'ndim'), f'Expected JaxArray, got {type(x)}. If you are trying to pass a scalar to ' \\\n f'parallel, first convert it to a JaxArray, for example np.float(0.5)'\n if x.ndim == 0:\n return np.broadcast_to(x, [self.ndevices])\n assert x.shape[0] % self.ndevices == 0, f'Must be able to equally divide batch {x.shape} among ' \\\n f'{self.ndevices} devices, but does not go equally.'\n return x.reshape((self.ndevices, x.shape[0] // self.ndevices) + x.shape[1:])", "def test_create_device_data(self):\n pass", "def to(self, device):\n\n def to_device(seq: Sequence) -> Sequence:\n return seq.to(device=device)\n\n return self.apply_(to_device)", "def data_parallel(self, batch_size, inputs):\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n with tf.name_scope('data_parallel') as ns:\n pass # generate a name scope to place our data slicing ops\n\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)", "def move_data_to_device(self, data: Tuple) -> Tuple:\n tmp = []\n for dv in data:\n tmp.append(dv.to(self.device))\n \n return tuple(tmp)", "def devices(self) -> Mapping[str, Device]:\n return MappingProxyType(self._devices)", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def to(self, device) -> None:\n self.obs_buffer = self.obs_buffer.to(device)\n self.hid_buffer = self.hid_buffer.to(device)\n self.rew_buffer = self.rew_buffer.to(device)\n self.act_buffer = self.act_buffer.to(device)\n self.don_buffer = self.don_buffer.to(device)\n self.true_termin = self.true_termin.to(device)\n\n self.device = device", "def _setup(self, orig=False):\n log_method_call(self, self.name, orig=orig, status=self.status,\n controllable=self.controllable)\n disks = []\n for member in self.devices:\n member.setup(orig=orig)\n disks.append(member.path)\n\n mdraid.mdactivate(self.path,\n members=disks,\n array_uuid=self.mdadmFormatUUID)", "def update(self):\n _LOGGER.debug(\"Updating Warmup devices\")\n self._warmup.update_all_devices()", "def device(self, serial):\n self._devices = []", "def devicedata():\n data = 
request.get_json()\n\n dd = DeviceData(**data)\n db.session.add(dd)\n db.session.commit()\n\n # update cache when write is confirmed, updates corresponding maxheaps\n num_top_devices = int(environ.get('NUM_TOP_DEVICES'))\n for feature in DeviceData.features():\n # negate feature value to keep list reversed for efficient .pop()\n device_item = [-getattr(dd, feature), dd.deviceId, dd.to_dict()]\n for itv in intervals:\n key = \"_\".join([feature, itv])\n cache = json.loads(memcached.get(key))\n\n # if device already in cache, replace val if larger (pop & insort)\n try:\n idx = [dd_dict[\"deviceId\"]\n for _, _, dd_dict in cache[\"minmaxes\"]].index(dd.deviceId)\n if device_item > cache[\"minmaxes\"][idx]:\n cache[\"minmaxes\"].pop(idx)\n bisect.insort(cache[\"minmaxes\"], device_item)\n\n # otherwise, insort new item if len(cache) < NUM_TOP_DEVICES\n # OR if device_item > minmaxes[0]. -> insort and pop last\n except ValueError:\n if len(cache[\"minmaxes\"]) < num_top_devices:\n bisect.insort(cache[\"minmaxes\"], device_item)\n cache[\"timestamp\"] = dd.timestamp.isoformat()\n memcached.set(key, json.dumps(cache))\n elif device_item > cache[\"minmaxes\"][0]:\n bisect.insort(cache[\"minmaxes\"], device_item)\n cache[\"minmaxes\"].pop()\n cache[\"timestamp\"] = dd.timestamp.isoformat()\n memcached.set(key, json.dumps(cache))\n\n return jsonify(dd.to_dict())", "def test_partially_update_device_group_by_id1(self):\n pass", "def test_partially_update_device_by_id1(self):\n pass", "def test_reshard():\n # legacy_dataset_reshard is a sharded dataset in the legacy format kept\n # around for testing resharding.\n current_dir = os.path.dirname(os.path.abspath(__file__))\n data_dir = os.path.join(current_dir, \"legacy_dataset_reshard\")\n dataset = dc.data.DiskDataset(data_dir)\n assert dataset.legacy_metadata\n assert len(dataset.metadata_df.columns) == 4\n assert list(dataset.metadata_df.columns) == ['ids', 'X', 'y', 'w']\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n copy = dataset.copy(tmpdirname)\n assert np.all(copy.X == dataset.X)\n assert np.all(copy.y == dataset.y)\n assert np.all(copy.w == dataset.w)\n assert np.all(copy.ids == dataset.ids)\n\n # Reshard copy\n copy.reshard(shard_size=10)\n assert copy.get_number_shards() == 10\n # Check metadata has been updated\n assert not copy.legacy_metadata\n assert len(copy.metadata_df.columns) == 8\n assert list(copy.metadata_df.columns) == [\n 'ids', 'X', 'y', 'w', 'ids_shape', 'X_shape', 'y_shape', 'w_shape'\n ]", "def _findSdPartitionDevice (self):\n for partitionDiskName in self.getPartitionDiskNames():\n partitionOsDevice = self.getPartitionOsDeviceName(partitionDiskName)\n partitionIndex = self.getPartitionIndex(partitionDiskName)\n\n if partitionOsDevice is None:\n # not given - need to \"calculate\" it from the sd device and index\n partitionOsDevice = self.getSdDevice() + str(int(partitionIndex) + 1)\n\n self._log(\"find-sd-partition-device\").notice(\"found SD partition %s as os device %s\", partitionDiskName, partitionOsDevice)\n self._partitionOsDevice[partitionDiskName] = partitionOsDevice", "def train_dev_split(docs, dev_size):\n pass", "def test_partially_update_device_group_by_id(self):\n pass", "def set_devices(sys_device_ids):\n # Set the CUDA_VISIBLE_DEVICES environment variable\n import os\n visible_devices = ''\n for i in sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n # Return wrappers.\n # Models and user defined Variables/Tensors would be transferred to 
the\n # first device.\n device_id = 0 if len(sys_device_ids) > 0 else -1\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def archive_mds_data(self, lmtdb):\n\n dataset_names = [\n 'mdservers/cpuload',\n ]\n\n self.init_datasets(dataset_names, lmtdb.mds_names)\n\n # Now query the MDS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_mds_data(self.query_start, self.query_end_plusplus)\n\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'MDS_ID', 'PCT_CPU']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.mds_id_map[row[col_map['MDS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)", "def _assign_port_to_device(self):\n for i in range(0, len(self.stlink_devices)):\n self.stlink_devices[i]['usb_port'] = self.get_port_from_serial(self.stlink_devices[i]['serial'])", "def get_devices_per_node(self):\n\n for i in self._nodes.items():\n node = i[1]\n # Update the interface data\n\n self._get_device(node)\n\n self.updateconfig()", "def test_soud_to_full_to_soud(self):\n # we'll be doing a subset sync\n self.initialize_sessions(filters=\"\\n\".join([\"p1\", \"p2\"]))\n\n tablet1_tuples = [\n (self.i1, \"p1\", 6),\n (self.i1, \"p2\", 1),\n ]\n laptop_tuples_data_included = [\n (self.i3, \"p1\", 5),\n (self.i3, \"p2\", 2),\n ]\n laptop_tuples_data_excluded = [\n (self.i3, \"p\", 5),\n ]\n laptop_tuples_dmcs = [\n (self.i3, \"p\", 5),\n ]\n tablet2_tuples = []\n\n tablet_data = self.create_stores(tablet1_tuples)\n laptop_data_included = self.create_stores(laptop_tuples_data_included)\n laptop_data_excluded = self.create_stores(laptop_tuples_data_excluded)\n\n self.set_sender_fsic_from_dmcs(tablet1_tuples + laptop_tuples_dmcs)\n self.set_receiver_fsic_from_dmcs(tablet2_tuples)\n self.queue()\n assertRecordsBuffered(laptop_data_included)\n assertRecordsNotBuffered(laptop_data_excluded)\n assertRecordsBuffered(tablet_data)", "def change_device(self, device=None):\n\n if device is None:\n # If the function is called without a device, use the current device\n device = self.device\n\n # Create the appropriate device object\n device = torch.device(f'cuda:{device}'\n if torch.cuda.is_available() else 'cpu')\n\n # Change device field\n self.device = device\n # Load the transcription model onto the device\n self.to(self.device)", "def _update_all_devices(self):\n self.all_devices = []\n self.all_devices.extend(self.keyboards)\n self.all_devices.extend(self.mice)\n self.all_devices.extend(self.gamepads)\n self.all_devices.extend(self.other_devices)", "def test_same_device(self):\n\n mode = \"unique_host_same_device\"\n host_id_devices = 
utils.host_id_devices_for_rng(mode)\n specialize_func = jax.pmap(functools.partial(\n utils.specialize_rng_host_device, axis_name=\"i\",\n mode=mode), axis_name=\"i\")\n rng = specialize_func(self.rng, host_id_devices)\n\n self.assertEqual(\n np.unique(rng, axis=0).shape[0], 1)", "def devices_from_entities(hass, entry):\n device_client = hass.data[DOMAIN][entry.entry_id][DATA_DEVICE_REGISTER]\n devices = []\n for i in range(16):\n device_port = f\"{i:01x}\"\n device = SW16Switch(device_port, entry.entry_id, device_client)\n devices.append(device)\n return devices", "def to_device(model, device):\n p = next(model.parameters())\n if p.device == device:\n return\n model.to(device)", "def partition_dataset_val():\n dataset = datasets.MNIST(\n './data',\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n val_set = torch.utils.data.DataLoader(\n partition, batch_size=bsz, shuffle=True)\n return val_set, bsz", "def setUp(self):\n\n # Load the data\n dataset = tagging.data.DataSet.from_fits(DATA_PATH, extension=1)\n\n # Assign all as field.\n dataset.data[\"FIELD/CLUSTER\"] = \"FIELD\"\n\n # [TODO] Delete benchmarks\n clusters = (\"Cha_I\", \"Br81\", \"M15\", \"NGC2808\", \"NGC6633\", \"IC4665\", \n \"NGC104\", \"gamma2_Vel\", \"GJ880\", \"NGC4815\", \"NGC2547\", \"NGC5927\",\n \"NGC4833\", \"NGC1851\", \"NGC2243\", \"NGC3532\", \"NGC6752\", \"Br25\", \n \"NGC4372\", \"NGC6705\", \"M67\", \"NGC2516\", \"Trumpler20\")\n\n # Assign all as members.\n for cluster in clusters:\n members = dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(cluster))\n\n # Special hack:\n if cluster == \"Trumpler20\":\n members += dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(\"Trumpler_20\"))\n\n logger.info(\"Assigned stars to {} clusters\".format(len(clusters)))\n self.dataset = dataset\n return None", "def _update_device_types(self):\n device_types = self.adapter.device_types()\n for device_type in device_types.items:\n key = device_type.id\n self._make_up_to_date('/device_types', key, device_type)", "def set_dims(self, dataset):\n block.Block.set_dims(self, dataset)\n \n raw = dataset.blocks['raw']\n\n # local reference to input data\n data = dataset.get_source_data('prep')\n\n # this is the calculated proper size for self.data\n raw_dims = list(data.shape) # so we can compare to self.dims LIST\n\n # if we average FIDs, the dimensions change here \n raw_dims[-2] = int(raw_dims[-2]/self.set.fids_to_average)\n\n if self.dims is None:\n self._reset_dimensional_data(dataset)\n elif (self.dims)[::-1] != raw_dims: #FIXME bjs - need reverse here, ARRRRGH, why?\n self._reset_dimensional_data(dataset)\n\n # calculate measure_time array based on whether we average or not\n measure_time = list(raw.measure_time)\n nfids = len(measure_time)\n navgs = self.set.fids_to_average\n measure_time = measure_time[0::navgs]\n if (nfids % navgs) != 0:\n del measure_time[-1]\n self.measure_time = np.array(measure_time)", "def setUp(self):\n super().setUp()\n self.devices = _DEVICE_STRATEGY()\n command_line = [\"pool\", \"create\", self._POOLNAME] + self.devices\n RUNNER(command_line)", "def connect_monitor_devices_to_daq(self):\n scan = self.measure['scan']\n 
devices_to_monitor = scan['detectors']\n\n # Clear the DAQS just in case is a second scan running\n for d in self.daqs:\n self.daqs[d]['monitor'] = []\n\n for d in devices_to_monitor:\n dev = self.devices[d]\n self.daqs[dev.properties['connection']['device']]['monitor'].append(dev)", "def test_partially_update_device_by_id(self):\n pass", "def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )", "def input_fn(data_dir,\n subset,\n num_shards,\n batch_size,\n seq_length=4,\n use_distortion_for_training=True):\n with tf.device('/gpu:0' if num_shards >= 1 else '/cpu:0'):\n use_distortion = subset == 'train' and use_distortion_for_training\n dataset = data_parser.DataSet(data_dir, subset, use_distortion, seq_length)\n image_batch, label_batch, occlusion_batch, depth_batch = dataset.make_batch(batch_size)\n\n # Note that passing num=batch_size is safe here, even though\n # dataset.batch(batch_size) can, in some cases, return fewer than batch_size\n # examples. 
This is because it does so only when repeating for a limited\n # number of epochs, but our dataset repeats forever.\n image_batch = tf.unstack(image_batch, num=batch_size, axis=0)\n label_batch = tf.unstack(label_batch, num=batch_size, axis=0)\n occlusion_batch = tf.unstack(occlusion_batch, num=batch_size, axis=0)\n depth_batch = tf.unstack(depth_batch, num=batch_size, axis=0)\n feature_shards = [[] for i in range(num_shards)]\n label_shards = [[] for i in range(num_shards)]\n occlusion_shards = [[] for i in range(num_shards)]\n depth_shards = [[] for i in range(num_shards)]\n skip = batch_size/num_shards\n for idx in range(num_shards):\n feature_shards[idx].append(tf.parallel_stack(image_batch[idx*skip:(idx+1)*skip]))\n label_shards[idx].append([[tf.parallel_stack(label_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(occlusion_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(depth_batch[idx*skip:(idx+1)*skip])]])\n\n return feature_shards, label_shards", "def device(self, device):\n\n self._device = device", "def set_device(self, device):\n self.device = device", "def distribute_datasets_from_function(\n self,\n dataset_fn, # pylint: disable=useless-super-delegation\n options=None):\n return super(OneDeviceStrategy,\n self).distribute_datasets_from_function(dataset_fn, options)", "def devices(self):\n return DeviceCollection(client=self)", "def test_unique_device(self):\n\n mode = \"unique_host_unique_device\"\n host_id_devices = utils.host_id_devices_for_rng(mode)\n specialize_func = jax.pmap(functools.partial(\n utils.specialize_rng_host_device, axis_name=\"i\",\n mode=mode), axis_name=\"i\")\n\n rng = specialize_func(self.rng, host_id_devices)\n\n self.assertEqual(\n np.unique(rng, axis=0).shape[0], jax.local_device_count())", "def make_source_dataset(self, current_host_index, num_hosts):\n split = self.split\n if self.mode == enums.ModelMode.TRAIN and self.shard_per_host:\n split = tfds.even_splits(split, n=num_hosts)[current_host_index]\n # Don't shuffle until after sharding, since otherwise you risk dropping\n # samples because the sharding is performed on different shufflings of the\n # data for each core.\n return tfds.load(\n name=self.dataset_name,\n split=split,\n data_dir=self.data_dir,\n shuffle_files=False)", "def test_write_slices(self):\n dt = np.dtype('(3,)i')\n\n data1 = np.ones((2,), dtype=dt)\n data2 = np.ones((4,5), dtype=dt)\n\n dset = self.f.create_dataset('x', (10,9,11), dtype=dt)\n\n dset[0,0,2:4] = data1\n self.assertArrayEqual(dset[0,0,2:4], data1)\n\n dset[3, 1:5, 6:11] = data2\n self.assertArrayEqual(dset[3, 1:5, 6:11], data2)", "def create_device(cls, params, token):\n tenant = init_tenant_context(token, db)\n try:\n count = int(params.get('count'))\n except ValueError as e:\n LOGGER.error(e)\n raise HTTPRequestError(400, \"If provided, count must be integer\")\n\n c_length = len(str(count))\n verbose = params.get('verbose') in ['true', '1', 'True']\n if verbose and count != 1:\n raise HTTPRequestError(\n 400, \"Verbose can only be used for single device creation\")\n\n devices = []\n full_device = None\n orm_devices = []\n\n try:\n for i in range(0, count):\n content_type = params.get('content_type')\n data_request = params.get('data')\n device_data, json_payload = parse_payload(content_type, data_request, device_schema)\n validate_repeated_attrs(json_payload)\n\n if json_payload.get('id', None) is None or count > 1 : \n device_data['id'] = DeviceHandler.generate_device_id()\n else:\n DeviceHandler.validate_device_id(json_payload['id'])\n 
device_data['id'] = json_payload['id']\n\n device_data['label'] = DeviceHandler.indexed_label(count, c_length, device_data['label'], i)\n device_data.pop('templates', None)\n orm_device = Device(**device_data)\n parse_template_list(json_payload.get('templates', []), orm_device)\n auto_create_template(json_payload, orm_device)\n db.session.add(orm_device)\n orm_devices.append(orm_device)\n db.session.commit()\n except IntegrityError as error:\n handle_consistency_exception(error)\n except ValidationError as error:\n raise HTTPRequestError(400, error.messages)\n\n\n for orm_device in orm_devices:\n devices.append(\n {\n 'id': orm_device.id,\n 'label': orm_device.label\n }\n )\n\n full_device = serialize_full_device(orm_device, tenant)\n\n # Updating handlers\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.create(full_device, meta={\"service\": tenant})\n\n if verbose:\n result = {\n 'message': 'device created',\n 'devices': [full_device]\n }\n else:\n result = {\n 'message': 'devices created',\n 'devices': devices\n }\n return result", "def move_hdd_to_ssd():\n hdd_dir = '/media/bryce/4TB Seagate/Autonomous Vehicles Data/Cityscapes Data'\n ssd_dir = '/media/bryce/1TB Samsung/ml_datasets/cityscapes_data'\n\n datasets = {\n 'l_img': 'leftImg8bit',\n 'disp': 'disparity',\n 'l_seq': 'leftImg8bit_sequence',\n 'seg': 'gtFine',\n 'inst': 'gtFine'\n }\n\n subsets = ['train', 'val']\n\n copy_cityscapes(hdd_dir, datasets, subsets, ssd_dir)", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data" ]
[ "0.5999675", "0.58297193", "0.58297193", "0.58137083", "0.5668784", "0.56086874", "0.5523341", "0.5516918", "0.54677045", "0.54248637", "0.5422187", "0.5421713", "0.5397734", "0.5350507", "0.53368425", "0.5336252", "0.5329454", "0.5285518", "0.52771497", "0.52771497", "0.52771497", "0.52771497", "0.5249026", "0.5239814", "0.5230732", "0.52237314", "0.5168554", "0.51602864", "0.5157209", "0.51229745", "0.5097095", "0.509205", "0.50856054", "0.5079158", "0.5073498", "0.50518626", "0.50424683", "0.50341564", "0.5031321", "0.50239265", "0.50083727", "0.49939874", "0.49939874", "0.4990607", "0.49870896", "0.49831212", "0.49822718", "0.49775395", "0.49731997", "0.49674118", "0.4963253", "0.49479035", "0.49345306", "0.49344876", "0.49174622", "0.49107534", "0.49086118", "0.4897971", "0.4897499", "0.4889235", "0.48884448", "0.4886529", "0.48833498", "0.4871415", "0.48654935", "0.48653144", "0.48632383", "0.48425245", "0.4840163", "0.48337874", "0.48329848", "0.4830925", "0.48308703", "0.48278597", "0.48199373", "0.48187244", "0.48173314", "0.48160365", "0.48116744", "0.47843477", "0.47837743", "0.47793925", "0.47791734", "0.47789046", "0.4773252", "0.47645092", "0.47643423", "0.47641078", "0.47621974", "0.47553265", "0.47549775", "0.4754271", "0.47481823", "0.474415", "0.4740704", "0.47390622", "0.4733414", "0.47293666", "0.47249055", "0.47230697" ]
0.5367258
13
Make train and test sets sharded over batch dim.
def make_ds_pmap_fullbatch(name, dtype, n_devices=None, truncate_to=None): name = name.lower() n_devices = n_devices or len(jax.local_devices()) if name in ImgDatasets._value2member_map_: train_set, test_set, data_info = get_image_dataset(name) loaded = True task = Task.CLASSIFICATION elif name == "imdb": train_set, test_set, _, data_info = load_imdb_dataset() dtype = jnp.int32 loaded = True task = Task.CLASSIFICATION elif name[-4:] == ".npz": train_set, test_set, data_info = load_npz_array(name) loaded = True task = Task.CLASSIFICATION else: name, seed = _parse_uci_regression_dataset(name) loaded = name is not None if name is not None: train_set, test_set, data_info = load_uci_regression_dataset( name, int(seed)) loaded = True task = Task.REGRESSION if not loaded: raise ValueError("Unknown dataset name: {}".format(name)) if truncate_to: assert truncate_to % n_devices == 0, ( "truncate_to should be devisible by n_devices, but got values " "truncate_to={}, n_devices={}".format(truncate_to, n_devices)) train_set = tuple(arr[:truncate_to] for arr in train_set) train_set, test_set = tuple( pmap_dataset(ds, n_devices) for ds in (train_set, test_set)) train_set, test_set = map(lambda ds: (ds[0].astype(dtype), ds[1]), (train_set, test_set)) return train_set, test_set, task, data_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def train(self, num_batches: int):", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def 
create_train_test_sets(self,x,y,lenTest):\n \n nbInd = x.shape[0]\n shuffler = np.random.permutation(nbInd)\n x_train = x[shuffler][0:(nbInd-lenTest),]\n y_train = y[shuffler][0:(nbInd-lenTest),]\n\n x_test = x[shuffler][(nbInd-lenTest):nbInd,]\n y_test = y[shuffler][(nbInd-lenTest):nbInd,]\n\n return x_train,y_train,x_test,y_test", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def partition_dataset_train():\n dataset = datasets.MNIST(\n './data',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n size = dist.get_world_size()\n bsz = int(128 / float(size))\n partition_sizes = [1.0 / size for _ in range(size)]\n partition = DataPartitioner(dataset, partition_sizes)\n partition = partition.use(dist.get_rank())\n train_set = torch.utils.data.DataLoader(\n partition, batch_size=bsz, shuffle=True)\n return train_set, bsz", "def testSharded(self):\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, test_loader", "def split_dataset(dset, batch_size=128, thread_count=4):\n sampler_dset_train = data.sampler.SubsetRandomSampler(list(range(int(0.7*len(dset)))))\n sampler_dset_test = data.sampler.SubsetRandomSampler(list(range(int(0.7*len(dset)),\n 
int(0.85*len(dset)))))\n sampler_dset_validation = data.sampler.SubsetRandomSampler(list(range(int(0.85*len(dset)),\n len(dset))))\n\n loader_dset_train = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_train)\n loader_dset_test = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_test)\n loader_dset_validation = data.DataLoader(\n dset, batch_size=batch_size, num_workers=thread_count,\n pin_memory=True, sampler=sampler_dset_validation)\n\n return loader_dset_train, loader_dset_test, loader_dset_validation", "def train_dev_split(docs, dev_size):\n pass", "def trainSet(self):\r\n self.currIdx = 0\r\n random.shuffle(self.trainSamples)\r\n self.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]", "def minibatcher(inputs, targets, batchsize, shuffle=False):", "def batches(set_name):\n global num_batches, args, ds_sizes \n # num_batches = how many batches in each dataset(train, valid, test)\n # ds_sizes = dataset_sizes \n for b in range(num_batches[set_name]):\n bi = b * args.batch_size # one batch mul batch_size \n bj = (b + 1) * args.batch_size \n if b == num_batches[set_name] - 1:\n bj = ds_sizes[set_name] # maybe only remainer set\n yield bi, bj", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels", "def partition_train_valid_test_multiview2(data, classes=None, others=None, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n if classes is None:\n num_samples=data[0].shape[0]\n classes=np.zeros(shape=(num_samples,),dtype=int)\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n 
train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n num_views=len(data)\n train_set_x=[None]*num_views\n valid_set_x=[None]*num_views\n test_set_x=[None]*num_views\n for v in range(num_views):\n train_set_x[v]=data[v][train_ind,:]\n train_set_y=classes[train_ind]\n train_set_others=others[train_ind]\n valid_set_x[v]=data[v][valid_ind,:]\n valid_set_y=classes[valid_ind]\n valid_set_others=others[valid_ind]\n test_set_x[v]=data[v][test_ind,:]\n test_set_y=classes[test_ind]\n test_set_others=others[test_ind]\n return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others", "def partition_train_valid_test2(data, classes, others, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n if others is not None:\n train_set_others=others[train_ind]\n else:\n train_set_others=None\n valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n if others is not None:\n valid_set_others=others[valid_ind]\n else:\n valid_set_others=None\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n if others is not None:\n test_set_others=others[test_ind]\n else:\n test_set_others=None\n \n return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others", "def _split_train_tst(self):\n num_samples = self.Y.shape[0]\n mapper_file = self.checkpointer.get_mapper_file_location()\n if not self.checkpointer.is_mapper_checkpointed():\n print 'No mapper checkpoint found. Fresh loading in progress ...'\n # Now shuffle the data\n sample_id = range(num_samples)\n random.shuffle(sample_id)\n print 'Dumping the mapper shuffle for reuse.'\n Pickle.dump(sample_id, open(mapper_file, 'wb'))\n print 'Dump complete. Moving Forward...'\n else:\n print 'Mapper Checkpoint found... Reading from mapper dump'\n sample_id = Pickle.load(open(mapper_file, 'rb'))\n print 'Mapping unpickling complete.. 
Moving forward...'\n\n self.X_fwd = self.X_fwd[sample_id]\n self.X_bwd = self.X_bwd[sample_id]\n self.Y = self.Y[sample_id]\n # Now divide the data into test ans train set\n test_fraction = 0.01\n self.test_size = int(test_fraction * num_samples)\n self.train_size = num_samples - self.test_size\n # Forward review\n self.X_trn_fwd = self.X_fwd[0:self.train_size]\n self.X_tst_fwd = self.X_fwd[self.train_size:num_samples]\n # Backward review\n self.X_trn_bwd = self.X_bwd[0:self.train_size]\n self.X_tst_bwd = self.X_bwd[self.train_size:num_samples]\n # Summary\n self.Y_trn = self.Y[0:self.train_size]\n self.Y_tst = self.Y[self.train_size:num_samples]", "def train_validation_split(tiles_df, valid_set_size = .2, visualize = False):\n val_tile_indexes = np.random.choice(len(tiles_df), size = round(valid_set_size*len(tiles_df)))\n tiles_df['dataset']='training'\n tiles_df.loc[val_tile_indexes, 'dataset'] = 'validation'\n\n if visualize:\n fig, ax = plt.subplots(figsize=(10,10))\n tiles_df.loc[tiles_df['dataset']=='training'].plot(ax=ax, color='grey', alpha=0.5, edgecolor='red')\n tiles_df.loc[tiles_df['dataset']=='validation'].plot(ax=ax, color='grey', alpha=0.5, edgecolor='blue')\n return tiles_df", "def shuffle_and_split_data(X_genesets, y, train_size, validate_size):\n permutation = np.random.permutation(y.size)\n y_permuted = y[permutation]\n X_genesets_permuted = [Xg[permutation, :] for Xg in X_genesets]\n X_groups_train = [Xg[0:train_size, :] for Xg in X_genesets_permuted]\n X_groups_validate = [Xg[train_size: validate_size + train_size, :] for Xg in X_genesets_permuted]\n X_groups_test = [Xg[validate_size + train_size:, :] for Xg in X_genesets_permuted]\n y_train = y_permuted[0:train_size]\n y_validate = y_permuted[train_size: validate_size + train_size]\n y_test = y_permuted[validate_size + train_size:]\n return X_groups_train, y_train, X_groups_validate, y_validate, X_groups_test, y_test", "def split_train_test_dev(self):\n for dir_name in (self.config.train_dir, self.config.dev_dir,\n self.config.test_dir):\n create_dir(dir_name)\n\n self.split_helper(self.config.parsed_train_file_pos, 'pos')\n self.split_helper(self.config.parsed_train_file_neg, 'neg')", "def split_dev(self):\n\t\tprint(\"Splitting test set into dev and test set\")\n\n\t\told_length = len(self.X[\"test\"])\n\t\tindices = list(range(old_length))\n\n\t\tnp.random.seed(0)\n\t\tnp.random.shuffle(indices)\n\t\t\n\t\tsplit = int(len(indices) * 0.5)\n\n\t\tsplit_indices = {\"test\": indices[:split], \"dev\": indices[split:]}\n\t\n\t\tfor dataset in (\"dev\", \"test\"):\n\t\t\tself.X[dataset] = [self.X[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.Y[dataset] = [self.Y[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.raw_documents[dataset] = [self.raw_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.tokenized_documents[dataset] = [self.tokenized_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\n\t\tprint(\"Split test set with\", old_length, \"samples into\", len(self.X[\"test\"]), \"/\", len(self.X[\"dev\"]), \"samples\")", "def next_train_batch(self, batch_size):\n if (not self.has_next_train()):\n self._random_permutation()\n self.train_next = 0\n if (self.train_next + batch_size <= len(self.train_list)):\n real_batch_size = batch_size\n else:\n real_batch_size = len(self.train_list) - self.train_next\n img_set = np.zeros([batch_size, self.img_height, self.img_width, 3])\n ground_truth_set = np.zeros([batch_size, self.img_height, self.img_width])\n for i in 
range(self.train_next, self.train_next + real_batch_size):\n train_ind = self.train_list[self.train_permutation[i]]\n img_path = join(self.dataset_dir, 'data/jpg_images', train_ind + '.jpg')\n img_set[i - self.train_next] = self.load_image(img_path)\n mat_path = join(self.dataset_dir, 'data/label_mat', train_ind + '.mat')\n ground_truth_set[i - self.train_next] = self.load_ground_truth(mat_path)\n dup_cnt = 0\n while (real_batch_size < batch_size):\n img_set[real_batch_size] = img_set[dup_cnt]\n ground_truth_set[real_batch_size] = ground_truth_set[dup_cnt]\n dup_cnt = dup_cnt + 1\n real_batch_size = real_batch_size + 1\n self.train_next = self.train_next + batch_size\n return [img_set, ground_truth_set]", "def train(self, batch):\n pass", "def create_datasets(config, data_rng):\n # Compute batch size per device from global batch size.\n if config.batch_size % jax.device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.device_count()}).')\n per_device_batch_size = config.batch_size // jax.device_count()\n\n dataset_builder = tfds.builder(config.dataset)\n\n def cast_int32(batch):\n img = tf.cast(batch['image'], tf.int32)\n out = batch.copy()\n out['image'] = img\n return out\n\n def drop_info(batch):\n \"\"\"Removes unwanted keys from batch.\"\"\"\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch\n\n if config.data_augmentation:\n should_augment = True\n should_randflip = True\n should_rotate = True\n else:\n should_augment = False\n should_randflip = False\n should_rotate = False\n\n def augment(batch):\n img = tf.cast(batch['image'], tf.float32)\n aug = None\n if should_augment:\n if should_randflip:\n img_flipped = tf.image.flip_left_right(img)\n aug = tf.random.uniform(shape=[]) > 0.5\n img = tf.where(aug, img_flipped, img)\n if should_rotate:\n u = tf.random.uniform(shape=[])\n k = tf.cast(tf.floor(4. * u), tf.int32)\n img = tf.image.rot90(img, k=k)\n aug = aug | (k > 0)\n if aug is None:\n aug = tf.convert_to_tensor(False, dtype=tf.bool)\n\n out = batch.copy()\n out['image'] = img\n return out\n\n def preprocess_train(batch):\n return cast_int32(augment(drop_info(batch)))\n\n def preprocess_eval(batch):\n return cast_int32(drop_info(batch))\n\n # Read instructions to shard the dataset!\n print('train', dataset_builder.info.splits['train'].num_examples)\n # TODO(emielh) use dataset_info instead of num_examples.\n train_split = deterministic_data.get_read_instruction_for_host(\n 'train', num_examples=dataset_builder.info.splits['train'].num_examples)\n train_ds = deterministic_data.create_dataset(\n dataset_builder,\n split=train_split,\n num_epochs=1,\n shuffle=True,\n batch_dims=[jax.local_device_count(), per_device_batch_size],\n preprocess_fn=preprocess_train,\n rng=data_rng,\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=True\n )\n\n # TODO(emielh) check if this is necessary?\n\n # Test batches are _not_ sharded. In the worst case, this simply leads to some\n # duplicated information. 
In our case, since the elbo is stochastic we get\n # multiple passes over the test data.\n if config.test_batch_size % jax.local_device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.local_device_count()}).')\n test_device_batch_size = config.test_batch_size // jax.local_device_count()\n\n eval_ds = deterministic_data.create_dataset(\n dataset_builder,\n split='test',\n # Repeated epochs for lower variance ELBO estimate.\n num_epochs=config.num_eval_passes,\n shuffle=False,\n batch_dims=[jax.local_device_count(), test_device_batch_size],\n preprocess_fn=preprocess_eval,\n # TODO(emielh) Fix this with batch padding instead of dropping.\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=False)\n\n return dataset_builder.info, train_ds, eval_ds", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def batches(self, batch_size): \n if self.shuffle:\n idx = np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)", "def test_diff_trainability(self):\n self.run_subtests(\n {\n \"multi_tensor\": [False, True],\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n ],\n },\n self._test_diff_trainability,\n )", "def make_batch(self, \n batch_size=None, \n filenames=None,\n subset=None,\n initializable=False,\n repeat=None,\n shuffle=None,\n return_iterator=True,\n hvd_shard=None,\n simple_parse=False,\n num_epochs=None,\n cache=False,\n cache_file='',\n buffer_size=None,\n batch_sizes=None,\n buckets=None,\n drop_remainder=None,\n world_size=1,\n rank=0,\n shard_by_files=None,\n distribute_strategy=None,\n return_numpy=False):\n # with tf.device('/cpu:0'):\n subset = subset or self.subset\n hvd_shard = hvd_shard if hvd_shard is not None else self.hvd_shard\n if batch_size is None:\n is_test = True\n else:\n is_test = False\n batch_size = batch_size or self.batch_size\n self.batch_size = batch_size\n batch_sizes = batch_sizes if batch_sizes is not None else FLAGS.batch_sizes\n buffer_size = buffer_size if buffer_size is not None else FLAGS.buffer_size\n buckets = buckets if buckets is not None else FLAGS.buckets\n drop_remainder = drop_remainder if drop_remainder is not None else FLAGS.drop_remainder\n shard_by_files = shard_by_files if shard_by_files is not None else FLAGS.shard_by_files\n # use_post_decode = use_post_decode if use_post_decode is not None else self.use_post_decode\n\n self.return_numpy = return_numpy\n\n filenames = filenames or self.files_ or self.get_filenames(subset)\n \n self.gen_example(filenames)\n\n is_eager = tf.executing_eagerly()\n\n logging.debug(subset, 'num files', len(filenames))\n assert filenames, f'{subset}:{filenames} train:{FLAGS.train_input}, valid:{FLAGS.valid_input}, test:{FLAGS.valid_input}' \n\n self.files_ = filenames\n\n self.indexes[self.subset] += 1\n \n if repeat is None:\n num_gpus = melt.num_gpus() if not 'OMPI_COMM_WORLD_RANK' in os.environ else 1\n # if subset == 'train' or num_gpus > 1:\n if subset == 'train':\n repeat = True\n else:\n repeat = False\n if is_eager and num_gpus == 1 and 
tf.__version__ < '2':\n # let tf eager similary to pytorch\n repeat = False\n\n if shuffle is None:\n if subset == 'train':\n shuffle = FLAGS.shuffle \n else:\n shuffle = FLAGS.shuffle_valid \n\n if drop_remainder is None:\n if gezi.get('tpu'):\n drop_remainder = True\n else:\n if subset == 'train':\n drop_remainder = True\n else:\n drop_remainder = False\n\n balance_pos_neg=False\n if self.pos_filter_fn and self.neg_filter_fn:\n balance_pos_neg = True\n\n if self.subset != 'train' and FLAGS.eval_batch_size:\n batch_sizes = None\n buckets = None\n else:\n if buckets:\n buckets = [int(x) for x in buckets]\n FLAGS.buckets = buckets\n if batch_sizes:\n batch_sizes = [int(x) for x in batch_sizes]\n if batch_sizes[0] < batch_size:\n factor = batch_size / batch_sizes[0]\n batch_sizes = [int(x * factor) for x in batch_sizes]\n FLAGS.batch_sizes = batch_sizes\n\n # repeat = False\n logging.debug('---dataset subset:', self.subset, 'repeat:', repeat, 'batch_parse:', self.batch_parse, \n 'drop_last:', drop_remainder, 'initializable:', initializable, 'shuffle:', shuffle,\n 'wolrd_size', world_size, 'rank', rank, 'batch_size', batch_size)\n\n seed = FLAGS.seed \n if seed is not None:\n FLAGS.seed += 1\n\n logging.debug(f'seed for {self.subset} dataset is {seed}')\n\n ## put on cpu or dummy\n with melt.device(FLAGS.dataset_device):\n result = melt.dataset_decode.inputs(\n filenames, \n decode_fn=self.decode,\n batch_size=batch_size,\n post_decode_fn=self.post_decode if hasattr(self, 'post_decode') and self.use_post_decode != False else None,\n shuffle=shuffle,\n shuffle_batch=FLAGS.shuffle_batch,\n shuffle_files=FLAGS.shuffle_files,\n ordered=FLAGS.dataset_ordered if subset == 'train' else True,\n num_threads=FLAGS.num_threads,\n buffer_size=buffer_size,\n num_prefetch_batches=FLAGS.num_prefetch_batches,\n initializable=initializable,\n repeat=repeat,\n repeat_then_shuffle=FLAGS.repeat_then_shuffle,\n drop_remainder=drop_remainder,\n bucket_boundaries=buckets,\n bucket_batch_sizes=batch_sizes,\n length_index=FLAGS.length_index,\n length_key=FLAGS.length_key,\n seed=seed,\n return_iterator=return_iterator,\n filter_fn=self.filter_fn, # inside filter_fn judge subset train or valid or test\n balance_pos_neg=balance_pos_neg,\n pos_filter_fn=self.pos_filter_fn if subset == 'train' else None,\n neg_filter_fn=self.neg_filter_fn if subset == 'train' else None,\n count_fn=self.count_fn if subset == 'train' else None,\n name=subset,\n Dataset=self.Type,\n batch_parse=self.batch_parse,\n hvd_shard=hvd_shard,\n shard_by_files=shard_by_files,\n training=subset == 'train',\n simple_parse=simple_parse,\n num_epochs=num_epochs,\n dynamic_pad=FLAGS.dynamic_pad, #如果有varlen feats才需要 padded_batch 同时batch_parse模式其实也不需要因为sparse2dense就可以自动padd\n cache=cache,\n cache_file=cache_file,\n device='/gpu:0',\n world_size=world_size,\n rank=rank,\n fixed_random=FLAGS.fixed_random,\n parallel_read_files=FLAGS.parallel_read_files,\n use_feed_dict=FLAGS.train_loop and FLAGS.rounds > 1 and not is_eager and FLAGS.feed_dataset and tf.__version__ < '2',\n feed_name=f'{self.subset}_{self.indexes[self.subset]}' if not is_test else None,\n padding_values=FLAGS.padding_idx, \n distribute_strategy=distribute_strategy or melt.distributed.get_strategy(),\n torch=FLAGS.torch,\n keras=FLAGS.keras,\n subset=self.subset,\n return_numpy=return_numpy,\n ) \n \n result = self.adjust(result)\n return result", "def batch_creator(batch_size, dataset_length, dataset_name):\n # batch_size = 128\n # dataset_length = 6000\n batch_mask = 
rng.choice(dataset_length, batch_size)\n\n batch_x = eval('x_' + dataset_name)[[batch_mask]].reshape(-1, input_num_units)\n batch_x = preproc(batch_x)\n\n if dataset_name == 'train':\n batch_y = eval('y_' + dataset_name)[[batch_mask]]\n batch_y = dense_to_one_hot(batch_y)\n\n return batch_x, batch_y", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size", "def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon", "def create_train_validate_test_sets(self,X,Y):\n\n\t\tprint \"Size of the original images\"\n\n\t\tX = np.asarray(X, dtype=theano.config.floatX)\n\t\t\n\t\ttrain_length = int(round(len(X) * 0.60))\n\t\tvalid_length = int(round(len(X) * 0.20))\n\t\ttest_length = int(round(len(X) * 0.20))\n\n\t\tX_train = X[0:train_length]\n\t\tX_valid = X[train_length: (train_length + valid_length)]\n\t\tX_test = X[-test_length:]\n\n\t\t# sample = X_train[0].reshape(64,64)\n\n\t\t# X_train = X_train.transpose(0, 3, 1, 2)\n\t\t# X_valid = X_valid.transpose(0, 3, 1, 2)\n\t\t# X_test = X_test.transpose(0, 3, 1, 2)\n\n\t\t# X = X.transpose(0, 3, 1, 2)\n\n\t\tX_train = map(self.flaten_aux, X_train)\n\t\tX_valid = map(self.flaten_aux, X_valid)\n\t\tX_test = map(self.flaten_aux, X_test)\n\n\t\t# X = map(self.flaten_aux, X)\n\n\t\t#print X_train.shape\n\t\t#X = X.transpose(0, 3, 1, 2)\n\t\t# X = np.asarray(X, dtype=theano.config.floatX)\n\t\t# X = X.reshape((21, 3, 64, 64))\n\t\t# print X.shape\n\t\t# #X_train = X_train.transpose(0, 3, 1, 2)\n\t\t# #print 
X[0].\n\t\t# im = Image.fromarray(X[0],mode=\"RGB\")\n\t\t# im.show()\n\t\t#self.reconstructImage(X[0]).show()\n\t\t# sample = X_train[0].reshape(64,64)\n\t\t# Image.fromarray(sample,mode=\"L\").show()\n\n\t\t#X = map(self.flaten_aux, X)\n\n\t\t# X_train = X[0:train_length]\n\t\t# X_valid = X[train_length: (train_length + valid_length)]\n\t\t# X_test = X[-test_length:]\n\n\t\tY_train = Y[0:train_length]\n\t\tY_valid = Y[train_length:(train_length + valid_length)]\n\t\tY_test = Y[-test_length:]\n\n\t\t#pkl_file = open( '../data/lb.pkl', 'rb')\n\t\t#lb = cPickle.load(pkl_file)\n\n\t\t#arr = np.array(np.round((X_train[0] * 256).reshape((64,64))),dtype=np.uint8)\n\t\t# Image.fromarray(arr,mode=\"L\").show()\n\t\t# print lb.classes_\n\t\t# print Y_train[0]\n\n\t\ttrain_set = [X_train,Y_train]\n\t\tvalid_set = [X_valid,Y_valid]\n\t\ttest_set = [X_test,Y_test]\n\t\tinput = [X,Y]\n\n\t\tif self.verbose:\n\t\t\tprint \"X_train {} X_validation {} X_test {}\".format(len(X_train),len(X_valid),len(X_test))\n\t\t\tprint \"Y_train {} Y_validation {} Y_test {}\".format(len(Y_train),len(Y_valid),len(Y_test))\n\n\t\toutput = open(self.data_path + 'train_set.pkl', 'wb')\n\t\tcPickle.dump(train_set, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open(self.data_path + 'valid_set.pkl', 'wb')\n\t\tcPickle.dump(valid_set, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open(self.data_path + 'test_set.pkl', 'wb')\n\t\tcPickle.dump(test_set, output,protocol=-1)\n\t\toutput.close()\n\t\t\n\t\treturn train_set,valid_set,test_set", "def get_test_batches(data_dir='/home/yunhan/batchified'):\n # train 3 valid 1\n # Use batch 1 - 53 as train (60%), 54 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 18\n idx = np.random.permutation(n)\n idx = idx + 72\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def train_valid_index_split_two_stage(all_index, train_size_1 = None, train_size_2 = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\n\ttrain_size_2 = len(all_index) if train_size_2 is None else train_size_2\n\ttrain_index_2_ = np.random.choice(all_index, train_size_2, replace = False)\n\ttrain_index_2, valid_index_2 = np.split(train_index_2_, [int(train_size_2*(1-valid_split))])\n\n\tall_index = np.setdiff1d(all_index, train_index_2)\n\ttrain_index_1_ = np.random.choice(all_index, train_size_1-train_size_2, replace = False)\n\ttrain_index_1, valid_index_1 = np.split(train_index_1_, [int((train_size_1-train_size_2)*(1-valid_split))])\n\ttrain_index_1 = np.hstack([train_index_1, train_index_2])\n\tvalid_index_1 = np.hstack([valid_index_1, valid_index_2])\n\treturn train_index_1, valid_index_1, train_index_2, valid_index_2", "def train_valid_loader_split(train_set, train_labels, batch_size=32, valid_size=0.1, stratify=True):\n indices = np.arange(len(train_set))\n if stratify:\n train_indices, valid_indices, _, _ = train_test_split(\n indices, train_labels, test_size=valid_size, stratify=train_labels\n )\n else:\n train_indices, valid_indices, _, _ = train_test_split(\n indices, train_labels, test_size=valid_size\n )\n train_sampler = SubsetRandomSampler(indices=train_indices)\n valid_sampler = SubsetRandomSampler(indices=valid_indices)\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size, sampler=train_sampler, num_workers=2\n )\n valid_loader = torch.utils.data.DataLoader(\n train_set, batch_size, 
sampler=valid_sampler, num_workers=2\n )\n return train_loader, valid_loader", "def shuffle_and_split_data_full_cv(X_genesets, y, train_validate_size):\n permutation = np.random.permutation(y.size)\n y_permuted = y[permutation]\n X_genesets_permuted = [Xg[permutation, :] for Xg in X_genesets]\n X_groups_train_validate = [Xg[0:train_validate_size, :] for Xg in X_genesets_permuted]\n X_groups_test = [Xg[train_validate_size:, :] for Xg in X_genesets_permuted]\n y_train_validate = y_permuted[0:train_validate_size]\n y_test = y_permuted[train_validate_size:]\n return X_groups_train_validate, y_train_validate, X_groups_test, y_test", "def dataset_train(batch, size=(256, 256), folder=None,\n threads=8, shuffle=None, fliplr=False):\n return dataset(batch, size=size, folder=folder,\n threads=threads, shuffle=shuffle,\n partition='train', fliplr=fliplr)", "def split_data_crossvalid(data):\n X_trainfolder = []\n X_testfolder = []\n y_trainfolder = []\n y_testfolder = []\n data = data[data[:, 0].argsort()]\n number_one = np.count_nonzero(data[:, :1])\n data_one = data[np.where(data[:, 0] == 1)]\n data_zero = data[np.where(data[:, 0] == 0)]\n one_ratio = round(number_one / len(data), 1)\n one_zero_ratio = 1 - one_ratio\n batch_one = int(70 * one_ratio)\n batch_zero = int(70 * one_zero_ratio)\n batchs = len(data) // 70\n for i in range(batchs):\n test_one = data_one[i * batch_one:(i + 1) * batch_one, :]\n train_one = np.delete(data_one, test_one, axis = 0)\n test_zero = data_zero[i * batch_zero:(i + 1) * batch_zero, :]\n train_zero = np.delete(data_zero, test_zero, axis = 0)\n train_sets = np.concatenate((train_one, train_zero), axis=0)\n test_sets = np.concatenate((test_one, test_zero), axis=0)\n np.random.shuffle(train_sets)\n np.random.shuffle(test_sets)\n X_trainfolder.append(train_sets[:, 1:])\n y_trainfolder.append(train_sets[:, 0])\n X_testfolder.append(test_sets[:, 1:])\n y_testfolder.append(test_sets[:, 0])\n return X_trainfolder, y_trainfolder, X_testfolder, y_testfolder", "def _test_train_partition(self, sent_partition_size):\n\n self._train_data_partitioned = self._partition_dataset(\n unpartitioned_dataset=self._train_data,\n sent_partition_size=sent_partition_size\n )\n\n self._test_data_partitioned = self._partition_dataset(\n unpartitioned_dataset=self._test_data,\n sent_partition_size=sent_partition_size\n )", "def _split_sets(X, y, folds, ind=-1, sample_counter=0):\n\n fold = folds.pop(ind) - sample_counter\n X_test = X[fold, ...]\n y_test = y[fold, ...]\n X_train = np.delete(X, fold, axis=0)\n y_train = np.delete(y, fold, axis=0)\n test_fold = fold + sample_counter\n # return X_train, np.squeeze(y_train), X_val, np.squeeze(y_val)\n return X_train, y_train, X_test, y_test, test_fold", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions 
= []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def train_valid_split(X, y):\n random_indexes = np.random.permutation(len(y))\n train_inds = random_indexes[:(0.75*len(y))]\n valid_inds = random_indexes[(0.75*len(y)):]\n return X[train_inds], y[train_inds], X[valid_inds], y[valid_inds]", "def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group", "def test_fused_batch_norm_uneven_batch(self, distribution):\n self.skipTest(\"TODO(b/234354008): Requires fetching data from network.\")\n (train_images, train_labels), _ = fashion_mnist.load_data()\n # add channel dimension to make 2D data into 3D, since some ops of the\n # model require it.\n train_images = train_images[..., None]\n train_images = train_images / np.float32(255)\n\n # Padding images because ResNet requires a minimal shape of (32, 32)\n padded_train_images = np.concatenate(\n [\n np.zeros((len(train_images), 2, 28, 1)),\n train_images,\n np.zeros((len(train_images), 2, 28, 1)),\n ],\n axis=1,\n )\n padded_train_images = np.concatenate(\n [\n np.zeros((len(train_images), 32, 2, 1)),\n padded_train_images,\n np.zeros((len(train_images), 32, 2, 1)),\n ],\n axis=2,\n )\n\n buffer_size = len(train_images)\n global_batch_size = distribution.num_replicas_in_sync\n num_samples = global_batch_size - 1\n\n epochs = 2\n\n # Keep only the first images, so that the last GPU receives an empty\n # batch\n padded_train_images = padded_train_images[:num_samples]\n train_labels = train_labels[:num_samples]\n\n train_dataset = (\n tf.data.Dataset.from_tensor_slices(\n (padded_train_images, train_labels)\n )\n .shuffle(buffer_size)\n .batch(global_batch_size)\n )\n train_dist_dataset = distribution.experimental_distribute_dataset(\n train_dataset\n )\n\n def create_model():\n inputs = keras.Input((32, 32, 1))\n preprocessed = keras.layers.Conv2D(3, (1, 1))(\n inputs\n ) # ResNet requires 3 channels\n features = resnet_v2.ResNet50V2(\n include_top=False,\n input_tensor=preprocessed,\n pooling=\"avg\",\n weights=None,\n ).output\n return keras.Model(inputs, features)\n\n with distribution.scope():\n # Set reduction to `none` so we can do the reduction afterwards and\n # divide by global batch size.\n loss_object = keras.losses.SparseCategoricalCrossentropy(\n 
from_logits=True, reduction=losses_impl.Reduction.NONE\n )\n\n def compute_resnet_loss(labels, predictions):\n per_example_loss = loss_object(labels, predictions)\n return tf.nn.compute_average_loss(\n per_example_loss, global_batch_size=global_batch_size\n )\n\n model = create_model()\n\n optimizer = optimizers.adam_legacy.Adam()\n\n def train_step(inputs):\n images, labels = inputs\n\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n loss = compute_resnet_loss(labels, predictions)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss\n\n @tf.function\n def distributed_train_step(dataset_inputs):\n per_replica_losses = distribution.run(\n train_step, args=(dataset_inputs,)\n )\n return distribution.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None\n )\n\n for epoch in range(epochs):\n # Train loop\n total_loss = 0.0\n num_batches = 0\n for x in train_dist_dataset:\n total_loss += distributed_train_step(x)\n num_batches += 1\n train_loss = total_loss / num_batches\n\n print(f\"Epoch {epoch+1}, Loss: {train_loss}\")", "def partition_train_valid_test(data, classes, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y", "def get_batch(batch_size, s=\"train\"):\n if s == \"train\":\n X = Xtrain\n categories = train_classes\n else:\n X = Xtest\n categories = test_classes\n\n n_classes, n_examples, w, h = X.shape\n\n # randomly sample several classes to use in the batch\n categories = rng.choice(n_classes, size=(batch_size), replace=False)\n\n # Initial 2 empty arrays for the input image_batch\n pairs = [np.zeros((batch_size, h, w, 1)) for i in range(2)]\n # initialize vector fo the targets\n targets = np.zeros((batch_size, 1))\n\n # make one half of it \"1\"s so 2nd half of batch has same class\n targets[batch_size // 2:] = 1\n\n\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i, :, :, :] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n\n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category\n else:\n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1, n_classes)) % n_classes\n\n pairs[1][i, :, :, :] = X[category_2, idx_2].reshape(w, h, 1)\n\n return pairs, targets", "def partition_train_valid_test_multiview(data, classes=None, ratio=(1,1,1), return_ind=False, rng=np.random.RandomState(1000)):\n if classes is None:\n num_samples=data[0].shape[0]\n 
classes=np.zeros(shape=(num_samples,),dtype=int)\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n num_views=len(data)\n train_set_x=[None]*num_views\n valid_set_x=[None]*num_views\n test_set_x=[None]*num_views\n for v in range(num_views):\n train_set_x[v]=data[v][train_ind,:]\n train_set_y=classes[train_ind]\n valid_set_x[v]=data[v][valid_ind,:]\n valid_set_y=classes[valid_ind]\n test_set_x[v]=data[v][test_ind,:]\n test_set_y=classes[test_ind]\n if return_ind:\n return train_set_x,train_set_y,train_ind,valid_set_x,valid_set_y,valid_ind,test_set_x,test_set_y,test_ind\n else:\n return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 16\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints + 1.0\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def train_dynamic(batch_size=10):\n \n return", "def get_train_batches(data_dir='/home/yunhan/batchified'):\n # todo: read in data that is preoprocessed\n # Use batch 1 - 52 as train (60%), 53 - 71 as validation (20%), 72 - 89 as test (20%)\n n = 53\n idx = np.random.permutation(n)\n idx = idx + 1\n for i in range(n):\n X = np.load(\"%s/X%d.npy\" % (data_dir, idx[i]))/255.\n Y = np.load(\"%s/y%d.npy\" % (data_dir, idx[i])).reshape(-1)\n yield X, Y", "def create_cub200_task_distribution(path_to_pkl,\n num_training_samples_per_class=10,\n num_test_samples_per_class=15,\n num_training_classes=20,\n meta_batch_size=5):\n\n global cub200_trainX\n global cub200_trainY\n\n global cub200_valX\n global cub200_valY\n\n global cub200_testX\n global cub200_testY\n\n\n with open(path_to_pkl, 'rb') as f:\n d = pickle.load(f)\n cub200_X, cub200_Y = d['dataset']\n\n cub200_X = cub200_X.astype(np.float32) / 255.0\n cub200_X = (cub200_X - np.asarray((0.4914, 0.4822, 0.4465))) / np.asarray((0.2023, 0.1994, 0.2010))\n\n #\n # TODO\n # random horiz flip + normalize by: \n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)) (mean, std)\n\n\n\n #np.random.seed(0)\n # TODO: shuffle allocation of class indices to train/val/test\n num_train = 100\n num_val = 50\n num_test = 50\n\n classes = list(set(cub200_Y))\n train_classes = classes[:num_train]\n val_classes = classes[num_train:(num_train+num_val)]\n test_classes = 
classes[(num_train+num_val):]\n\n train_indices = []\n val_indices = []\n test_indices = []\n\n for i in range(len(cub200_Y)):\n if cub200_Y[i] in train_classes:\n train_indices.append(i)\n elif cub200_Y[i] in val_classes:\n val_indices.append(i)\n elif cub200_Y[i] in test_classes:\n test_indices.append(i)\n\n cub200_trainX = cub200_X[train_indices]\n cub200_trainY = cub200_Y[train_indices]\n\n cub200_valX = cub200_X[val_indices]\n cub200_valY = cub200_Y[val_indices]\n\n cub200_testX = cub200_X[test_indices]\n cub200_testY = cub200_Y[test_indices]\n\n\n train_tasks_list = [ClassificationTask(cub200_trainX,\n cub200_trainY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n # TODO: NOTE: HACK -- validation and test tasks use a fixed number of test-set samples, instead of the supplied\n # ones. This is because in MAML/FOMAML the test set is used to compute the meta-gradient, and a small number of\n # samples is used (in the philosophy of few-shot learning, where only few samples are available).\n # However, in this case we wish to use a few more test-samples to better estimate the accuracy of the model on the validation\n # and test tasks!\n num_test_samples_per_class = 20\n validation_tasks_list = [ClassificationTask(cub200_valX,\n cub200_valY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n test_tasks_list = [ClassificationTask(cub200_valX,\n cub200_valY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n metatrain_task_distribution = TaskDistribution(tasks=train_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n metaval_task_distribution = TaskDistribution(tasks=validation_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n metatest_task_distribution = TaskDistribution(tasks=test_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n return metatrain_task_distribution, metaval_task_distribution, metatest_task_distribution", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if 
stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def split_datasets(img_lst):\n num = len(img_lst)\n\n idx = np.random.permutation(num)\n train_lst = np.array(img_lst)[idx[:int(num * .8)]] # 80/20 split\n validation_lst = np.array(img_lst)[idx[int(num * .8):int(num * .9)]]\n test_lst = np.array(img_lst)[idx[int(num * .9):]]\n return train_lst, validation_lst, test_lst", "def train_test_split(collection):\r\n num_docs_train = int(training_docs * BATCH_SIZE)\r\n train_corpus = collection[:num_docs_train]\r\n test_corpus = collection[num_docs_train:]\r\n return num_docs_train, train_corpus, test_corpus", "def train_valid_index_split(all_index, train_size = None, valid_split = 0.3):\n\tall_index = np.arange(all_index) if isinstance(all_index, int) else np.array(all_index)\n\ttrain_size = len(all_index) if train_size is None else train_size\n\ttrain_index_ = np.random.choice(all_index, train_size, replace = False)\n\ttrain_index, valid_index = np.split(train_index_, [int(train_size*(1-valid_split))])\n\treturn train_index, valid_index", "def make_checkerboard_training_set(num_points=0, noise=0.0, randomize=True,\n x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0):\n log.out.info(\"Generating target data.\")\n # Select coordinates to do an XOR like operation on\n coords = []\n bools = []\n if randomize:\n for i in range(num_points):\n # Add num_points randomly\n coord_point = np.random.random(2)\n coord_point[0] = coord_point[0] * (x_max - x_min) + x_min\n coord_point[1] = coord_point[1] * (y_max - y_min) + y_min\n coords.append(coord_point)\n else:\n x_points = np.linspace(x_min, x_max, int(np.sqrt(num_points)))\n y_points = np.linspace(y_min, y_max, int(np.sqrt(num_points)))\n for i in range(int(np.sqrt(num_points))):\n for j in range(int(np.sqrt(num_points))):\n # Add num_points randomly\n coord_point = [x_points[i], y_points[j]]\n coords.append(coord_point)\n # Assign an xor boolean value to the coordinates\n for coord_point in coords:\n bool_point = np.array([np.round(coord_point[0]) % 2, np.round(coord_point[1]) % 2]).astype(bool)\n bools.append(np.logical_xor(bool_point[0], bool_point[1]))\n # If noisy then bit flip\n if noise > 0.0:\n for i in enumerate(bools):\n if np.random.random() < noise:\n bools[i] = np.logical_not(bools[i])\n # Build training vectors\n train_in = None\n train_out = None\n for i, coord in enumerate(coords):\n # Need to initialize the arrays\n if i == 0:\n train_in = np.array([coord])\n train_out = np.array([[bools[i]]])\n else:\n train_in = np.append(train_in, np.array([coord]), axis=0)\n train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)\n\n train_out = train_out.T\n return train_in, train_out", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] 
= [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def args_train_test_val_split(args, random_state=42, train_min_class_count=5):\n\n # split into single-sample classes and others.\n args_clean, args_singles = clean_dataset(args, min_class_count=2)\n classes_clean = [a.frame_id for a in args_clean]\n\n # split other in train,test and val set in a stratified fashion.\n args_train, args_test = train_test_split(args_clean, test_size=0.2, shuffle=True, stratify=classes_clean,\n random_state=random_state)\n classes_train = [a.frame_id for a in args_train]\n args_train, args_val = train_test_split(args_train, test_size=0.2, shuffle=True, stratify=classes_train,\n random_state=random_state + 70)\n\n # randomly split the 'singles' into partitions train,test val. disregard them for train set.\n args_train_singles, args_test_singles = train_test_split(args_singles, test_size=0.2, shuffle=True,\n random_state=random_state + 42)\n _, args_val_singles = train_test_split(args_train_singles, test_size=0.2, shuffle=True,\n random_state=random_state + 20)\n\n args_train, _ = clean_dataset(args_train, min_class_count=train_min_class_count)\n\n # add singles back to test and val set.\n args_test.extend(args_test_singles)\n args_val.extend(args_val_singles)\n\n return args_train, args_test, args_val", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. 
indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def get_batch(batch_size,s=\"train\"):\n\n if s == 'train':\n X = Xtrain # X training input\n categories = train_classes # y categories\n else:\n X = Xval # X validation input\n categories = val_classes # y categories\n\n n_classes, n_examples, w, h = X.shape[0], X.shape[1], X.shape[2], X.shape[3]\n\n # randomly sample several classes to use in the batch of size n\n categories = rng.choice(n_classes,size=(batch_size,),replace=False)\n \n # initialize 2 empty arrays for the input image batch\n pairs=[np.zeros((batch_size, h, w,1)) for i in range(2)]\n \n # initialize vector for the targets\n targets=np.zeros((batch_size,))\n \n # one half of is full of '1's and 2nd half of batch has same class\n\n targets[batch_size//2:] = 1\n for i in range(batch_size):\n category = categories[i]\n idx_1 = rng.randint(0, n_examples)\n pairs[0][i,:,:,:] = X[category, idx_1].reshape(w, h, 1)\n idx_2 = rng.randint(0, n_examples)\n \n # pick images of same class for 1st half, different for 2nd\n if i >= batch_size // 2:\n category_2 = category \n else: \n # add a random number to the category modulo n classes to ensure 2nd image has a different category\n category_2 = (category + rng.randint(1,n_classes)) % n_classes\n \n pairs[1][i,:,:,:] = X[category_2,idx_2].reshape(w, h,1)\n\n \n return pairs, targets", "def create_sets(test, data, test_size=0.2, write=False):\n y_test = test['y_old']\n X_test = test.drop('y_old', 1)\n y_data = data['y_old']\n X_data = data.drop('y_old', 1)\n X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=test_size, random_state=123)\n if write:\n pickle.dump((X_train, X_val, y_train, y_val), open(obj_save_path+'train_val_df.p', 'wb'))\n #X_train, X_val, y_train, y_val = pickle.load(open(obj_save_path+'train_val_df.p', 'rb'))\n return X_train, y_train, X_val, y_val, X_test, y_test", "def get_batch_random(batch_size,s=\"train\"):\n if s == 'train':\n X = dataset_train\n else:\n X = dataset_test\n\n m, w, h,c = X[0].shape\n\n nb_class = nb_classes-1\n\n # initialize result\n triplets=[np.zeros((batch_size,h, w,c)) for i in range(3)]\n\n for i in range(batch_size):\n #Pick one random class for anchor\n anchor_class = np.random.randint(0, nb_class)\n nb_sample_available_for_class_AP = X[anchor_class].shape[0]\n\n #Pick two different random pics for this class => A and P\n [idx_A,idx_P] = np.random.choice(nb_sample_available_for_class_AP,size=2,replace=False)\n\n #Pick another class for N, different from anchor_class\n negative_class = (anchor_class + np.random.randint(1,nb_class)) % nb_class\n nb_sample_available_for_class_N = X[negative_class].shape[0]\n\n #Pick a random pic for this negative class => N\n idx_N = np.random.randint(0, nb_sample_available_for_class_N)\n\n triplets[0][i,:,:,:] = X[anchor_class][idx_A,:,:,:]\n triplets[1][i,:,:,:] = X[anchor_class][idx_P,:,:,:]\n triplets[2][i,:,:,:] = X[negative_class][idx_N,:,:,:]\n\n return triplets", "def 
split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def split_dataset(data_set, train_size, test_size):\n # Generate random indices without replacement, to make train and test sets disjoint\n rand_indices = np.random.choice(data_set.shape[0], train_size+test_size, replace=False)\n feature_end = data_set.shape[1] - 1\n output_location = feature_end\n feature_offset = var.ALGORITHM_INFO['feature_offset']\n\n # Define the training and testing matrices\n x_train = data_set[rand_indices[0:train_size], feature_offset:feature_end]\n y_train = data_set[rand_indices[0:train_size], output_location]\n x_test = data_set[rand_indices[train_size:train_size+test_size], feature_offset:feature_end]\n y_test = data_set[rand_indices[train_size:train_size+test_size], output_location]\n favorite_test = data_set[rand_indices[train_size:train_size+test_size], 0]\n\n # Normalize features, with maximum 
value in training set\n # as realistically, this would be the only possibility\n\n for ii in range(x_train.shape[1]):\n maxval = np.max(np.abs(x_train[:, ii]))\n if maxval > 0:\n x_train[:, ii] = np.divide(x_train[:, ii], maxval)\n x_test[:, ii] = np.divide(x_test[:, ii], maxval)\n\n\n # Add a column of ones; done after to avoid modifying entire data_set\n x_train = np.hstack((x_train, np.ones((x_train.shape[0], 1))))\n x_test = np.hstack((x_test, np.ones((x_test.shape[0], 1))))\n\n return (x_train, y_train), (x_test, y_test), favorite_test", "def create_split_loaders(root_dir, batch_size, seed=0, transform=transforms.ToTensor(),\n p_val=0.1, p_test=0.2, shuffle=True, \n show_sample=False, extras={}):\n \n\n # once all single json datasets are created you can concat them into a single one:\n quickdraw_dataset = CharacterDataset(root_dir=root_dir, transform=transform)\n \n # Dimensions and indices of training set\n dataset_size = len(quickdraw_dataset)\n all_indices = list(range(dataset_size))\n\n # Shuffle dataset before dividing into training & test sets\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(all_indices)\n \n # Create the validation split from the full dataset\n val_split = int(np.floor(p_val * dataset_size))\n train_ind, val_ind = all_indices[val_split :], all_indices[: val_split]\n \n # Separate a test split from the training dataset\n test_split = int(np.floor(p_test * len(train_ind)))\n train_ind, test_ind = train_ind[test_split :], train_ind[: test_split]\n print(len(train_ind), len(val_ind), len(test_ind))\n # Use the SubsetRandomSampler as the iterator for each subset\n sample_train = SubsetRandomSampler(train_ind)\n sample_test = SubsetRandomSampler(test_ind)\n sample_val = SubsetRandomSampler(val_ind)\n\n num_workers = 0\n pin_memory = False\n # If CUDA is available\n if extras:\n num_workers = extras[\"num_workers\"]\n pin_memory = extras[\"pin_memory\"]\n \n # Define the training, test, & validation DataLoaders\n train_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_train, num_workers=num_workers, \n pin_memory=pin_memory)\n\n test_loader = DataLoader(quickdraw_dataset, batch_size=batch_size, \n sampler=sample_test, num_workers=num_workers, \n pin_memory=pin_memory)\n\n val_loader = DataLoader(quickdraw_dataset, batch_size=batch_size,\n sampler=sample_val, num_workers=num_workers, \n pin_memory=pin_memory)\n\n \n # Return the training, validation, test DataLoader objects\n return (train_loader, val_loader, test_loader)", "def build_splits(dataset, train_size, valid_size, by=['context_id'], seed=17):\n if isinstance(seed, RandomState):\n rng = seed\n else:\n rng = RandomState(seed)\n\n groups = dataset.groupby(by).groups\n context_ids = groups.keys()\n\n train_ids, other_ids = sklearn.cross_validation.train_test_split(\n context_ids, train_size=train_size, random_state=rng)\n valid_ids, test_ids = sklearn.cross_validation.train_test_split(\n other_ids, train_size=valid_size, random_state=rng)\n\n train_idx = context_id_to_idx(train_ids, groups)\n valid_idx = context_id_to_idx(valid_ids, groups)\n test_idx = context_id_to_idx(test_ids, groups)\n\n return dataset.ix[train_idx, :], dataset.ix[valid_idx, :], dataset.ix[test_idx, :]", "def test_train_split_per_value():\n shape = (1000, 1000, 3)\n\n input1 = np.random.randint(10, size=shape, dtype=int)\n input2 = np.random.randint(10, size=shape, dtype=int)\n\n patch1 = EOPatch()\n patch1[INPUT_MASK_FEATURE] = input1\n\n patch2 = EOPatch()\n patch2[INPUT_MASK_FEATURE] = input2\n\n 
bins = [0.2, 0.6]\n\n split_task = TrainTestSplitTask((*INPUT_MASK_FEATURE, NEW_FEATURE_NAME), bins, split_type='per_value')\n\n # seeds should get ignored when splitting 'per_value'\n patch1 = split_task(patch1, seed=1)\n patch2 = split_task(patch2, seed=1)\n\n otuput1 = patch1[NEW_MASK_FEATURE]\n otuput2 = patch2[NEW_MASK_FEATURE]\n\n unique = set(np.unique(input1)) | set(np.unique(input2))\n\n for uniq in unique:\n folds1 = otuput1[input1 == uniq]\n folds2 = otuput2[input2 == uniq]\n assert_array_equal(np.unique(folds1), np.unique(folds2))", "def split(self, X):\n # Make sure it's a sparse array...\n X = check_sparse_array(X)\n\n # Use np.linspace to evenly partition the space between 0 and 1 into\n # k + 1 pieces so we can use them as \"training_sizes\"\n train_sizes = np.linspace(0, 1, self.n_splits + 1)\n\n # We use a series of \"permuted values\" to mask out the training/testing\n # folds.\n random_state = check_random_state(self.random_state)\n values = _get_train_mask_linspace(X.nnz, random_state,\n shuffle=self.shuffle)\n\n # Iterate the fold space bounds in a generator, returning train/test\n for lower, upper in zip(train_sizes[:-1], train_sizes[1:]):\n test, train = _split_between_values(X, values, lower, upper)\n yield train, test", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n 
shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n # Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n indices=np.arange(num_samples)\n #indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n indices=np.arange(num_samples)\n 
#indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def split(self, test_size=0.25, random_state=None):\n self.train_index, self.test_index = ms.train_test_split(\n self.data.index, test_size=test_size, random_state=random_state)", "def make_data_splits(samples, params, RESULTSDIR, num_experiments):\n # TODO: Switch to .mat from .pickle so that these lists are easier to read\n # and change.\n\n partition = {}\n if params[\"load_valid\"] is None:\n # Set random seed if included in params\n if params[\"data_split_seed\"] is not None:\n np.random.seed(params[\"data_split_seed\"])\n\n all_inds = np.arange(len(samples))\n\n # extract random inds from each set for validation\n v = params[\"num_validation_per_exp\"]\n valid_inds = []\n if params[\"valid_exp\"] is not None and params[\"num_validation_per_exp\"] > 0:\n all_valid_inds = []\n for e in params[\"valid_exp\"]:\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n all_valid_inds = all_valid_inds + tinds\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = list(set(all_inds) - set(all_valid_inds))#[i for i in all_inds if i not in all_valid_inds]\n elif params[\"num_validation_per_exp\"] > 0: # if 0, do not perform validation\n for e in range(num_experiments):\n tinds = [\n i for i in range(len(samples)) if int(samples[i].split(\"_\")[0]) == e\n ]\n valid_inds = valid_inds + list(\n np.random.choice(tinds, (v,), replace=False)\n )\n valid_inds = list(np.sort(valid_inds))\n\n train_inds = [i for i in all_inds if i not in valid_inds]\n elif params[\"valid_exp\"] is not None:\n raise Exception(\"Need to set num_validation_per_exp in using valid_exp\")\n else:\n train_inds = all_inds\n\n assert (set(valid_inds) & set(train_inds)) == set()\n\n train_samples = samples[train_inds]\n train_inds = []\n if params[\"valid_exp\"] is not None:\n train_expts = [f for f in range(num_experiments) if f not in params[\"valid_exp\"]]\n else:\n train_expts = np.arange(num_experiments)\n\n print(\"TRAIN EXPTS: {}\".format(train_expts))\n\n if params[\"num_train_per_exp\"] is not None:\n # Then sample randomly without replacement from training sampleIDs\n for e in train_expts:\n tinds = [\n i for i in 
range(len(train_samples)) if int(train_samples[i].split(\"_\")[0]) == e\n ]\n print(e)\n print(len(tinds))\n train_inds = train_inds + list(\n np.random.choice(tinds, (params[\"num_train_per_exp\"],), replace=False)\n )\n train_inds = list(np.sort(train_inds))\n else:\n train_inds = np.arange(len(train_samples))\n\n \n\n partition[\"valid_sampleIDs\"] = samples[valid_inds]\n partition[\"train_sampleIDs\"] = train_samples[train_inds]\n\n # Save train/val inds\n with open(os.path.join(RESULTSDIR, \"val_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"valid_sampleIDs\"], f)\n\n with open(os.path.join(RESULTSDIR, \"train_samples.pickle\"), \"wb\") as f:\n cPickle.dump(partition[\"train_sampleIDs\"], f)\n else:\n # Load validation samples from elsewhere\n with open(os.path.join(params[\"load_valid\"], \"val_samples.pickle\"), \"rb\",) as f:\n partition[\"valid_sampleIDs\"] = cPickle.load(f)\n partition[\"train_sampleIDs\"] = [\n f for f in samples if f not in partition[\"valid_sampleIDs\"]\n ]\n\n # Reset any seeding so that future batch shuffling, etc. are not tied to this seed\n if params[\"data_split_seed\"] is not None:\n np.random.seed()\n\n return partition", "def setup_distributed(self, urls, world_rank, world_size):\n assert len(urls) == world_size\n tf_config = {\n \"cluster\": {\n \"worker\": urls\n },\n \"task\": {\n \"index\": world_rank,\n \"type\": \"worker\"\n }\n }\n os.environ[\"TF_CONFIG\"] = json.dumps(tf_config)\n\n MultiWorkerMirroredStrategy = _try_import_strategy()\n\n # MultiWorkerMirroredStrategy handles everything for us, from\n # sharding the dataset (or even sharding the data itself if the loader\n # reads files from disk) to merging the metrics and weight updates\n #\n # worker 0 is the \"chief\" worker and will handle the map-reduce\n # every worker ends up with the exact same metrics and model\n # after model.fit\n #\n # because of this, we only really ever need to query its state\n self.strategy = MultiWorkerMirroredStrategy()\n\n self.train_dataset, self.test_dataset = self.data_creator(self.config)\n\n logger.debug(\"Creating model with MultiWorkerMirroredStrategy\")\n with self.strategy.scope():\n self.model = self.model_creator(self.config)\n\n # For use in model.evaluate()\n self.local_model = None", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = 
round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])", "def train_mini_batch(X_train, Y_train, X_valid,\n Y_valid, batch_size=32,\n epochs=5, load_path=\"/tmp/model.ckpt\",\n save_path=\"/tmp/model.ckpt\"):\n init = tf.global_variables_initializer()\n m = X_train.shape[0]\n batches = m / batch_size\n if batches % 1 != 0:\n batches = int(batches + 1)\n else:\n batches = int(batches)\n with tf.Session() as sess:\n sess.run(init)\n saver = tf.train.import_meta_graph(\"{}.meta\".format(load_path))\n saver.restore(sess, load_path)\n x = tf.get_collection('x')[0]\n y = tf.get_collection('y')[0]\n train_op = tf.get_collection('train_op')[0]\n loss = tf.get_collection('loss')[0]\n accuracy = tf.get_collection('accuracy')[0]\n for ep in range(epochs+1):\n train_loss, train_accuracy = sess.run((loss, accuracy),\n {x: X_train, y: Y_train})\n valid_loss, valid_accuracy = sess.run((loss, accuracy),\n {x: X_valid, y: Y_valid})\n print(\"After {} epochs:\".format(ep))\n print(\"\\tTraining Cost: {}\".format(train_loss))\n print(\"\\tTraining Accuracy: {}\".format(train_accuracy))\n print(\"\\tValidation Cost: {}\".format(valid_loss))\n print(\"\\tValidation Accuracy: {}\".format(valid_accuracy))\n if ep < epochs:\n X_shuffled, Y_shuffled = shuffle_data(X_train, Y_train)\n for b in range(batches):\n start = b * batch_size\n end = start + batch_size\n if end > m:\n end = m\n X_batch = X_shuffled[start:end]\n Y_batch = Y_shuffled[start:end]\n sess.run((train_op), {x: X_batch,\n y: Y_batch})\n batch_cost, batch_accuracy = sess.run((loss, accuracy),\n {x: X_batch,\n y: Y_batch})\n if (b + 1) % 100 == 0 and b > 0:\n print(\"\\tStep {}:\".format(b + 1))\n print(\"\\t\\tCost: {}\".format(batch_cost))\n print(\"\\t\\tAccuracy: {}\".format(batch_accuracy))\n return saver.save(sess, save_path)", "def train_test_split(X,Y,test_size=None,seed=5):\n\tassert test_size!=None, \"test_size cannot be None\"\n\tnp.random.seed(seed)\n\tindexes = np.random.choice([False,True],size=len(X),p=[test_size,1-test_size])\n\treturn X[indexes],X[~indexes],Y[indexes],Y[~indexes]", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def combine_datasources(dset, dset_extra, valid_size=0, shuffle=True, random_seed=2019,\n maxsize=None, device='cpu'):\n if shuffle == True and random_seed:\n np.random.seed(random_seed)\n\n ## Convert both to TensorDataset\n if isinstance(dset, torch.utils.data.DataLoader):\n dataloader_args = {k:getattr(dset, k) for k in ['batch_size', 'num_workers']}\n X, Y = load_full_dataset(dset, targets=True, device=device)\n d = int(np.sqrt(X.shape[1]))\n X = X.reshape(-1, 1, d, d)\n dset = torch.utils.data.TensorDataset(X, Y)\n logger.info(f'Main data size. 
X: {X.shape}, Y: {Y.shape}')\n elif isinstance(dset, torch.utils.data.Dataset):\n raise NotImplemented('Error: combine_datasources cant take Datasets yet.')\n\n merged_dset = torch.utils.data.ConcatDataset([dset, dset_extra])\n train_idx, valid_idx = random_index_split(len(dset), 1-valid_size, (maxsize, None)) # No maxsize for validation\n train_idx = np.concatenate([train_idx, np.arange(len(dset_extra)) + len(dset)])\n\n if shuffle:\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n else:\n train_sampler = SubsetSampler(train_idx)\n valid_sampler = SubsetSampler(valid_idx)\n\n train_loader_ext = dataloader.DataLoader(merged_dset, sampler = train_sampler, **dataloader_args)\n valid_loader_ext = dataloader.DataLoader(merged_dset, sampler = valid_sampler, **dataloader_args)\n\n logger.info(f'Fold Sizes: {len(train_idx)}/{len(valid_idx)} (train/valid)')\n\n return train_loader_ext, valid_loader_ext", "def prepare_batches(self, pairs, batch_size):\n\t\treturn MATHBatch.create_from_items(pairs, batch_size)", "def supervised_random_mini_batches(X, Y, mini_batch_size, seed):\n\n np.random.seed(seed)\n m = X.shape[0] #number of examples in set\n n_classes = Y.shape[1]\n mini_batches=[]\n\n permutation = list(np.random.permutation(m))\n \n shuffled_X = X[permutation,:]\n shuffled_Y = Y[permutation,:]\n #partition of (shuffled_X, shuffled_Y) except the last mini_batch\n \n num_complete_mini_batches = math.floor(m/mini_batch_size)\n for k in range(num_complete_mini_batches):\n mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size,:]\n mini_batch_Y = shuffled_Y[k*mini_batch_size:(k+1)*mini_batch_size,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # handling the case of last mini_batch < mini_batch_size \n if m % mini_batch_size !=0:\n \n mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m,:]\n mini_batch_Y = shuffled_Y[mini_batch_size*num_complete_mini_batches:m,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def _split_train_test_sets(self, sessions):\n # For every session find out whether there is a document at the first position that also occurs in other positions\n test_sets = []\n train_sets = []\n for s_idx, session in enumerate(sessions):\n pos_1 = session.web_results[0].id\n found_in_other_test_set = False\n\n #Check whether session is already in a test set.\n for test in test_sets:\n if pos_1 == test[0].web_results[0].id:\n found_in_other_test_set = True\n if found_in_other_test_set:\n break\n \n #If not already in a test set create a new test/train pair\n test = [session]\n train = []\n \n for session_2 in sessions[:s_idx] + sessions[s_idx+1:]:\n\n #Add session to test set if they have same doc in pos 1\n if pos_1 == session_2.web_results[0].id:\n test.append(session_2)\n #Add session to train set if it is in another place than the first\n elif pos_1 in [result.id for result in session_2.web_results[1:]]:\n train.append(session_2)\n \n #Only add if there is both a test and train set.\n if test and train:\n test_sets.append(test)\n train_sets.append(train)\n return train_sets, test_sets", "def _create_tf_datasets(self, split, batch_size):\n out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)\n # out_shapes has an additional batch dim (None) and 3 or 1 scenes.\n out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),\n (None, None), (None,))\n self.iterator = 
tf.data.Iterator.from_structure(out_types, out_shapes)\n for p in [\"train\", \"validation\", \"test\"]:\n # generator factory throws if there's no validation data\n try:\n self.generators[p] = self.generator_factory.scene_desc_generator(split, p)\n except ValueError:\n continue\n out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])\n d = tf.data.Dataset.from_generator(self.generators[p], out_types,\n out_shapes)\n d = d.batch(batch_size if p == \"train\" else 1)\n # d = d.prefetch(3)\n self.iterator_init_ops[p] = self.iterator.make_initializer(d)", "def train_test_split(X, y, test_size=0.33, random_state=None, shuffle=True):\n\n copyX = copy.deepcopy(X)\n copyY = copy.deepcopy(y)\n if random_state is not None:\n # TODO: seed your random number generator\n #Seed random number generator\n np.random.seed(random_state)\n \n if shuffle: \n # TODO: shuffle the rows in X and y before splitting\n # be sure to maintain the parallel order of X and y!!\n # note: the unit test for train_test_split() does not test\n # your use of random_state or shuffle, but you should still \n # implement this and check your work yourself\n copyX, copyY = myutils.randomize_in_place(copyX,copyY)\n\n #Define Variables\n X_train = []\n X_test = []\n y_train = []\n y_test = []\n prop_sum = 0.0\n numTest = 0\n proportion = 1.0/float(len(X))\n\n #Determine how many values to put in test set\n while(prop_sum < test_size):\n numTest = numTest + 1\n prop_sum = prop_sum + proportion\n \n #Put values in train/test sets\n for i in range(len(X)):\n if(test_size>=1):\n if(i<=len(X)-1-test_size):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n else:\n if(i<=len(X)-1-numTest):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n\n return X_train, X_test, y_train, y_test", "def split(self, X=None, y=None, groups=None):\n\n for train_index in [0,1]:\n train_indices=np.where(self.test_fold==train_index)[0]\n test_indices=np.where(self.test_fold==(train_index+1)%2)[0]\n if self.shuffle:\n self.rng.shuffle(train_indices)\n self.rng.shuffle(test_indices)\n yield train_indices, test_indices" ]
[ "0.6583123", "0.639782", "0.63896275", "0.63545656", "0.63532937", "0.6268737", "0.623224", "0.6219766", "0.62126184", "0.62075806", "0.6200337", "0.6198661", "0.61894786", "0.61629933", "0.61584157", "0.6099286", "0.6081113", "0.607283", "0.60670036", "0.6061789", "0.6061682", "0.60613525", "0.6060258", "0.60583496", "0.6055331", "0.60547304", "0.6044575", "0.6043865", "0.60421616", "0.6007276", "0.59900564", "0.5989778", "0.5985602", "0.5977673", "0.5976196", "0.5973935", "0.5969543", "0.596197", "0.5953672", "0.5953549", "0.59410435", "0.5920636", "0.59075046", "0.5902249", "0.59013355", "0.58958596", "0.58812845", "0.5874244", "0.5869148", "0.5863271", "0.58552754", "0.58520484", "0.5844839", "0.5839655", "0.58254474", "0.58222586", "0.581431", "0.5810205", "0.5808163", "0.58075213", "0.5805554", "0.5801837", "0.5793256", "0.57895845", "0.5788077", "0.5788077", "0.57878876", "0.5785966", "0.5781563", "0.57706285", "0.5769294", "0.57687575", "0.57665145", "0.57653254", "0.57652336", "0.575489", "0.5754116", "0.5748067", "0.5746732", "0.57406443", "0.5740361", "0.573906", "0.5736736", "0.57357126", "0.57357126", "0.57318753", "0.5729674", "0.5729066", "0.5729037", "0.5728252", "0.5728247", "0.5726732", "0.5726623", "0.5721593", "0.5709003", "0.570749", "0.5704525", "0.57027227", "0.570085", "0.5695536", "0.5695366" ]
0.0
-1
Load traces in an interval (in seconds).
def select_traces(traces, interval, sample_rate=None):
    start, end = interval
    i, j = round(sample_rate * start), round(sample_rate * end)
    i, j = int(i), int(j)
    traces = traces[i:j]
    traces = traces - np.median(traces, axis=0)
    return traces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, filepath=''):\n sleep(20)\n pass", "def load_traces(self, filename):\n\n self.traces = self.load(filename)", "def delay(interval):\n time.sleep(interval / 1000.0)", "def interval(ctx, poll_interval):\n fc_info = {}\n fc_info['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_TRAP\", fc_info)", "def setInterval(self, interval):\n self._interval = interval", "def setInterval(self, interval):\n self._interval = interval", "def start(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].start()\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def build_traces_from_files(trace_files, traces_lst, min_length, agg_window):\n for trace_file in trace_files:\n trace_df = pd.read_csv(trace_file)\n order = trace_df[specs.START_INTERVAL_COL].sort_values().index\n trace_df = trace_df.loc[order]\n if len(trace_df) >= min_length:\n traces_lst.append(Trace.from_raw_trace_data(trace_df, agg_window))", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def __init__(self,interval):\n _interval = interval", "def interval(ctx, poll_interval):\n fc_info = {}\n fc_info['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", \"FLOW_CNT_ROUTE\", fc_info)", "def load_frog_trace(self, filename, thr=0.0,\n l_start_pixel=0, l_stop_pixel=-1, t_start_pixel=0, t_stop_pixel=-1,\n filter_img=True, transpose=False):\n f_name_root = '_'.join((filename.split('_')[:-1]))\n logger.debug(f_name_root)\n t_data = np.loadtxt(''.join((f_name_root, '_timevector.txt')))\n t_data = t_data - t_data.mean()\n l_data = np.loadtxt(''.join((f_name_root, '_wavelengthvector.txt')))\n if l_data[0] > 1:\n l_data = l_data * 1e-9\n pic = np.float32(imread(''.join((f_name_root, '_image.png'))))\n if transpose is True:\n pic = pic.transpose()\n pic_n = pic / pic.max()\n\n if t_stop_pixel == -1:\n t_stop_pixel = pic_n.shape[0] - 1\n if l_stop_pixel == -1:\n l_stop_pixel = pic_n.shape[1] - 1\n\n if filter_img is True:\n picF = self.filter_frog_trace(pic_n, 3, thr)\n else:\n picF = pic_n.copy() - thr\n picF[picF < 0.0] = 0.0\n\n # self.condition_frog_trace(picF[t_start_pixel:t_stop_pixel, l_start_pixel:l_stop_pixel],\n # l_data[l_start_pixel], l_data[l_stop_pixel], t_data[t_start_pixel],\n # t_data[t_stop_pixel], self.Et.shape[0], thr, False)\n\n self.condition_frog_trace(picF[l_start_pixel:l_stop_pixel, t_start_pixel:t_stop_pixel],\n l_data[l_start_pixel], l_data[l_stop_pixel], t_data[t_start_pixel],\n t_data[t_stop_pixel], self.Et.shape[0], thr, False)", "def getHourlyLoads(self):\n\n\t\tloads_data = self.getDataForLoadComparisons()\n\t\tload_values = [] # Array that will contain all the load data\n\t\tload_data = {} # Dictionary of load data\n\t\thour = 0 # Counter that determines the 24 hours in a day\n\n\t\t# Parsing load data\n\t\ttoday = self.helper.getMonth() + \"/\" + self.helper.getDay() + \"/\" + self.helper.getYear()\n\t\tfor data in loads_data[0]['values']:\t\t\t\n\t\t\tif data[\"label\"] == \"12:00 AM\":\n\t\t\t\tdata[\"label\"] = \" 00:00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"AM\":\n\n\t\t\t\thour = int(data[\"label\"].split(\":\")[0])\n\t\t\t\tif hour < 10:\n\t\t\t\t\tdata[\"label\"] = \" 0\" + str(hour) + \":00\"\n\t\t\t\telse:\n\t\t\t\t\tdata[\"label\"] = str(hour) + \":00\"\n\t\t\telif data[\"label\"].split(\" \")[1] == \"PM\":\n\t\t\t\tif 
data[\"label\"] == \"12:00 PM\":\n\t\t\t\t\tdata[\"label\"] = \" 12:00\"\n\t\t\t\telse:\n\t\t\t\t\thour = int(data[\"label\"].split(\":\")[0])\n\t\t\t\t\thour += 12\n\t\t\t\t\tdata[\"label\"] = \" \" + str(hour) + \":00\"\n\t\t\tload_data[\"x\"] = self.helper.getDateInEpoch(today + \" \" + data[\"label\"])\n\t\t\tload_data[\"y\"] = float(data[\"value\"])\n\t\t\tload_values.append(load_data)\n\t\t\tload_data = {}\n\n\t\treturn load_values", "def load_traces(dir, limit=None):\n games = []\n for i, fn in enumerate(os.listdir(dir)):\n if limit is not None and i == limit: break\n\n f = open(os.path.join(dir, fn), 'rb')\n seed, trace = pickle.load(f)\n f.close()\n games.append(trace)\n\n return games", "def step(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].step()\n # Restart the timer\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n if poll_interval is not None:\n rif_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)", "def traffic_livestats(self, **kwargs):\n url_path = 'traffic/livestats'\n self.logger.debug(\"Get live stats report data\")\n kwargs['granularity'] = self.GRANULARITY_ONE_MINUTE\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def set_progress_update_interval(self, update_interval):\r\n\r\n pass", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def changeBatchLogInterval(self,interval):\n self.batch_log_interval = interval", "def sleep_to_flush_spans(*args, **kwargs):\n if settings.LOGSTASH_ENABLE:\n time.sleep(5)", "def load_segment(self):\n \n data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)\n \n # Check cycle length against 5 minute duration minimum\n cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()\n self.data = data\n \n diff = data.index.to_series().diff()[1:2]\n s_freq = 1000000/diff[0].microseconds\n\n self.metadata['file_info']['start_time'] = str(data.index[0])\n self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}\n self.s_freq = s_freq\n\n print('EEG successfully imported.')", "def set_update_interval (self, interval):\n\t\tif self.__timeout:\n\t\t\tgobject.source_remove(self.__timeout)\n #print \"update interval : %s min\" % interval\n\t\tself.__timeout = gobject.timeout_add(interval*60*1000, self.update)", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n if poll_interval:\n port_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def process_traces(subdirs,dates,load_path):\n\n N = 60*60*24*len(dates)*10\n\n firing_rates_storage = np.zeros((N))\n var_storage = np.zeros((N))\n position_storage = np.zeros((N,2))\n firing_rates_storage[:] = np.nan\n var_storage[:] = np.nan\n timestamps = np.zeros((N))\n clusters = np.zeros((N))\n pk_max = 0\n n=0\n\n for subdir,date in zip(subdirs,dates):\n \n dpk = pk_max \n path = load_path+'/%s/'%subdir\n file = [i for i in os.listdir(path) if '.pkl' in i] \n \n if len(file) == 0:\n continue\n \n pd_ob = pkl.load(open(path+file[0],'rb'))\n \n positions = pd_ob['positions']\n sts = pd_ob['sts']\n isis = pd_ob['isis']\n fsts = pd_ob['fsts']\n fisis = pd_ob['fisis']\n et = pd_ob['et']\n ep = pd_ob['ep']\n \n max_time = 0\n for k,v in sts.items():\n 
max_time = max(max_time,np.max(v))\n \n for t in np.arange(0,np.floor(max_time)):\n\n for i,pk in enumerate(sts.keys()):\n if np.count_nonzero((sts[pk]>t) & (sts[pk]<(t+1))) > 1:\n\n p = positions[pk][:-1]\n\n x = sts[pk]\n y = isis[pk]\n fx = fsts[pk]\n fy = fisis[pk]\n\n firing_rates_storage[n] = np.nanmean(y[(x>t) & (x<t+1)])\n var_storage[n] = np.nanvar(y[(x>t) & (x<t+1)])\n position_storage[n] = np.nanmean(p[(x>t) & (x<t+1)],axis=0)\n timestamps[n] = (date + timedelta(0,int(t))).timestamp()\n clusters[n] = pk + dpk\n n=n+1\n pk_max = max(pk_max,pk+dpk)\n\n firing_rates_storage = firing_rates_storage[:n]\n var_storage = var_storage[:n]\n position_storage = position_storage[:n]\n timestamps = timestamps[:n]\n clusters = clusters[:n]\n\n np.savez(load_path+'processed_traces.npz',frs=firing_rates_storage,vs=var_storage,pos=position_storage,ts=timestamps,cl=clusters)\n return 0", "def run(every=45):\n print(f\"Scheduling refuel time for every {every} minutes.\")\n seconds = every * 60\n pic = Path.joinpath(Path(__file__).parent, \"pic.png\")\n try:\n img = Image.open(pic)\n while(True):\n for i in tqdm.trange(seconds):\n time.sleep(1)\n print(f\"Taking rest at {time.ctime()}\")\n img.show()\n except:\n print(\"Have a good day!\")\n img.close()", "def set_tick(self, new_tick_sec):\n self._tick_interval = new_tick_sec", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "async def on_timer_update(self, secs: int):\n pass", "def set_interval(self, interval=None):\n if interval is None:\n interval = self._interval\n interval = self._restrict_interval(interval)\n\n if interval != self._interval:\n logger.log(5, \"Redraw the entire trace view.\")\n self._interval = interval\n emit('is_busy', self, True)\n self.plot(update_traces=True, update_waveforms=True)\n emit('is_busy', self, False)\n emit('time_range_selected', self, interval)\n self.update_status()\n else:\n self.plot(update_traces=False, update_waveforms=True)", "def load_intervals(self):\n fname, aux = QFileDialog.getOpenFileName(self, 'Open file', '', \"(*.csv)\")\n if fname != '':\n self.model.IntervalLoad(fname=fname)", "def load_timestamps(timestamps_data_path):\n timestamp_file = os.path.join(\n timestamps_data_path, 'data.csv')\n\n timestamps = []\n with codecs.open(timestamp_file, 'r', 'utf-8') as f:\n for line in islice(f, 1, None):\n t = float(\"{:.9f}\".format(float(line.split(',')[0]) / 1e9))\n timestamps.append(t) \n\n # Subselect the chosen range of frames, if any\n return timestamps", "def __init__(self, trace, sampling_rate):\n super().__init__(trace, sampling_rate)\n self.trigger_values = None", "def data_play(Y, visualizer, frame_rate=30):\r\n \r\n\r\n for y in Y:\r\n visualizer.modify(y[None, :])\r\n time.sleep(1./float(frame_rate))", "def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)", "def update(self, symbol, interval):\n new_data = self.data_feed.pull(symbol, interval)\n self.data_saver.add_recent_candles(new_data, snip_amount=50)", "def start(self):\n self.monitor_lc.start(self.interval)", "def start_timer(self):\n print \"Timer Object Started. 
Will update ADC Information every %s seconds\" % self.refreshTime\n self.timer=Timer(float(self.refreshTime)*1000, self._refresh_Visible_channels)", "def interval(ctx, poll_interval):\n\n fc_group_cfg = {}\n fc_group_cfg['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", ACL, fc_group_cfg)", "def setInterval(self, x):\n self._base_interval = x", "def interval(ctx, poll_interval):\n\n port_info = {}\n port_info['POLL_INTERVAL'] = poll_interval\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)", "def set_looping_interval(self, interval: float) -> None:\n \n self._looping_interval = interval", "def set_signal_trace(self):\n\t\t\n\t\tassert self.signal_trace_file is not None, \"Need to set \"\\\n\t\t\t\"'signal_trace_file' var before calling set_signal_trace; \"\\\n\t\t\t\"var should be set without extension, which must be .dat\"\n\t\t\n\t\tsignal_data = load_signal_trace_from_file(self.signal_trace_file)\n\t\tprint 'Signal time trace from file %s.dat loaded\\n' \\\n\t\t\t\t% self.signal_trace_file\n\t\tself.signal_trace_Tt = signal_data[:, 0]\n\t\tself.signal_trace = (signal_data[:, 1] + self.signal_trace_offset)*\\\n\t\t\t\t\t\t\t\tself.signal_trace_multiplier\n\t\t\n\t\tif self.signal_trace_file_2 is not None:\n\t\t\tsignal_data_2 = load_signal_trace_from_file(self.signal_trace_file_2)\n\t\t\tprint 'Signal time trace 2 from file %s.dat loaded\\n' \\\n\t\t\t\t% self.signal_trace_file_2\n\t\t\tassert len(self.signal_trace_Tt) == len(signal_data_2[:, 0]), \\\n\t\t\t\t\"signal_trace_file_2 must be same length as signal_trace_file\"\n\t\t\tassert sp.allclose(self.signal_trace_Tt, signal_data_2[:, 0], \n\t\t\t\t1e-6), \"signal_trace_file_2 must have same time array as \"\\\n\t\t\t\t\"signal_trace_file\"\n\t\t\tself.signal_trace_2 = (signal_data_2[:, 1] + \\\n\t\t\t\t\t\t\t\t\tself.signal_trace_offset_2)*\\\n\t\t\t\t\t\t\t\t\tself.signal_trace_multiplier_2", "def load_timestamps(self):\n print('Loading timestamps for sequence ' + self.sequence + '...')\n\n timestamp_file = os.path.join(self.sequence_path, 'times.txt')\n\n # Read and parse the timestamps\n self.timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n t = dt.timedelta(seconds=float(line))\n self.timestamps.append(t)\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n self.timestamps = [self.timestamps[i] for i in self.frame_range]\n\n print('Found ' + str(len(self.timestamps)) + ' timestamps...')\n\n print('done.')", "def refresh(self):\n for i in self.data:\n values = self.data[i]\n try:\n if values[\"state\"] == \"Teardown\":\n t_delta = (values[\"t_end\"] or values[\n \"date\"]) - values[\"ts\"]\n else:\n t_delta = values[\"date\"] - values[\"ts\"]\n\n if t_delta.total_seconds() < 0:\n t_delta = values[\"ts\"] - values[\"ts\"]\n values[\"duration\"] = str(t_delta.total_seconds())\n except:\n print sys.exc_info()\n # print values\n values[\"duration\"] = 0", "def change_flush_interval(self, interval):\n assert self.flush_thread, \"Cannot change flush interval when auto_flush is False\"\n self.flush_thread.interval = interval", "def on_tick(self, time):\n pass", "def __init__(self, interval=1.0):\n\n super(VirtualTimeSyncScheduler, self).__init__()\n self.interval = interval", "def interval(self, interval):\n\n self._interval = interval", "def record_throughput(cls, obj, interval=10):\n\n while True:\n obj._reset_receiving_data_throughput()\n obj._reset_consuming_data_throughput()\n\n time.sleep(interval)\n\n print(f'Receiving FPS: 
{obj._get_receiving_data_throughput() / interval:.2f}, '\n f'Consuming FPS: {obj._get_consuming_data_throughput() / interval:.2f}')", "def load_from_file(self, trfilename):\n # Read the report file and populate the datastructure\n for ll in open(trfilename):\n timestr = ll.split()[0]\n label = \" \".join(ll.split()[1:])\n time = float(timestr)\n\n if label in (\"init-trigger\", \"trigger\"):\n self.trtimes.append(time)\n\n elif label == \"sound-start\":\n self.soundstarttime = time\n\n elif label == \"sound-stop\":\n self.soundstoptime = time\n\n else:\n self.otherlabels.append((time, label))\n\n # Fix weird TR times\n itrtimes = np.diff(self.trtimes)\n badtrtimes = np.nonzero(itrtimes > (itrtimes.mean()*1.5))[0]\n newtrs = []\n for btr in badtrtimes:\n # Insert new TR where it was missing..\n newtrtime = self.trtimes[btr]+self.expectedtr\n newtrs.append((newtrtime, btr))\n\n for ntr, btr in newtrs:\n self.trtimes.insert(btr+1, ntr)", "def timelapse():\n now = arrow.now()\n pic_path = Path(f\"/home/pi/lapse_{now.format('YYYY-MM-DD')}\")\n if not pic_path.exists():\n log.info(f\"Creating pic dir: {pic_path}\")\n pic_path.mkdir(parents=True)\n os.chdir(pic_path)\n log.info(f\"Picture directory: {pic_path}\")\n\n if __debug__:\n # In __debug__ mode, just run for 3 minutes.\n end_time = now.shift(minutes=+3)\n else:\n end_time = now.shift(hours=+END_AFTER_HOURS)\n\n with picamera.PiCamera() as camera:\n camera.resolution = (1920, 1080) # Full HD resolution\n camera.rotation = ROTATE\n for filename in camera.capture_continuous(\"sl_{timestamp:%Y%j_%H%M%S}.jpg\"):\n # Using the timestamp to ensure that there are no collisions when/if there's a problem during\n # the night and the timelapse \"restarts\"\n log.info(f\"Taking pic at: {time.asctime()}\")\n if arrow.now() > end_time:\n log.info(\"Got to end time, quitting normally\")\n break\n else:\n time.sleep(WAIT_TIME)", "def on_timer(self, event):\n \n o = Unicorn()\n data = o.get_data(rt)\n k = len(data[0])\n y[:, :-k] = y[:, k:]\n y[:, -k:] = remap((data), -40, 40, -1, 1 ) \n t2 = _thread.start_new_thread(printT, ())\n #y2 = np.array([lfilter(b, a, y[i]) for i in range(17)])\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()", "def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")", "def load_gap_free_trace(file_to_load):\n\t\n\tfilename = file_to_load; \n\texperiment_name = filename.rstrip('.abf');\n\n\tr = io.AxonIO(filename=file_to_load)\n\t#bl = r.read_block(lazy=False, cascade=True)\n\tbl = r.read_block(lazy=False)\n\t#segments are sweeps\n\n\t\n\tprint bl.segments[0].analogsignals[0].magnitude\n\t\n\t##get sampling rate\n\tsampling_rate = bl.segments[0].analogsignals[0].sampling_rate\n\tprint(sampling_rate)\n\n\t##adds channel 0 from each sweep to array \n\tprint('file has')\n\tprint(len(bl.segments))\n\tprint('sweeps')\n\tprint(len(bl.segments[0].analogsignals[0].magnitude))\n\tprint('samples')\n\tchannel_array = np.empty((len(bl.segments)+1,(len(bl.segments[0].analogsignals[0])))); \n\tprint(channel_array.shape)\n\tfor sweep in range(len(bl.segments)):\n\t\tchannel_0_sweep = [] \n\t\tfor data_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):\t\n\t\t\t#print(bl.segments[sweep].analogsignals[0].magnitude[data_point])\n\t\t\tchannel_array[sweep+1][data_point] = (bl.segments[sweep].analogsignals[0].magnitude[data_point]);\n\t\n\t\n\tprint channel_array[0][0:10]\n\t\n\n\n\t## make additional row for 
time\n\tsamplingrate_Hz = sampling_rate.magnitude ;\n\tsampling_interval_msec = (1000 / float(samplingrate_Hz));\n\tfor time_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):\n\t\tchannel_array[0][time_point] = (float(time_point)*sampling_interval_msec); \n\n\t## write a csv file \n\n\tnp.savetxt(experiment_name + 'abf_to_csv.csv', np.transpose(channel_array), delimiter=',', newline='\\n');\n\treturn(channel_array)", "def change_interval(self, seconds):\n if isinstance(seconds, int) and seconds > 0:\n self.log.info(\"{} timer interval changed (old:{} s new:{} s)\".format(self.name, self.interval, seconds))\n self.interval = seconds\n self.restart_timer()\n else:\n self.log.error(\"Invalid interval requested...must be integer > 0\")", "def wait(interval):\n time.sleep(interval/1000.0)", "def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], 
lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def _tail_profile(self, db, interval):\r\n latest_doc = None\r\n while latest_doc is None:\r\n time.sleep(interval)\r\n latest_doc = db['system.profile'].find_one()\r\n\r\n current_time = latest_doc['ts']\r\n\r\n while True:\r\n time.sleep(interval)\r\n cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)\r\n for doc in cursor:\r\n current_time = doc['ts']\r\n yield doc", "def get_traces(self, traces, **kwargs):\n self.resource.clear()\n sweep = kwargs.get(\"sweep\", False)\n\n name_prefix = kwargs.get(\"name_prefix\", \"\")\n if name_prefix:\n name_prefix += \" - \"\n\n channels = OrderedDict()\n for trace in traces:\n ch = trace[\"channel\"]\n if ch not in channels.keys():\n channels[ch] = {\n \"frequency\": None,\n \"traces\": list()}\n channels[ch][\"traces\"].append(trace)\n\n if sweep is True:\n self.sweep(channels=list(channels.keys()))\n\n traces = []\n for ch, ch_data in channels.items():\n frequency = ch_data[\"frequency\"] = self.get_frequency()\n for trace in ch_data[\"traces\"]:\n self.scpi.set_selected_meas_by_number(trace[\"channel\"], trace[\"measurement number\"])\n sdata = self.scpi.query_data(trace[\"channel\"], \"SDATA\")\n s = sdata[::2] + 1j * sdata[1::2]\n ntwk = skrf.Network()\n ntwk.s = s\n ntwk.frequency = frequency\n ntwk.name = name_prefix + trace.get(\"parameter\", \"trace\")\n traces.append(ntwk)\n return traces", "def spectrum(datapath, run, forcebins = False):\n runpath = datapath + '/' + run\n events = [evnt for evnt in listdir(runpath) if not isfile(join(runpath,evnt))]\n allTraces = []\n total_time = 0\n pulses = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n times = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n #camextratime = 25e-6\n for event in events:\n if int(event)> 3:\n break\n print(event)\n e = sbc.DataHandling.GetSBCEvent.GetEvent(runpath,event)\n if e[\"slowDAQ\"][\"loaded\"]:\n #print(e[\"fastDAQ\"].keys())\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n #dcam = np.diff(cgate)\n fdt = e['fastDAQ']['time']\n #camOffTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]))\n \n #camOnTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] < 0.5]))\n fddt = fdt[1]-fdt[0]\n tfast = fdt[-1]-fdt[0]\n LED_on = [fdt[i] for i in range(len(cgate)) if cgate[i]<-0.5]\n blockedFraction = ((len(LED_on)*fddt))/tfast\n print(blockedFraction)\n tr = e[\"PMTtraces\"]\n trac = tr[\"traces\"]\n dt = tr[\"dt\"]\n #event_time = (tr['t0_sec'][-1]+tr['t0_frac'][-1]-tr['t0_sec'][0] - tr['t0_frac'][0])[0]\n event_time = (((e[\"slowDAQ\"][\"elapsed_time\"][-1]-e[\"slowDAQ\"][\"elapsed_time\"][0]))*(1-blockedFraction))\n #print(event_time)\n total_time += event_time\n\n #f,axes = plt.subplots(1,5,sharey=True)\n #f.suptitle(runpath+\"/\"+str(event))\n #pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n #d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n #pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n #tracetimes = pmttracetime - pmtalign\n #camoffindex = 0\n #camonindex = 0\n for i in range(len(trac)):\n #print(i)\n \"\"\"\n thistracetime = tracetimes[i]\n \n #nearestcamoff = min(camOffTimes, key=lambda x:abs(x-thistracetime))\n #nearestcamon = min(camOnTimes, key=lambda x:abs(x-thistracetime))\n 
print(camOffTimes[camoffindex])\n print(thistracetime)\n if thistracetime > camOffTimes[camoffindex]:\n camoffindex += 1\n if thistracetime > camOnTimes[camonindex]:\n camonindex += 1 \n if camoffindex<len(camOffTimes)-1:\n if abs(camOffTimes[camoffindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera off')\n continue\n if camonindex<len(camOnTimes)-1:\n if abs(camOnTimes[camonindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera on')\n continue\n \"\"\"\n trace = np.fabs(trac[i][0])\n if max(trace) == 128:\n trace = stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt_tr = dt[i][0]\n\n # populate dictionaries arrays based on how many pulses there were\n [a,n,totInt,pktimes] = SBC_pulse_integrator_bressler(trace,dt_tr)\n if n == 0:\n number = 'zero'\n allTraces.append(a)\n elif n == 1:\n number = 'one'\n allTraces.append(a)\n times['one'].append(pktimes[0])\n elif n == 2:\n number = 'two'\n allTraces.append(a)\n elif n == 3:\n number = 'three'\n allTraces.append(a)\n else:\n number = 'other'\n allTraces.append(a)\n \"\"\"\n #if a != None:\n if isZero:\n if j < 5:\n if isNegative:\n if random() >0:\n print(runpath+\"/\"+str(event)+\" pmt trace \"+str(i))\n tPMT = np.arange(len(trace))*dt_tr\n axes[j].plot(tPMT,trace,lw=3)\n axes[j].set_xlabel(\"time (s)\",fontsize=25)\n axes[j].set_ylabel(\"PMT response (ADC)\",fontsize=25)\n j+=1\n \n \n plt.show\n \"\"\"\n pulses[number].append(a)\n gc.collect()\n \n \n for k in pulses:\n pulses[k] = [x for x in pulses[k] if x != None]\n \n allTraces = [x for x in allTraces if x != None]\n \n plt.figure()\n\n Nbins = int(np.floor(np.sqrt(len(allTraces))))\n allvals, bins, _ = plt.hist(allTraces,Nbins,label='all traces')\n \n areaVals = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n for k in pulses:\n if k != 'other':\n areaVals[k], _, _ = plt.hist(pulses[k],bins,histtype = 'step',\n linewidth = 3,label= k+' hits')\n plt.legend(fontsize=12)\n plt.show() \n #spe_spectrum = areaVals['one']\n \n #def gaussian(x,mu,sigma,amplitude):\n # return amplitude * np.exp(-((x - mu) /(np.sqrt(2)* sigma))**2 )\n \n #params_spe, params_cov_spe = scipy.optimize.curve_fit(gaussian,bins[:len(areaVals['one'])],\n # spe_spectrum,\n # p0=[0.4e8,1e7,40])\n #params_twohits, params_cov_twohits = scipy.optimize.curve_fit(gaussian,\n # bins[:len(areaVals['two'])],\n # areaVals['two'],\n # p0=[0.8e8,1e7,10])\n #mu_spe = params_spe[0]\n #mu_2 = params_twohits[0] - mu_spe\n #print(mu_spe)\n #print(mu_2)\n \n #mu_avg = (mu_spe + mu_2)*0.5\n #mu_avg = get_gain(datapath,run)\n mu_avg = 1e7\n print(mu_avg)\n\n \n plt.figure()\n plt.grid(True)\n if isinstance(forcebins,np.ndarray):\n bins=forcebins\n fullspect,_,_=plt.hist([t/mu_avg for t in allTraces],\n forcebins,label='all traces')\n \n else:\n fullspect,bins,_=plt.hist([t/mu_avg for t in allTraces],\n int(np.floor(np.sqrt(len(allTraces)))),label='all traces')\n \n #print(bins)\n plt.yscale('log')\n plt.xlabel('phe based on a gain of '+str(mu_avg)+' electrons per phe')\n plt.legend()\n plt.show\n print(sum(fullspect)/total_time)\n print(\"The Total Exposure Time of run \"+str(runpath)+ \" was \"+str(total_time)+\" Seconds\")\n print(\"The overall PMT trigger rate was \" + str(len(allTraces)/total_time)+ \"Hz\")\n return [fullspect,bins,total_time]", "def set_checkpoint_interval(self, interval):\n self.checkpoint_interval = interval\n for _, reader in self.shard_readers.items():\n reader.checkpoint_interval = interval", "def callback_time_cut(val):\n global 
plot_mode\n global idx_time\n last_plot_mode = plot_mode\n plot_mode = 'time_cut'\n idx_time = int(val)\n update_num_shadow(int(sld['neighbors'].val))\n # plot 121\n lcuttime.set_xdata( [val, val] )\n lcuttime.set_alpha( alpha_hm )\n lcutfreq.set_alpha( 0.0 )\n # plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_time ) # [True/False, True/False]\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True ] )\n replot_light()\n reform_axis()\n\n fig.canvas.draw_idle()", "def loadts(args):\n # writing and reading pickle are both about 10x faster than reading csv\n # hence, simplify repeated execs by providing pickle file\n time_before = time.time()\n # timestart = time.time()\n d = dict() # dictionary of numpy arrays that hold timestamps per IP\n p = dict() # dictionary of IPs, holding # timestamps per IP\n offset = dict() # dict to hold tsval offset per IP\n\n try:\n pklfile = open(args.tsfile + \".pickle\", 'rb')\n d, p, offset = pickle.load(pklfile)\n pklfile.close()\n except:\n print(\"TS pickle loading failed, loading from csv\")\n logging.debug(\"TS pickle loading failed, loading from csv\")\n with open(args.tsfile, \"r\") as csvfile:\n datareader = csv.reader(csvfile)\n count = 0\n for row in datareader:\n count += 1\n try:\n ip = row[0]\n tcpt = row[1]\n recvt = row[2]\n except:\n print(\"Error in line \" + str(count) + \"of \" + str(args.tsfile) + \", skipping.\")\n logging.error(\"Error in line \" + str(count) + \"of \" + str(args.tsfile) + \", skipping.\")\n continue\n if ip in d:\n if p[ip] == 9999:\n d[ip].resize(100 * 1000, 2)\n if p[ip] > (100 * 1000) - 1: # more than 100k measurements can not be a target host\n continue\n if ip in offset:\n # recv_t is zero-based and scaled to be in seconds precision\n d[ip][p[ip], :] = \\\n [np.float64(tcpt),\n np.float64(np.uint64(recvt) - np.uint64(offset[ip])) / np.float64(1000.0 * 1000.0)]\n p[ip] = p[ip] + 1\n else:\n print(\"ip not in offset dict (should never happen, exiting): \" + str(ip))\n sys.exit(1)\n else: # ip is not in d, i.e. 
has not been seen before\n d[ip] = np.zeros((10000, 2), dtype=np.float64)\n p[ip] = 0\n d[ip][p[ip], :] = [np.float64(tcpt), np.float64(0.0)]\n p[ip] += 1\n offset[ip] = recvt\n logging.debug(\"timestamp np structure built after: {}, count: {} {} {}\".format(time.time() - time_before, count, len(d), len(p)))\n # resize all to correct length (removes trailing zeroes)\n for ip, value in p.items():\n d[ip].resize((p[ip], 2))\n\n pklfile = open(args.tsfile + \".pickle\", 'wb')\n pickle.dump([d, p, offset], pklfile)\n pklfile.close()\n print(\"ts data loaded in {} seconds, # IP addresses: {} \".format(round(time.time() - time_before, 2), len(d)))\n logging.debug(\"ts data loaded in {} seconds, # IP addresses: {} \".format(round(time.time() - time_before, 2), len(d)))\n return d, p, offset", "async def set_sampling_interval(self, interval):\n data = [interval & 0x7f, (interval >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL, data)", "async def set_sampling_interval(self, interval):\n data = [interval & 0x7f, (interval >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL, data)", "def load_data():\n # Load in data\n sample_frame = energy_connection.sample_series('energy_readings')\n # TODO: Rooms/QL Extract\n sample_frame = energy_connection.sample_series('external_readings', append_frame=sample_frame)\n\n # To object\n sample = TimeSeriesSample(sample_frame, 'time')\n\n return sample", "def interpolate_lightcurve(light_curve, samples_per_frame_time, frame_time):\n time_units = light_curve['times'].unit\n flux_units = light_curve['fluxes'].unit\n divisor = samples_per_frame_time - 1.\n points = np.arange(light_curve['times'][0].value, light_curve['times'][-1].value, frame_time/divisor)\n light_curve[\"fluxes\"] = np.interp(points, light_curve['times'].value, light_curve['fluxes'].value) * flux_units\n light_curve[\"times\"] = points * time_units\n return light_curve", "def update_system_load(self, interval, stats, resource):\n interval = int(interval)\n if resource == 'cpu':\n latest_reading = sysmon.get_cpu_utilisation()\n elif resource == 'memory':\n latest_reading = sysmon.get_memory_usage()\n elif resource == 'network':\n latest_reading = sysmon.get_network_interface_traffic(INTERFACE)\n\n del stats[0]\n stats.append(latest_reading)\n print(\"Latest %s reading is %0.2f\" % (resource, latest_reading))\n time.sleep(interval)\n return stats", "def before_tick(self, time):\n pass", "def trc_get_sample_time(path):\n datX, datY, m = readTrc.readTrc(path)\n return int(np.ceil(m['HORIZ_INTERVAL'] * 1e9))", "def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)", "def tick(self):\n \n TIME = self._get_ticks()\n DT = self._ticks = (TIME - self.time) / self.dilation\n self._elapsed += self._ticks\n self.time = TIME\n \n # Update runtime stats and counters every second.\n if self._elapsed >= 1.0:\n self._elapsed %= 1.0\n # Save stats and clear counters.\n self.tps = 0.0\n self.fps = self.frame_count\n self.ups = self.update_count\n self.frame_count = self.update_count = 0\n \n # Process the time slice.\n self._tps += 1\n self._update_elapsed += DT\n self._frame_elapsed += DT\n self.update_ready = self.frame_ready = False\n \n if TIME >= self._last_update+self._tick_step*self.dilation:\n self.update_ready = True\n \n if self.max_fps == 0:\n self.frame_ready = True\n elif TIME >= self._last_frame+self._frame_step or \\\n self._frames_skipped >= self.max_frame_skip:\n 
self.frame_ready = True\n elif self._use_wait and self.max_fps > 0:\n wait_sec = self._last_frame + self._frame_step - self._get_ticks()\n if wait_sec > 0.:\n self._wait(wait_sec)\n self.frame_ready = True\n \n # Schedules cycled every tick.\n for sched in self.schedules:\n sched.func(DT, *sched.args, **sched.kwargs)\n \n # Schedules cycled when their interval elapses.\n if self._need_sort:\n self.interval_schedules.sort(key=_IntervalItem.sort_key)\n self.need_sort = False\n for sched in self.interval_schedules:\n due = sched.lasttime + sched.interval*self.dilation\n if TIME >= due:\n drift = TIME - due\n if -0.5 < drift < 0.5:\n dt = sched.interval\n else:\n dt = TIME - sched.lasttime\n sched.func(dt/self.dilation, *sched.args, **sched.kwargs)\n sched.lasttime += dt * self.dilation\n self._need_sort = True\n else:\n break\n \n # Schedules cycled every update.\n if self.update_ready:\n # Flip the state variables.\n self.update_count += 1\n self._frames_skipped += 1\n self.update_elapsed = self._update_elapsed\n self._update_elapsed = 0.0\n # Reconcile if we're way too fast or slow.\n self._last_update += self._tick_step\n drift = self._tick_step / 5.0\n if not (TIME-drift < self._last_update < TIME+drift):\n self._last_update = TIME\n # Run the schedules.\n update_called = self.update_callback is None\n for sched in self.update_schedules:\n if update_called:\n sched.func(self.update_elapsed, *sched.args, **sched.kwargs)\n else:\n if sched.pri > 0.0:\n self.update_callback(self.update_elapsed)\n update_called = True\n sched.func(self.update_elapsed, *sched.args, **sched.kwargs)\n if not update_called:\n self.update_callback(self.update_elapsed)\n \n # Schedules cycled every frame.\n if self.frame_ready:\n # Flip the state variables.\n self.frame_count += 1\n self._frames_skipped = 0\n self.frame_elapsed = self._frame_elapsed\n self._frame_elapsed = 0.0\n # Reconcile if we're way too fast or slow.\n if self._frame_step:\n self._last_frame += self._frame_step\n drift = self._frame_step * self.max_frame_skip\n if not (TIME-drift < self._last_frame < TIME+drift):\n self._last_frame = TIME\n # Run the schedules.\n frame_called = self.frame_callback is None\n for sched in self.frame_schedules:\n if frame_called:\n sched.func(self.frame_elapsed, *sched.args, **sched.kwargs)\n else:\n if sched.pri > 0.0:\n self.frame_callback(self.frame_elapsed)\n frame_called = True\n sched.func(self.frame_elapsed, *sched.args, **sched.kwargs)\n if not frame_called:\n self.frame_callback(self.frame_elapsed)\n \n return DT", "def simulate(self, path):\n prev_time = 0\n for entry in self.read_log(path):\n event = Event(entry)\n if event.time != prev_time: # Show frame at time i\n self.show()\n prev_time = event.time\n self.process_event(event)", "def heartbeat(self):\n if not self._timestamp:\n self._timestamp = time.time()\n else:\n now = time.time()\n interval = now - self._timestamp\n self._timestamp = now\n self._intervals.append(interval)\n if len(self._intervals) > self.max_sample_size:\n self._intervals.pop(0)\n if len(self._intervals) > 1:\n self._mean = sum(self._intervals) / float(len(self._intervals))", "def load(trace_dir: str) -> Trace:\n with open(os.path.join(trace_dir, \"metadata.pkl\"), \"rb\") as f:\n load_dict = pickle.load(f)\n call_dirs = sorted(glob.glob(os.path.join(trace_dir, \"call_*\")))\n calls = [ModuleCall.load(call_dir) for call_dir in call_dirs]\n load_dict[\"calls\"] = calls\n return Trace(module=None, function=None, _load_dict=load_dict)", "def 
get_interval_list_predefined_gap(traces_list, gap_interval):\n\n intv = 0\n interval_list = []\n pre_traces = []\n\n for timst in traces_list:\n timst = timst.replace(microsecond=0)\n pre_traces.append(timst)\n\n for i in range(0, len(pre_traces)-1):\n iat = (pre_traces[i+1]-pre_traces[i]).total_seconds()\n if iat <= gap_interval:\n current_trace = pre_traces[i]\n while current_trace < pre_traces[i+1]:\n interval_list.append(current_trace)\n current_trace = current_trace + datetime.timedelta(0,1)\n else:\n interval_list.append(pre_traces[i])\n\n if i == len(pre_traces)-2:\n interval_list.append(pre_traces[i+1])\n\n return interval_list", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n if poll_interval is not None:\n port_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def timer(papirus, seconds):\n image = Image.new('1', papirus.size, WHITE)\n\n draw = ImageDraw.Draw(image)\n width, height = image.size\n\n timer_font_size = int((width - 4)/(5*0.65))\n timer_font = ImageFont.truetype(CLOCK_FONT_FILE, timer_font_size)\n\n draw.rectangle((0, 0, width, height), fill=WHITE, outline=WHITE)\n previous_remaining = 0\n\n start = time.time()\n remaining = seconds # seconds\n\n light_on((100,100,0))\n while remaining > 0:\n while remaining > 0:\n now = time.time()\n remaining = seconds - (now - start)\n if int(remaining) == previous_remaining:\n break\n if remaining < 0:\n break\n time.sleep(0.1)\n\n draw.rectangle((5, 10, width - 5, 10 + timer_font_size), fill=WHITE, outline=WHITE)\n draw.text((5, 10), '{m:02d}:{s:02d}'.format(m=int(remaining // 60), s=int(remaining % 60)), fill=BLACK, font=timer_font)\n\n # display image on the panel\n papirus.display(image)\n if int(remaining % 60) == 0:\n papirus.update() # full update every minute\n else:\n papirus.partial_update()\n previous_remaining = int(remaining)\n light_off()\n papirus.clear()", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_wm_info = {}\n pg_wm_info = {}\n buffer_pool_wm_info = {}\n if poll_interval is not None:\n queue_wm_info['POLL_INTERVAL'] = poll_interval\n pg_wm_info['POLL_INTERVAL'] = poll_interval\n buffer_pool_wm_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", queue_wm_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", pg_wm_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, buffer_pool_wm_info)", "def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return", "def increase_scan_interval(hass):\n hue_sensor_base.SensorManager.SCAN_INTERVAL = datetime.timedelta(days=365)", "def start(interval: int, devices_list: list):\n t = threading.Thread(target=start_monitoring, args=(interval, devices_list))\n t.start()\n with lock:\n global running\n running = True", "def load(fileobj, interval, base_timestamp, initial_bid=None, initial_ask=None):\n\n if not isinstance(initial_bid, Candle) and initial_bid is not None:\n initial_bid = Candle.constant(initial_bid)\n if not isinstance(initial_ask, Candle) and initial_ask is not None:\n initial_ask = Candle.constant(initial_ask)\n pd_csv = pd.read_csv(fileobj, chunksize=3600, header=None)\n source = Source(Candle, base_timestamp, interval, initial_bid, initial_ask)\n rows = []\n for chunk in pd_csv:\n for idx, row in chunk.iterrows():\n rows.append((Candle(start=row[0], 
min=row[2], max=row[4], end=row[6]),\n Candle(start=row[1], min=row[3], max=row[5], end=row[7])))\n source.push(array(rows, dtype=Candle))\n return source", "def beat_interval_option(args, run):\n run.beat_interval = float(args)", "def on_sampling_timer(self, event):\n self.sampling_timer.Stop()", "def Sleep(self):\n sleep(self.pSampling)", "def __sleep(self):\n if self.sleep_duration > 0:\n gevent.sleep(self.sleep_duration)\n else:\n self.__warn(\n f\"The average tick took longer than the set tick duration of {self.__tick_duration}. \"\n f\"Program is to heavy to run real time\")", "def tick(self):\n self.times.append(timeit.default_timer())", "def _show_time_updates(p_bar):\n while p_bar.total > p_bar.n:\n time.sleep(1)\n if p_bar.total > p_bar.n:\n p_bar.refresh()", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def _lt_sec(self) -> int:\n return self.loading_time.total_seconds()", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def update_graphs(self):\n profile_time = False\n if profile_time:\n start_time = time.time()\n\n # Update the graph data with data only within the chosen time_range\n now = time.time()\n i_tr_prs = np.where(now - self.prs_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.prs_graph.setData(self.prs_data[0, i_tr_prs] - now, self.prs_data[1, i_tr_prs])\n # Updates the graph title\n self.prs_pw.setTitle(f\"Pressão: {self.prs_data[1, 0]:.1f} cmH2O\", **self.ttl_style)\n\n if profile_time == True:\n time_at_pressure = time.time()\n print(f\"Until pressure graph: {time_at_pressure - start_time:.4f} s\")\n \n # Update the graph data with data only within the chosen time_range\n now = time.time()\n i_tr_flw = np.where(now - self.flw_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.flw_pw.setTitle(f\"Fluxo: {self.flw_data[1, 0]:.1f} l/min\", **self.ttl_style)\n self.flw_graph.setData(self.flw_data[0, i_tr_flw] - now, self.flw_data[1, i_tr_flw])\n\n if profile_time == True:\n time_at_flow = time.time()\n print(f\"Until flow graph: {time_at_flow - start_time:.4f} s\")\n\n i_tr_vol = np.where(now - self.vol_data[0, :] < \n self.time_range[1] - self.time_range[0])[0]\n self.vol_pw.setTitle(f\"Volume: {self.vol_data[1, 0]:.0f} ml\", **self.ttl_style)\n self.vol_graph.setData(self.vol_data[0, i_tr_vol] - now, self.vol_data[1, i_tr_vol])\n\n if profile_time == True:\n time_at_volume = time.time()\n print(f\"After the volume graph: {time_at_volume - time_at_flow:.4f} s\")\n\n # Adjust the Y range every N measurements\n # Manually adjusting by calculating the max and min with numpy is faster than autoscale on \n # the graph. 
Also calculates FPS\n N = 20\n if self.run_counter % N == 0:\n # definition of the minimum acceptable range for the volume\n min_range_vol = [-5, 50]\n # Tries to get the max and min from each data set \n try:\n range_vol = [np.min(self.vol_data[1, i_tr_vol]), np.max(self.vol_data[1, i_tr_vol])]\n # Adjusts the minimum and maximum, if the measured values are outside the minimum range\n self.vol_pw.setYRange(np.min([range_vol[0], min_range_vol[0]]), \n np.max([range_vol[1], min_range_vol[1]]))\n except:\n pass\n min_range_prs = [-0.2, 5]\n try:\n range_prs = [np.min(self.prs_data[1, i_tr_prs]), np.max(self.prs_data[1, i_tr_prs])]\n self.prs_pw.setYRange(np.min([range_prs[0], min_range_prs[0]]), \n np.max([range_prs[1], min_range_prs[1]]))\n except:\n pass\n\n min_range_flw = [-0.1, 1]\n try:\n range_flw = [np.min(self.flw_data[1, i_tr_flw]), np.max(self.flw_data[1, i_tr_flw])]\n self.flw_pw.setYRange(np.min([range_flw[0], min_range_flw[0]]), \n np.max([range_flw[1], min_range_flw[1]]))\n except:\n pass\n mean_pts = 50\n try:\n FPS = np.nan_to_num(1.0 / np.mean(self.vol_data[0, 0:mean_pts] - \n self.vol_data[0, 1:1+mean_pts]))\n except:\n FPS = 0\n self.fps_lbl.setText(f\"FPS: {FPS:.2f}\")\n self.run_counter = 0\n self.run_counter += 1", "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def display_time_updates(bar):\n threading.Thread(target=_show_time_updates, args=(bar,)).start()", "def __window_setInterval(self, f, delay, arg1 = None, arg2 = None):\n pass", "def start(self):\n self.timer.start(500)", "def interval(poll_interval):\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = {}\n if poll_interval is not None:\n queue_info['POLL_INTERVAL'] = poll_interval\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE\", queue_info)", "def capture_timelapse(self, count, interval):\n filename = self.get_new_photo_filename('_{0:03d}_{1:03d}')\n for i in range(count):\n open(self.camid + '/' + filename.format(i, count), 'wb').write(\n self.fake_shot)\n time.sleep(interval)\n return filename.format(0, count)" ]
[ "0.54937834", "0.5422799", "0.523051", "0.5210247", "0.51206535", "0.51206535", "0.510375", "0.50268334", "0.49911025", "0.49497274", "0.48942694", "0.48845673", "0.48481822", "0.48463246", "0.48297423", "0.47870618", "0.47789985", "0.47762358", "0.47673932", "0.47318327", "0.47196627", "0.4699382", "0.46844473", "0.46830693", "0.4669657", "0.4665458", "0.46622297", "0.46540987", "0.462533", "0.46118197", "0.45835236", "0.45753744", "0.45558125", "0.45471582", "0.4541961", "0.45412257", "0.4536319", "0.4533754", "0.45307535", "0.45303512", "0.45235708", "0.45230705", "0.4516325", "0.4515754", "0.45062795", "0.44926375", "0.4491865", "0.44913673", "0.44841018", "0.4476178", "0.44706377", "0.44663998", "0.4466385", "0.44552043", "0.44548416", "0.44545013", "0.44496447", "0.44419867", "0.4428326", "0.4425142", "0.44145277", "0.4410617", "0.4408625", "0.44040185", "0.4399417", "0.4399417", "0.4395917", "0.43816411", "0.4370218", "0.43673643", "0.43615285", "0.4359925", "0.43554023", "0.43534094", "0.4351251", "0.43381554", "0.43366665", "0.43359113", "0.43341762", "0.43326098", "0.43309993", "0.43283856", "0.43274254", "0.4327271", "0.4326865", "0.43235826", "0.43230665", "0.43203178", "0.43150145", "0.43138647", "0.4309658", "0.4307892", "0.43024287", "0.42937776", "0.4293684", "0.42919657", "0.42916825", "0.42905965", "0.42882228", "0.4285966" ]
0.47573504
19
Iterate through the spike waveforms belonging in the current trace view.
def _iter_spike_waveforms(
        interval=None, traces_interval=None, model=None, supervisor=None,
        n_samples_waveforms=None, get_best_channels=None, show_all_spikes=False):
    m = model
    p = supervisor
    sr = m.sample_rate
    a, b = m.spike_times.searchsorted(interval)
    s0, s1 = int(round(interval[0] * sr)), int(round(interval[1] * sr))
    ns = n_samples_waveforms
    k = ns // 2
    for show_selected in (False, True):
        for i in range(a, b):
            t = m.spike_times[i]
            c = m.spike_clusters[i]
            is_selected = c in p.selected
            # Show non selected spikes first, then selected spikes so that they appear on top.
            if is_selected is not show_selected:
                continue
            # Skip non-selected spikes if requested.
            if (not show_all_spikes and c not in supervisor.selected):
                continue
            # cg = p.cluster_meta.get('group', c)
            channel_ids, channel_amps = get_best_channels(c)
            s = int(round(t * sr)) - s0
            # Skip partial spikes.
            if s - k < 0 or s + k >= (s1 - s0):  # pragma: no cover
                continue
            # Extract the waveform.
            wave = Bunch(
                data=traces_interval[s - k:s + ns - k, channel_ids],
                channel_ids=channel_ids,
                start_time=(s + s0 - k) / sr,
                spike_id=i,
                spike_time=t,
                spike_cluster=c,
                channel_amps=channel_amps,  # for each of the channel_ids, the relative amp
                select_index=p.selected.index(c) if c in p.selected else None,
            )
            assert wave.data.shape == (ns, len(channel_ids))
            yield wave
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()", "def waveforms(self):\n return list(self._waveforms)", "def getSpikes(self, compatible_output=False, gather=True):\n global controller\n timer = None\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer = Timer()\n timer.start_timing()\n spikes = self.vertex.getSpikes(controller, controller.dao.run_time, compatible_output)\n\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer.take_sample()\n return spikes", "def iter_spectra(self):\n for record in self.session.query(SpectrumLibraryIndexRecord).order_by(\n SpectrumLibraryIndexRecord.number).yield_per(10000):\n yield record", "def AllSpikeTimes(self):\n blah = []\n for neur in self.neurons:\n blah.append(np.array(neur.spikes))\n\n return blah", "def set_spike_data(self):\n self.spike_record = {l: self.network.monitors['{:}_spikes'.format(l)].get('s') for l in self.network.layers}", "def get_ser_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_ser_neurons()):\n spktimes_singlesweep.append(\n np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def _get_all_spectra(self):\n pass", "def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk = self.__read_wav_file(spk_file)\n sig_noise = self.__read_wav_file(noise_file)\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n\n if min_length < self._fftsize:\n raise Exception(\"ERROR: Too short signals in dataset\")\n\n sig_spk = sig_spk[:min_length]\n sig_noise = sig_noise[:min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0", "def iterate_tsne(self):\n send_stop_event = False\n if self.analysis is None:\n return\n if not self._stop_iter:\n self.timer_count = self.timer_count + 1\n for j in range(self.iters_per_frame):\n self.analysis.do_iteration()\n 
self._iter_count = self.timer_count * self.iters_per_frame + j\n self.status.showMessage(f\"Iteration: {self._iter_count}\")\n\n if self.timer_count == self.num_frames - 1:\n self._stop_iter = True\n send_stop_event = True\n self.timer_count = 0\n self.status.showMessage(\"Iteration: Completed\")\n\n # Update point positions\n self.embedding_viewer.update_plot(self.analysis.embedding)\n else:\n if self.timer_count % 10 == 0:\n self.embedding_viewer.force_refresh()\n\n if send_stop_event:\n self.embedding_viewer.force_refresh()\n time.sleep(0.1)\n self.analysis_stopped(self.analysis,\n self.embedding_viewer.get_figure_as_buffer())", "def train(self):\n \n for demo_traj in self._demo_trajs:\n\n interpolate = interp1d(self._phase._z, demo_traj, kind='cubic')\n\n #strech the trajectory to fit 0 to 1\n stretched_demo = interpolate(self._phase._z)[None,:]\n\n #compute the weights of the trajectory using the basis function\n w_demo_traj = np.dot(np.linalg.inv(np.dot(self._Phi, self._Phi.T) + 1e-12*np.eye(self._n_bfs) ), np.dot(self._Phi, stretched_demo.T)).T # weights for each trajectory\n \n #append the weights to the list\n self._W.append(w_demo_traj.copy())\n\n self._W = np.asarray(self._W).squeeze()\n \n # mean of weights\n self._mean_W = np.mean(self._W, axis=0)\n \n # covariance of weights\n # w1 = np.array(map(lambda x: x - self._mean_W.T, self._W))\n # self._sigma_W = np.dot(w1.T, w1)/self._W.shape[0]\n\n self._sigma_W = np.cov(self._W.T)", "def extract_spikes(spike_data, spt_dict, sp_win,\n resample=1, contacts='all'):\n sp_data = spike_data['data']\n n_contacts = spike_data['n_contacts']\n\n if contacts == \"all\":\n contacts = np.arange(n_contacts)\n elif isinstance(contacts, int):\n contacts = np.array([contacts])\n else:\n contacts = np.asarray(contacts)\n\n FS = spike_data['FS']\n spt = spt_dict['data']\n idx = np.arange(len(spt))\n inner_idx = filter_spt(spike_data, spt_dict, sp_win)\n outer_idx = idx[~np.in1d(idx, inner_idx)]\n\n indices = (spt / 1000.0 * FS).astype(np.int32)\n win = (np.asarray(sp_win) / 1000.0 * FS).astype(np.int32)\n time = np.arange(win[1] - win[0]) * 1000.0 / FS + sp_win[0]\n n_contacts, n_pts = sp_data.shape\n\n # auxiliary function to find a valid spike window within data range\n minmax = lambda x: np.max([np.min([n_pts, x]), 0])\n spWave = np.zeros((len(time), len(spt), len(contacts)),\n dtype=np.float32)\n\n for i in inner_idx:\n sp = indices[i]\n spWave[:, i, :] = np.atleast_2d(sp_data[contacts,\n sp + win[0]:sp + win[1]]).T\n\n for i in outer_idx:\n sp = indices[i]\n l, r = map(minmax, sp + win)\n if l != r:\n spWave[(l - sp) - win[0]:(r - sp) - win[0], i, :] = \\\n sp_data[contacts, l:r].T\n\n wavedict = {\"data\": spWave, \"time\": time, \"FS\": FS}\n\n if len(idx) != len(inner_idx):\n is_valid = np.zeros(len(spt), dtype=np.bool)\n is_valid[inner_idx] = True\n wavedict['is_valid'] = is_valid\n\n if resample != 1:\n warn(\"resample argument is deprecated.\"\n \"Please update your code to use function\"\n \"resample_spikes\", DeprecationWarning)\n wavedict = resample_spikes(wavedict, FS * resample)\n return wavedict", "def get_spike_trains(self, current=None):\n\n # For compability with sciunit as many spike trains are generated as there exists ground truth observations\n spike_trains = []\n if current:\n self.set_external_current(current)\n self.simulate(T_max=TMAX)\n voltage_trial = self.v\n vm_trial = AnalogSignal(voltage_trial, self.dt)\n spike_train = vm_trial.threshold_detection(0)\n spike_trains = [spike_train for _ in range(0,3)]\n return 
spike_trains", "def extract_waveforms(signal, fs, spikes_idx, pre, post):\n cutouts = []\n pre_idx = int(pre * fs)\n post_idx = int(post * fs)\n for index in spikes_idx:\n if index-pre_idx >= 0 and index+post_idx <= signal.shape[0]:\n cutout = signal[(index-pre_idx):(index+post_idx)]\n cutouts.append(cutout)\n return np.stack(cutouts)", "def get_list_of_tracers_for_wsp(self):\n sacc_file = self.io.get_sacc_file()\n tracers = sacc_file.get_tracer_combinations()\n\n fnames = []\n tracers_out = []\n for i, trs1 in enumerate(tracers):\n s1, s2 = self.get_tracer_comb_spin(trs1)\n mn1, mn2 = [self.mask_names[tri] for tri in trs1]\n\n for trs2 in tracers[i:]:\n s3, s4 = self.get_tracer_comb_spin(trs2)\n mn3, mn4 = [self.mask_names[tri] for tri in trs2]\n\n fname1 = f\"w{s1}{s2}__{mn1}__{mn2}.fits\"\n fname2 = f\"w{s3}{s4}__{mn3}__{mn4}.fits\"\n\n if (fname1 in fnames) or (fname2 in fnames):\n continue\n\n fnames.append(fname1)\n fnames.append(fname2)\n\n tracers_out.append((trs1, trs2))\n\n return tracers_out", "def show_pipline_infor(self):\r\n self.normalOutputWritten('--------Pipeline general info--------\\n')\r\n for eachround in range(int(len(self.RoundQueueDict)/2-1)):\r\n\r\n #--------------------------------------------------------------\r\n # show waveform settings\r\n waveformPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][0]\r\n camOperationPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][1]\r\n waveform_sequence = 1\r\n \r\n for eachwaveform in waveformPackage:\r\n\r\n try:\r\n if len(waveformPackage[eachwaveform][3]) != 0:\r\n self.normalOutputWritten('Round {}, sequence {}, recording channels:{}.\\n'.format(eachround+1, waveform_sequence, waveformPackage[eachwaveform][3]))\r\n print('Round {}, recording channels:{}.'.format(eachround+1, waveformPackage[eachwaveform][3]))#[1]['Sepcification']\r\n# else:\r\n# self.normalOutputWritten('Round {} No recording channel.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No recording channel.\\n')\r\n print('No recording channel.')\r\n try:\r\n self.normalOutputWritten('Round {}, Analog signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))\r\n print('Round {}, Analog signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))#\r\n except:\r\n self.normalOutputWritten('No Analog signals.\\n')\r\n print('No Analog signals.')\r\n try:\r\n if len(waveformPackage[eachwaveform][2]['Sepcification']) != 0:\r\n self.normalOutputWritten('Round {}, Digital signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))\r\n self.normalOutputWritten('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n \r\n print('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n print('Round {}, Digital signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No Digital signals.\\n')\r\n print('No Digital signals.')\r\n waveform_sequence += 1\r\n self.normalOutputWritten('\\n')\r\n \r\n for eachcamoperation in camOperationPackage:\r\n #--------------------------------------------------------------\r\n # Show camera operations\r\n \r\n try:\r\n if len(camOperationPackage[eachcamoperation]) != 0:\r\n self.normalOutputWritten('Round {}, cam 
Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))\r\n print('Round {}, cam Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No camera operations.\\n')\r\n print('No camera operations.') \r\n \r\n self.normalOutputWritten('-----------end of round-----------\\n')\r\n self.normalOutputWritten('----------------------------------------\\n')", "def iter_recorded(self):\n return iter(self._recorded)", "def isolate_strokes(self):\n if self.onset_times is False:\n self.find_onsets()\n # Defining the frame to contain the strokes\n frame_sz = int(self.stroke_length*self.sampling_rate)\n self.strokes = np.array(\n [self.audio[i:i+frame_sz] for i in self.onset_samples])", "def wrapPlotsOverEdges(self):\n if not self.__selectedCurves:\n return\n wrapcurve = self.__selectedCurves[-1]\n path = self.curve_path_dict[wrapcurve]\n times = []\n xdata = numpy.array(wrapcurve.data().xData())\n ydata = numpy.array(wrapcurve.data().yData())\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in path:\n times = xdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in path:\n times = xdata[numpy.r_[False, numpy.diff(ydata) < 0].nonzero()[0]]\n else:\n ydata = analyzer.smooth(ydata)\n mid = numpy.mean(ydata)\n ydata = ydata[ydata > mid] # Threshold at midpoint\n times = xdata[numpy.r_[True, ydata[1:] > ydata[:-1]] & numpy.r_[ydata[:-1] > ydata[1:], True]]\n # start from the first edge, ignoring everything before it\n # and put end of simulation as the upper bound\n for curve in self.itemList():\n ydata = numpy.array(curve.data().yData())\n xdata = numpy.array(curve.data().xData()) \n path = self.curve_path_dict[curve]\n path_curve_list = self.path_curve_dict[path]\n path_curve_list.pop(path_curve_list.index(curve))\n self.curve_path_dict.pop(curve)\n curve.detach()\n start = 0\n end = len(xdata)\n for ii in range(-1, - len(times) - 1, -1):\n points = numpy.nonzero(xdata >= times[ii])[0]\n if len(points) == 0:\n continue\n start = points[0]\n xx = numpy.array(xdata[start:end] - times[ii])\n xdata[start:end] = -1.0\n new_curve = Qwt.QwtPlotCurve('%s #%d' % (curve.title().text(), len(times) + ii, ))\n new_curve.setData(xx, ydata[start:end])\n new_curve.setStyle(curve.style())\n new_curve.setPen(QtGui.QPen(curve.pen()))\n new_curve.setSymbol(Qwt.QwtSymbol(curve.symbol()))\n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n end = start \n self.replot()", "def get_gaba_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_gaba_neurons()):\n spktimes_singlesweep.append(\n np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[\n 0\n ]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes", "def spectate(self):\n pass", "def waves(repeats = 1):\r\n for i in range(repeats):\r\n alex.up()\r\n alex.color(hueGen(i, .5*i/repeats, .5))\r\n alex.goto(-315,315 - i)\r\n alex.seth(45) # set heading\r\n x = alex.xcor()\r\n y = alex.ycor()\r\n f = i + 1\r\n for j in range(630):\r\n x = alex.xcor()\r\n alex.goto(x + 1, y + 25*sin(8*j/f + i/25)) # plot sines\r\n alex.down()\r\n x = alex.xcor()", "def 
pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def wave_get_cbs():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSC, 0, 0))", "def spew(self):\n for frame in self.frames:\n print frame.func, frame", "def drawBolts(self,view):\r\n for bolt in self.getBolts():\r\n bolt.draw(view)", "def __iter__(self):\n for element in self.focals:\n yield element", "def find_spectra(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def printSpikes(self, filename, gather=True):\n spikes = self.getSpikes(compatible_output=True)\n if spikes != None:\n first_id = 0\n num_neurons = self.vertex.atoms\n dimensions = self.vertex.atoms\n last_id = self.vertex.atoms - 1\n utility_calls.check_directory_exists(filename)\n spikeFile = open(filename, \"w\")\n spikeFile.write(\"# first_id = %d\\n\" % first_id)\n spikeFile.write(\"# n = %d\\n\" % num_neurons)\n spikeFile.write(\"# dimensions = [%d]\\n\" % dimensions)\n spikeFile.write(\"# last_id = %d\\n\" % last_id)\n for (neuronId, time) in spikes:\n spikeFile.write(\"%d\\t%d\\n\" % (time, neuronId))\n spikeFile.close()", "def plot_frames(beads, sim, ti, tf, savebase):\n \n ### define the color for the spheres\n\n print 'defining colors'\n sphere_rgbcolor = gen_colors(sim.nbeads)\n\n ### create povray settings\n\n print 'creating povray settings'\n sphere_radius, img_widthpx, img_heightpx, povray_includes, \\\n povray_defaults, sun1, sun2, background, povray_cam, quality \\\n = gen_img_settings_quality(sim.lx)\n \n zi = np.zeros((sim.nbeads))\n \n ### set general plot properties\n\n os.system(\"mkdir -p \" + savebase)\n savebase += 'eps_' + str(sim.eps) + '_fp_' + str(sim.fp) + \\\n '_areak_' + str(sim.areak) + '_kappa_' + str(sim.kappa) + '/'\n os.system(\"mkdir -p \" + savebase)\n \n ### plot the frames\n \n for step in range(ti, tf):\n \n time = step*sim.dt\n print 'Step / Total : ', step, tf\n \n ### create povray items\n \n print 'generating povray item'\n particles = vapory.Object( \\\n vapory.Union( \\\n *[ vapory.Sphere([beads.xi[step, 0, j], beads.xi[step, 1, j],zi[j]], \\\n sphere_radius, vapory.Texture( \\\n vapory.Pigment('color', sphere_rgbcolor[j]), \\\n vapory.Finish('phong',1)) ) for j in range(0, sim.nbeads ) ] ) )\n\n ### generate povray objects\n\n print 'generating povray objects'\n povray_objects = [sun1, sun2, background, particles]\n ### create the scene\n scene = vapory.Scene( camera = povray_cam,\n objects = povray_objects, \n included = povray_includes, \n defaults = povray_defaults )\n \n ### render image\n \n print 'rendering scene'\n savename = \"pov-frame-\" + \"{0:05d}\".format(int(step)) + \".png\"\n scene.render(outfile=savename, width=img_widthpx, height=img_heightpx, \\\n antialiasing=0.001, quality=quality, remove_temp=True)\n \n ### move the image to the correct destination\n \n os.system('mv ' + savename + ' ' + savebase)\n \n return", "def waves(self) -> List[float]:\n return self._waves", "def thermals(self) -> Iterator[\"Flight\"]:\n self = cast(\"Flight\", self)\n all_segments = (\n self.unwrap()\n .diff(\"track_unwrapped\")\n .agg_time(\"1T\", vertical_rate=\"max\", track_unwrapped_diff=\"median\")\n .abs(track_unwrapped_diff_median=\"track_unwrapped_diff_median\")\n .query(\"vertical_rate_max > 2 and track_unwrapped_diff_median > 5\")\n )\n if all_segments is not None:\n yield from all_segments.split(\"1T\")", "def getScaleKeyTimes(self, view) -> list[float]:\n ...", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, 
marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def wave(self):\n return self._wave", "def findspikes(xin, vin, thresh, t0=None, t1= None, dt=1.0, mode=None, interpolate=False, debug=False):\n # if debug:\n # # this does not work with pyside...\n # import matplotlib\n # matplotlib.use('Qt4Agg')\n # import matplotlib.pyplot as PL\n # from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n # from matplotlib.figure import Figure\n # \n # #PL.rcParams['interactive'] = False\n \n st=numpy.array([])\n spk = []\n if xin is None:\n return(st, spk)\n xt = xin.view(numpy.ndarray)\n v = vin.view(numpy.ndarray)\n if t1 is not None and t0 is not None:\n it0 = int(t0/dt)\n it1 = int(t1/dt)\n if not isinstance(xin, numpy.ndarray):\n xt = xt[it0:it1]\n v = v[it0:it1]\n else:\n xt = xt[it0:it1]\n v = v[it0:it1]\n # if debug:\n # f = PL.figure(1)\n # print \"xt: \", xt\n # print \"v: \", v\n # PL.plot(numpy.array(xt), v, 'k-')\n # PL.draw()\n # PL.show()\n\n dv = numpy.diff(v, axis=0) # compute slope\n try:\n dv = numpy.insert(dv, 0, dv[0])\n except:\n pass # print 'dv: ', dv\n dv /= dt\n st = numpy.array([])\n spk = []\n spv = numpy.where(v > thresh)[0].tolist() # find points above threshold\n sps = numpy.where(dv > 0.0)[0].tolist() # find points where slope is positive\n sp = list(set.intersection(set(spv),set(sps))) # intersection defines putative spikes\n sp.sort() # make sure all detected events are in order (sets is unordered)\n sp = tuple(sp) # convert to tuple\n if sp is ():\n return(st, spk) # nothing detected\n dx = 1\n mingap = int(0.0005/dt) # 0.5 msec between spikes (a little unphysiological...)\n # normal operating mode is fixed voltage threshold\n # for this we need to just get the FIRST positive crossing,\n if mode == 'schmitt':\n sthra = list(numpy.where(numpy.diff(sp) > mingap))\n sthr = [sp[x] for x in sthra[0]] # bump indices by 1\n #print 'findspikes: sthr: ', len(sthr), sthr\n for k in sthr:\n if k == 0:\n continue\n x = xt[k-1:k+1]\n y = v[k-1:k+1]\n if interpolate:\n dx = 0\n m = (y[1]-y[0])/dt # local slope\n b = y[0]-(x[0]*m)\n s0 = (thresh-b)/m\n else:\n s0 = x[1]\n st = numpy.append(st, x[1])\n\n elif mode == 'peak':\n pkwidth = 1.0e-3 # in same units as dt - usually msec\n kpkw = int(pkwidth/dt)\n z = (numpy.array(numpy.where(numpy.diff(spv) > 
1)[0])+1).tolist()\n z.insert(0, 0) # first element in spv is needed to get starting AP\n spk = []\n #print 'findspikes peak: ', len(z)\n for k in z:\n zk = spv[k]\n spkp = numpy.argmax(v[zk:zk+kpkw])+zk # find the peak position\n x = xt[spkp-1:spkp+2]\n y = v[spkp-1:spkp+2]\n if interpolate:\n try:\n # mimic Igor FindPeak routine with B = 1\n m1 = (y[1]-y[0])/dt # local slope to left of peak\n b1 = y[0]-(x[0]*m1)\n m2 = (y[2]-y[1])/dt # local slope to right of peak\n b2 = y[1]-(x[1]*m2)\n mprime = (m2-m1)/dt # find where slope goes to 0 by getting the line\n bprime = m2-((dt/2.0)*mprime)\n st = numpy.append(st, -bprime/mprime+x[1])\n spk.append(spkp)\n except:\n continue\n else:\n st = numpy.append(st, x[1]) # always save the first one\n spk.append(spkp)\n return(st, spk)", "def getControlPointKeyTimes(self, view: Str = ...) -> list[float]:\n ...", "def spectralwhitening(st):\n \n for trace in arange(len(st)):\n data = st[trace].data\n \n n = len(data)\n nfft = nextpow2(n)\n \n spec = fft(data, nfft)\n spec_ampl = sqrt(abs(multiply(spec, conjugate(spec))))\n \n spec /= spec_ampl #Do we need to do some smoothing here?\n ret = real(ifft(spec, nfft)[:n])\n \n st[trace].data = ret\n \n return st", "def iterator(self):\n return _osgAnimation.vectorFloatKeyframe_iterator(self)", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning 
first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)", "def data_play(Y, visualizer, frame_rate=30):\r\n \r\n\r\n for y in Y:\r\n visualizer.modify(y[None, :])\r\n time.sleep(1./float(frame_rate))", "def testWaveform(self):\n\n\t\twg = waveform.Generator(frequency=Quantity(2, 'Hz'))\n\n\t\twg.delay(Quantity(2, 's'))\n\t\twg.marker(1, True)\n\t\twg.marker(2, True)\n\t\twg.pulse([], 0.5, Quantity(1, 's'))\n\t\twg.pulse([1.0, 0.0, -1.0], 1.0, Quantity(3, 's'))\n\t\twg.marker(1, False)\n\t\twg.square(-0.5, Quantity(2, 's'))\n\n\t\texpected = [0.0, 0.0, 0.0, 1.0, 0.6, 0.2, -0.2, -0.6, -1.0, -0.5, -0.5, -0.5, -0.5, -1.0]\n\n\t\twave, markers = wg.waveform\n\t\tassert_array_almost_equal(wave, expected, 4)\n\t\teq_(markers[1], [False] * 3 + [True] * 6 + [False] * 5)\n\t\teq_(markers[2], [False] * 3 + [True] * 11)\n\t\tassert 3 not in markers", "def plotspec(self, skyfile='', smooth=False,\n smoothwindow=0, showerr=False,\n zlines=0.0, lineset='none',\n plotwavemin=0, plotwavemax=0,\n plotfluxmin=0, plotfluxmax=0):\n # medsmooth = lambda f,N : array( [ median( f[max(0,i-N):min(len(f),max(0,i-N)+2*N)]) for i in range(len(f)) ] )\n\n\n pl.clf()\n lineset= lineset.lower()\n\n # TODO: this should be more general\n if skyfile :\n # lower axes : sky\n ax2 = pl.axes([0.03,0.05,0.95,0.2])\n skywave, skyflux = np.loadtxt( skyfile, unpack=True, usecols=[0,1] )\n pl.plot( skywave, skyflux , color='darkgreen',\n ls='-', drawstyle='steps' )\n ax1 = pl.axes([0.03,0.25,0.95,0.63], sharex=ax2)\n\n # upper axes : source\n # TODO : better smoothing !!!\n # if smoothwindow : flux = medsmooth( flux, smoothwindow )\n if smooth :\n if smoothwindow<5 :\n smoothwindow=5\n order=3\n print(\"raising S-G smoothwindow window to 5, order 3.\")\n if smoothwindow<7 :\n order=3\n else :\n order=5\n flux = savitzky_golay(self.flux, smoothwindow, order=order)\n else :\n flux = self.flux\n\n if showerr :\n pl.errorbar( self.wave, flux/np.median(flux),\n self.fluxerror/np.median(flux),\n marker=' ', color='k', ls='-', drawstyle='steps' )\n else :\n pl.plot( self.wave, 
flux/np.median(flux),\n marker=' ', color='k', ls='-', drawstyle='steps' )\n\n if lineset!='none':\n marklines( zlines, lineset )\n\n if plotwavemin==0:\n plotwavemin = self.wave.min()\n if plotwavemax==0:\n plotwavemin = self.wave.max()\n if plotfluxmin==0:\n plotfluxmin = -np.std(flux)\n if plotfluxmax==0:\n plotfluxmax = flux.max() + np.std(flux)\n\n ax = pl.gca()\n ax.set_xlim(plotwavemin, plotwavemax)\n ax.set_ylim(plotfluxmin, plotfluxmax)\n\n #pl.draw()\n #pl.show()\n return", "def itercubes(self, **kwargs):\n for ifuslot in self.fplane.ifuslots:\n yield ifuslot, self.extract_ifu_sensitivity_cube(ifuslot, \n **kwargs)", "def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.", "def spikingModel(wEE, wEI, wIE, wII, stim_e, stim_i,\n time=1000, dt=0.1, Vth=1.0, Vre=0.0,\n tau_e=15.0, tau_i=10.0, ref_e=5.0, ref_i=5.0, \n syntau2_e=3.0, syntau2_i=2.0, syntau1=1.0):\n\n T = np.arange(0,time,dt)\n nE = wEE.shape[0]\n nI = wII.shape[0]\n\n Ve = np.zeros((nE,len(T)))\n Vi = np.zeros((nI,len(T)))\n # Set initial conditions\n Ve = np.random.uniform(0,1,size=(nE,))\n Vi = np.random.uniform(0,1,size=(nI,))\n # Instantiate synaptic currents empty matrix\n Ie = np.zeros((nE,len(T)))\n Ii = np.zeros((nI,len(T)))\n # Instantiate spiking matrix\n spkE = np.zeros((nE,time))\n spkI = np.zeros((nI,time))\n # Instantiate synaptic input matrix (temporally downsampled)\n synE = np.zeros((nE,time))\n synI = np.zeros((nI,time))\n\n bin_spkE = np.zeros((nE,))\n bin_spkI = np.zeros((nI,))\n # Synaptic rise gating variable\n xrse_ee = np.zeros((nE,))\n xdec_ee = np.zeros((nE,))\n xrse_ei= np.zeros((nI,))\n xdec_ei = np.zeros((nI,))\n xrse_ie = np.zeros((nE,))\n xdec_ie = np.zeros((nE,))\n xrse_ii= np.zeros((nI,))\n xdec_ii = np.zeros((nI,))\n\n\n # Set random biases from a uniform distribution\n # Excitatory neurons\n mu_e = np.random.uniform(1.1,1.2,size=(nE,))\n #mu_e = np.random.uniform(1.05,1.15,size=(nE,)) # Imbalanced state\n # Inhibitory neurons\n mu_i = np.random.uniform(1.0,1.05,size=(nI,))\n\n maxrate = 500 # max rate is 100hz\n maxtimes = int(np.round(maxrate*time/1000))\n timesE = np.zeros((nE,maxrate))\n timesI = np.zeros((nI,maxrate))\n ne_s = np.zeros((nE,),dtype=int)\n ni_s = np.zeros((nI,),dtype=int)\n\n refractory_e = np.zeros((nE,))\n refractory_i = np.zeros((nI,))\n for t in range(len(T)-1):\n ## Using RK2 method\n\n ## K1s\n Ve = Ve + dt*((mu_e + stim_e - Ve)/tau_e + Ie[:,t])\n Vi = Vi + dt*((mu_i + stim_i - Vi)/tau_i + Ii[:,t])\n\n # Synaptic gating\n # Excitatory synapses\n xrse_ee = xrse_ee - dt*xrse_ee/syntau1 + np.matmul(bin_spkE,wEE)\n xdec_ee = xdec_ee - dt*xdec_ee/syntau2_e + np.matmul(bin_spkE,wEE)\n xrse_ei = xrse_ei - dt*xrse_ei/syntau1 + np.matmul(bin_spkE,wEI)\n xdec_ei = xdec_ei - dt*xdec_ei/syntau2_e + np.matmul(bin_spkE,wEI)\n # Inhibitory dt*synapses\n xrse_ie = xrse_ie - dt*xrse_ie/syntau1 + np.matmul(bin_spkI,wIE)\n xdec_ie = xdec_ie - dt*xdec_ie/syntau2_i + np.matmul(bin_spkI,wIE)\n xrse_ii = xrse_ii - dt*xrse_ii/syntau1 + np.matmul(bin_spkI,wII)\n xdec_ii = xdec_ii - dt*xdec_ii/syntau2_i + np.matmul(bin_spkI,wII)\n\n # Calculate synaptic outputs given rise and decay times\n Ie[:,t+1] = (xdec_ee - xrse_ee)/(syntau2_e - syntau1) + 
(xdec_ie - xrse_ie)/(syntau2_i - syntau1)\n Ii[:,t+1] = (xdec_ii - xrse_ii)/(syntau2_i - syntau1) + (xdec_ei - xrse_ei)/(syntau2_e - syntau1)\n\n ## Spiking\n # Find which neurons exceed threshold (and are not in a refractory period)\n bin_spkE = np.multiply(Ve>Vth, refractory_e==0.0)\n bin_spkI = np.multiply(Vi>Vth, refractory_i==0.0)\n\n # Save spike time (and downsample to 1ms)\n tms = int(np.floor(T[t]))\n spkE[bin_spkE,tms] = 1 # spikes are okay - refractory period is 5ms, anyway\n spkI[bin_spkI,tms] = 1\n synE[:,tms] = synE[:,tms] + Ie[:,t]\n synI[:,tms] = synI[:,tms] + Ii[:,t]\n\n # Reset voltages\n Ve[bin_spkE] = Vre\n Vi[bin_spkI] = Vre\n\n # spike times\n timesE[bin_spkE,ne_s[bin_spkE]] = T[t+1]\n timesI[bin_spkI,ni_s[bin_spkI]] = T[t+1]\n ne_s[bin_spkE] = ne_s[bin_spkE] + 1\n ni_s[bin_spkI] = ni_s[bin_spkI] + 1\n\n\n # Set refractory period\n # Add a refractory time step to neurons who just spiked, and to those are still in a refractory period\n refractory_e = refractory_e + (bin_spkE * dt) + (refractory_e!=0) * dt \n refractory_i = refractory_i + (bin_spkI * dt) + (refractory_i!=0) * dt\n # Once refractory period is complete, allow to spike\n can_spike_again_e = np.round(refractory_e,1) == ref_e\n can_spike_again_i = np.round(refractory_i,1) == ref_i\n\n refractory_e[can_spike_again_e] = 0.0\n refractory_i[can_spike_again_i] = 0.0\n\n # Set neurons who are in their refractory to the baseline membrane potential\n in_refractory_e = refractory_e != 0.0\n in_refractory_i = refractory_i != 0.0\n\n Ve[in_refractory_e] = Vre\n Vi[in_refractory_i] = Vre\n \n return spkE, spkI, synE, synI, timesE, timesI, ne_s, ni_s", "def resample_spikes(spikes_dict, FS_new):\n\n sp_waves = spikes_dict['data']\n time = spikes_dict['time']\n FS = spikes_dict['FS']\n\n resamp_time = np.arange(time[0], time[-1], 1000.0 / FS_new)\n n_pts, n_spikes, n_contacts = sp_waves.shape\n\n spike_resamp = np.empty((len(resamp_time), n_spikes, n_contacts))\n\n for i in range(n_spikes):\n for contact in range(n_contacts):\n tck = interpolate.splrep(time, sp_waves[:, i, contact], s=0)\n spike_resamp[:, i, contact] = interpolate.splev(resamp_time,\n tck, der=0)\n\n return {\"data\": spike_resamp, \"time\": resamp_time, \"FS\": FS}", "def animate(self, i):\n try:\n self.lastSpectrum = self.spectrometer.getSpectrum()\n if self.darkReference is not None:\n self.lastSpectrum -= self.darkReference\n if self.whiteReference is not None:\n np.seterr(divide='ignore',invalid='ignore')\n if self.darkReference is not None:\n self.lastSpectrum = self.lastSpectrum / (self.whiteReference-self.darkReference)\n else:\n self.lastSpectrum = self.lastSpectrum / self.whiteReference \n\n self.plotSpectrum(spectrum=self.lastSpectrum)\n except usb.core.USBError as err:\n print(\"The spectrometer was disconnected. 
Quitting.\")\n self.quitFlag = True\n\n if self.quitFlag:\n self.animation.event_source.stop()\n self.animation = None\n plt.close()", "def make_figures():\n amps = numpy.array([0.6, 0.25, 0.1, 0.05])\n freqs = [100, 200, 300, 400]\n framerate = 11025\n\n ts = numpy.linspace(0, 1, framerate)\n ys = synthesize1(amps, freqs, ts)\n print(ys)\n \n thinkplot.preplot(2)\n n = framerate / 25\n thinkplot.plot(ts[:n], ys[:n].real, label='real')\n thinkplot.plot(ts[:n], ys[:n].imag, label='imag')\n thinkplot.save(root='dft1',\n xlabel='time (s)',\n ylabel='wave',\n ylim=[-1.05, 1.05])\n\n ys = synthesize2(amps, freqs, ts)\n\n amps2 = amps * numpy.exp(1.5j)\n ys2 = synthesize2(amps2, freqs, ts)\n\n thinkplot.preplot(2)\n thinkplot.plot(ts[:n], ys.real[:n], label=r'$\\phi_0 = 0$')\n thinkplot.plot(ts[:n], ys2.real[:n], label=r'$\\phi_0 = 1.5$')\n thinkplot.save(root='dft2',\n xlabel='time (s)', \n ylabel='wave',\n ylim=[-1.05, 1.05],\n loc='lower right')\n\n\n framerate = 10000\n signal = thinkdsp.SawtoothSignal(freq=500)\n wave = signal.make_wave(duration=0.1, framerate=framerate)\n hs = dft(wave.ys)\n amps = numpy.absolute(hs)\n\n N = len(hs)\n fs = numpy.arange(N) * framerate / N\n thinkplot.plot(fs, amps)\n thinkplot.save(root='dft3',\n xlabel='frequency (Hz)', \n ylabel='amplitude',\n legend=False)", "def step(self):\n for c in self.spill_list:\n \n self._schedule.step()", "def spectrum(datapath, run, forcebins = False):\n runpath = datapath + '/' + run\n events = [evnt for evnt in listdir(runpath) if not isfile(join(runpath,evnt))]\n allTraces = []\n total_time = 0\n pulses = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n times = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n #camextratime = 25e-6\n for event in events:\n if int(event)> 3:\n break\n print(event)\n e = sbc.DataHandling.GetSBCEvent.GetEvent(runpath,event)\n if e[\"slowDAQ\"][\"loaded\"]:\n #print(e[\"fastDAQ\"].keys())\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n #dcam = np.diff(cgate)\n fdt = e['fastDAQ']['time']\n #camOffTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]))\n \n #camOnTimes = np.sort(np.array([fdt[i] for i in range(len(dcam)) if dcam[i] < 0.5]))\n fddt = fdt[1]-fdt[0]\n tfast = fdt[-1]-fdt[0]\n LED_on = [fdt[i] for i in range(len(cgate)) if cgate[i]<-0.5]\n blockedFraction = ((len(LED_on)*fddt))/tfast\n print(blockedFraction)\n tr = e[\"PMTtraces\"]\n trac = tr[\"traces\"]\n dt = tr[\"dt\"]\n #event_time = (tr['t0_sec'][-1]+tr['t0_frac'][-1]-tr['t0_sec'][0] - tr['t0_frac'][0])[0]\n event_time = (((e[\"slowDAQ\"][\"elapsed_time\"][-1]-e[\"slowDAQ\"][\"elapsed_time\"][0]))*(1-blockedFraction))\n #print(event_time)\n total_time += event_time\n\n #f,axes = plt.subplots(1,5,sharey=True)\n #f.suptitle(runpath+\"/\"+str(event))\n #pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n #d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n #pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n #tracetimes = pmttracetime - pmtalign\n #camoffindex = 0\n #camonindex = 0\n for i in range(len(trac)):\n #print(i)\n \"\"\"\n thistracetime = tracetimes[i]\n \n #nearestcamoff = min(camOffTimes, key=lambda x:abs(x-thistracetime))\n #nearestcamon = min(camOnTimes, key=lambda x:abs(x-thistracetime))\n print(camOffTimes[camoffindex])\n print(thistracetime)\n if thistracetime > camOffTimes[camoffindex]:\n camoffindex += 1\n if thistracetime > camOnTimes[camonindex]:\n camonindex += 1 \n if camoffindex<len(camOffTimes)-1:\n if 
abs(camOffTimes[camoffindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera off')\n continue\n if camonindex<len(camOnTimes)-1:\n if abs(camOnTimes[camonindex]-thistracetime)<camextratime:\n print('excluding a trace near a camera on')\n continue\n \"\"\"\n trace = np.fabs(trac[i][0])\n if max(trace) == 128:\n trace = stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt_tr = dt[i][0]\n\n # populate dictionaries arrays based on how many pulses there were\n [a,n,totInt,pktimes] = SBC_pulse_integrator_bressler(trace,dt_tr)\n if n == 0:\n number = 'zero'\n allTraces.append(a)\n elif n == 1:\n number = 'one'\n allTraces.append(a)\n times['one'].append(pktimes[0])\n elif n == 2:\n number = 'two'\n allTraces.append(a)\n elif n == 3:\n number = 'three'\n allTraces.append(a)\n else:\n number = 'other'\n allTraces.append(a)\n \"\"\"\n #if a != None:\n if isZero:\n if j < 5:\n if isNegative:\n if random() >0:\n print(runpath+\"/\"+str(event)+\" pmt trace \"+str(i))\n tPMT = np.arange(len(trace))*dt_tr\n axes[j].plot(tPMT,trace,lw=3)\n axes[j].set_xlabel(\"time (s)\",fontsize=25)\n axes[j].set_ylabel(\"PMT response (ADC)\",fontsize=25)\n j+=1\n \n \n plt.show\n \"\"\"\n pulses[number].append(a)\n gc.collect()\n \n \n for k in pulses:\n pulses[k] = [x for x in pulses[k] if x != None]\n \n allTraces = [x for x in allTraces if x != None]\n \n plt.figure()\n\n Nbins = int(np.floor(np.sqrt(len(allTraces))))\n allvals, bins, _ = plt.hist(allTraces,Nbins,label='all traces')\n \n areaVals = {'zero': [], 'one': [], 'two': [], 'three': [], 'other': []}\n for k in pulses:\n if k != 'other':\n areaVals[k], _, _ = plt.hist(pulses[k],bins,histtype = 'step',\n linewidth = 3,label= k+' hits')\n plt.legend(fontsize=12)\n plt.show() \n #spe_spectrum = areaVals['one']\n \n #def gaussian(x,mu,sigma,amplitude):\n # return amplitude * np.exp(-((x - mu) /(np.sqrt(2)* sigma))**2 )\n \n #params_spe, params_cov_spe = scipy.optimize.curve_fit(gaussian,bins[:len(areaVals['one'])],\n # spe_spectrum,\n # p0=[0.4e8,1e7,40])\n #params_twohits, params_cov_twohits = scipy.optimize.curve_fit(gaussian,\n # bins[:len(areaVals['two'])],\n # areaVals['two'],\n # p0=[0.8e8,1e7,10])\n #mu_spe = params_spe[0]\n #mu_2 = params_twohits[0] - mu_spe\n #print(mu_spe)\n #print(mu_2)\n \n #mu_avg = (mu_spe + mu_2)*0.5\n #mu_avg = get_gain(datapath,run)\n mu_avg = 1e7\n print(mu_avg)\n\n \n plt.figure()\n plt.grid(True)\n if isinstance(forcebins,np.ndarray):\n bins=forcebins\n fullspect,_,_=plt.hist([t/mu_avg for t in allTraces],\n forcebins,label='all traces')\n \n else:\n fullspect,bins,_=plt.hist([t/mu_avg for t in allTraces],\n int(np.floor(np.sqrt(len(allTraces)))),label='all traces')\n \n #print(bins)\n plt.yscale('log')\n plt.xlabel('phe based on a gain of '+str(mu_avg)+' electrons per phe')\n plt.legend()\n plt.show\n print(sum(fullspect)/total_time)\n print(\"The Total Exposure Time of run \"+str(runpath)+ \" was \"+str(total_time)+\" Seconds\")\n print(\"The overall PMT trigger rate was \" + str(len(allTraces)/total_time)+ \"Hz\")\n return [fullspect,bins,total_time]", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)", "def extract_spike_features(time, current, voltage, start=0.1, end=0.7, fil=10):\n\n df = pd.DataFrame()\n df_related_features = pd.DataFrame()\n for c, curr in enumerate(current):\n current_array = curr * np.ones_like(time)\n start_index = (np.abs(time - start)).argmin() # Find closest index where the injection current starts\n end_index = (np.abs(time - 
end)).argmin() # Find closest index where the injection current ends\n current_array[:start_index] = 0\n current_array[end_index:len(current_array)] = 0\n EphysObject = efex.EphysSweepFeatureExtractor(t=time, v=voltage[:, c], i=current_array, start=start, \\\n end=end, filter=fil)\n EphysObject.process_spikes()\n\n # Adding peak_height (mV) + code for maximum frequency determination (see further)\n spike_count = 0\n if EphysObject._spikes_df.size:\n EphysObject._spikes_df['peak_height'] = EphysObject._spikes_df['peak_v'].values - \\\n EphysObject._spikes_df['threshold_v'].values\n spike_count = EphysObject._spikes_df['threshold_i'].values.size\n df = pd.concat([df, EphysObject._spikes_df], sort=True)\n\n # Some easily found extra features\n df_features = EphysObject._sweep_features\n\n # Adding spike count\n df_features.update({'spike_count': spike_count})\n\n # Adding spike frequency adaptation (ratio of spike frequency of second half to first half)\n SFA = np.nan\n half_stim_index = ft.find_time_index(time, np.float(start + (end - start) / 2))\n if spike_count > 5: # We only consider traces with more than 8.333 Hz = 5/600 ms spikes here\n # but in the end we only take the trace with the max amount of spikes\n\n if np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index) != 0:\n SFA = np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] > half_stim_index) / \\\n np.sum(df.loc[df['threshold_i'] == curr, :]['threshold_index'] < half_stim_index)\n\n df_features.update({'SFA': SFA})\n\n # Adding current (pA)\n df_features.update({'current': curr})\n\n # Adding membrane voltage (mV)\n df_features.update({'resting_membrane_potential': EphysObject._get_baseline_voltage()})\n\n # Adding voltage deflection to steady state (mV)\n voltage_deflection_SS = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n # voltage_deflection_v, voltage_deflection_i = EphysObject.voltage_deflection() # = old way: max deflection\n df_features.update({'voltage_deflection': voltage_deflection_SS})\n\n # Adding input resistance (MOhm)\n input_resistance = np.nan\n if not ('peak_i' in EphysObject._spikes_df.keys()) and not curr == 0: # We only calculate input resistances\n # from traces without APs\n input_resistance = (np.abs(voltage_deflection_SS - EphysObject._get_baseline_voltage()) * 1000) / np.abs(\n curr)\n if input_resistance == np.inf:\n input_resistance = np.nan\n df_features.update({'input_resistance': input_resistance})\n\n # Adding membrane time constant (s) and voltage plateau level for hyperpolarisation paradigms\n # after stimulus onset\n tau = np.nan\n E_plat = np.nan\n sag_ratio = np.nan\n if curr < 0: # We use hyperpolarising steps as required in the object function to estimate the\n # membrane time constant and E_plateau\n while True:\n try:\n tau = EphysObject.estimate_time_constant() # Result in seconds!\n break\n except TypeError: # Probably a noisy bump for this trace, just keep it to be np.nan\n break\n E_plat = ft.average_voltage(voltage[:, c], time, start=end - 0.1, end=end)\n sag, sag_ratio = EphysObject.estimate_sag()\n df_features.update({'tau': tau})\n df_features.update({'E_plat': E_plat})\n df_features.update({'sag_ratio': sag_ratio})\n\n # For the rebound and sag time we only are interested in the lowest (-200 pA (usually)) hyperpolarisation trace\n rebound = np.nan\n sag_time = np.nan\n sag_area = np.nan\n\n if c == 0:\n baseline_interval = 0.1 # To calculate the SS voltage\n v_baseline = EphysObject._get_baseline_voltage()\n\n 
end_index = ft.find_time_index(time, 0.7)\n if np.flatnonzero(voltage[end_index:, c] > v_baseline).size == 0: # So perfectly zero here means\n # it did not reach it\n rebound = 0\n else:\n index_rebound = end_index + np.flatnonzero(voltage[end_index:, c] > v_baseline)[0]\n if not (time[index_rebound] > (end + 0.15)): # We definitely have 150 ms left to calculate the rebound\n rebound = ft.average_voltage(\n voltage[index_rebound:index_rebound + ft.find_time_index(time, 0.15), c], \\\n time[index_rebound:index_rebound + ft.find_time_index(time, 0.15)]) - v_baseline\n else: # Work with whatever time is left\n if time[-1] == time[index_rebound]:\n rebound = 0\n else:\n rebound = ft.average_voltage(voltage[index_rebound:, c], \\\n time[index_rebound:]) - v_baseline\n\n v_peak, peak_index = EphysObject.voltage_deflection(\"min\")\n v_steady = ft.average_voltage(voltage[:, c], time, start=end - baseline_interval, end=end)\n\n if v_steady - v_peak < 4: # The sag should have a minimum depth of 4 mV\n # otherwise we set sag time and sag area to 0\n sag_time = 0\n sag_area = 0\n else:\n # First time SS is reached after stimulus onset\n first_index = start_index + np.flatnonzero(voltage[start_index:peak_index, c] < v_steady)[0]\n # First time SS is reached after the max voltage deflection downwards in the sag\n if np.flatnonzero(voltage[peak_index:end_index, c] > v_steady).size == 0:\n second_index = end_index\n else:\n second_index = peak_index + np.flatnonzero(voltage[peak_index:end_index, c] > v_steady)[0]\n sag_time = time[second_index] - time[first_index]\n sag_area = -integrate.cumtrapz(voltage[first_index:second_index, c], time[first_index:second_index])[-1]\n\n burst_metric = np.nan\n # print(c)\n if spike_count > 5:\n burst = EphysObject._process_bursts()\n if len(burst) != 0:\n burst_metric = burst[0][0]\n\n df_features.update({'rebound': rebound})\n df_features.update({'sag_time': sag_time})\n df_features.update({'sag_area': sag_area})\n df_features.update({'burstiness': burst_metric})\n\n df_related_features = pd.concat([df_related_features, pd.DataFrame([df_features])], sort=True)\n\n return df, df_related_features", "def getSkewXKeyTimes(self, view) -> list[float]:\n ...", "def wave_samples(self):\n return self._quantized_subsamples", "def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk, rate = self.__read_wav_file(spk_file)\n sig_noise, _ = self.__read_wav_file(noise_file)\n\n # Skip silence file\n if np.mean(sig_spk ** 2) < self.energy_silence_threshold or \\\n np.mean(sig_noise ** 2) < self.energy_silence_threshold:\n continue\n\n # Apply reverberations\n if self._enable_rir:\n rev_prob = np.random.uniform(0, 1) < self._rir_prob\n if rev_prob:\n filter_num = random.randint(0, self._rir_filters_num - 1)\n\n filter_sp_name = self.RIR_PREF + str(filter_num) + self.RIR_SP_SUF\n filter_n_name = self.RIR_PREF + str(filter_num) + self.RIR_N_SUF\n\n sig_spk = reverb_util.reverb_matlab(sig_spk, rate, os.path.join(self._rir_dir, filter_sp_name))\n sig_noise = reverb_util.reverb_matlab(sig_noise, rate, os.path.join(self._rir_dir, filter_n_name))\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n spk_length = sig_spk.shape[0]\n noise_length = sig_noise.shape[0]\n\n if min_length < self._fftsize:\n 
raise Exception(\"ERROR: Too short signals in dataset\")\n\n if spk_length > min_length:\n start_ind = random.randint(0, spk_length - min_length)\n sig_spk = sig_spk[start_ind:start_ind + min_length]\n elif noise_length > min_length:\n start_ind = random.randint(0, noise_length - min_length)\n sig_noise = sig_noise[start_ind:start_ind + min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Normalization\n norm_const = np.max([np.max(np.abs(sig_spk)), np.max(np.abs(sig_noise))])\n sig_spk /= norm_const\n sig_noise /= norm_const\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0", "def __iter__(self):\n\n self.__counter = 0\n return iter(self.__fasteners)", "def _show_traces(self, fovsubset, time_vec, fps, gs, gs_rows, gs_cols):\n detected_spikes = locate_spikes_peakutils(fovsubset.dff, fps=fps)\n ax = plt.subplot(gs[gs_rows, gs_cols])\n if time_vec.shape[0] != fovsubset.dff.shape[1]:\n dff = np.zeros((fovsubset.dff.shape[0], time_vec.shape[0]))\n spikes = np.zeros((detected_spikes.shape[0], time_vec.shape[0]))\n spikes[:, :detected_spikes.shape[1]] = detected_spikes\n dff[:, :fovsubset.dff.shape[1]] = fovsubset.dff\n else:\n dff = fovsubset.dff\n spikes = detected_spikes\n scatter_spikes(dff, spikes, downsample_display=1, time_vec=time_vec, ax=ax)", "def instrumented(self,) -> Iterable[Tuple[JSONPath, ComponentView]]:\n\n for q, c in instrumented_component_views(self.dict()):\n # Add the chain indicator so the resulting paths can be specified\n # for feedback selectors.\n q = JSONPath(\n path=(GetItemOrAttribute(item_or_attribute=\"__app__\"),) + q.path\n )\n yield q, c", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength 
contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def iter_waters(self):\n fpred = lambda f: f.is_water()\n return itertools.ifilter(fpred, self.iter_fragments())", "def getTransformKeyTimes(self, view) -> list[float]:\n ...", "def draw_samplers(self):\n raise NotImplementedError(\" The draw_samplers() method has not been implemented \")", "def __call__(self, waveforms, telid, selected_gain_channel):", "def spectators(self):\n return self._return_if('_spectators')", "def analyze_wfs(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001, compact=True):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n # Ora faccio un loop sugli eventi..\n if compact:\n for event in range(0, len(self.table_sipm_time['ev']), 9):\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_dataframe = self.analyze_ev_wf_compact(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_dataframe], ignore_index=True)\n bar.update(counter+1)\n counter += 9\n else:\n for event in self.table_sipm_time['ev']:\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_time, peaks_ampl = self.analyze_ev_wf(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat([self.wf_peaks, pd.DataFrame(\n {'t': peaks_time, 'A': peaks_ampl})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n bar.finish()\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))", 
"def enemy_waves(self):\n\n pass", "def per_shoebox_whitelist_iterator(self, sidx):\n Z = self.refl_table\n SOFF = Z[\"spots_offset\"]\n SSIZ = Z[\"spots_size\"]\n slow_size = 254\n panel_size = 254 * 254\n for idxpx in self.spots_pixels[SOFF[sidx]:SOFF[sidx]+SSIZ[sidx]]:\n ipanel = idxpx//panel_size; panelpx = idxpx%panel_size\n islow = panelpx//slow_size; ifast = panelpx%slow_size\n yield ipanel, islow, ifast", "def plot_waveforms(cutouts, fs, pre, post, n=100, color='k', show=True):\n if n is None:\n n = cutouts.shape[0]\n n = min(n, cutouts.shape[0])\n time_in_us = np.arange(-pre*1000, post*1000, 1e3/fs)\n if show:\n _ = plt.figure(figsize=(10,6))\n\n for i in range(n):\n _ = plt.plot(time_in_us, cutouts[i,]*1e6, color, linewidth=1, alpha=0.3)\n _ = plt.xlabel('Time (ms)')\n _ = plt.ylabel('Voltage (mV)')\n _ = plt.title('Spike Waveforms')\n\n if show:\n plt.show()", "def ReceiveSpike(self, w):\n self.weighted_incoming_spikes += w", "def __iter__(self):\n for featuredict in self._data[\"features\"]:\n yield Stop(featuredict)", "def dumpRecording(self, files):\n for tone, f in zip(self.tones, files):\n tone.dump_to_file(f)", "def _get_pulse_shaping_waveform(self):\n self.pulse_shaping_list = []\n # Make the rise time be 3.3333% if the dot time.\n rise_time_in_msec = 0.03333333333333 * self.dot_time_in_msec\n # Limit the rise time to 2 milliseconds.\n if rise_time_in_msec > 0.002:\n rise_time_in_msec = 0.002\n rising_falling_count = int(rise_time_in_msec * self.sample_rate)\n step = math.pi / rising_falling_count\n # The first value is zero, so skip that value.\n # The last value is 1.0, so skip that value too.\n for i in range(1, rising_falling_count - 1):\n gain = 0.5 * (1.0 - math.cos(step * i))\n self.pulse_shaping_list.append(gain)", "def next_wave(self):\n if self._wave == self._level.get_max_wave():\n return\n\n self._wave += 1\n\n #Task 1.3 (Status Bar): Update the current wave display here\n self._status_bar.set_wave(self._wave)\n\n #Task 1.5 (Play Controls): Disable the add wave button here (if this is the last wave)\n if self._wave == 20:\n self._wave_button.config(state=tk.DISABLED)\n\n #Generate wave and enqueue\n wave = self._level.get_wave(self._wave, self._game)\n for step, enemy in wave:\n enemy.set_cell_size(self._game.grid.cell_size)\n\n self._game.queue_wave(wave)", "def stop(self) -> None:\n with self._samplers_lock:\n for sampler in self._samplers.values():\n sampler.stop()", "def test_plot_wiggle_traces(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPlotter(ax, self.segy)\n # should add a single artist to the active line dict.\n splt.plot_wiggles(wiggle_traces=True)\n self.assertEqual(len(splt.ACTIVE_LINES['wiggle_traces']),1)\n # same artist should be in the axes list\n self.assertTrue(splt.ACTIVE_LINES['wiggle_traces'][0] in \\\n splt.ax.lines)\n # should be able to directly remove it\n splt.ax.lines.remove(splt.ACTIVE_LINES['wiggle_traces'][0])\n self.assertEqual(len(splt.ax.lines), 0)\n # and re-add it\n splt.ax.lines.append(splt.ACTIVE_LINES['wiggle_traces'][0])\n self.assertEqual(len(splt.ax.lines), 1)\n # should move artist to the inactive line dict.\n splt.plot_wiggles(wiggle_traces=False)\n self.assertEqual(len(splt.INACTIVE_LINES['wiggle_traces']),1)\n self.assertEqual(len(splt.ax.lines), 0)\n # calling again should change nothing\n splt.plot_wiggles(wiggle_traces=False)\n self.assertEqual(len(splt.INACTIVE_LINES['wiggle_traces']),1)\n self.assertEqual(len(splt.ax.lines), 0)\n # should move artist back to active line dict and re-add 
to axes\n splt.plot_wiggles(wiggle_traces=True)\n self.assertEqual(len(splt.ACTIVE_LINES['wiggle_traces']),1)\n self.assertEqual(len(splt.ax.lines), 1)\n # new plot items should be accessible through the orig. axes\n self.assertEqual(len(splt.ax.lines),\n len(ax.lines))", "def plot_wavefunctions(self, num_levels=10):\n for ind, psi in enumerate(self.psis(num_levels)):\n plot(self.x, psi * sign(psi[1] - psi[0]), label=\"$\\psi_%d$\" % ind)", "def replace_waveforms(self, waveform_dict):\n for waveform, value in waveform_dict.items():\n\n if waveform not in self.unresolved_placeholders:\n self.hd.log.error(f\"Placeholder {waveform} not found in sequence.\")\n\n waveform_wrapped = f\"{self.marker_string}{waveform}{self.marker_string}\"\n waveform_vector = 'vect(' + ','.join([str(x) for x in value]) + ')'\n self.sequence = self.sequence.replace(f\"{waveform_wrapped}\", str(waveform_vector))\n self.unresolved_placeholders.discard(waveform)", "def __call__(self, stage, itr):\n if not self.logWeights:\n weightNames = list(stage.logWeights.keys())\n getLogger(__name__).info(\"Start collecting log weights. \"\n \"Found %s at trajectory %d.\", weightNames, itr)\n self.logWeights = {name: [] for name in weightNames}\n for key, val in stage.logWeights.items():\n self.logWeights[key].append(complex(val))", "def plot_single_spk(spk,subplot = None, spines = ['left', 'bottom'],\n figure_size = (5,5),dpi=600,**kwargs):\n if subplot == None: \n # Creating the figure \n f = plt.figure(figsize=figure_size,dpi=dpi)\n # creating the axes\n ax = f.add_subplot(111)\n else:\n ax = subplot\n #ax.plot(range(-20,44),spk.waveform,**kwargs)\n time_vec = np.linspace(spk.time_edge[0],spk.time_edge[1],spk.waveform.shape[0],endpoint=True)*1000\n ax.plot(time_vec,spk.waveform,**kwargs)\n plt.xlabel('Time (ms)')\n adjust_spines(ax, spines)", "def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n # Figure 1: Position\n fig = self.plot_kine_var(1, self.trial_name, ('X (mm)', 'Y (mm)', 'Z (mm)'), self.prev_filled[0],\n self.smoothed[0], self.filled[0], self.sfs[0])\n figs.append(fig)\n\n # Figure 2: Orientation\n fig = self.plot_kine_var(2, self.trial_name, ('Flex/Ext (deg)', 'Lat Flex (deg)', 'Axial (deg)'),\n self.prev_filled[1], self.smoothed[1], self.filled[1], self.sfs[1])\n figs.append(fig)\n\n return figs", "def _traces(self, *args, **kwargs):\r\n # initialize traces with a draw from the prior\r\n old_model_trace = poutine.trace(self.model)(*args, **kwargs)\r\n traces = []\r\n t = 0\r\n i = 0\r\n while t < self.burn + self.lag * self.samples:\r\n i += 1\r\n # q(z' | z)\r\n new_guide_trace = poutine.block(\r\n poutine.trace(self.model))(old_model_trace, *args, **kwargs)\r\n # p(x, z')\r\n new_model_trace = poutine.trace(\r\n poutine.replay(self.model, new_guide_trace))(*args, **kwargs)\r\n # q(z | z')\r\n old_guide_trace = poutine.block(\r\n poutine.trace(\r\n poutine.replay(self.model, old_model_trace)))(new_model_trace,\r\n *args, **kwargs)\r\n # p(x, z') q(z' | z) / p(x, z) q(z | z')\r\n logr = new_model_trace.log_pdf() + new_guide_trace.log_pdf() - \\\r\n old_model_trace.log_pdf() - old_guide_trace.log_pdf()\r\n rnd = pyro.sample(\"mh_step_{}\".format(i),\r\n Uniform(torch.zeros(1), torch.ones(1)))\r\n\r\n if torch.log(rnd).data[0] < logr.data[0]:\r\n # accept\r\n t += 1\r\n old_model_trace = new_model_trace\r\n if t <= self.burn or (t > self.burn and t % self.lag == 0):\r\n yield (new_model_trace, new_model_trace.log_pdf())", "def plot(self):\n fig, ax = plt.subplots()\n for i in 
range(len(self.lsys.elements)):\n element = self.lsys.elements[i]\n # Draw Lens Element\n # Aperture\n if element.is_stop:\n line_x = [element.z, element.z]\n line_y = [element.aperture_radius,\n 1.2*element.aperture_radius]\n ax.plot(line_x, line_y, c=\"blue\")\n line_y = [-element.aperture_radius, -\n 1.2*element.aperture_radius]\n ax.plot(line_x, line_y, c=\"blue\")\n # Spherical Lens\n else:\n z = element.z\n r = element.curvature_radius\n h = element.aperture_radius\n theta = abs(np.degrees(np.arcsin(h / r)))\n angle = 180 if r > 0 else 0\n arc = patches.Arc((z + r, 0), 2*abs(r), 2*abs(r),\n angle=angle, theta1=-theta, theta2=theta)\n ax.add_patch(arc)\n\n # Draw Lens Box\n if i > 0:\n element_prev = self.lsys.elements[i - 1]\n\n # current or previous element is aperture radius\n if element.is_stop or element_prev.is_stop:\n continue\n\n # previous element is air\n if element_prev.ior(0.550) == 1:\n continue\n\n z = element.z\n r = element.curvature_radius\n h = element.aperture_radius\n l = r - \\\n np.sqrt(r**2 - h**2) if r > 0 else -(np.abs(r) -\n np.sqrt(r**2 - h**2))\n zp = element_prev.z\n rp = element_prev.curvature_radius\n hp = element_prev.aperture_radius\n lp = rp - \\\n np.sqrt(rp**2 - hp**2) if rp > 0 else -(np.abs(rp) -\n np.sqrt(rp**2 - hp**2))\n\n if h > hp:\n ax.plot([zp + lp, z + l], [h, h], c=\"black\")\n ax.plot([zp + lp, z + l], [-h, -h], c=\"black\")\n ax.plot([zp + lp, zp + lp], [hp, h], c=\"black\")\n ax.plot([zp + lp, zp + lp], [-hp, -h], c=\"black\")\n else:\n ax.plot([zp + lp, z + l], [hp, hp], c=\"black\")\n ax.plot([zp + lp, z + l], [-hp, -hp], c=\"black\")\n ax.plot([z + l, z + l], [h, hp], c=\"black\")\n ax.plot([z + l, z + l], [-h, -hp], c=\"black\")\n\n # figure width, height\n z_list = [element.z for element in self.lsys.elements]\n length = max(z_list) - min(z_list)\n max_aperture_radius = max([\n element.aperture_radius for element in self.lsys.elements])\n ax.set_xlim([min(z_list) - 0.3*length,\n self.image_focal_z() + 0.3*length])\n ax.set_ylim([-1.1 * max_aperture_radius, 1.1*max_aperture_radius])\n ax.set_aspect('equal')\n ax.grid('on')\n plt.xlabel('$z \\mathrm{[mm]}$')\n plt.ylabel('$y \\mathrm{[mm]}$')\n\n return ax", "def step(self):\n if self.iteration % self.print_interval == 0:\n print('Iteration: {:} (Time: {:.4f})'.format(self.iteration, time.time() - self.clock))\n self.clock = time.time()\n\n #if self.iteration % self.save_interval == 0:\n # print('Saving network to {:}'.format(self.save_dir))\n # self.network.save(self.save_dir)\n\n # Choose action based on output neuron spiking.\n # need inserting to spike_record\n a = self.action_function(self, output=self.output)\n # convert number into action_name\n self.action_name = self.env.subject.action_list[a]\n\n # Run a step of the environment.\n events, self.reward, self.done, info = self.env.step(action=self.action_name)\n\n # reward accumulation\n self.accumulated_reward += self.reward\n\n # currently image-based learning is adopted (Future work : spike-based)\n events_img = events_to_image(events, self.env.render_width, self.env.render_height)\n self.obs = torch.from_numpy(cv2.cvtColor(events_img, cv2.COLOR_BGR2GRAY)).float()/255.0\n\n # Encode the observation using given encoding function.\n for inpt in self.encoded:\n self.encoded[inpt] = self.encoding(self.obs, time=self.time, dt=self.network.dt)\n\n # Run the network on the spike train-encoded inputs.\n self.network.run(inpts=self.encoded, time=self.time, reward=self.reward)\n self.set_spike_data() # insert into spike_record\n\n # 
Plot relevant data.\n if self.iteration % self.plot_interval == 0:\n self.plot_data()\n self.plot_obs()\n\n self.iteration += 1\n\n if self.done:\n self.iteration = 0\n self.episode += 1\n self.reward_list.append(self.accumulated_reward)\n self.accumulated_reward = 0\n self.plot_reward()", "def skybass_sampling_rates(data):\n for i in range(4):\n fig = plt.figure()\n TODO: finish", "def samples(self):\n return np.concatenate([wf.samples for wf in self._waveforms])", "def take_spectra(self, datagroup):\n N = 0\n self.taking_spectra = True\n try:\n while N < self.number_of_spectra and self.taking_spectra: \n if N!=0: # starting data collection immediately\n time.sleep(self.time_interval_seconds) \n ds = datagroup.create_dataset(\"spectrum_%d\", \n data=self.spectrometer.read_spectrum(bundle_metadata=True),\n attrs=self.spectrometer.get_metadata(),\n )\n ds.attrs.create(\"time_interval\", self.time_interval_seconds)\n ds.attrs.create(\"information\", self.info_string)\n datagroup.file.flush()\n N += 1\n print \"Spectra %d of %d recorded\" % (N,self.number_of_spectra)\n print \"Done!\\n\"\n finally:\n self.taking_spectra = False", "def _exp_splayer(ss) :\n\texp_list = ss.experiments\n\tfor exp in exp_list : \n\t\tss_copy = copy.copy(ss) # create a copy of original ss\n\t\tss_copy.experiments = [exp]\n\t\tyield ss_copy", "def analyze_ev_wf_compact(self, event, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n fig, ax = plt.subplots(nrows=3, ncols=3)\n peaks_temp = pd.DataFrame()\n\n for i in range(0, 9):\n if event < len(self.table_sipm_time):\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n # Ora posso plottare tutto:\n plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))\n # la waveform..\n ax[int(i / 3)][i % 3].plot(wf_time,\n wf_ch, linestyle='-', linewidth=1)\n # ..la baseline..\n ax[int(i / 3)][i % 3].plot(bsl_time, bsl_ch, linestyle='-',\n linewidth=1, c='darkgreen')\n # ..e i picchi (se ci sono)\n if len(peaks) > 0:\n ax[int(i / 3)][i % 3].scatter(wf_time.iloc[peaks],\n wf_ch.iloc[peaks], c='darkred')\n\n # Set common labels\n fig.text(0.5, 0.01, 'Time (s)', ha='center', va='center')\n fig.text(0.02, 0.5, 'Amplitude (V)', ha='center', va='center', rotation='vertical')\n \n \n # plt.show()\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n event += 1\n\n # ..e salvo il plot in una cartella a 
parte\n folder_name = 'plot'\n plot_name = '{0}/{1}_ev{2}.png'.format(\n folder_name, pic_name, event)\n fig.savefig(plot_name)\n plt.close(fig)\n\n # La funzione restituisce i valori di tempo e ampiezza (ottenuta come Ch1-baseline)..\n # ..agli indici dei massimi trovati da find_peaks\n return peaks_temp", "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def SinewavTest(self):\n self.ui.CallJSFunction('testInProgress', None)\n\n duration = self._current_test_args.get('duration',\n _DEFAULT_SINEWAV_TEST_DURATION)\n wav_duration = duration + _DEFAULT_SINEWAV_DURATION_MARGIN\n input_channels = self._current_test_args.get('input_channels',\n self._in_channel_map)\n output_channels = self._current_test_args.get(\n 'output_channels', _DEFAULT_TEST_OUTPUT_CHANNELS)\n\n for output_channel in output_channels:\n volume = self._output_volumes[self._output_volume_index]\n record_file_path = (\n f'/tmp/record-{volume}-{output_channel}-{time.time()}.raw')\n with self._dut.temp.TempFile() as dut_sine_wav_path:\n session.console.info('DUT sine wav path %s', dut_sine_wav_path)\n # It's hard to estimate the overhead in audio record thing of different\n # platform, To make sure we can record the whole sine tone in the record\n # duration, we will playback a long period sine tone, and stop the\n # playback process after we finish recording.\n self.GenerateSinewav(dut_sine_wav_path, output_channel, wav_duration)\n self._dut.audio.PlaybackWavFile(dut_sine_wav_path, self._out_card,\n self._out_device, blocking=False)\n self.RecordAndCheck(duration, input_channels, record_file_path)\n self._dut.audio.StopPlaybackWavFile()", "def test_spike_realdata(self):\n suspect_threshold = 0.5\n fail_threshold = 1\n\n arr = [-0.189, -0.0792, -0.0122, 0.0457, 0.0671, 0.0213, -0.0488, -0.1463, -0.2438, -0.3261, -0.3871, -0.4054,\n -0.3932, -0.3383, -0.2804, -0.2347, -0.2134, -0.2347, -0.2926, -0.3597, -0.442, -0.509, 0, -0.5944,\n -0.57, -0.4267, -0.2926, -0.1585, -0.0945, -0.0762]\n\n expected = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1]\n\n inputs = [\n arr,\n np.asarray(arr, dtype=np.floating),\n dask_arr(np.asarray(arr, dtype=np.floating))\n ]\n for i in inputs:\n npt.assert_array_equal(\n qartod.spike_test(\n inp=i,\n suspect_threshold=suspect_threshold,\n fail_threshold=fail_threshold\n ),\n expected\n )", "def test_correct_spiking(self):\n n = 10\n t_max = 25.0\n dt = 0.2\n p = 0.05\n\n # some reproducible arbitrariness\n np.random.seed(622312)\n n_steps = int_r(t_max/dt)\n table = np.random.rand(n_steps, n) < p\n\n G = TableSpikers(n)\n G.spike_table = copy.copy(table)\n\n class SimpleMonitor(object):\n def __init__(self, target):\n self.target = target;\n self.results = []\n self.order = 1\n\n def evolve(self, t, dt):\n idxs = self.target.spike.nonzero()[0]\n self.results.extend([(int_r(t/dt), i) for i in idxs])\n\n M = SimpleMonitor(G)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n expected = zip(*table.nonzero())\n\n self.assertSequenceEqual(expected, M.results)", "def get_frames(self, indices=None):\n if indices is None:\n return self._spikestimes\n raise self._spikestimes[indices]", "def set_data(self, waveforms, clusters=None, cluster_colors=None,\n clusters_unique=None, clusters_ordered=None,\n masks=None, geometrical_positions=None, spike_ids=None,\n spatial_arrangement=None, superposition=None,\n box_size=None, probe_scale=None, subselect=None):\n \n # select only a subsample of the spikes\n if subselect:\n nspk = waveforms.shape[0]\n 
if nspk > 0:\n indices = np.unique(np.random.randint(low=0, high=nspk, size=subselect))\n # waveforms = waveforms[indices,...]\n waveforms = np.take(waveforms, indices, axis=0)\n # spike_ids = spike_ids[indices,...]\n spike_ids = np.take(spike_ids, indices, axis=0)\n # clusters = clusters[indices,...]\n clusters = np.take(clusters, indices, axis=0)\n # masks = masks[indices,...]\n masks = np.take(masks, indices, axis=0)\n \n \n self.nspikes, self.nsamples, self.nchannels = waveforms.shape\n self.npoints = waveforms.size\n self.geometrical_positions = geometrical_positions\n self.spike_ids = spike_ids\n self.waveforms = waveforms\n \n # data organizer: reorder data according to clusters\n self.data_organizer = SpikeDataOrganizer(waveforms,\n clusters=clusters,\n cluster_colors=cluster_colors,\n clusters_unique=clusters_unique,\n clusters_ordered=clusters_ordered,\n masks=masks,\n nchannels=self.nchannels,\n spike_ids=spike_ids)\n \n # get reordered data\n self.waveforms_reordered = self.data_organizer.data_reordered\n self.nclusters = self.data_organizer.nclusters\n self.clusters = self.data_organizer.clusters\n self.masks = self.data_organizer.masks\n self.cluster_colors = self.data_organizer.cluster_colors\n self.clusters_unique = self.data_organizer.clusters_unique\n self.clusters_rel = self.data_organizer.clusters_rel\n self.clusters_depth = self.data_organizer.clusters_depth\n self.cluster_sizes = self.data_organizer.cluster_sizes\n self.cluster_sizes_dict = self.data_organizer.cluster_sizes_dict\n \n # prepare GPU data: waveform initial positions and colors\n data = self.prepare_waveform_data()\n \n # masks\n self.full_masks = np.repeat(self.masks.T.ravel(), self.nsamples)\n self.full_clusters = np.tile(np.repeat(self.clusters_rel, self.nsamples), self.nchannels)\n self.full_clusters_depth = np.tile(np.repeat(self.clusters_depth, self.nsamples), self.nchannels)\n self.full_channels = np.repeat(np.arange(self.nchannels, dtype=np.int32), self.nspikes * self.nsamples)\n \n # normalization in dataio instead\n self.normalized_data = data\n \n # position waveforms\n self.position_manager.set_info(self.nchannels, self.nclusters, \n geometrical_positions=self.geometrical_positions,\n spatial_arrangement=spatial_arrangement,\n superposition=superposition,\n box_size=box_size,\n probe_scale=probe_scale)\n \n # update the highlight manager\n self.highlight_manager.initialize()", "def run(self, stim, seed):\n self.reset()\n\n # Generate 2 new seeds for the SGC spike generator and for the NEURON simulation\n rs = np.random.RandomState()\n rs.seed(self.seed ^ seed)\n seed1, seed2 = rs.randint(0, 2 ** 32, 2)\n random_seed.set_seed(seed1)\n self.sgc.set_seed(seed2)\n\n self.sgc.set_sound_stim(stim, parallel=False)\n\n # set up recording vectors\n for pop in self.bushy, self.dstellate, self.tstellate, self.tuberculoventral:\n for ind in pop.real_cells():\n cell = pop.get_cell(ind)\n self[cell] = cell.soma(0.5)._ref_v\n self[\"t\"] = h._ref_t\n\n h.tstop = stim.duration * 1000\n h.celsius = self.temp\n h.dt = self.dt\n\n self.custom_init()\n last_update = time.time()\n while h.t < h.tstop:\n h.fadvance()\n now = time.time()\n if now - last_update > 1.0:\n print(\"%0.2f / %0.2f\" % (h.t, h.tstop))\n last_update = now\n\n # record vsoma and spike times for all cells\n vec = {}\n for k in self._vectors:\n v = self[k].copy()\n if k == \"t\":\n vec[k] = v\n continue\n spike_inds = np.argwhere((v[1:] > -20) & (v[:-1] <= -20))[:, 0]\n spikes = self[\"t\"][spike_inds]\n pop = k.celltype\n # print('pop: ', 
pop)\n assert isinstance(pop, str)\n cell_ind = getattr(self, pop).get_cell_index(k)\n vec[(pop, cell_ind)] = [v, spikes]\n\n # record SGC spike trains\n for ind in self.sgc.real_cells():\n cell = self.sgc.get_cell(ind)\n vec[(\"sgc\", ind)] = [None, cell._spiketrain]\n\n return vec", "def clock_skews(self):\r\n clock_skews = {}\r\n for address, probe in self.__probes.items():\r\n clock_skews[address] = probe.get_clock_skew()\r\n return clock_skews" ]
[ "0.60995203", "0.60207593", "0.58172476", "0.56029576", "0.5505114", "0.5358715", "0.5346913", "0.53436625", "0.5329249", "0.5252956", "0.52186084", "0.5192006", "0.5182989", "0.5122417", "0.5108391", "0.50970674", "0.5088726", "0.5059595", "0.5016616", "0.5011529", "0.50094086", "0.5001465", "0.49915072", "0.4984667", "0.49736238", "0.49505764", "0.4940775", "0.49297512", "0.49246913", "0.49090725", "0.49039802", "0.4901486", "0.4895905", "0.4887303", "0.4884787", "0.4873318", "0.48714927", "0.48636082", "0.48554605", "0.48522592", "0.48513788", "0.48511058", "0.48506463", "0.48255712", "0.48236924", "0.4816639", "0.4807758", "0.48057663", "0.48034015", "0.47893384", "0.47883865", "0.47837856", "0.4772986", "0.47690275", "0.47672224", "0.47601917", "0.4758438", "0.4758097", "0.47570202", "0.47559243", "0.47535387", "0.47415382", "0.47377676", "0.47288853", "0.47202694", "0.47160673", "0.47063962", "0.46868166", "0.46866572", "0.468332", "0.4677301", "0.46742192", "0.4664898", "0.4662729", "0.46606514", "0.46588784", "0.4642673", "0.46414018", "0.4637836", "0.46343577", "0.46302044", "0.46260554", "0.4624849", "0.46224236", "0.46154362", "0.46024778", "0.4594949", "0.4593196", "0.45922884", "0.45878056", "0.45872745", "0.45850304", "0.45839423", "0.4583775", "0.45797387", "0.45755774", "0.45668703", "0.45575342", "0.455635", "0.45546103" ]
0.71562934
0
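Several of the negative snippets above extract spike times by testing for upward threshold crossings in a voltage trace, e.g. the `(v[1:] > -20) & (v[:-1] <= -20)` test in the `run` snippet. As a standalone illustration (not part of any dataset record; the threshold value and the toy trace are assumptions), a minimal NumPy sketch of that crossing test:

import numpy as np

def spike_times_from_trace(v, t, threshold=-20.0):
    """Return the times at which `v` first rises above `threshold`.

    `v` and `t` are equal-length 1-D arrays (voltage trace and time axis).
    The default threshold is illustrative, not taken from any record above.
    """
    v = np.asarray(v, dtype=float)
    t = np.asarray(t, dtype=float)
    # An upward crossing is a sample above the threshold whose preceding
    # sample is at or below it.
    crossings = np.argwhere((v[1:] > threshold) & (v[:-1] <= threshold))[:, 0]
    return t[crossings + 1]

# Toy trace with two artificial spikes near t = 2.0 and t = 7.0.
t = np.linspace(0.0, 10.0, 1001)
v = np.full_like(t, -65.0)
v[200:205] = 10.0
v[700:705] = 10.0
print(spike_times_from_trace(v, t))  # [2. 7.]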
Display the traces and spikes in a given interval.
def set_interval(self, interval=None): if interval is None: interval = self._interval interval = self._restrict_interval(interval) if interval != self._interval: logger.log(5, "Redraw the entire trace view.") self._interval = interval emit('is_busy', self, True) self.plot(update_traces=True, update_waveforms=True) emit('is_busy', self, False) emit('time_range_selected', self, interval) self.update_status() else: self.plot(update_traces=False, update_waveforms=True)
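The `set_interval` document above re-plots only when the restricted interval actually differs from the current one; its `_restrict_interval` helper is not shown in the record. Purely as a sketch of the kind of clamping such a helper might perform (the bounds handling below is an assumption, not the record's implementation):

def restrict_interval(interval, data_bounds):
    """Clamp (start, end) to data_bounds, shifting the window rather than
    shrinking it whenever it still fits inside the bounds.

    Hypothetical helper for illustration; the real `_restrict_interval`
    used by the document above is not visible here.
    """
    lo, hi = data_bounds
    start, end = interval
    length = end - start
    if length >= hi - lo:      # window longer than the data: return everything
        return lo, hi
    if start < lo:             # fell off the left edge: shift right
        return lo, lo + length
    if end > hi:               # fell off the right edge: shift left
        return hi - length, hi
    return start, end

print(restrict_interval((-1.0, 2.0), (0.0, 10.0)))  # (0.0, 3.0)
print(restrict_interval((8.0, 12.0), (0.0, 10.0)))  # (6.0, 10.0)
print(restrict_interval((3.0, 5.0), (0.0, 10.0)))   # (3.0, 5.0)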
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)", "def show_events(ticks):\n for tick in i_prune_ticks(ticks):\n print(tick)", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def display_sweep(self):\n for sweep in range(len(self.sweep_points)):\n lidar_x_coordinate=self.flight_points[sweep][0]\n lidar_y_coordinate=self.flight_points[sweep][1]\n\n xx=[]\n yy=[]\n for point in range(len(self.sweep_points[sweep])):\n angle_degree=self.sweep_points[sweep][point][0]\n distance=self.sweep_points[sweep][point][1]\n angle_redian = (math.pi * angle_degree) / 180.0\n sweep_point_x=lidar_x_coordinate+ (distance * math.cos(angle_redian))/1000.0\n sweep_point_y=lidar_y_coordinate+ (distance * math.sin(angle_redian))/1000.0\n xx.append(sweep_point_x)\n yy.append(sweep_point_y)\n\n self.plot_sweep(xx,yy,sweep)", "def plotTrace(trace):\n for t in trace:\n plt.plot(range(len(t)),t,alpha=0.5)\n plt.ylabel(\"Trace\")\n plt.xlabel(\"Step\")\n\n return", "def _show_traces(self, fovsubset, time_vec, fps, gs, gs_rows, gs_cols):\n detected_spikes = locate_spikes_peakutils(fovsubset.dff, fps=fps)\n ax = plt.subplot(gs[gs_rows, gs_cols])\n if time_vec.shape[0] != fovsubset.dff.shape[1]:\n dff = np.zeros((fovsubset.dff.shape[0], time_vec.shape[0]))\n spikes = np.zeros((detected_spikes.shape[0], time_vec.shape[0]))\n spikes[:, :detected_spikes.shape[1]] = detected_spikes\n dff[:, :fovsubset.dff.shape[1]] = fovsubset.dff\n else:\n dff = fovsubset.dff\n spikes = detected_spikes\n scatter_spikes(dff, spikes, downsample_display=1, time_vec=time_vec, ax=ax)", "def show(self, x_offset=0, y_offset=0, delay=0):\n clear()\n sys.stdout.write(y_offset * '\\n')\n for line in self.get_lines():\n sys.stdout.write(x_offset * ' ')\n sys.stdout.write(line)\n sys.stdout.write('\\n')\n end_lines = get_term_height() - self.get_height() - y_offset - 1\n sys.stdout.write(end_lines * '\\n')\n sys.stdout.flush()\n time.sleep(delay)", "def plot_spikes(self, show=False, save_path=None, expand = False):\n spikes = np.array(self.spike_history)\n spike_time, e_idx = np.where(spikes)\n spike_time = spike_time.astype('float32')\n spike_time *= self.global_dt\n spike_time_pair = zip(e_idx,spike_time)\n spike_time_pair.sort()\n spike_time_pair = np.array(spike_time_pair)\n spike_time_pair = list(np.split(spike_time_pair, np.where(np.diff(spike_time_pair[:,0]))[0]+1))\n\n if self.enable_spike_dump:\n n = len(self.all_compartments)\n else:\n n = len(self.electrodes)\n\n s = []\n for i in xrange(n):\n s1 = [t[:,1] for t in spike_time_pair if t[0,0] == i]\n s.append(s1)\n\n fig = plt.figure()\n ax = self.raster(s)\n\n if n < 50 or expand:\n ax.set_yticks(np.arange(1, n + 1))\n if self.enable_spike_dump:\n ax.set_yticklabels(tuple(self.all_compartments))\n else:\n ax.set_yticklabels(tuple(self.electrodes))\n else:\n ax.set_yticklabels([])\n\n 
ax.set_ylabel('Electrode IDX')\n ax.set_xlabel('Time (msec)')\n ax.set_title('CSTMD Electrode Spikes for ' + str(n) + ' compartments')\n\n if not show and expand:\n if n > 40:\n w,h = fig.get_size_inches()\n h *= n / 40\n fig.set_size_inches(w,h)\n\n if save_path is not None:\n #fig.tight_layout()\n plt.savefig(save_path, bbox_inches='tight')\n print \"Saved Cstmd spike train to \" + save_path\n plt.gcf().clear()\n if show:\n plt.show()", "def display_spikes(smd2, spikes, output_fname):\n # Check the input destination file parameter\n if not os.path.isdir(os.path.dirname(output_fname)):\n raise ValueError(\"The output file name '{0}' point to an invalid \"\n \"directory.\".format(output_fname))\n\n # Plot information\n cnt = 1\n nb_of_slices = smd2.shape[1]\n nb_of_plots = len(np.where(spikes.sum(axis=1) > 0)[0])\n\n # Go through all timepoints\n for timepoint_smd2, timepoint_spikes in zip(smd2, spikes):\n\n # If at least one spike is detected, generate a subplot\n if timepoint_spikes.sum() > 0:\n fig = plt.subplot(nb_of_plots, 1, cnt)\n ax = fig.get_axes()\n plt.plot(range(nb_of_slices), timepoint_smd2, \"yo-\")\n plt.ylabel(\"Metric\")\n plt.title(\"Spikes at timepoint {0}\".format(cnt - 1))\n for spike_index in np.where(timepoint_spikes > 0)[0]:\n plt.plot((spike_index, spike_index), (0, timepoint_smd2[spike_index]), \"r\")\n cnt += 1\n plt.xlabel(\"Slices\")\n\n # Save the figure\n plt.savefig(output_fname)", "def plot(self):\n\t\tself.plotOfLoopVoltage()", "def animate(self, interval=100, title='test', medium_from=None, medium_to=None):\n # data input\n N = self.get_N()\n plotting_data = self.data[:,N:2*N]\n time = self.get_time()\n y_min = np.min(plotting_data)\n y_max = np.max(plotting_data)\n n_frames = plotting_data.shape[0]\n \n # initialization of plots\n fig = plt.figure()\n ax = plt.axes(xlim=(0, N), ylim=(y_min, y_max))\n time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)\n time_text.set_text('')\n l, = plt.plot([], [], '.-')\n plt.xlabel('bead number')\n plt.ylabel('velocity (computer units)')\n plt.title(title)\n \n # visualize interfaces\n if medium_from is not None:\n if medium_to is None:\n medium_to = N\n \n plt.fill_between(range(medium_from, medium_to+1), y_min, y_max, facecolor='red', alpha=0.5)\n \n # updat4e function for animation\n def update_line(num, plotting_data, time, line):\n dat = plotting_data[num,:]\n line.set_data([range(len(dat)), dat])\n time_text.set_text('time = %.1f' % time[num])\n line.set_label('t= 10')\n return line,\n \n line_ani = animation.FuncAnimation(fig, update_line, n_frames, fargs=(plotting_data, time, l), interval=interval, blit=False)\n return line_ani", "def callback_time_cut(val):\n global plot_mode\n global idx_time\n last_plot_mode = plot_mode\n plot_mode = 'time_cut'\n idx_time = int(val)\n update_num_shadow(int(sld['neighbors'].val))\n # plot 121\n lcuttime.set_xdata( [val, val] )\n lcuttime.set_alpha( alpha_hm )\n lcutfreq.set_alpha( 0.0 )\n # plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_time ) # [True/False, True/False]\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True ] )\n replot_light()\n reform_axis()\n\n fig.canvas.draw_idle()", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = 
resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def show_lightcurve(self):\n\n time_array = self.exp_start_times\n\n fig = plt.figure()\n\n if self.transmission_spectroscopy:\n lc_model = self.generate_lightcurves(time_array,\n self.planet.calcTransitDepth())\n plt.ylabel(\"Transit Depth\")\n else:\n lc_model = np.ones_like(time_array)\n plt.ylabel(\"Unit Flux\")\n\n if self._visit_trend:\n trend_model = self._visit_trend.scale_factors\n # have to convert weird model format to flat array\n lc_model = trend_model * lc_model.T[0]\n\n plt.scatter(time_array, lc_model)\n plt.xlabel(\"Time (JD)\")\n plt.title(\"Normalised White Time Series of observation\")\n\n return time_array, lc_model", "def displayTicker(self):\n for pair in self.config.pairs:\n if self.config.pairs[pair]:\n self.printTicker(pair, self.trader.tickerData)", "def _interval_example(avg_price_with_interval):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Interval plots\")\n ch.set_subtitle(\"Represent variation. 
Optional `middle_column` to mark a middle point.\")\n ch.plot.interval(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n lower_bound_column=\"lower_ci\",\n upper_bound_column=\"upper_ci\",\n middle_column=\"mean\",\n )\n ch.show(_OUTPUT_FORMAT)", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)", "def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')", "def display_open_interest(symbol: str, interval: int, export: str) -> None:\n df = get_open_interest_per_exchange(symbol, interval)\n if df.empty:\n print(\"Error in glassnode request\")\n else:\n plot_data(df, symbol)\n print(\"\")\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"oi\",\n df,\n )", "def plot_spk(df_flt, df_speak, seps=None, show_trace=False, show_std=False, beg=0, dur=50, rotation=0, interval=5, markersize=1.5, figsize=(30,15), title='', alpha=1, spkplot_gap=1):\n idxes = df_speak.index.unique()\n idxes = get_meet_sec(df_flt).index\n sbeg = str(idxes[beg*20]) \n send = str(idxes[beg*20+dur*20]) \n df_speak = df_speak.loc[sbeg: send]\n n_sub = len(df_flt.columns)\n n_row = n_sub + 1\n \n ## Cannot set [sbeg: send] due to pandas\n df_flt_part = df_flt.loc[:send]\n fig, axes = plt.subplots(n_row, 1, figsize=figsize, sharex=True)\n axs = df_flt_part.plot(figsize=figsize, subplots=True, linewidth=1, marker='o', \n markersize=markersize, alpha=alpha, title=title, ax=axes[:n_sub])\n\n ### add std\n if show_std:\n df_sec = get_meet_sec(df_flt_part)\n df_std = df_sec.groupby(df_sec.index).std()\n dt_std = {}\n \n colors = []\n dt_uc = {}\n dt_ps = {}\n for comb in zip(axs, df_flt.columns):\n ax, u = comb\n l = ax.lines[0]\n dt_uc[u] = l.get_color()\n dt_ps[u] = []\n if show_std:\n dt_std[u] = []\n subjects = sorted(dt_uc.keys(), reverse=True)\n \n if show_std:\n for k in df_sec.index.unique():\n k1 = k + datetime.timedelta(seconds=1)\n for u in df_sec.columns:\n # add std\n stdu = df_std.ix[k, u]\n dt_std[u].append([k, k1])\n dt_std[u].append([stdu, stdu])\n \n \n for k in df_speak.index:\n k1 = k + datetime.timedelta(seconds=1)\n us = df_speak.loc[df_speak.index == k].speaker.tolist()\n for u in us:\n y = -1 * spkplot_gap * ( 1 + subjects.index(u) )\n dt_ps[u].append([k, k1])\n dt_ps[u].append([y, y])\n \n nax = axes[n_sub]\n\n for i,u in enumerate(df_flt.columns):\n c = dt_uc[u]\n params = dt_ps[u]\n axs[i].plot(*params, linewidth=5, color=c)\n if seps is not None:\n axs[i].axhline(seps[i], linestyle= '--', color='black', alpha=0.8)\n axs[i].set_ylim([-10,60])\n axs[i].set_ylabel('Volume')\n axs[i].grid(axis='x', which='major', alpha=0.5, linestyle=':') \n \n # add std\n if show_std:\n params_std = dt_std[u]\n axs[i].plot(*params_std, linewidth=3, color='black', linestyle='--')\n \n if show_trace and len(params) != 0:\n nax.axhline(params[1][0], linestyle=':' , color=c )\n nax.plot(*params, linewidth=spkplot_gap*20, color=c);\n nax.set_ylim([0, -1*spkplot_gap*(n_sub+1) ])\n nax.set_yticklabels('') \n 
nax.xaxis.set_major_locator(mdates.SecondLocator(interval=interval))\n dateformatter = ':%S' if dur <= 60 else '%M:%S'\n nax.xaxis.set_major_formatter(mdates.DateFormatter(dateformatter))\n\n nax.grid(axis='x', which='major', alpha=0.5, linestyle='--')\n nax.set_xlabel('Time')\n nax.set_ylabel('Speaker')\n ## This is just a work-around. Something should be wrong with df.plot (pd version 0.22.)\n nax.set_xlim([sbeg, send])\n plt.xticks(rotation=rotation)\n plt.tight_layout()\n return sbeg, send", "def showLevels(self):\n\n pa = 'EUR_USD GBP_USD AUD_USD USD_CAD USD_CHF NZD_USD'.split(' ')\n gr = 'D H4 H1 M30 M15'.split(' ')\n for i in xrange(len(pa)):\n dfs = p.DataFrame()\n for j in xrange(len(gr)):\n try:\n training = self.viewTraining(pa[i], gr[j])\n df = training[0]\n manifest = training[1]\n dfs = dfs.combine_first(manifest.set_index('timeframe'))\n plot(df.get_values())\n except: \n ''\n try:\n dfs['timeframe'] = dfs.index # save the lost field before calling set_index()\n print dfs.set_index('forecast').sort(ascending=False)\n except: ''\n dfp = p.read_csv('/ml.dev/bin/data/oanda/ticks/{0}/{0}-M5.csv'.format(pa[i])).sort(ascending=True).tail(50).ix[:,'closeAsk']\n plot(dfp)\n title('{0} Forecast'.format(pa[i]))\n legend(gr)\n show();\n #break", "def plot_tuning_curves(self, baseline_rate=10.):\n x = np.arange(0, 1 + 0.01, 0.01)\n l0 = self.data['L0']\n l1 = self.data['L1']\n y_on = np.exp(np.log(l0) + x * np.log(l1 / l0))\n y_off = np.exp(np.log(l0) + (1 - x) * np.log(l1 / l0))\n plt.plot(x, y_on, label='ON')\n plt.plot(x, y_off, label='OFF')\n plt.plot(x, baseline_rate + 0 * x, '--')\n # plt.xlabel('Stimulus intensity')\n # plt.ylabel('Firing Rate (Hz)')\n # plt.title('Firing rate as a function \\n of Stimulus Intensity')\n # plt.legend()", "def get_klines(self, symbol, interval):\n payload = {\n 'symbol': symbol,\n 'interval': interval\n }\n return self.public_request('GET', '/api/v1/klines', **payload)", "def run():\r\n\r\n # Build list of stations\r\n stations = build_station_list()\r\n\r\n # Find 5 stations at which the current level is the highest\r\n stations_highest_rel_level_list = []\r\n N = 5\r\n for i in range(len(stations_highest_rel_level(stations, N))):\r\n stations_highest_rel_level_list.append(stations_highest_rel_level(stations, N)[i][0])\r\n \r\n\r\n # Plot the water level for each of these stations over the past 10 days\r\n \r\n # First fetch the time history for a station\r\n for station in stations:\r\n if station.name in stations_highest_rel_level_list:\r\n \r\n dt = 2\r\n dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))\r\n # This gives list of dates and levels to be passed into a plot\r\n plot_water_level_with_fit(station, dates, levels, 4)\r\n else:\r\n pass", "def update_fps(self):\n self.fps.tick()\n\n\trange_str = \"\"\n gd = self.main_curve_dialog.curve.get_data()[1]\n\trange_str = \"Max: %s, Min: %s, Avg: %0.5s \" \\\n\t\t % (numpy.max(gd), numpy.min(gd), numpy.average(gd))\n\n\n fps_text = \"%s Update: %s FPS\" % (range_str, self.fps.rate())\n self.action_fps_display.setText(fps_text)", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if 
self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, 
self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def _display_tsne(self):\n self._tsne_window.clear()\n self._tsne_window.plot(self._Y_tsne[:,0], self._Y_tsne[:,1], 'b.')", "def curve_plot(self):\n if self.session.active['mode'] == 'database':\n self.curvePlot.set_scroll_interval()\n self.curvePlot.update_depth()\n self.curvePlot.show()", "def demo( self, interval: int = 200_000, iterations = None ) -> None:\n import godafoss.gf_pin_port_demos\n print( \"port_out demo: kitt\" )\n godafoss.gf_pin_port_demos.kitt(\n self,\n interval = interval,\n iterations = iterations\n )", "def plot_kinetics(k_data, i_data, tlim=None, xlim=None, lb=10, mpp=0.33, seg_length=100, fps=10, plot=True):\n \n t = [] \n power = []\n \n # apply tlim\n if tlim == None:\n pass\n elif isinstance(tlim, int):\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ tc < tlim]\n i_data = i_data.loc[i_data.t / fps < tlim]\n elif isinstance(tlim, list) and len(tlim) == 2:\n assert(tlim[1]>tlim[0])\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ (tc < tlim[1]) & (tc >= tlim[0])]\n i_data = i_data.loc[(i_data.t / fps < tlim[1]) & (i_data.t / fps >= tlim[0])]\n else:\n raise ValueError('tlim should be None, int or list of 2 int') \n \n # compute exponents at different time\n # t, power will be plotted on ax1\n for idx in k_data.segment.drop_duplicates():\n subdata = k_data.loc[k_data.segment==idx]\n xx, yy = postprocess_gnf(subdata, lb, xlim=xlim, sparse=3)\n x = np.log(xx)\n y = np.log(yy)\n p = np.polyfit(x, y, deg=1)\n t.append((idx-1)*seg_length/fps)\n power.append(p[0])\n\n # rescale light intensity to (0, 1)\n # t1, i will be plotted on ax2\n t1 = i_data.t / fps\n i = i_data.intensity - i_data.intensity.min()\n i = i / i.max()\n \n data = {'t0': t, 'alpha': power, 't1': t1, 'i': i}\n \n if plot == True:\n # set up fig and 
ax\n fig = plt.figure()\n ax1 = fig.add_axes([0,0,1,1])\n ax2 = ax1.twinx()\n\n # plot t, power\n color = wowcolor(0)\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(t, power, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n # plot t1, intensity\n color = wowcolor(4)\n ax2.set_ylabel('$I$', color=color)\n ax2.plot(t1, i, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n return data, fig, ax1\n else:\n return data", "def trace(self, out):\n if self.step == 0:\n out.write(\"# %5s %16s %8s %8s %7s\\n\" \\\n % ('Step', 'Current energy', 'Av shift',\n 'Mx shift', 'Funcs'))\n log = \"%7d %16.5f %8.4f %8.4f %7d\\n\" \\\n % (self.step, self.current_e, self.shiftavr,\n self.shiftmax, self.funcs)\n out.write(log)", "def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def showBar(self):\n track = 0\n dot_symbols = [\" \" for i in range(self.width)]\n sys.stdout.write(\"{}\".format(''.join(dot_symbols)) + \"\\r\")\n while self.spin_on == True:\n dot_symbols[track] = \".\"\n sys.stdout.write(\"{}\".format(self.c.colorText(''.join(dot_symbols), fg=self.text_color)) + \"\\r\")\n time.sleep(.5)\n track += 1\n if track == self.width:\n dot_symbols = [\" \" for i in range(self.width)]\n track = 0\n sys.stdout.flush()\n sys.stdout.write(self.c.text[\"clear\"])\n sys.stdout.write(\"\" + \"\\r\")\n time.sleep(.5)", "def figure8():\n\n plot_settings = {'y_limits': [15, 60],\n 'x_limits': None,\n 'y_ticks': [20, 30, 40, 50, 60],\n 'locator_size': 5,\n 'y_label': 'ISI (ms)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_8',\n 'legend': ['First ISI', 'Second ISI'],\n 'legend_size': 8,\n 'y_on': True,\n 'legend_location': 4}\n\n g_t_bars = np.linspace(0.02, 0.2, 10)\n isi = np.zeros((len(g_t_bars), 2))\n\n for ix, g_t_bar in enumerate(g_t_bars):\n t, y = solver(200, t_start=15, duration=260, g_t_bar=g_t_bar)\n t_spike, f = spike_times(t, y[:, 0])\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_t_bars[3], 35, 0, 11, head_width=0, head_length=0, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[3], 46, -0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[3], 35, 0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Acceleration\", (0.1, 35), fontsize=8)\n plt.gca().annotate(\"Adaptation\", (0.01, 46), fontsize=8)\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n g_n_bars = np.linspace(0.02, 0.2, 10)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_n_bar in enumerate(g_n_bars):\n t, y = solver(200, g_n_bar=g_n_bar, duration=260, t_start=15, g_t_bar=0.02)\n t_spike, f = spike_times(t, y[:, 0])\n\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_n_bars[3], 30, 0, 10, head_width=0, head_length=0, fc='k', ec='k')\n plt.gca().arrow(g_n_bars[3], 40, -0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n 
plt.gca().arrow(g_n_bars[3], 30, 0.01, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Acceleration\", (0.1, 30), fontsize=8)\n plt.gca().annotate(\"Adaptation\", (0.015, 40), fontsize=8)\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = \"\"\n plot_settings['y_on'] = False\n plot_settings['legend_location'] = 4\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n g_t_bars = np.linspace(0.02, 0.16, 8)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_t_bar in enumerate(g_t_bars):\n t, y = solver(200, g_t_bar=g_t_bar, duration=260, t_start=15, ca_type=1)\n t_spike, f = spike_times(t, y[:, 0])\n\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_t_bars[2], 25, -0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_t_bars[4], 25, 0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Adaptation\", (0.06, 25), fontsize=8)\n\n plot_settings['y_limits'] = [0, 45]\n plot_settings['y_ticks'] = [0, 10, 20, 30, 40]\n plot_settings['locator_size'] = 5\n plot_settings['y_label'] = 'ISI (ms)'\n plot_settings['y_on'] = True\n plot_settings['legend_location'] = 3\n alter_figure(plot_settings)\n\n plt.subplot(2, 2, 4)\n g_n_bars = np.linspace(0.02, 0.16, 8)\n isi = np.zeros((len(g_t_bars), 2))\n for ix, g_n_bar in enumerate(g_n_bars):\n t, y = solver(200, duration=260, t_start=15, g_n_bar=g_n_bar, g_t_bar=0.02, ca_type=2)\n t_spike, f = spike_times(t, y[:, 0])\n\n isi[ix, 0] = t_spike[1] - t_spike[0]\n isi[ix, 1] = t_spike[2] - t_spike[1]\n plt.plot(g_t_bars, isi[:, 0], c='k', marker='o', fillstyle='none', linestyle='-')\n plt.plot(g_t_bars, isi[:, 1], c='k', marker='s', fillstyle='none', linestyle='dotted')\n\n \"\"\"\n Annotate plot\n \"\"\"\n plt.gca().arrow(g_n_bars[2], 20, -0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().arrow(g_n_bars[4], 20, 0.02, 0, head_width=2, head_length=0.01, fc='k', ec='k')\n plt.gca().annotate(\"Adaptation\", (0.06, 20), fontsize=8)\n\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = ''\n plot_settings['y_on'] = False\n plot_settings['legend_location'] = 2\n alter_figure(plot_settings, close=True)", "def plot_pretty():\n\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n plt.style.use('seaborn-deep')\n\n black = '#2B2B2D'\n red = '#E90039'\n orange = '#FF1800'\n white = '#FFFFFF'\n yellow = '#FF9900'\n\n plt.figure(figsize=(12.8, 9.6))\n plt.rcParams.update({'font.size': 16, 'text.color': white, 'axes.labelcolor': white,\n 'axes.edgecolor': white, 'xtick.color': white, 'ytick.color': white})\n\n plt.gcf().set_facecolor(black)\n\n plt.subplot(2, 3, 1)\n plt.plot(ts, ys[:, 2], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[1], color=white)\n plt.title(r'$C_{FA}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 2)\n plt.plot(ts, ys[:, 0], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[0], color=white)\n plt.title(r'$C_{G}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 3)\n plt.plot(ts, ys[:, 3], color=orange)\n plt.title(r'$C_{E}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 4)\n plt.plot(ts, us[:, lin_model.inputs[1]], color=red)\n 
plt.title(r'$F_{m, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 5)\n plt.plot(ts, us[:, lin_model.inputs[0]], color=red)\n plt.title(r'$F_{G, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 6)\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n color=red\n )\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n color=yellow\n )\n plt.legend([r'$C_{FA}$', r'$C_G$'], facecolor=black)\n plt.title('bias')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n # plt.suptitle('Closedloop bioreactor without noise')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise_pretty.png', transparent=True)\n plt.show()", "def timer_plot_data_out(self, w):\n w.update_plot(self.getLaps())", "def plot_sed_curve(self, period=6., showfig=True):\n\t\tgroup = self['%g_sec'%( period )]\n\t\ttomo_data = group['tomo_data'].value\n\t\tmask = group['tomo_data_msk'].value\n\t\tsed_Arr = group['sed_Arr'].value\n\t\tvel_vec = tomo_data[~mask]\n\t\tsed_vec = sed_Arr[~mask]\n\t\tplt.plot(sed_vec, vel_vec, 'r.')\n\t\tplt.xlim(xmin=0)\n\t\tplt.xlabel('Sediment thickness (m)', fontsize=14)\n\t\tplt.ylabel('vel (km/s)', fontsize=14)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\tpass", "def plotalltraces(td):\n \n plotmsubtrace(td, 211)\n dftf.plotflypic(td, 212)", "def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()", "def _display_from_tsne(self, x, y):\n\n # Find the closest 9\n inds = np.argsort(np.sum( (self._Y_tsne-np.array([x, y]))**2, axis=1))\n print(inds[:10])\n\n # Plot the green circles on the tsne plot\n self._display_tsne()\n self._tsne_window.plot(self._Y_tsne[inds[:9],0], self._Y_tsne[inds[:9],1], 'yo')\n\n # Now run through the 9 sub axes and display the image data and cutout location.\n self._sub_window_filenames = []\n for ii, axis in enumerate(self._sub_windows):\n axis.clear()\n\n fits_filename, filename, sliceno, middle = self._process_result_filename_cutout_number[inds[ii]]\n print('display from tsne fits: {} filename: {}'.format(fits_filename, filename))\n\n # So, the filename actually contains the wrong path on it so we\n # need to take it off and use the proper path.\n pf = pickle.load(open(os.path.join(self._cutouts_directory, filename), 'rb'))\n ff = list(glob.iglob('{}/**/{}'.format(self._data_directory, pf['filename'].split('/')[-1])))[0]\n\n print(ff)\n self._display_window(axis, ff)\n self._sub_window_filenames.append(fits_filename)\n\n # Draw the line\n axis.plot([middle[0]-112, middle[0]-112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]+112, middle[0]+112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]-112, middle[1]-112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]+112, middle[1]+112], 'y')\n\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), 
trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def show_plot(self):\n runs = self.GetParent().runs\n if len(runs) <= 0: return\n\n t1 = time.time()\n total_width = self.GetParent().total_width\n\n newwidth = total_width * (self.GetParent().zoom / 100)\n newmid = total_width * (self.GetParent().pan/100)\n newxmin = newmid - (newwidth/2)\n newxmax = newxmin + newwidth\n\n if newxmin < 0:\n newxmin = 0\n newxmax = newwidth\n elif newxmax > total_width:\n newxmax = total_width\n newxmin = newxmax - newwidth\n\n assert newxmin >= 0 and newxmin <= total_width\n\n #print \"**** Zoom: %s, pan: %s, total_width: %s, newwidth: %s, newmid: %s, newxmin: %s, newxmax: %s\" \\\n # %(self.GetParent().zoom,self.GetParent().pan,total_width,newwidth,newmid,newxmin,newxmax)\n\n left = 0\n width_so_far = 0\n self.figure.clear()\n braggsmax = max(flex.max(r.culled_braggs) for r in runs)\n braggsmin = min(flex.min(r.culled_braggs) for r in runs)\n distsmax = max(flex.max(r.culled_distances) for r in runs)\n distsmin = min(flex.min(r.culled_distances) for r in runs)\n sifomax = max(flex.max(r.culled_sifoils) for r in runs)\n sifomin = min(flex.min(r.culled_sifoils) for r in runs)\n wavemax = max(flex.max(r.culled_wavelengths) for r in runs)\n wavemin = min(flex.min(r.culled_wavelengths) for r in runs)\n\n #above tricks don't work for hit rates as they can be empty if the run is new\n goodruns = []\n for run in runs:\n if len(run.hit_rates) > 0: goodruns.append(run)\n if len(goodruns) > 0:\n hitsmax = max(flex.max(r.hit_rates) for r in goodruns)\n hitsmin = min(flex.min(r.hit_rates) for r in goodruns)\n else:\n hitsmax = hitsmin = 0\n\n first_run = True\n for run in runs:\n right = left + run.width()\n\n if right < newxmin or left > newxmax:\n left += run.width()\n #print \"Not showing run %s\"%run.runId\n continue\n\n if left < newxmin:\n xmin = run.min() + (newxmin - left)\n else:\n xmin = run.min()\n\n if right > newxmax:\n xmax = run.min() + (newxmax - left)\n else:\n xmax = run.max()\n\n #print \"Run: %s, run.width(): %s, left: %s, right: %s, run.min(): %s, run.max(): %s, xmin: %s, xmax: %s, width_so_far: %s, xmax-xmin: %s\" \\\n #%(run.runId,run.width(),left,right,run.min(),run.max(),xmin,xmax,width_so_far,xmax-xmin)\n\n ax1 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.05, 0.9*(xmax-xmin)/newwidth, 0.4])\n ax2 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.45, 0.9*(xmax-xmin)/newwidth, 0.2], sharex=ax1)\n ax3 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.65, 
0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax4 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.75, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax5 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.85, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n left += run.width()\n width_so_far += (xmax-xmin)\n\n ax1.grid(True, color=\"0.75\")\n ax2.grid(True, color=\"0.75\")\n ax3.grid(True, color=\"0.75\")\n ax4.grid(True, color=\"0.75\")\n ax5.grid(True, color=\"0.75\")\n ax1.plot(run.culled_bragg_times.select(run.culled_indexed),\n run.culled_braggs.select(run.culled_indexed), 'd', color=[0.0,1.0,0.0])\n ax1.plot(run.culled_bragg_times.select(~run.culled_indexed),\n run.culled_braggs.select(~run.culled_indexed), 'd', color=[0.0,0.5,1.0])\n ax2.plot(run.hit_rates_times, run.hit_rates, 'o-', color=[0.0,1.0,0.0])\n ax3.plot(run.culled_bragg_times, run.culled_wavelengths, '^', color=[0.8,0.0,0.2])\n ax4.plot(run.culled_bragg_times, run.culled_sifoils, '<', color=[0.8,0.0,0.2])\n ax5.plot(run.culled_bragg_times, run.culled_distances, '>', color=[0.8,0.0,0.2])\n ax1.set_ylabel(\"# of Bragg spots\")\n ax2.set_ylabel(\"Hit rate (%)\")\n ax3.set_ylabel(\"WaveL\")\n ax4.set_ylabel(\"SiFoils(mm)\")\n ax5.set_ylabel(\"Dist (mm)\")\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(braggsmin, braggsmax)\n ax2.set_ylim(hitsmin, hitsmax)\n ax3.set_ylim(wavemin, wavemax)\n ax4.set_ylim(sifomin-10, sifomax+10)\n ax5.set_ylim(distsmin-3, distsmax+3)\n ax1.set_xlabel(\"Time\")\n for ax in ax1, ax2, ax3, ax4, ax5:\n if (ax is not ax1) :\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.get_yticklabels()[0].set_visible(False)\n if not first_run:\n ax.get_yaxis().set_visible(False)\n\n ax1.xaxis.set_major_formatter(ticker.FuncFormatter(status_plot.format_time))\n ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.3f\"))\n ax5.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0f\"))\n ax5.set_title(\"%d:%d/%d:%.1f%% I:%d\"%(run.runId, run.hits_count, len(run.braggs), 100*run.hits_count/len(run.braggs),run.indexed.count(True)))\n\n labels = ax1.get_xticklabels()\n for label in labels:\n label.set_rotation(30)\n\n first_run = False\n\n self.figure.autofmt_xdate()\n self.canvas.draw()\n self.parent.Refresh()\n\n t2 = time.time()\n print(\"Plotted in %.2fs\" % (t2 - t1))", "def display_gameclock(interpolation):\n ## GOTCHA: See the comment in update_gameclock().\n sprite_group.clear(screen, eraser_image)\n for ball in sprite_group:\n ball.predict(interpolation, USE_PREDICTION)\n sprite_group.draw(screen)\n pygame.display.update()", "def print_readings(data):\n output = [str(data['timestamp'])]\n output.append(getvalue(data, 't_in', '%0.2f'))\n output.append(getvalue(data, 'h_in', '%d'))\n for i in range(1, 6):\n output.append(getvalue(data, 't_%d' % i, '%0.2f'))\n output.append(getvalue(data, 'h_%d' % i, '%d'))\n output.append(getvalue(data, 'slp', '%0.1f'))\n output.append(getvalue(data, 'uv', '%0.1f'))\n output.append(getvalue(data, 'forecast', '%d'))\n output.append(getvalue(data, 'storm', '%d'))\n output.append(getvalue(data, 'winddir', '%d'))\n output.append(getvalue(data, 'windspeed', '%0.1f'))\n output.append(getvalue(data, 'windgust', '%0.1f'))\n output.append(getvalue(data, 'windchill', '%0.1f'))\n output.append(getvalue(data, 'rain', '%d'))\n print ':'.join(output)", "def plotPSTH(self, stimpath,\n stimdata,\n spikesdict,\n simtime,\n offset=0,\n binsize=10e-3,\n legendSuffix='',\n rate=False,\n normcells=True\n ):\n if not spikesdict:\n return 0\n stimdata = 
stimdata[:]\n times = []\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in stimpath:\n times = stimdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in stimpath:\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[False, numpy.diff(stimdata) < 0].nonzero()[0]]\n else:\n stimdata = analyzer.smooth(stimdata)\n mid = numpy.mean(stimdata)\n stimdata = stimdata[stimdata > mid] # Threshold at midpoint\n times = numpy.linspace(0, simtime, stimdata.shape[0])[numpy.r_[True, stimdata[1:] > stimdata[:-1]] & numpy.r_[stimdata[:-1] > stimdata[1:], True]]\n if (times is None) or (len(times) == 0):\n return 0\n start = times + offset\n end = numpy.zeros(times.shape)\n end[:-1] = start[1:]\n end[-1] = simtime + offset # We assume\n accumulated_data = []\n for spikedata in spikesdict.values():\n tpoints = spikedata[:]\n for ii in range(len(times)):\n ix = numpy.nonzero((tpoints >= start[ii]) & (tpoints < end[ii]))[0]\n accumulated_data = numpy.r_[accumulated_data, tpoints[ix] - times[ii]]\n if len(accumulated_data) == 0:\n return 0\n # set the bins by splitting interstimulus interval\n interval = numpy.mean(numpy.diff(times))\n bins = numpy.arange(offset, interval+offset, binsize)\n bins = numpy.r_[bins, bins[-1] + binsize]\n hist = numpy.histogram(accumulated_data, bins=bins)\n xx = (hist[1][:-1] + hist[1][1:])/2.0\n if rate:\n yy = hist[0] / binsize\n else:\n yy = hist[0]\n if normcells:\n yy /= len(spikesdict)\n path = stimpath + '_psth' + legendSuffix\n new_curve = Qwt.QwtPlotCurve(path)\n new_curve.setData(xx, yy)\n pen = Qt.QPen(Qt.Qt.blue, 1, Qt.Qt.DashDotLine)\n new_curve.setStyle(Qwt.QwtPlotCurve.Lines)\n new_curve.setPen(pen)\n pen = Qt.QPen(Qt.Qt.red, 1)\n new_curve.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.XCross,\n Qt.QBrush(),\n pen,\n Qt.QSize(3,3))) \n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n path = stimpath + '_bins' + legendSuffix\n histmarkers = Qwt.QwtPlotCurve(path)\n height = int(max(yy) + 0.5)\n yy = numpy.ones(hist[1].shape) * height\n histmarkers.setData(hist[1], yy)\n pen = Qt.QPen(Qt.Qt.black, 1, Qt.Qt.DotLine)\n histmarkers.setPen(pen)\n histmarkers.setStyle(Qwt.QwtPlotCurve.Sticks)\n histmarkers.attach(self)\n self.curve_path_dict[histmarkers] = path\n self.path_curve_dict[path].append(new_curve)\n self.clearZoomStack()\n self.replot()\n return 1", "def notebook_display(self):\n time = self.out_channels['0'].samples / self.out_channels['0'].samprate\n\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) * 1.05\n \n for i in range(len(self.out_channels)):\n plt.plot(time[::20], self.out_channels[str(i)].values[::20]+2*i*vmax, label=self.channels.labels[i])\n\n plt.xlabel('Time (s)')\n plt.ylabel('Relative Amplitude')\n plt.legend(frameon=False, loc=5)\n plt.xlim(-time[-1]*0.05,time[-1]*1.2)\n for s in plt.gca().spines.values():\n s.set_visible(False)\n plt.gca().get_yaxis().set_visible(False)\n\n if len(self.channels.labels) == 1:\n # we have used 48000 Hz everywhere above as standard, but to quickly hear the sonification sped up / slowed down,\n # you can modify the 'rate' argument below (e.g. 
multiply by 0.5 for half speed, by 2 for double speed, etc)\n outfmt = np.column_stack([self.out_channels['0'].values, self.out_channels['0'].values]).T\n else:\n outfmt = np.column_stack([self.out_channels['0'].values, self.out_channels['1'].values]).T\n plt.show()\n display(ipd.Audio(outfmt,rate=self.out_channels['0'].samprate, autoplay=False))", "def show(self):\n plt.close() # Remove any existing plot\n plt.imshow(\n self.data,\n extent=[\n self.c - self.radius,\n self.c + self.radius,\n self.r + self.radius,\n self.r - self.radius,\n ],\n )\n plt.colorbar()\n plt.title(self.time.strftime(\"%Y%m%d %H:%M:%S.%f %Z\"))\n plt.show()", "def disp_annotation(self):\r\n print('Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec')\r\n sys.stdout.flush()", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def plot_waveforms(cutouts, fs, pre, post, n=100, color='k', show=True):\n if n is None:\n n = cutouts.shape[0]\n n = min(n, cutouts.shape[0])\n time_in_us = np.arange(-pre*1000, post*1000, 1e3/fs)\n if show:\n _ = plt.figure(figsize=(10,6))\n\n for i in range(n):\n _ = plt.plot(time_in_us, cutouts[i,]*1e6, color, linewidth=1, alpha=0.3)\n _ = plt.xlabel('Time (ms)')\n _ = plt.ylabel('Voltage (mV)')\n _ = plt.title('Spike Waveforms')\n\n if show:\n plt.show()", "def display_loop(self):\n from time import sleep\n self.displaying = True\n while self.displaying:\n print self\n sleep(.083)\n print loc(self.y, self.x) + ' '*self.size", "def visualize(self, save=False):\n import matplotlib.pyplot as plt\n import inspect\n\n plt.style.use('seaborn-whitegrid')\n plt.rcParams['figure.figsize'] = [10, 5]\n\n grid = np.linspace(self.lower, self.upper, 10000)\n func = self.intensity_function(np.linspace(self.lower, self.upper, 10000))\n try:\n plt.plot(grid, func)\n except:\n plt.plot(grid, np.repeat(func, 10000))\n plt.title('Intensity function')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig('intensity_function_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n print('Saved as ' + 'intensity_function_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving intensity function failed!\")\n plt.show()\n plt.clf()\n\n t = self.generate()\n plt.step(t, list(range(0, len(t))))\n plt.title('Simulated trajectory')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig(\n 
'trajectory_' + inspect.getsource(self.intensity_function).split('return')[1].strip() + '.png')\n print('Saved as ' + 'trajectory_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving trajectory failed!\")\n plt.show()\n plt.clf()\n\n plt.plot(t, list(np.repeat(0, len(t))), '.')\n plt.title('Simulated points')\n plt.xlabel('time')\n if save:\n try:\n plt.savefig('points_' + inspect.getsource(self.intensity_function).split('return')[1].strip() + '.png')\n print('Saved as ' + 'points_' + inspect.getsource(self.intensity_function).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving points failed!\")\n plt.show()\n plt.clf()", "def _interval_example2(avg_price_with_interval):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Combined interval plot & bar plot\")\n ch.plot.bar(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n numeric_column=\"mean\",\n )\n ch.plot.interval(\n data_frame=avg_price_with_interval,\n categorical_columns=\"fruit\",\n lower_bound_column=\"lower_ci\",\n upper_bound_column=\"upper_ci\",\n )\n ch.show(_OUTPUT_FORMAT)", "def plot(self):\n\t\tclf()\n\n\t\t# Plot rate for flow 1\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\tmaxY = None\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data1:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 2\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data2:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 3\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data3:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 4\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data4:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add 
data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\t# Plot rate for flow 1\n\t\tx = []\n\t\ty = []\n\t\ti = 0\n\t\twhile i < self.max_time:\n\t\t\tbytes = 0\n\t\t\t# loop through array of data and find relevant data\n\t\t\tfor (t,sequence,size) in self.data5:\n\t\t\t\tif (t >= i - 1) and (t <= i):\n\t\t\t\t\tbytes += size\n\t\t\t# compute interval\n\t\t\tleft = i - 1\n\t\t\tif i - 1 < 0:\n\t\t\t\tleft = 0\n\t\t\tright = i\n\t\t\t# add data point\n\t\t\tif (right - left) != 0:\n\t\t\t\trate = (bytes*8.0/1000000)/(right-left)\n\t\t\t\tx.append(i)\n\t\t\t\ty.append(rate)\n\t\t\t\tif not maxY or rate > maxY:\n\t\t\t\t\tmaxY = int(rate) + 1\n\t\t\ti += 0.1\n\t\t\n\t\tplot(x,y)\n\n\t\txlabel('Time (seconds)')\n\t\tylabel('Rate (Mbps)')\n\t\tylim([0,maxY])\n\t\tsavefig(self.output_file + '.png')", "def show_waveform(self, peaks=[]):\n if peaks is None:\n peaks = []\n data = self.amplitude\n x_axis = range(0, len(data))\n x_axis = [x / self.fs for x in x_axis]\n plt.plot(x_axis, data)\n plt.axhline(self.height)\n for p in peaks:\n plt.axvline(p / self.fs, color=\"red\", alpha=0.2)\n plt.ylabel(\"Amplitude\")\n plt.xlabel(\"Time (seconds)\")\n plt.title(\"Waveform\")\n plt.show()", "def show_window_k_scatter(window_from, window_to, k_from, k_to):\n # Get the confidential interval by window and the number of group.\n stats_results = get_stats_results(window_from, window_to, k_from, k_to)\n\n # Grab some test data.\n x = stats_results['k']\n y = stats_results['window']\n z = stats_results['interval']\n\n # Plot a basic wireframe.\n scatter = plt.scatter(x, y, c=z)\n plt.colorbar(scatter)\n plt.xlabel('k')\n plt.ylabel('window')\n plt.show()", "def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. 
# for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def plot_stparams(interval_lengths, rates, color):\n\n for i, rate in enumerate(rates):\n plt.plot(\n [sp.sum(interval_lengths[:i]), sp.sum(interval_lengths[:i + 1])],\n [rate, rate], c=color)", "def _show_time_updates(p_bar):\n while p_bar.total > p_bar.n:\n time.sleep(1)\n if p_bar.total > p_bar.n:\n p_bar.refresh()", "def visualize(self):\n print('{0} is {1} time steps old'.format(self.name, self.timestep))\n\n self.amygdala.visualize(self.timestep, self.name, self.log_dir)\n self.cerebellum.visualize(self.name, self.log_dir)\n self.cingulate.visualize(self.name, self.log_dir)\n self.hippocampus.visualize(self.name, self.log_dir)\n #self.ganglia.visualize(self.name, self.log_dir)\n #self.cortex.visualize(self.name, self.log_dir)", "def display(f, x_min, x_max, delta=0.001):\n x = list(drange(x_min, x_max,delta))\n y = [f(i) for i in x]\n plt.title(f.__name__)\n plt.grid(True)\n plt.xlabel('X')\n plt.ylabel('Y= '+f.__name__ + '(X)')\n plt.plot(x,y, 'r')\n plt.show()", "def figure6():\n\n plot_settings = {'y_limits': [-100, 30],\n 'x_limits': None,\n 'y_ticks': [-80, -60, -40, -20, 0, 20],\n 'locator_size': 10,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 50,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_6',\n 'legend': None,\n 'legend_size': 8,\n 'y_on': True}\n\n marker = ['o', 's', '^']\n line_styles = ['-', 'dotted', '--']\n\n 
plt.figure(figsize=(5, 3), dpi=96)\n\n plt.subplot(2, 1, 1) # Generate subplot 1 (top)\n t, y = solver(240, i_bias_on=2, g_t_bar=0.1 / 10, duration=250)\n plt.plot(t, y[:, 0], 'k-')\n alter_figure(plot_settings)\n\n plt.subplot(2, 1, 2) # Generate subplot 2 (bottom)\n for ix, i_bias_on in enumerate([2, 1.5, 1]):\n t, y = solver(240, i_bias_on=i_bias_on, g_t_bar=0.1 / 10, duration=250)\n t_spike, f = spike_times(t, y[:, 0])\n plt.plot(t_spike[0:-1], f, c='k', linestyle=line_styles[ix], marker=marker[ix], fillstyle='none')\n\n plot_settings['y_limits'] = [0, 200]\n plot_settings['y_ticks'] = [0, 50, 100, 150, 200]\n plot_settings['locator_size'] = 25\n plot_settings['y_label'] = 'Frequency (Hz)'\n plot_settings['legend'] = ['2.0 nA', '1.5 nA', '1.0 nA']\n plot_settings['scale_size'] = 0\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def plot_credible_intervals_for_time(\n histories: Union[List[History], History],\n labels: Union[List[str], str] = None,\n ms: Union[List[int], int] = None,\n ts: Union[List[int], int] = None,\n par_names: List[str] = None,\n levels: List[float] = None,\n show_mean: bool = False,\n show_kde_max: bool = False,\n show_kde_max_1d: bool = False,\n size: tuple = None,\n rotation: int = 0,\n refvals: Union[List[dict], dict] = None,\n kde: Transition = None,\n kde_1d: Transition = None,\n):\n histories = to_lists(histories)\n labels = get_labels(labels, len(histories))\n n_run = len(histories)\n if ms is None:\n ms = [0] * n_run\n elif not isinstance(ms, list) or len(ms) == 1:\n ms = [ms] * n_run\n if levels is None:\n levels = [0.95]\n levels = sorted(levels)\n if par_names is None:\n # extract all parameter names\n df, _ = histories[0].get_distribution(m=ms[0])\n par_names = list(df.columns.values)\n n_par = len(par_names)\n n_confidence = len(levels)\n if ts is None:\n ts = [history.max_t for history in histories]\n if refvals is not None and not isinstance(refvals, list):\n refvals = [refvals] * n_run\n\n # prepare axes\n fig, arr_ax = plt.subplots(\n nrows=n_par, ncols=1, sharex=False, sharey=False\n )\n if n_par == 1:\n arr_ax = [arr_ax]\n\n # prepare matrices\n cis = np.empty((n_par, n_run, 2 * n_confidence))\n median = np.empty((n_par, n_run))\n if show_mean:\n mean = np.empty((n_par, n_run))\n if show_kde_max:\n kde_max = np.empty((n_par, n_run))\n if show_kde_max_1d:\n kde_max_1d = np.empty((n_par, n_run))\n if kde is None and show_kde_max:\n kde = MultivariateNormalTransition()\n if kde_1d is None and show_kde_max_1d:\n kde_1d = MultivariateNormalTransition()\n\n # fill matrices\n # iterate over populations\n for i_run, (h, t, m) in enumerate(zip(histories, ts, ms)):\n df, w = h.get_distribution(m=m, t=t)\n # normalize weights to be sure\n w /= w.sum()\n # fit kde\n if show_kde_max:\n _kde_max_pnt = compute_kde_max(kde, df, w)\n # iterate over parameters\n for i_par, par in enumerate(par_names):\n # as numpy array\n vals = np.array(df[par])\n # median\n median[i_par, i_run] = compute_quantile(vals, w, 0.5)\n # mean\n if show_mean:\n mean[i_par, i_run] = np.sum(w * vals)\n # kde max\n if show_kde_max:\n kde_max[i_par, i_run] = _kde_max_pnt[par]\n if show_kde_max_1d:\n _kde_max_1d_pnt = compute_kde_max(kde_1d, df[[par]], w)\n kde_max_1d[i_par, i_run] = _kde_max_1d_pnt[par]\n # levels\n for i_c, confidence in enumerate(levels):\n lb, ub = compute_credible_interval(vals, w, confidence)\n cis[i_par, i_run, i_c] = lb\n cis[i_par, i_run, -1 - i_c] = ub\n\n # plot\n for i_par, (par, ax) in enumerate(zip(par_names, arr_ax)):\n for i_run 
in range(len(histories)):\n for i_c in reversed(range(len(levels))):\n y_err = np.array(\n [\n median[i_par, i_run] - cis[i_par, i_run, i_c],\n cis[i_par, i_run, -1 - i_c] - median[i_par, i_run],\n ]\n )\n y_err = y_err.reshape((2, 1))\n ax.errorbar(\n x=[i_run],\n y=median[i_par, i_run],\n yerr=y_err,\n capsize=(10.0 / n_confidence) * (i_c + 1),\n color=f'C{i_c}',\n )\n # reference value\n if refvals[i_run] is not None:\n ax.plot([i_run], [refvals[i_run][par]], 'x', color='black')\n ax.set_title(f\"Parameter {par}\")\n # mean\n if show_mean:\n ax.plot(range(n_run), mean[i_par], 'x', color=f'C{n_confidence}')\n # kde max\n if show_kde_max:\n ax.plot(\n range(n_run), kde_max[i_par], 'x', color=f'C{n_confidence + 1}'\n )\n if show_kde_max_1d:\n ax.plot(\n range(n_run),\n kde_max_1d[i_par],\n 'x',\n color=f'C{n_confidence + 2}',\n )\n ax.set_xticks(range(n_run))\n ax.set_xticklabels(labels, rotation=rotation)\n leg_colors = [f'C{i_c}' for i_c in reversed(range(n_confidence))]\n leg_labels = ['{:.2f}'.format(c) for c in reversed(levels)]\n if show_mean:\n leg_colors.append(f'C{n_confidence}')\n leg_labels.append(\"Mean\")\n if show_kde_max:\n leg_colors.append(f'C{n_confidence + 1}')\n leg_labels.append(\"Max KDE\")\n if show_kde_max_1d:\n leg_colors.append(f'C{n_confidence + 2}')\n leg_labels.append(\"Max KDE 1d\")\n if refvals is not None:\n leg_colors.append('black')\n leg_labels.append(\"Reference value\")\n handles = [\n Line2D([0], [0], color=c, label=l)\n for c, l in zip(leg_colors, leg_labels)\n ]\n ax.legend(handles=handles, bbox_to_anchor=(1.04, 1), loc=\"upper left\")\n\n # format\n arr_ax[-1].set_xlabel(\"Population t\")\n if size is not None:\n fig.set_size_inches(size)\n fig.tight_layout()\n\n return arr_ax", "def plot_age_curve_traj(self, period=6., lon=323, lat=48, showfig=True):\n\t\tgroup = self['%g_sec'%( period )]\n\t\tmask = self._cons_traj(lon=lon,lat=lat,period=period)\n\t\ttomo_data = group['tomo_data'].value\n\t\tage_Arr = group['age_Arr'].value\n\t\tvel_vec = tomo_data[~mask]\n\t\tage_vec = age_Arr[~mask]\n\t\tplt.plot(age_vec, vel_vec, 'r.')\n\t\tplt.xlim(xmin=0)\n\t\tplt.xlabel('Age (Ma)', fontsize=14)\n\t\tplt.ylabel('vel (km/s)', fontsize=14)\n\t\tfig = plt.gcf()\n\t\tfig.suptitle(str(period)+' sec', fontsize=14)\n\t\tif showfig:\n\t\t\tplt.show()\n\t\tpass", "def ice_plot(curves, **kwargs):\n \n n,m = curves.shape\n \n #grid of plots\n gridspec_kw = {'height_ratios':[3, 1], \n 'wspace':0.0, \n 'hspace':0.0}\n fig, (ax_curves, ax_feature) = plt.subplots(2,1, \n gridspec_kw=gridspec_kw, \n figsize=kwargs.get('figsize', (6,6)))\n \n # top graph - curves\n for curve in curves:\n ax_curves.plot(np.arange(m), curve)\n ax_curves.set_xticklabels([])\n ax_curves.set_xlim((0, m))\n ax_curves.set_ylabel(kwargs.get('ylabel', 'decision function $\\Delta$'))\n \n # bottom graph - feature values\n if 'feature_values' in kwargs:\n ax_feature.plot(np.arange(m), kwargs.get('feature_values'))\n ax_feature.set_xlim((0, m)) \n ax_feature.set_xlabel(kwargs.get('xlabel', '$X_S$ values'))\n \n return fig, (ax_curves, ax_feature)", "def plotSnakingTimeline(\n self, pklfile=\"./\", outspecfile=\"./\", PPoutpath=\"./\", folder=\"./\"\n ):\n DRM, outspec = self.loadFiles(pklfile, outspecfile)\n\n allModes = outspec[\"observingModes\"]\n mode1 = [\n mode\n for mode in allModes\n if \"detectionMode\" in mode.keys() or \"detection\" in mode.keys()\n ]\n assert len(mode1) >= 1, \"This needs to be enhanced\"\n mode = mode1[0]\n if not \"timeMultiplier\" in mode.keys():\n mode[\"timeMultiplier\"] 
= 1.0\n\n LD = np.arange(len(DRM[\"DRM\"]))\n arrival_times = [DRM[\"DRM\"][i][\"arrival_time\"].value for i in LD]\n sumOHTIME = (\n outspec[\"settlingTime\"]\n + outspec[\"starlightSuppressionSystems\"][0][\"ohTime\"]\n )\n det_times = [\n DRM[\"DRM\"][i][\"det_time\"].value * (mode[\"timeMultiplier\"]) + sumOHTIME\n for i in LD\n ]\n det_timesROUNDED = [\n round(\n DRM[\"DRM\"][i][\"det_time\"].value * (mode[\"timeMultiplier\"]) + sumOHTIME,\n 1,\n )\n for i in LD\n ]\n ObsNums = [DRM[\"DRM\"][i][\"ObsNum\"] for i in LD]\n y_vals = np.zeros(len(det_times)).tolist()\n char_times = [\n DRM[\"DRM\"][i][\"char_time\"].value * (1.0 + outspec[\"charMargin\"])\n + sumOHTIME * (DRM[\"DRM\"][i][\"char_time\"].value > 0.0)\n for i in LD\n ]\n OBdurations = np.asarray(outspec[\"OBendTimes\"]) - np.asarray(\n outspec[\"OBstartTimes\"]\n )\n\n # print(sum(det_times))\n # print(sum(char_times))\n # This is just testing stuff for now\n # from pylab import *\n arr = [DRM[\"DRM\"][i][\"arrival_time\"].value for i in np.arange(len(DRM[\"DRM\"]))]\n dt = [DRM[\"DRM\"][i][\"det_time\"].value + 1.0 for i in np.arange(len(DRM[\"DRM\"]))]\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n colors = [\"red\", \"blue\"]\n for i in np.arange(len(arr)):\n ax.barh(\n 1.0,\n dt[i],\n align=\"center\",\n left=arr[i],\n color=colors[int(i) % len(colors)],\n )\n plt.show(block=False)\n\n # Check if plotting font #########################################################\n tmpfig = plt.figure(figsize=(20, 5), num=0)\n ax = tmpfig.add_subplot(111)\n t = ax.text(\n 0,\n 0,\n \"Obs# , d\",\n ha=\"center\",\n va=\"center\",\n rotation=\"vertical\",\n fontsize=8,\n )\n r = tmpfig.canvas.get_renderer()\n bb = t.get_window_extent(renderer=r)\n Obstxtwidth = bb.width # Width of text\n Obstxtheight = bb.height # height of text\n FIGwidth, FIGheight = tmpfig.get_size_inches() * tmpfig.dpi\n # plt.show(block=False)\n plt.close()\n daysperpixelapprox = (\n max(arrival_times) / FIGwidth\n ) # approximate #days per pixel\n if mean(det_times) * 0.8 / daysperpixelapprox > Obstxtwidth:\n ObstextBool = True\n else:\n ObstextBool = False\n\n tmpfig = plt.figure(figsize=(25, 5), num=0)\n ax = tmpfig.add_subplot(111)\n t = ax.text(\n 0,\n 0,\n \"OB# , dur.= d\",\n ha=\"center\",\n va=\"center\",\n rotation=\"horizontal\",\n fontsize=12,\n )\n r = tmpfig.canvas.get_renderer()\n bb = t.get_window_extent(renderer=r)\n OBtxtwidth = bb.width # Width of text\n OBtxtheight = bb.height # height of text\n FIGwidth, FIGheight = tmpfig.get_size_inches() * tmpfig.dpi\n # plt.show(block=False)\n plt.close()\n if mean(OBdurations) * 0.8 / daysperpixelapprox > OBtxtwidth:\n OBtextBool = True\n else:\n OBtextBool = False\n #################################################################################\n\n ######################################################################\n # Finds arrival times that occur within that year\n ObsNumsL = list()\n det_timesL = list()\n char_timesL = list()\n arrival_timesL = list()\n truthArr = list()\n for i in np.arange(int(np.ceil(max(arrival_times) / 365.25))):\n truthArr = (np.asarray(arrival_times) >= 365.25 * np.float(i)) * (\n np.asarray(arrival_times) < 365.25 * np.float(i + 1.0)\n )\n arrival_timesL.append([arrival_times[ii] for ii in np.where(truthArr)[0]])\n det_timesL.append([det_times[ii] for ii in np.where(truthArr)[0]])\n char_timesL.append([char_times[ii] for ii in np.where(truthArr)[0]])\n ObsNumsL.append([ObsNums[ii] for ii in np.where(truthArr)[0]])\n 
#######################################################################\n\n #######################################################################\n # Plotting\n colors = \"rb\" #'rgbwmc'\n patch_handles = []\n fig = plt.figure(\n figsize=(20, 3 + int(np.ceil(max(arrival_times) / 365.25)) / 2.0)\n )\n self.prettyPlot()\n ax = fig.add_subplot(111)\n\n char_color = (0.0 / 255.0, 128 / 255.0, 0 / 255.0)\n\n # Plot individual blocks\n # Plot All Detection Observations for Year\n for iyr in np.arange(int(np.ceil(max(arrival_times) / 365.25))):\n ind = 0\n obs = 0\n for (det_time, l, char_time, arrival_times_yr) in zip(\n det_timesL[iyr], ObsNumsL[iyr], char_timesL[iyr], arrival_timesL[iyr]\n ):\n # print det_time, l\n patch_handles.append(\n ax.barh(\n int(np.ceil(max(arrival_times) / 365.25)) - iyr,\n det_time,\n align=\"center\",\n left=arrival_times_yr - 365.25 * iyr,\n color=colors[int(obs) % len(colors)],\n )\n )\n if not char_time == 0.0:\n ax.barh(\n int(np.ceil(max(arrival_times) / 365.25)) - iyr,\n char_time,\n align=\"center\",\n left=arrival_times_yr + det_time - 365.25 * iyr,\n color=char_color,\n )\n ind += 1\n obs += 1\n patch = patch_handles[-1][0]\n bl = patch.get_xy()\n x = 0.5 * patch.get_width() + bl[0]\n y = 0.5 * patch.get_height() + bl[1]\n if ObstextBool:\n ax.text(\n x,\n y,\n \"Obs#%d, %dd\" % (l, det_time),\n ha=\"center\",\n va=\"center\",\n rotation=\"vertical\",\n fontsize=8,\n )\n\n # Set Plot Xlimit so the end of the timeline is at the end of the figure box\n ax.set_xlim([None, 365.25])\n\n # Plot Asthetics\n y_pos = (\n np.arange(int(np.ceil(max(arrival_times) / 365.25))) + 1\n ) # Number of xticks to have\n\n yticklabels = list()\n for i in np.arange(int(np.ceil(max(arrival_times) / 365.25))):\n yticklabels.append(\n str(int(np.ceil(max(arrival_times) / 365.25)) - i) + \"yr\"\n )\n ax.set_yticks(y_pos)\n ax.set_yticklabels(yticklabels, fontsize=30)\n # ax.set_yticklabels(('3yr','2yr','1yr'),fontsize=30)\n ax.xaxis.set_tick_params(labelsize=30)\n ax.set_xlabel(\n \"Time Since Start of Mission Year (days)\", weight=\"bold\", fontsize=30\n )\n plt.title(\n \"Mission Timeline for runName: \"\n + folder.split(\"/\")[-1]\n + \"\\nand pkl file: \"\n + os.path.basename(pklfile).split(\".\")[0],\n weight=\"bold\",\n fontsize=12,\n )\n plt.tight_layout()\n plt.show(block=False)\n\n DT = datetime.datetime\n date = str(DT.now()) # ,\"utf-8\")\n date = \"\".join(\n c + \"_\" for c in re.split(\"-|:| \", date)[0:-1]\n ) # Removes seconds from date\n fname = \"TimelineSnake_\" + folder.split(\"/\")[-1] + \"_\" + date\n plt.savefig(os.path.join(PPoutpath, fname + \".png\"))\n plt.savefig(os.path.join(PPoutpath, fname + \".svg\"))\n plt.savefig(os.path.join(PPoutpath, fname + \".eps\"))\n plt.savefig(os.path.join(PPoutpath, fname + \".pdf\"))", "def volatility_factor_plot(prices: list, dates: list, vf_data: VFStopsResultType,\n green_zone_x_values: List[list], red_zone_x_values: List[list],\n yellow_zone_x_values: List[list], y_range: float, minimum: float,\n text_str: str = \"\", str_color: str = \"\", **kwargs):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n register_matplotlib_converters()\n\n title = kwargs.get('title', '')\n save_fig = kwargs.get('save_fig', False)\n filename = kwargs.get('filename', 'temp_candlestick.png')\n\n stop_loss_objects = vf_data.data_sets\n\n shown_stop_loss = f\"VF: {np.round(vf_data.vf.curated, 3)}\\n\"\n if vf_data.current_status.status.value != 'stopped_out':\n shown_stop_loss += f\"Stop Loss: 
${np.round(vf_data.stop_loss.curated, 2)}\"\n else:\n shown_stop_loss += \"Stop Loss: n/a\"\n\n fig, ax_handle = plt.subplots()\n\n date_indexes = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n ax_handle.plot(date_indexes, prices, color='black')\n\n # Set the tick spacing (this is because dates crowd easily)\n mid_tick_size = int(len(date_indexes) / 4)\n ax_handle.xaxis.set_ticks([\n date_indexes[0], date_indexes[mid_tick_size], date_indexes[mid_tick_size * 2],\n date_indexes[mid_tick_size * 3], date_indexes[-1]\n ])\n\n y_start = minimum - (y_range * 0.05)\n height = y_range * 0.02\n\n for stop in stop_loss_objects:\n sub_dates = [date_indexes[index] for index in stop.time_index_list]\n ax_handle.plot(sub_dates, stop.caution_line, color='gold')\n ax_handle.plot(sub_dates, stop.stop_loss_line, color='red')\n\n for green_zone in green_zone_x_values:\n start = mdates.date2num(date_indexes[green_zone[0]])\n end = mdates.date2num(date_indexes[green_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='green',\n facecolor='green',\n fill=True\n )\n )\n\n for red_zone in red_zone_x_values:\n start = mdates.date2num(date_indexes[red_zone[0]])\n end = mdates.date2num(date_indexes[red_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='red',\n facecolor='red',\n fill=True\n )\n )\n\n for yellow_zone in yellow_zone_x_values:\n start = mdates.date2num(date_indexes[yellow_zone[0]])\n end = mdates.date2num(date_indexes[yellow_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='yellow',\n facecolor='yellow',\n fill=True\n )\n )\n\n ax_handle.set_title(title)\n\n if len(text_str) > 0 and len(str_color) > 0:\n new_start = minimum - (y_range * 0.2)\n new_end = minimum + (y_range * 1.02)\n ax_handle.set_ylim(new_start, new_end)\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.02,\n text_str,\n color=str_color,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n if len(shown_stop_loss) > 0:\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.90,\n shown_stop_loss,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n try:\n if save_fig:\n temp_path = os.path.join(\"output\", \"temp\")\n if not os.path.exists(temp_path):\n # For functions, this directory may not exist.\n plt.close(fig)\n plt.clf()\n return\n\n filename = os.path.join(temp_path, filename)\n if os.path.exists(filename):\n os.remove(filename)\n plt.savefig(filename)\n\n else:\n plt.show()\n\n except: # pylint: disable=bare-except\n print(\n f\"{utils.WARNING}Warning: plot failed to render in 'volatility factor plot' of \" +\n f\"title: {title}{utils.NORMAL}\")\n\n plt.close('all')\n plt.clf()", "def show(self, delay=None, iterations=None, **kwds):\n\n # Positional parameters for the sake of backwards compatibility\n if delay is not None:\n kwds.setdefault(\"delay\", delay)\n if iterations is not None:\n kwds.setdefault(\"iterations\", iterations)\n\n from sage.repl.rich_output import get_display_manager\n dm = get_display_manager()\n dm.display_immediately(self, **kwds)", "def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n 
self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells", "def show_charts_simulator(simulator):\n\tstats_t, stats_s, stats_v, stats_a, stats_j = simulator.stats_t, simulator.stats_s, simulator.stats_v, simulator.stats_a, simulator.stats_j\n\tdatasets = get_datasets(stats_t, stats_s, stats_v, stats_a, stats_j)\n\tplot_datasets(datasets)", "def footprint_demo(**kw):\n # Note: needs fixed slits in single_point()\n count = 1500000\n data = []\n for theta in np.linspace(0.15, 5, 30):\n n = single_point(theta=theta, count=count, trace=False, **kw)\n data.append((theta, np.sum(n.active)))\n print(data[-1])\n x, y = zip(*data)\n pylab.plot(x, np.array(y)/count)\n pylab.show()", "def visualize(self):\n import matplotlib.pyplot as plt\n import numpy as np\n\n plt.figure()\n sw_ = np.linspace(0.0, 1.0, 50)\n plt.plot(sw_, self.krw(sw_), label=\"Water\")\n plt.plot(sw_, self.kro(sw_), label=\"Oil\")\n plt.xlabel(\"Water saturation\")\n plt.ylabel(\"Relative permeability\")\n plt.legend()", "def test_plot_waterfall(self):\n # Unpack the list of baseline-pairs into a Python list\n blpairs = np.unique(self.uvp.blpair_array).tolist()\n blps = [self.uvp.blpair_to_antnums(blp) for blp in blpairs]\n\n # Set cosmology and plot in non-delay (i.e. 
cosmological) units\n self.uvp.set_cosmology(conversions.Cosmo_Conversions(), overwrite=True)\n f1 = plot.delay_waterfall(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=True, delay=False)\n plt.close(f1)\n\n # Plot in Delta^2 units\n f2 = plot.delay_waterfall(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n average_blpairs=True, delay=False,\n deltasq=True)\n plt.close(f2)\n\n # Try some other arguments\n f3 = plot.delay_waterfall(self.uvp, [blpairs,], spw=0, pol=('xx','xx'),\n average_blpairs=False, delay=True,\n log=False, vmin=-1., vmax=3.,\n cmap='RdBu', fold=True, component='abs')\n plt.close(f3)\n\n # Try with imaginary component\n f4 = plot.delay_waterfall(self.uvp, [blpairs,], spw=0, pol=('xx','xx'),\n average_blpairs=False, delay=True,\n log=False, vmin=-1., vmax=3.,\n cmap='RdBu', fold=True, component='imag')\n plt.close(f4)\n\n # Try some more arguments\n fig, axes = plt.subplots(1, len(blps))\n plot.delay_waterfall(self.uvp, [blps,], spw=0, pol=('xx','xx'),\n lst_in_hrs=False,\n times=np.unique(self.uvp.time_avg_array)[:10],\n axes=axes, component='abs', title_type='blvec')\n plt.close()\n\n # exceptions\n uvp = copy.deepcopy(self.uvp)\n for i in range(1, 4):\n _uvp = copy.deepcopy(self.uvp)\n _uvp.blpair_array += i * 20\n uvp += _uvp\n pytest.raises(ValueError, plot.delay_waterfall, uvp,\n uvp.get_blpairs(), 0, ('xx','xx'))\n fig = plot.delay_waterfall(uvp, uvp.get_blpairs(), 0, ('xx','xx'),\n force_plot=True)\n plt.close()", "def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def plot_slices(hists, slice_width, fibre_pos):\n pmt_info = rat.utility().GetPMTInfo()\n # Find max angle\n max_angle = 0\n pmtIDs = hists.keys()\n for pmtID in pmtIDs:\n angle = taf.fibre_to_pmt_angle(fibre_pos, pmt_info.GetPosition(pmtID))\n if angle > max_angle:\n max_angle = angle\n print max_angle\n\n # Step between 0 and max_angle in slices of slice_width\n # create a plot of all pmt time spectra within each slice\n ROOT.gStyle.SetPalette(1) \n cuts = np.arange(0., max_angle+slice_width, slice_width)\n for i, cut in enumerate(cuts): \n tmpHists = []\n if i > 0:\n low_range = cuts[i-1]\n hi_range = cuts[i]\n s = ROOT.THStack(\"stack\", \"Slice: %1.1f - %1.1f deg\" % (low_range, hi_range)) \n count = 0\n for pmtID in pmtIDs:\n angle = taf.fibre_to_pmt_angle(fibre_pos, pmt_info.GetPosition(pmtID))\n if angle > low_range and angle < hi_range:\n #print pmtID\n count = count + 1\n hists[pmtID].SetLineColor(count)\n s.Add(hists[pmtID])\n print \"Drawing...\"\n s.Draw(\"nostack\")\n s.GetHistogram().GetXaxis().SetTitle(\"Time (ns)\")\n #s.Write()\n #c1.BuildLegend(0.5, 0.2, 0.88, 0.88)\n c1.Update()\n c1.Modified()\n c1.Print(\"./results/slices/Slice_%1.1f.png\" % low_range)\n s.Delete()\n #time.sleep(1)", "def select_traces(traces, interval, sample_rate=None):\n start, end = interval\n i, j = round(sample_rate * start), round(sample_rate * end)\n i, j = int(i), int(j)\n 
traces = traces[i:j]\n traces = traces - np.median(traces, axis=0)\n return traces", "def show_derivative(self):\n for trace in self.plotWidget.plotDataItems:\n dt = float(trace.attrs['dt'])\n dtrace = np.diff(trace.data)\n x = pgplot.make_xvector(dtrace, dt)\n self.plotWidget.plot(x, dtrace, pen=pg.mkPen('r'))", "def show_runs(self,start=0,end=99999999,csv=False):\n if csv:\n print '{:>7}, {:>10}, {:>8}, {:>10}, {:3}, {:2}'.format('Run', \n 'Day', 'Time', 'Length', 'xtc', 'h5') \n \n else:\n print '='*72\n print 'Experiment {:}'.format(self.exp)\n print ' xtc dir {:}'.format(self.xtc_dir)\n print ' hdf5 dir {:}'.format(self.h5_dir)\n print '-'*72\n print '{:>7} {:>10} {:>8} {:>10} {:3} {:2}'.format('Run', 'Day', 'Time', \n 'Length', 'xtc', 'h5') \n print '-'*72\n \n for item in self.runs:\n run = item['num']\n if run >= start and run <= end:\n datestr = time.strftime('%Y-%m-%d',\n time.localtime(item['begin_time_unix']))\n timestr = time.strftime('%H:%M:%S',\n time.localtime(item['begin_time_unix']))\n if len(item['xtc_files']) > 0:\n xtc = 'xtc'\n else:\n xtc = ''\n \n if len(item['h5_files']) > 0:\n h5 = 'h5'\n else:\n h5 = ''\n \n begin_time = item['begin_time_unix']\n end_time = item['end_time_unix'] \n if end_time:\n dtime = end_time - begin_time\n flag = ' '\n else:\n dtime = time.time() - begin_time\n flag = '*'\n\n dmin = int(dtime/60)\n dsec = int(dtime % 60)\n if dmin > 0:\n dtstr = '{:4}m {:02}s'.format(dmin,dsec)\n else:\n dtstr = '{:02}s'.format(dsec)\n\n if csv:\n print '{:7}, {:10}, {:8}, {:>10}, {:3}, {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n else:\n print '{:7} {:10} {:8} {:>10} {:3} {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n\n if flag in '*':\n print '* Currently Acquiring Data for Run {:}'.format(run)", "def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "def Chart3PTL(tickerListing, years=5, verbose_mode=False): \n List = tickerListing.split()\n chatty = verbose_mode\n for i in List:\n print(i)\n PlotTimeSeries(i, years, verbose_mode=chatty)", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def plot_age_curve_traj_2(self, period=6., lon=323, lat=48, N=20, showfig=True):\n\t\tlons_out, lats_out = self._cons_traj_stream(period=period,lon=lon,lat=lat,N=N)\n\t\ttry:\n\t\t\tself.age_func\n\t\texcept:\n\t\t\tself._get_age_func()\n\t\tages = self.age_func(np.column_stack((lons_out,lats_out)))\n\t\tgroup = self['%g_sec'%( period )]\n\t\ttomo_data = group['tomo_data'].value\n\t\ttomo_data_msk = group['tomo_data_msk'].value\n\t\tlonArr = group['lonArr'].value\n\t\tlatArr = group['latArr'].value\n\t\tx1 = lonArr[~tomo_data_msk]\n\t\ty1 = latArr[~tomo_data_msk]\n\t\tz1 = tomo_data[~tomo_data_msk]\n\t\t# f_tomo = interp2d(x1, y1, z1, fill_value=0.)\n\t\t# vels = (f_tomo(lons_out, lats_out)).diagonal()\n\t\tvels = griddata(np.column_stack((x1,y1)), z1, (lons_out, lats_out), method='linear', fill_value=0.)\n\t\tmask = np.logical_or(ages>180, vels==0)\n\t\tfig1 = plt.figure(1)\n\t\tplt.plot(ages[~mask], vels[~mask], 
'r.')\n\t\tplt.xlim(xmin=0)\n\t\tplt.xlabel('Age (Ma)', fontsize=14)\n\t\tplt.ylabel('vel (km/s)', fontsize=14)\n\t\tfig1.suptitle(str(period)+' sec', fontsize=14)\n\t\tif showfig:\n\t\t\tfig1.show()\n\t\tfig2 = plt.figure(2)\n\t\tm = self.plot_age(period=period, projection='lambert',geopolygons=None, showfig=False, vmin=0, vmax=None, hillshade=False)\n\t\tx2, y2 = m(lons_out, lats_out)\n\t\tax = fig2.gca()\n\t\tax.plot(x2[~mask], y2[~mask],color='gray')\n\t\tax.plot(x2[~mask], y2[~mask],'.', color='gray')\n\t\tfig2.show()\n\t\tfig3 = plt.figure(3)\n\t\tm = self.plot_tomo_vel(period=period, projection='lambert',geopolygons=None, showfig=False, vmin=None, vmax=None, sta=True, hillshade=False)\n\t\tx3, y3 = m(lons_out, lats_out)\n\t\tax3 = fig3.gca()\n\t\tax3.plot(x3[~mask], y3[~mask],color='gray')\n\t\tax3.plot(x3[~mask], y3[~mask],'.',color='gray')\n\t\tfig3.show()\n\t\tpass", "def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile", "def showSegments(self, surface):\n for segment in self.segments:\n segment.show(surface)", "def display_time_updates(bar):\n threading.Thread(target=_show_time_updates, args=(bar,)).start()", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def display_time(seconds, granularity=2):\n result = []\n\n for name, count in intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 
1:\n name = name.rstrip(\"s\")\n result.append(f\"{value} {name}\")\n return \", \".join(result[:granularity])", "def printAll():\n data = load_yield_data()\n numberColumns = 5\n firstEntry = 'tmax5'\n lastEntry = 'lstmax9'\n colNames = list(data)\n firstIndex =colNames.index(firstEntry)\n lastIndex = colNames.index(lastEntry)\n numberTypesOfVariables = 5\n months = 5\n f, axarr = plt.subplots(numberTypesOfVariables, months)\n variables = ['tave5', 'tave6', 'tave7', 'tave8', 'tave9', 'vpdave5', 'vpdave6', 'vpdave7', 'vpdave8', 'vpdave9', 'precip5', 'precip6', 'precip7', 'precip8', 'precip9', 'evi5', 'evi6', 'evi7', 'evi8', 'evi9', 'lstmax5', 'lstmax6', 'lstmax7', 'lstmax8', 'lstmax9']\n print(firstIndex, lastIndex)\n print(colNames)\n for i in range(len(variables)):\n axarr[int(i/numberColumns), int(i%numberColumns)].plot(data[variables[i]], data[\"yield_rainfed_ana\"],'bx')\n axarr[int(i/numberColumns), int(i%numberColumns)].set_title([variables[i]])\n Z = lowess(data['yield_rainfed_ana'], data[variables[i]],frac=0.3,it=3)\n axarr[int(i/numberColumns), int(i%numberColumns)].plot(Z[:,0], Z[:,1], 'g-', lw=5)\n plt.show()", "def run():\n\n # Build list of stations\n stations = build_station_list()\n \n # Update latest level data for all stations\n update_water_levels(stations)\n \n # Stations at which the current relative level is over 0.8\n z= stations_level_over_threshold(stations, 0.8)\n for a in z:\n print(a[0],a[1])\n print(\".\") \n print(\".\")", "def display(self):\n self.figure, self.axes = self.createFigure()\n\n self.setupLayout()\n self.quitFlag = False\n self.animation = animation.FuncAnimation(self.figure, self.animate, interval=100)\n plt.show()", "def view(self, lo_en: Quantity = Quantity(0.0, \"keV\"), hi_en: Quantity = Quantity(30.0, \"keV\"),\n figsize: Tuple = (8, 6)):\n if lo_en > hi_en:\n raise ValueError(\"hi_en cannot be greater than lo_en\")\n else:\n lo_en = lo_en.to(\"keV\").value\n hi_en = hi_en.to(\"keV\").value\n\n if len(self._plot_data.keys()) != 0:\n # Create figure object\n plt.figure(figsize=figsize)\n\n # Set the plot up to look nice and professional.\n ax = plt.gca()\n ax.minorticks_on()\n ax.tick_params(axis='both', direction='in', which='both', top=True, right=True)\n\n # Set the title with all relevant information about the spectrum object in it\n plt.title(\"{n} - {o}{i} Spectrum\".format(n=self.src_name, o=self.obs_id, i=self.instrument.upper()))\n for mod_ind, mod in enumerate(self._plot_data):\n x = self._plot_data[mod][\"x\"]\n # If the defaults are left, just update them to the min and max of the dataset\n # to avoid unsightly gaps at the sides of the plot\n if lo_en == 0.:\n lo_en = x.min()\n if hi_en == 30.0:\n hi_en = x.max()\n\n # Cut the x dataset to just the energy range we want\n plot_x = x[(x > lo_en) & (x < hi_en)]\n\n if mod_ind == 0:\n # Read out the data just for line length reasons\n # Make the cuts based on energy values supplied to the view method\n plot_y = self._plot_data[mod][\"y\"][(x > lo_en) & (x < hi_en)]\n plot_xerr = self._plot_data[mod][\"x_err\"][(x > lo_en) & (x < hi_en)]\n plot_yerr = self._plot_data[mod][\"y_err\"][(x > lo_en) & (x < hi_en)]\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n plt.errorbar(plot_x, plot_y, xerr=plot_xerr, yerr=plot_yerr, fmt=\"k+\", label=\"data\", zorder=1)\n else:\n # Don't want to re-plot data points as they should be identical, so if there is another model\n # only it will be plotted\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n 
# The model line is put on\n plt.plot(plot_x, plot_mod, label=mod, linewidth=1.5)\n\n # Generate the legend for the data and model(s)\n plt.legend(loc=\"best\")\n\n # Ensure axis is limited to the chosen energy range\n plt.xlim(lo_en, hi_en)\n\n plt.xlabel(\"Energy [keV]\")\n plt.ylabel(\"Normalised Counts s$^{-1}$ keV$^{-1}$\")\n\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(ScalarFormatter())\n ax.xaxis.set_minor_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n ax.xaxis.set_major_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n\n plt.tight_layout()\n # Display the spectrum\n plt.show()\n\n # Wipe the figure\n plt.close(\"all\")\n\n else:\n warnings.warn(\"There are no XSPEC fits associated with this Spectrum, you can't view it.\")", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def showFunctions(self,window):\n wsx,wsy=window.size\n for i,function in enumerate(self.functions):\n self.showGraph(function,window,self.colors[i])\n window.print(str(function),[wsx-wsx/5,wsy-wsy/20*(i+2)],color=self.colors[i],size=25)", "def display_time(self, display='LEDScreen'):\r\n self.bin_time = self._update_time()\r\n wide = False # defines one or two LEDS for display\r\n if display == 'LEDScreen':\r\n if not wide:\r\n for frame_updates in range(30):\r\n for time_slot in range(len(self.bin_time)):\r\n for bit in range(len(self.bin_time[time_slot])):\r\n if self.bin_time[time_slot][bit] == 1:\r\n self.display.light_led(6 - time_slot, 6 - bit, 0.001)\r\n else:\r\n for frame_updates in range(30):\r\n for time_slot in range(3):\r\n for bit in range(6):\r\n if self.bin_time[time_slot][bit] == 1:\r\n coord = 2 * time_slot\r\n self.display.light_led(7 - coord, 5 - bit, 0.0001)\r\n self.display.light_led(7 - coord - 1, 5 - bit, 0.0001)\r\n\r\n else:\r\n for time_slot in range(3):\r\n if time_slot == 0:\r\n current_leds = self.second_leds\r\n elif time_slot == 1:\r\n current_leds = self.minute_leds\r\n else:\r\n current_leds = self.hour_leds\r\n\r\n bin_position = 0\r\n for pin in range(len(current_leds)):\r\n bin_value = self.bin_time[time_slot][bin_position]\r\n if bin_value > 0:\r\n current_leds[bin_position].on()\r\n else:\r\n current_leds[bin_position].off()\r\n bin_position += 1\r\n return", "def display_non_parametric(km_model, figure_size = (18, 5) ):\r\n\r\n # Check that the model is a Non-Parametric model\r\n if 'kaplan' not in km_model.name.lower() :\r\n error = \"This function can only take as input a Non-Parametric model\"\r\n raise NotImplementedError(error)\r\n\r\n # Title of the chart\r\n if 'smooth' in km_model.name.lower() :\r\n is_smoothed = True\r\n title = 'Smooth Kaplan-Meier Survival function'\r\n else:\r\n is_smoothed = False\r\n title = 'Kaplan-Meier Survival function'\r\n\r\n # Initializing the chart\r\n fig, ax = plt.subplots(figsize=figure_size )\r\n\r\n # Extracting times and survival function\r\n times, survival = km_model.times, km_model.survival\r\n\r\n # Plotting Survival\r\n plt.plot(times, survival, label = title, \r\n color = 'blue', lw = 3) \r\n\r\n # Defining the x-axis and y-axis\r\n 
ax.set_xlabel('Time')\r\n ax.set_ylabel( 'S(t) Survival function' )\r\n ax.set_ylim([0.0, 1.05])\r\n ax.set_xlim([0.0, max(times)*1.01])\r\n vals = ax.get_yticks()\r\n ax.set_yticklabels(['{:.1f}%'.format(v*100) for v in vals])\r\n plt.title(title, fontsize=25)\r\n\r\n # Extracting times and survival function\r\n times, survival = km_model.times, km_model.survival\r\n\r\n if is_smoothed :\r\n\r\n # Display\r\n plt.plot(times, survival, label = 'Original Kaplan-Meier', \r\n color = '#f44141', ls = '-.', lw = 2.5) \r\n plt.legend(fontsize=15)\r\n plt.show()\r\n\r\n else:\r\n\r\n # Extracting CI\r\n survival_ci_upper = km_model.survival_ci_upper\r\n survival_ci_lower = km_model.survival_ci_lower\r\n\r\n # Plotting the Confidence Intervals\r\n plt.plot(times, survival_ci_upper, \r\n color='red', alpha =0.1, ls='--')\r\n plt.plot(times, survival_ci_lower, \r\n color='red', alpha =0.1, ls='--')\r\n\r\n # Filling the areas between the Survival and Confidence Intervals curves\r\n plt.fill_between(times, survival, survival_ci_lower, \r\n label='Confidence Interval - lower', color='red', alpha =0.2)\r\n plt.fill_between(times, survival, survival_ci_upper, \r\n label='Confidence Interval - upper', color='red', alpha =0.2)\r\n \r\n # Display\r\n plt.legend(fontsize=15)\r\n plt.show()", "def plot_planned_trajectory(ax, xs, ys, headings, steers, physical_params, interval = 20):\n ax.plot(xs, ys, color=\"r\")\n for i in range(len(steers)):\n # ellipse = Ellipse(xy = (x, y), width = x_length, height = y_length, angle = np.rad2deg(heading), alpha = 0.4, ec = \"k\", fc = fc)\n # ax.add_patch(ellipse)\n if i % interval == 0:\n plot_vehicle(ax, xs[i], ys[i], headings[i], steers[i], 0.7, 0.7, physical_params.wheel_length, physical_params.wheel_width)\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n ax.axis('equal')", "def show_strokes(self):\n assert(self.strokes is not False)\n fig = plt.figure()\n for istroke in range(self.strokes.shape[0]):\n plt.subplot(self.strokes.shape[0] + 1, 1, istroke + 1)\n plt.plot(self.strokes[istroke])\n fig.show()\n raw_input('press enter when finished...')" ]
[ "0.57920915", "0.5683707", "0.5661111", "0.5605037", "0.55469894", "0.5529888", "0.55209893", "0.5516258", "0.5514016", "0.54653513", "0.54639024", "0.5443685", "0.5424659", "0.54157746", "0.54123294", "0.5401272", "0.5386288", "0.53694296", "0.53634524", "0.5349171", "0.5345181", "0.52848387", "0.5280429", "0.52640337", "0.5245146", "0.523994", "0.521748", "0.5206954", "0.520479", "0.5183742", "0.51826406", "0.51716745", "0.516519", "0.5158113", "0.51544356", "0.51423126", "0.513522", "0.5116145", "0.51107556", "0.5108981", "0.51087564", "0.51038253", "0.5103169", "0.5090219", "0.5087638", "0.5081326", "0.5081116", "0.5077511", "0.50761914", "0.50710434", "0.5068555", "0.5052627", "0.50465775", "0.50459033", "0.5038943", "0.5027273", "0.5026929", "0.502319", "0.5021796", "0.5015387", "0.50139207", "0.50083244", "0.5000747", "0.49980333", "0.49971285", "0.49913496", "0.4988905", "0.49837282", "0.4979812", "0.49748686", "0.49635187", "0.49609873", "0.49608856", "0.49525958", "0.49489194", "0.49422646", "0.49413177", "0.49370465", "0.49362522", "0.49345234", "0.4929818", "0.49189553", "0.49118796", "0.4905264", "0.49045673", "0.4903693", "0.490214", "0.49000412", "0.48985568", "0.4894163", "0.4893394", "0.48913684", "0.48904797", "0.4888726", "0.48868358", "0.48812437", "0.48797452", "0.48751897", "0.48729807", "0.48668146" ]
0.500734
62
Attach the view to the GUI.
def attach(self, gui):
        super(TraceView, self).attach(gui)
        self.actions.add(self.toggle_show_labels, checkable=True, checked=self.do_show_labels)
        self.actions.add(
            self.toggle_highlighted_spikes, checkable=True, checked=self.show_all_spikes)
        self.actions.add(self.toggle_auto_scale, checkable=True, checked=self.auto_scale)
        self.actions.add(self.switch_origin)
        self.actions.separator()
        self.actions.add(
            self.go_to, prompt=True, prompt_default=lambda: str(self.time))
        self.actions.separator()
        self.actions.add(self.go_to_start)
        self.actions.add(self.go_to_end)
        self.actions.separator()
        self.actions.add(self.shift, prompt=True)
        self.actions.add(self.go_right)
        self.actions.add(self.go_left)
        self.actions.add(self.jump_right)
        self.actions.add(self.jump_left)
        self.actions.separator()
        self.actions.add(self.widen)
        self.actions.add(self.narrow)
        self.actions.separator()
        self.actions.add(self.go_to_next_spike)
        self.actions.add(self.go_to_previous_spike)
        self.actions.separator()
        self.set_interval()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def attach(self, gui):\n ManualClusteringView.attach(self, gui)\n # ScalingMixin.attach(self, gui)\n\n # self.actions.add(self.toggle_show_labels, checkable=True, checked=self.do_show_labels)\n # self.actions.add(self.toggle_auto_scale, checkable=True, checked=self.auto_scale)\n self.actions.add(self.switch_origin)\n self.actions.separator()\n\n self.actions.add(self.go_to, prompt=True, prompt_default=lambda: str(self.time))\n self.actions.separator()\n\n self.actions.add(self.go_to_start)\n self.actions.add(self.go_to_end)\n self.actions.separator()\n\n self.actions.add(self.shift, prompt=True)\n self.actions.add(self.go_right)\n self.actions.add(self.go_left)\n self.actions.add(self.jump_right)\n self.actions.add(self.jump_left)\n self.actions.separator()\n\n self.actions.add(self.widen)\n self.actions.add(self.narrow)\n self.actions.separator()\n\n self.set_interval()", "def __init__(self):\n self.view = GuiView(self)\n return", "def attach_edgework_view(self, view: \"EdgeworkView\") -> None:\n self._edgework_view = view", "def on_show_view(self):\n arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()", "def debug_view(self):\n\n self.view.show()", "def show(self) -> None:\n show(self._layout)", "def show(self):\n self.Show()", "def setup(self):\n self.ui.setup_window()", "def on_show_view(self):\n\n # Makes the background darker\n arcade.set_background_color([rgb - 50 for rgb in arcade.color.DARK_BLUE_GRAY])\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def setup_gui(self):\n # if there are more than 1 visualizer we need to assure that there\n # will not be tag conflicts\n BaseRealTimeVisualizer.setup_gui_lock.acquire()\n # look for valid tag\n dpg.create_context()\n\n self.id = 0\n while dpg.does_item_exist(f'main_window_{self.id}'):\n self.id += 1\n\n with dpg.texture_registry(show=False):\n dpg.add_dynamic_texture(\n width=self.width,\n height=self.height,\n default_value=np.zeros((self.width, self.height, 3)),\n tag=f'input_image_texture_{self.id}',\n )\n\n with dpg.window(\n tag=f'main_window_{self.id}',\n no_title_bar=True,\n autosize=True\n ):\n dpg.add_image(\n texture_tag=f'input_image_texture_{self.id}',\n tag=f'image_render_{self.id}',\n pos=(_PADDING, _PADDING)\n )\n\n dpg.set_global_font_scale(_FONT_SCALE)\n\n if self.id == 0:\n dpg.set_primary_window(f'main_window_{self.id}', True)\n dpg.create_viewport(\n title=self.title,\n width=self.width + _PADDING*2,\n height=self.height + _PADDING*2,\n resizable=True\n )\n dpg.setup_dearpygui()\n dpg.show_viewport()\n elif self.id == 1:\n 
dpg.set_primary_window('main_window_0', False)\n\n BaseRealTimeVisualizer.setup_gui_lock.release()", "def _connectView(self):\n self._view.select_asset = self.select_asset\n self._view.add_assets = self.add_assets\n self._view.remove_assets = self.remove_assets\n self._view.update_assets = self.update_assets\n self._view.commit = self.commit", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)", "def start_ui(self):\n\t\tself.start_animation()\n\t\tself.app.exec()", "def show_gui():\n pass", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)", "def show(self, window):\r\n\r\n return", "def gui(self):\n return gui", "def visualise(self):\n self.w = VisualizeSetupBox(self.master, self._df)\n self.master.wait_window(self.w.top)", "def create_view(self):\n title_label = Label(self, text='Upload, Preview, Describe and Visualize',\n fg='blue', font=('Arial', 16))\n title_label.pack(fill=BOTH, expand=True)\n select_file_button = Button(self, background='White', text='Select Data File [.csv, .xlsx, .xls, .json, .txt]',\n command=self.start_upload)\n select_file_button.pack(padx=5, pady=10)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def __call__(self):\n self.show()", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def start(self) -> Gui:\n self.show()\n self.app.exec_()\n\n return self", "def show(self):\n self.wid.show()", "def iniciaUI(self):\n\n self.setGeometry(100,100, 300, 200)\n self.setWindowTitle(\"Formulario\")\n self.displayWidgets()\n\n self.show()", "def show(self):\n self.driver.send(self.canvas)", "def viewWidgetCreated(self, view, plot):\n return", "def on_show_view(self):\r\n self.setup()\r\n arcade.set_background_color(BACKGROUND_COLOR)", "def show_window(self):\n self.show()", "def show(self):\n self.scene().show()", "def show(self, parent=None):\n # Some Gui's don't like to process all events from a single \n # call to process events (Qt), and pumping the loop is not\n # reliable. Instead, we just schedule the call to set_visible \n # to occur after we start the event loop and with a priority \n # that is less than any relayouts the may be triggered by \n # pending events. 
This means that the layout queue should \n # finish processing, and then the window will be shown.\n self._prep_window()\n app = self.toolkit.app\n app.schedule(self.set_visible, (True,), priority=75)\n app.start_event_loop()", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def __init__(self):\n\n # GUI constructor\n super().__init__()\n\n # graphics scene\n self.scene = QGraphicsScene(0, 0, 400, 200)\n self.scene.addItem(RectangleRoi(50, 10, 50, 40))\n self.scene.addItem(RectangleRoi(100, 50, 100, 20))\n self.scene.addItem(EllipseRoi(75, 20, 60, 20))\n self.scene.addItem(EllipseRoi(120, 70, 8, 8))\n\n # graphics view\n self.viewer = QGraphicsView(self.scene)\n self.viewer.setSceneRect(0, 0, self.scene.width(), self.scene.height())\n self.viewer.setInteractive(True)\n self.viewer.show()\n\n # layout\n layout = QVBoxLayout()\n layout.addWidget(self.viewer)\n self.setLayout(layout)\n self.resize(self.scene.width(), self.scene.height())", "def add_view_pl_button(self):\n self.view_pl = QPushButton(\"View Playlist\")\n self.view_pl.clicked.connect(self.view_pl_btn_push)\n self.hbtnbox.addWidget(self.view_pl)", "def show(self):\r\n\t\tself.frame.Show(True)", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def ShowMe(self, event):\n self.Show(True)", "def main(self: object) -> None:\n print(\"[View] main\")\n self.mainloop()", "def create(self, parent):\n self.widget = QFrame(parent)", "def attachViewToGlasses(self,visNode):\n\t\tself.head_tracker = viz.link(visNode,viz.NullLinkable,srcFlag=viz.ABS_PARENT)\n\t\t\n\t\t\"\"\"\n\t\tCreate CaveView object for manipulating the virtual viewpoint.\n\t\tcave_origin is a node that controls the position of the cave within the virtual world.\n\t\tFor example, if you wanted to simulate the cave user flying through an environment,\n\t\tyou would apply the transformation to the cave_origin 
node.\n\t\t\"\"\"\n\t\tcave_origin = vizcave.CaveView(self.head_tracker)\n\n\t\t\"\"\"\n\t\tThe cave_origin node is a standard Vizard node that you can apply any position/rotation to.\n\t\tIn this example we will create a keyboard/mouse tracker (using arrow keys) and link it to\n\t\tthe cave_origin node, allowing us to fly the cave user through the virtual environment.\n\t\t\"\"\"\n\n\t\torigin_tracker = viztracker.KeyboardMouse6DOF()\n\t\torigin_link = viz.link(origin_tracker, cave_origin)\n\t\torigin_link.setMask(viz.LINK_POS)\n\t\t\n\t\t\n\t\t#head_tracker.setMask(viz.LINK_POS)\n\n\t\t\n\t\t\"\"\"\n\t\tPass the head tracker to the cave object so it can automatically update the\n\t\tview frustums every frame based on the current head position relative to each wall.\n\t\t\"\"\"\n\t\tself.cave.setTracker(self.head_tracker)", "def on_show_view(self):\n self.window.background_color = arcade.color.BLACK", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def __init__(self, parent: View):\n self.parent = parent\n self.root = self.parent.root\n # Content frame\n self.frame = tk.Frame(self.parent.frame)\n # Reference\n self.visible = False", "def double_clicked_to_view(self):\n\n # TODO need this method? 
better in init to go to view_file\n self.view_file()", "def initUI(self) -> None:\n ratio = 70\n width_to_set = (ratio * self.get_current_window_info()[0]) / 100.0\n height_to_set = (ratio * self.get_current_window_info()[1]) / 100.0\n self.setGeometry(200, 100, width_to_set, height_to_set)\n self.createTable()\n # Add box layout, add table to box layout and add box layout to widget\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.tableWidget)\n self.setLayout(self.layout)\n self.setWindowTitle('View files')\n self.show()", "def show_add_actor(self):\n\t\tformulario = view_form_actor.Form(self)\n\t\tformulario.exec_()\n\t\tself.load_data()", "def show(self):\n self.frame.grid()\n self.visible = True", "def on_show_view(self):\n self.window.background_color = arcade.color.WHITE", "def show(self):\n self._impl.show()", "def do_activate(self, *args, **kwargs):\n self.register_signals()\n self.perform_setup()\n assert self.main_window\n self.main_window.show()\n self.hold()", "def update_view(self): \n raise NotImplementedError(\"Widget descendents MUST implement the update_view() method!\")", "def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def add_tree_view(self):\n self.data_view = QTreeView()\n self.data_view.setRootIsDecorated(False)\n self.data_view.setAlternatingRowColors(True)\n self.mbox.addWidget(self.data_view)\n\n self.data_layout = QHBoxLayout()\n self.data_layout.addWidget(self.data_view)\n\n self.model = self.create_track_model(self)\n self.data_view.setModel(self.model)", "def initGui(self):\n from p4_view import Gui\n self.updateStatus(\"Launching GUI...\")\n self.gui = Gui(self, self.lmap)\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.gui.setPossGoals(self.cfg[\"POSS_GOALS\"])\n #GHD\n self.gui.setMapName(self.cfg[\"MAP_FILE\"])\n self.updateStatus(\"OK\")\n self.gui.mainloop()", "def ui(self, ui):\n\n self._ui = ui", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def menu_design_a_gui_with_wxglade(self, event=None):\n self.parentPanel.design_a_gui_with_wxglade()", "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def show(self):\n self._window.show()", "def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()", "def register_plugin(self):\n self.create_toggle_view_action()\n\n self.main.add_dockwidget(self)", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n 
self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def setViewComponent(self, viewComponent):\n self.viewComponent = viewComponent", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def createView(self):\n logging.debug(\"ShortestPathUI.createView function started\")\n formLayout = QFormLayout()\n\n self.fromLineEdit = 
QLineEdit()\n self.fromLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.fromLineEdit))\n formLayout.addRow(\"From: \", self.fromLineEdit)\n\n self.toLineEdit = QLineEdit()\n self.toLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.toLineEdit))\n formLayout.addRow(\"To: \", self.toLineEdit)\n\n self.pathLineEdit = QLineEdit()\n self.pathLineEdit.setReadOnly(True)\n formLayout.addRow(\"Path: \", self.pathLineEdit)\n\n self.lengthLabel = QLabel()\n formLayout.addRow(\"Length: \", self.lengthLabel)\n self.__generalLayout.addLayout(formLayout, 0, 0)\n\n self.OkButton = QPushButton(\"Ok\")\n self.OkButton.setFixedWidth(50)\n self.OkButton.clicked.connect(self.updatePath)\n self.__generalLayout.addWidget(self.OkButton, 0, 1, alignment=Qt.AlignTop)\n\n logging.debug(\"ShortestPathUI.createView function ended\\n\")", "def __init__(self):\n # Root window\n self.root = tk.Tk()\n self.root.title(\"Crossword\")\n # Padding frame\n self.frame = tk.Frame(self.root)\n self.frame.pack(fill=\"both\", padx=PAD, pady=PAD)\n # Initialize widget groups\n self.header = HeaderView(self)\n self.puzzle = PuzzleView(self)\n self.clues = CluesView(self)\n # Show widgets\n self.header.show()\n self.puzzle.show()\n self.clues.show()", "def onInsert(self):\n self.mainWindow.insert()", "def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)", "def __create_ui(self):\n vbox = gtk.VBox()\n\n # Create the viewable area of the file browser\n self.__view_port = gtk.ScrolledWindow()\n self.__view_port.set_policy(gtk.POLICY_AUTOMATIC,\n gtk.POLICY_AUTOMATIC)\n # Create the tree view and add it to the viewable area\n self.__tree_view = ProjectTreeView()\n self.__project_explorer = ProjectExplorer(self.window, self.__tree_view)\n self.__tree_view.connect('button_press_event',\n self.__on_treeview_button_press_event)\n self.__project_explorer.set_repository()\n self.__view_port.add(self.__tree_view)\n # Create the toolbar\n hbox = gtk.HBox()\n toolbar = gtk.Toolbar()\n toolbar.set_style(gtk.TOOLBAR_ICONS)\n toolbar.set_icon_size(gtk.ICON_SIZE_MENU)\n back = gtk.ToolButton(gtk.STOCK_GO_UP)\n back.connect('clicked', self.__on_back_clicked)\n toolbar.insert(back, 0)\n toolbar.insert(gtk.SeparatorToolItem(), 1)\n refresh = gtk.ToolButton(gtk.STOCK_REFRESH)\n refresh.connect('clicked', self.__on_refresh_clicked)\n toolbar.insert(refresh, 2)\n hbox.pack_start(toolbar, True, True, 0)\n vbox.pack_start(hbox, False, False, 0)\n vbox.pack_start(self.__view_port, True, True, 0)\n\n # Setup the create the buttons for:\n # New File, New Folder\n # ----------------------------------------------------------------------\n hbox1 = gtk.VBox()\n toolbar_actions = gtk.Toolbar()\n toolbar_actions.set_style(gtk.TOOLBAR_ICONS)\n toolbar_actions.set_icon_size(gtk.ICON_SIZE_MENU)\n new_file = gtk.ToolButton(gtk.STOCK_NEW)\n new_file.connect('clicked', self.__on_new_file_clicked_cb)\n toolbar_actions.insert(new_file, 0)\n new_dir = gtk.ToolButton(gtk.STOCK_OPEN) # 
TODO: use a custom icon\n new_dir.connect('clicked', self.__on_new_dir_clicked_cb)\n toolbar_actions.insert(new_dir, 1)\n hbox1.pack_start(gtk.HSeparator(), True, True, 0)\n hbox1.pack_start(toolbar_actions, True, True, 0)\n vbox.pack_end(hbox1, False, False, 0)\n # ----------------------------------------------------------------------\n vbox.show_all()\n # Attach the project explorer to GMate's side panel\n self.__side_panel = self.window.get_side_panel()\n self.__side_panel.add_tab(vbox, msg0005, gtk.STOCK_HARDDISK)", "def Show(self):\r\n return Control.Show(self)", "def add_to(self, main_lay):\n main_lay.addWidget(self._tab)\n self.setParent(main_lay.parentWidget())", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def show_frame(self, container):\r\n\r\n frame = self.frames[container]\r\n\r\n frame.tkraise()", "def vp_start_gui():\n global val, w, root\n root = tk.Tk()\n plot_support.set_Tk_var()\n top = Toplevel1(root)\n plot_support.init(root, top)\n root.mainloop()", "def setup_ui(self):\n self.setLayout(self.main_layout)\n\n self.pv_layout.addWidget(self.pv_protocol_cmb)\n self.pv_layout.addWidget(self.pv_name_line_edt)\n self.pv_layout.addWidget(self.pv_connect_push_btn)\n QTimer.singleShot(0, self.pv_name_line_edt.setFocus)\n\n self.curve_settings_tab.setLayout(self.curves_tab_layout)\n self.chart_settings_tab.setLayout(self.chart_settings_layout)\n self.setup_chart_settings_layout()\n\n self.tab_panel.addTab(self.curve_settings_tab, \"Curves\")\n self.tab_panel.addTab(self.chart_settings_tab, \"Chart\")\n self.tab_panel.hide()\n\n self.crosshair_settings_layout.addWidget(self.enable_crosshair_chk)\n self.crosshair_settings_layout.addWidget(self.cross_hair_coord_lbl)\n\n self.chart_control_layout.addWidget(self.auto_scale_btn)\n self.chart_control_layout.addWidget(self.view_all_btn)\n self.chart_control_layout.addWidget(self.reset_chart_btn)\n self.chart_control_layout.addWidget(self.pause_chart_btn)\n self.chart_control_layout.addLayout(self.crosshair_settings_layout)\n self.chart_control_layout.addWidget(self.import_data_btn)\n self.chart_control_layout.addWidget(self.export_data_btn)\n\n self.chart_control_layout.setStretch(4, 15)\n self.chart_control_layout.insertSpacing(5, 350)\n\n self.chart_layout.addWidget(self.chart)\n self.chart_layout.addLayout(self.chart_control_layout)\n\n self.chart_panel.setLayout(self.chart_layout)\n\n self.splitter.addWidget(self.chart_panel)\n self.splitter.addWidget(self.tab_panel)\n self.splitter.setStretchFactor(0, 0)\n self.splitter.setStretchFactor(1, 1)\n\n self.charting_layout.addWidget(self.splitter)\n\n self.body_layout.addLayout(self.pv_layout)\n self.body_layout.addLayout(self.charting_layout)\n self.body_layout.addLayout(self.chart_control_layout)\n self.main_layout.addLayout(self.body_layout)\n\n self.enable_chart_control_buttons(False)", "def open_gui():\n guiController.main()", "def launch_gui(instance=None):\n app = Controller(instance)\n app.RunGui()", "def createUI(self):\n self.widget = QWidget(self)\n self.setCentralWidget(self.widget)\n\n # In this widget, the video will be drawn\n if sys.platform == \"darwin\": # for MacOS\n from PyQt5.QtWidgets import QMacCocoaViewContainer\n self.videoframe = QMacCocoaViewContainer(0)\n else:\n self.videoframe = QFrame()\n self.palette = self.videoframe.palette()\n self.palette.setColor (QPalette.Window,\n QColor(0,0,0))\n self.videoframe.setPalette(self.palette)\n self.videoframe.setAutoFillBackground(True)\n\n self.hbuttonbox = QHBoxLayout()\n self.playbutton 
= QPushButton(\"Run my program\")\n self.hbuttonbox.addWidget(self.playbutton)\n self.playbutton.clicked.connect(partial(self.drone_vision.run_user_code, self.playbutton))\n\n self.landbutton = QPushButton(\"Land NOW\")\n self.hbuttonbox.addWidget(self.landbutton)\n self.landbutton.clicked.connect(self.drone_vision.land)\n\n self.stopbutton = QPushButton(\"Quit\")\n self.hbuttonbox.addWidget(self.stopbutton)\n self.stopbutton.clicked.connect(self.drone_vision.close_exit)\n\n self.vboxlayout = QVBoxLayout()\n self.vboxlayout.addWidget(self.videoframe)\n self.vboxlayout.addLayout(self.hbuttonbox)\n\n self.widget.setLayout(self.vboxlayout)\n\n # the media player has to be 'connected' to the QFrame\n # (otherwise a video would be displayed in it's own window)\n # this is platform specific!\n # you have to give the id of the QFrame (or similar object) to\n # vlc, different platforms have different functions for this\n if sys.platform.startswith('linux'): # for Linux using the X Server\n self.mediaplayer.set_xwindow(self.videoframe.winId())\n elif sys.platform == \"win32\": # for Windows\n self.mediaplayer.set_hwnd(self.videoframe.winId())\n elif sys.platform == \"darwin\": # for MacOS\n self.mediaplayer.set_nsobject(int(self.videoframe.winId()))", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def show(self):\n # This function has to be placed here (and not in the user.py script)\n self.showMaximized()\n visapp.run()", "def view():\r\n # collect figures in list\r\n figures = list(map(plt.figure, plt.get_fignums()))\r\n # start app\r\n app = QtWidgets.QApplication(sys.argv)\r\n main = Main()\r\n\r\n if figures:\r\n for count, figure in enumerate(figures):\r\n # main names for figures\r\n name = f\"{figure.number}\"\r\n # aliases for figures\r\n titles = [figure.axes[0].get_title(loc=i) for i in [\r\n \"left\", \"center\", \"right\"]]\r\n titles = [i for i in titles if i]\r\n title = f\"{count+1}- {titles[0]}\" if titles else \"\"\r\n axes_labels = f\"{count+1}- {figure.axes[0].get_ylabel()} vs {figure.axes[0].get_xlabel()} \"\r\n fignum = f\"Figure {figure.number}\"\r\n # Append figure to App\r\n main.append_fig(title, axes_labels, fignum, name, figure)\r\n\r\n main.show()\r\n sys.exit(app.exec_())", "def iniciaUI(self):\n\n self.setGeometry(100,100, 250, 250)\n self.setWindowTitle(\"Login\")\n self.displayWidgets()\n\n self.show()", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def sync_view(self):\n new_callbacks = []\n for c in self._ngl_displayed_callbacks_after_loaded:\n if (c._method_name == 'loadFile' and\n 'defaultRepresentation' in c._ngl_msg['kwargs']):\n # set to False to avoid autoView\n # so subsequent display of `self` won't reset view orientation.\n c._ngl_msg['kwargs']['defaultRepresentation'] = False\n msg = c._ngl_msg\n msg['last_child'] = True\n def callback(widget, msg=msg):\n widget.send(msg)\n callback._method_name = msg['methodName']\n callback._ngl_msg = msg\n new_callbacks.append(callback)\n\n msg = {}\n msg['target'] = 'Widget'\n msg['type'] = 'call_method'\n msg['methodName'] = 'set_representation_from_backend'\n msg['args'] = []\n msg['kwargs'] = {}\n msg['last_child'] = True\n\n def callback(widget, msg=msg):\n widget.send(msg)\n callback._method_name = msg['methodName']\n callback._ngl_msg = msg\n\n 
new_callbacks.append(callback)\n self._fire_callbacks(new_callbacks)", "def initView(self):\n #Draw the Session View\n self._sessionView = SessionView(self._app)\n leftDockWidget = QtGui.QDockWidget(\"Session\", self)\n leftDockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n leftDockWidget.setWidget(self._sessionView)\n leftDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetMovable | QtGui.QDockWidget.DockWidgetClosable)\n\n #temporary !\n titleBar = QtGui.QWidget()\n leftDockWidget.setTitleBarWidget(titleBar)\n\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, leftDockWidget)\n\n #Draw the central widget\n self.mdiArea = QtGui.QMdiArea()\n self.setCentralWidget(self.mdiArea)\n\n #Draw the Player View\n #rightDockWidget = QtGui.QDockWidget(\"Player\", self)\n #rightDockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n ##rightDockWidget.setWidget(self.player)\n #rightDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetMovable | QtGui.QDockWidget.DockWidgetClosable)\n #self.addDockWidget(QtCore.Qt.RightDockWidgetArea, rightDockWidget)", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def show(self):\n\n self.serial = self.parent.board.serial\n self.deiconify() # Show window\n self.visible = True\n\n self.input_entry.focus()\n\n self.start_repl()" ]
[ "0.66884124", "0.66884124", "0.66884124", "0.66009265", "0.6581134", "0.6503647", "0.6470148", "0.6323154", "0.62344205", "0.6183854", "0.6170608", "0.6120538", "0.60880315", "0.60534596", "0.604596", "0.6022283", "0.5969684", "0.5946267", "0.5931756", "0.58913046", "0.58907974", "0.5853351", "0.585087", "0.5800141", "0.57991755", "0.5796315", "0.57949257", "0.5784987", "0.5778917", "0.5769542", "0.5746932", "0.5740254", "0.5734591", "0.57080895", "0.57065356", "0.5693382", "0.568715", "0.56771994", "0.567686", "0.5670405", "0.56616807", "0.56495243", "0.56469584", "0.56265676", "0.5624919", "0.5603306", "0.55783856", "0.55741584", "0.5573626", "0.5565059", "0.5563092", "0.5562166", "0.55614114", "0.5552535", "0.55517936", "0.55488163", "0.5546853", "0.55457973", "0.55269676", "0.5521505", "0.5518317", "0.5506793", "0.55010587", "0.54962957", "0.5493887", "0.5489668", "0.54862726", "0.5485788", "0.5485017", "0.54840213", "0.5483172", "0.5479271", "0.54767317", "0.54728967", "0.5469265", "0.54574794", "0.54548055", "0.5444485", "0.54185516", "0.5417486", "0.5405626", "0.53975874", "0.5396227", "0.53934354", "0.53897274", "0.5387121", "0.5382321", "0.53773755", "0.5372333", "0.53633296", "0.5360154", "0.53569543", "0.5355183", "0.534426", "0.53434926", "0.53365225", "0.53271925", "0.5323053", "0.5321606", "0.53143716" ]
0.6533588
5
Whether to show the channels from top to bottom (`top` option, the default), or from bottom to top (`bottom`).
def origin(self): return getattr(self.canvas.layout, 'origin', Stacked._origin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __redrawChannels(self):\n self.__channelWin.clear()\n all_chans = self._client.getChannels()\n all_chans.sort(key=lambda c: c.getName())\n count = min(len(all_chans), self.__channelWin.getmaxyx()[0])\n show = all_chans[:count]\n for c in show:\n cur = self._client.currentChannel() == c\n if cur:\n attr = curses.A_REVERSE\n elif c in self._client.getJoined():\n attr = curses.A_BOLD\n else:\n attr = curses.A_DIM\n if c != self._client.getNoneChannel():\n self.__channelWin.addstr(\n \"{chan}\\n\".format(chan=c.getName()),\n attr\n )", "def showTopView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showTopView()\r\n self.midsagittalView = True\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False", "def bottom_option():\n active = get_active_window()\n Width= get_middle_Width(active)\n Height=get_bottom_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def showBottomView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showBottomView()\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = True", "def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)", "def keep_top_or_bottom(self):\n return self._keep_top_or_bottom", "def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def showChannels(img, ypos = 0, wait=False):\n num_channels = img.shape[2] if len(img.shape) == 3 else 1\n if num_channels == 1:\n label = 'One channel'\n cv2.imshow(label, img)\n cv2.moveWindow(label, 0, ypos)\n else:\n for i in range(num_channels):\n label = 'Channel ' + str(i)\n cv2.imshow(label, img[:,:,i])\n cv2.moveWindow(label, i * img.shape[1], ypos)\n if wait:\n if cv2.waitKey() & 0xFF == ord('q'):\n sys.exit(0)", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def draw_top(self):\n return group()", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def display(self, channel1 = False, channel2 = False, channel3 = False, channel4 = False):\t\t\n\t\tself.scope.write(\":CHANnel1:DISPlay %s\"%bool2ONOFF(channel1))\n\t\tself.scope.write(\":CHANnel2:DISPlay %s\"%bool2ONOFF(channel2))\n\t\tself.scope.write(\":CHANnel3:DISPlay %s\"%bool2ONOFF(channel3))\n\t\tself.scope.write(\":CHANnel4:DISPlay %s\"%bool2ONOFF(channel4))", "def switch_origin(self):\n self.origin = 'bottom' if self.origin == 'top' else 'top'", "def show_grid(self, **kwargs):\n kwargs.setdefault('grid', 
'back')\n kwargs.setdefault('location', 'outer')\n kwargs.setdefault('ticks', 'both')\n return self.show_bounds(**kwargs)", "def top_visible(self) -> bool:\n return self.vertical_scroll == 0", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def use_config_backorders(self):\n return self._use_config_backorders", "def config_independent_frames(self):\n return {'standard': 'dispname','bias': None, 'dark': None}", "def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))", "def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")", "def show_trunk(height=2):\n for k in range(height):\n print(\"|\".center(GROUND_WIDTH))", "def bottom_right_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_right_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def getDefaultDisplayMode(self):\n return \"Wireframe\"", "def show_board(self):\n board_vis = f\"\\n{'*' * 22}Board state{'*' * 23}\\n\"\n str_p2_store=\" \"+str(self.p2_store()) if self.p2_store()<10 else str(self.p2_store())\n board_vis += (f\" {str_p2_store} - | \" +\n \" || \".join(\n [i if len(i) == 2 else ' ' + i for i in list(map(str, self.p2_pits()[::-1]))]) + \" | \\n\")\n board_vis += f\"{'-------' * (self.M + 2)}\\n\"\n board_vis += (\" | \" + \" || \".join(\n [i if len(i) == 2 else ' ' + i for i in list(map(str, self.p1_pits()))]) +\n f\" | - {self.p1_store()}\\n\")\n board_vis += f\"{'*' * 56}\\n\"\n print(board_vis)", "def showHidden(*args, above: bool=True, allObjects: bool=True, below: bool=True, lastHidden:\n bool=True, **kwargs)->None:\n pass", "def hits_top_or_bottom(self):\n if self.y >= self.scene.screen.get_height() - self.image.get_height() or self.y <= 0:\n return True\n else:\n return False", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def show_sequence(data, ordering='channel_last'):\n xb, yb = data\n batch_size = xb.shape[0]\n stacked_size = xb.shape[1]\n \n fig = plt.figure(figsize=(5 * stacked_size, 5 * 2 * batch_size))\n for i in range(batch_size):\n x = xb[i]\n for j in range(stacked_size):\n fig.add_subplot(2 * batch_size, stacked_size, stacked_size * (2 * i) + j + 1) \n show_image(x[j])\n \n if yb[i] is not None:\n y = yb[i]\n else:\n y = np.zeros_like(xb[i])\n \n for j in range(stacked_size):\n fig.add_subplot(2 * batch_size, stacked_size, stacked_size * (2 * i + 1) + j + 1)\n if ordering == 'channel_first':\n y[j] = np.moveaxis(y[j], 0, -1)\n \n if y.shape[-1] == 1:\n show_image(y[j])\n 
else:\n show_label(y[j])\n\n return fig", "def __str__(self):\n return \"Bottom -> \" + repr(self._items) + \" <- Top\"", "def canStack(bottom, top):\n bw, bh, bd = bottom\n tw, th, td = top\n return (bw < tw) and (bh < th) and (bd < td)", "def config_independent_frames(self):\n return {'standard': 'dispname', 'bias': None, 'dark': None}", "def IsTopDockable(self):\r\n \r\n return self.HasFlag(self.optionTopDockable)", "def visible(self, show):", "def isTop(self):\n return self.top", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def bottom_left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def setSurfaceVisibility(visible='both'):\n vdict = {'both':'BOTH','top':'TOP','bottom':'BOTTOM'}\n dislin.survis(vdict[visible])", "def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)", "def DrawTop(screen, top_x, top_y, top_len, top_width):\n pygame.draw.rect(screen, (255,0,0),(top_x, top_y, top_len*2, top_width*2), 4)", "def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1", "def get_active_end_b(self, orientation):\r\n if orientation == \"height\":\r\n # first button is not displayed, second is\r\n if self.number % 2 == 1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n # first button is displayed, second is not\r\n if self.number % 2 == 0:\r\n return True\r\n else:\r\n return False", "def middlemakevisible(self, pos):\n pass", "def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")", "def display_cli(conversations, alt_speaker, human_speaker):\n for speaker, speech in conversations:\n if speaker == END_OF_CONVO:\n print(\"-\" * 20 + \"END OF CONVERSATION\" + \"-\" * 20)\n elif speaker == alt_speaker:\n print(\"%-15s: %s\" % (speaker[:15], speech))\n else:\n prBlueBG(\"%-15s: %s\" % (speaker[:15], speech))", "def top_right_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_top_Height()\n PosX = get_right_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def setDisplayMode(self):\n self.step = (self.max_step + int(self.include))\n self.display = Fourier.inverseTransform(\n self.coefficients, self.display_number)", "def getDefaultDisplayMode(self):\n return \"Shaded\"", "def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True", "def switch_frequency_plot_channel_two(self):\n if self.plot_channel_key_booleans[1]:\n self.plot_channel_key_booleans[1] = False\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[1] = True\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[1]))", "def gridDisplay(self):\n\n if self.griddButton.isCheckable():\n self.photo_grid.setVisible(False)\n self.griddButton.setCheckable(False)\n self.griddButton.setDown(False)\n self.statustext.setText(\"Hide Grid\")\n else:\n self.griddButton.setCheckable(True)\n self.photo_grid.setVisible(True)\n 
self.griddButton.setDown(True)\n self.statustext.setText(\"Display Grid - Rule of thirds\")", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def print_backward(self):\n print(\"[\", end=\" \")\n if self.__head is not None:\n self.__head.print_backward()\n print(\"]\")", "def show_bottom_status(self):\n editor = self.app.get_editor()\n size = self.size()\n cur = editor.cursor()\n data = \"@ \"+str(cur[0])+\",\"+str(cur[1])+\" \"+\\\n \"cur:\"+str(len(editor.cursors))+\" \"+\\\n \"buf:\"+str(len(editor.buffer))\n if self.app.config[\"display\"][\"show_last_key\"]:\n data += \" key:\"+str(self.app.last_input)\n #if self.app.config[\"display\"][\"show_term_size\"]:\n # data += \" [\"+str(size[0])+\"x\"+str(size[1])+\"]\"\n if self.app.config[\"app\"][\"debug\"]:\n data += \" cs:\"+str(editor.current_state)+\" hist:\"+str(len(editor.history)) # Undo / Redo debug\n #if editor.last_find:\n # find = editor.last_find\n # if len(find) > 10:find = find[:10]+\"...\"\n # data = \"find:'\"+find+\"' \" + data\n\n # Add module statuses to the status bar\n for name in self.app.modules.modules.keys():\n module = self.app.modules.modules[name]\n if module.options[\"status\"] == \"bottom\":\n data += \" \" + module.get_status();\n\n self.status_win.clear()\n status = self.app.get_status()\n extra = size[0] - len(status+data) - 1\n line = status+(\" \"*extra)+data\n\n if len(line) >= size[0]:\n line = line[:size[0]-1]\n\n self.status_win.addstr(0,0, line, curses.color_pair(0) | curses.A_REVERSE)\n self.status_win.refresh()", "def test_config(self):\n\n p = SyncProto(packet_port, None)\n\n d = make_axes(500, .1, usteps=16, steps_per_rotation=200)\n p.config(4, 18, 32, False, False, axes=d['axes1']);\n p.info()\n\n d = make_axes(1000, .2, usteps=16, steps_per_rotation=200,\n output_mode=OutMode.OUTPUT_OPENDRAIN, highval=OutVal.LOW)\n p.config(4, 7, 9, False, False, axes=d['axes1']);\n p.info()", "def expandColorBarScaling(direction='none'):\n ddict = {'none':'NONE','down':'FIRST','both':'BOTH'}\n dislin.expzlb(ddict[direction])", "def setDisplayMode(self, mode):\n return \"Wireframe\"", "def fullscreen(self):\n self.port_edit.setVisible(False)\n self.ip_edit.setVisible(False)\n self.connect_btn.setVisible(False)\n self.setup_btn.setVisible(False)\n self.play_btn.setVisible(False)\n self.pause_btn.setVisible(False)\n self.teardown_btn.setVisible(False)\n self.fullscreen_btn.setVisible(False)\n self.video_slider.setVisible(False)\n self.rtp_port_edit.setVisible(False)\n self.rtp_label.setVisible(False)\n self.rtcp_port_edit.setVisible(False)\n self.rtcp_port_label.setVisible(False)\n self.movie_name_edit.setVisible(False)\n self.movie_name_label.setVisible(False)\n self.rate_select.setVisible(False)\n self.time_label.setVisible(False)\n self.low_level_video.setVisible(False)\n self.high_level_video.setVisible(False)\n self.video_list.setVisible(False)\n self.video_label.setGeometry(0, 0, self.screen_width, self.screen_height)\n self.showFullScreen()", "def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]", "def bottom_visible(self) -> bool:\n return self.last_visible_line() == self.content_height - 1", "def show_board(self):\n for i in range(self.num_rows):\n print(' ----'*8)\n s = \"\"\n for j in range(self.num_cols):\n s += '| {} 
'.format(self._show_piece(i, j))\n print(\"{}|\".format(s))\n print(' ----'*8)", "def reshape(self, bottom, top):\r\n pass", "def TopSnappable(self, b=True):\r\n \r\n return self.SetFlag(self.optionTopSnapped, b)", "def set_invert_display(enable):\n if enable:\n send_command(0xA7)\n else:\n send_command(0xA6)", "def reshape(self, bottom, top):\n\t\tpass", "def set_view_options(self):\n active_panel = self.get_active_panel()\n # turn all show/hide display options off except for polygons and\n # surfaces\n pm.modelEditor(active_panel, e=1, allObjects=False)\n pm.modelEditor(active_panel, e=1, manipulators=False)\n pm.modelEditor(active_panel, e=1, grid=False)\n\n pm.modelEditor(active_panel, e=1, polymeshes=True)\n pm.modelEditor(active_panel, e=1, nurbsSurfaces=True)\n pm.modelEditor(active_panel, e=1, subdivSurfaces=True)\n pm.modelEditor(active_panel, e=1,\n pluginObjects=('gpuCacheDisplayFilter', True))\n pm.modelEditor(active_panel, e=1, planes=True)\n\n # turn all hud displays off\n hud_flags = pm.headsUpDisplay(lh=1)\n for flag in hud_flags:\n pm.headsUpDisplay(flag, e=1, vis=0)\n\n # set camera options for playblast\n for camera in pm.ls(type='camera'):\n camera.setAttr('overscan', 1)\n camera.setAttr('filmFit', 1)\n camera.setAttr('displayFilmGate', 1)\n camera.setAttr('displayResolution', 0)", "def autostop():", "def vis2TopDown(points):\n if points is None or points.size == 0:\n return None\n\n i = np.array(points)\n i = i.astype(float) - VIS_RADIUS\n i[:, 0] = np.negative(i[:, 0]) # invert y axis\n return np.array([i[:, 1], i[:, 0]])", "def __init__(self):\r\n #set up pannel in centre of screen, just above the bottom of the screen.\r\n super(Pannel, self).__init__(image = Pannel.pannel,\r\n x = games.screen.width/2,\r\n y = games.screen.height -11)", "def _set_view_slice(self):\n nd = self.dims.not_displayed\n\n if self.multichannel:\n # if multichannel need to keep the final axis fixed during the\n # transpose. 
The index of the final axis depends on how many\n # axes are displayed.\n order = self.dims.displayed_order + (self.dims.ndisplay,)\n else:\n order = self.dims.displayed_order\n\n # Slice thumbnail\n indices = np.array(self.dims.indices)\n downsampled = indices[nd] / self.level_downsamples[-1, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[-1, nd] - 1)\n indices[nd] = downsampled\n\n image = np.asarray(self.data[-1][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_thumbnail = np.clip(image, 0, 1)\n else:\n self._data_thumbnail = image\n\n # Slice currently viewed level\n indices = np.array(self.dims.indices)\n level = self.data_level\n downsampled = indices[nd] / self.level_downsamples[level, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[level, nd] - 1)\n indices[nd] = downsampled\n\n disp_shape = self.level_shapes[level, self.dims.displayed]\n scale = np.ones(self.ndim)\n for d in self.dims.displayed:\n scale[d] = self.level_downsamples[self.data_level][d]\n self._scale = scale\n self.events.scale()\n\n if np.any(disp_shape > self._max_tile_shape):\n for d in self.dims.displayed:\n indices[d] = slice(\n self._top_left[d],\n self._top_left[d] + self._max_tile_shape,\n 1,\n )\n self.translate = self._top_left * self.scale\n else:\n self.translate = [0] * self.ndim\n\n image = np.asarray(self.data[level][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_view = np.clip(image, 0, 1)\n else:\n self._data_view = image\n\n self._update_thumbnail()\n self._update_coordinates()\n self.events.set_data()", "def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()", "def show_channels(chmaps, n_cols=8, normalize=None, ofpath=None):\n n_rows = (chmaps.shape[0] - 1) // n_cols + 1\n\n if n_rows == 1:\n n_cols = chmaps.shape[0]\n\n if normalize is None:\n vmin, vmax = None, None\n else:\n vmin, vmax = normalize\n\n fig = plt.figure()\n\n grid = AxesGrid(fig, 111,\n nrows_ncols=(n_rows, n_cols),\n axes_pad=0.0,\n share_all=True)\n\n for i, chmap in enumerate(chmaps):\n grid[i].imshow(chmap, vmin=vmin, vmax=vmax)\n\n grid.axes_llc.get_xaxis().set_ticks([])\n grid.axes_llc.get_yaxis().set_ticks([])\n\n if ofpath is None:\n plt.get_current_fig_manager().window.showMaximized()\n plt.show()\n else:\n fig.savefig(ofpath)\n plt.close(fig)", "def show_grid(self):\n for ax in (self.time_velocity, self.time_power, self.power_velocity):\n ax.grid(True)", "def IsVertical(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT, AUI_DOCK_CENTER]", "def show_top_status(self):\n self.header_win.clear()\n size = self.size()\n display = self.app.config[\"display\"]\n head_parts = []\n if display[\"show_app_name\"]:\n head_parts.append(\"Suplemon Editor v\"+self.app.version)\n if display[\"show_clock\"]:\n head_parts.append(curr_time())\n if display[\"show_file_list\"]:\n head_parts.append(self.file_list_str())\n\n # Add module statuses to the status bar\n for name in self.app.modules.modules.keys():\n module = self.app.modules.modules[name]\n if module.options[\"status\"] == \"top\":\n head_parts.append(module.get_status());\n\n head = \" - \".join(head_parts)\n head = head + ( \" \" * (self.screen.getmaxyx()[1]-len(head)-1) )\n if len(head) >= size[0]:\n head = 
head[:size[0]-1]\n self.header_win.addstr(0,0, head, curses.color_pair(0) | curses.A_REVERSE)\n self.header_win.refresh()", "def IsVertical(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT]", "def show(black, white):\n for x in X:\n for y in Y:\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif black & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif white & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def switch_frequency_plot_channel_eight(self):\n if self.plot_channel_key_booleans[7]:\n self.plot_channel_key_booleans[7] = False\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[7] = True\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[7]))", "def set_zlim(self, bottom=None, top=None):\n if isinstance(self._frame, root.TH1F):\n warnings.warn(\"Attempting to set z-axis limits for 2D axes\")\n return\n\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_zlim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} z-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logz:\n warnings.warn(\n \"Attempting to set non-positive top zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_zlim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_zlim()[0]\n\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)", "def display(self):\n for r in range(1, self.size+1):\n print(\"+\" + (\"-+\"*self.size))\n print(\"|\", end=\"\")\n for c in range(1, self.size+1):\n print(self.gameState[r,c], end=\"\")\n print(\"|\",end=\"\")\n print()\n print(\"+\" + (\"-+\"*self.size))", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def num_channels(self):\n return 3", "def display(board):\n for i in range(height-1, -1, -1):\n print(' '.join(['O' if at(board, i * width + j) else '-' for j in range(1, width+1)]))\n print(\"\")", "def showDisplay(self, type=\"DEFAULT\"):\n gd = mamba.getDisplayer() # <- trick to ensure the root windows is created and hidden\n if type==\"DEFAULT\":\n # First if there is any display already opened it is showed\n no_display = True\n if self._displayUsr:\n self._displayUsr.show()\n no_display = False\n if self._displayVtk:\n self._displayVtk.show()\n no_display = False\n if self._displayPjt:\n self._displayPjt.show()\n no_display = False\n \n if no_display:\n # If no display is yet open we create one\n # preferentially using user defines display\n # or if not VTK\n if self._displayerUsr:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), 
self.name)\n self._displayUsr.updateim()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()\n \n elif type==\"USER\":\n if self._displayerUsr:\n if self._displayUsr:\n self._displayUsr.show()\n else:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n \n elif type==\"PROJECTION\":\n if self._displayerPjt:\n if self._displayPjt:\n self._displayPjt.show()\n else:\n self._displayPjt = self._displayerPjt(self.name)\n if self._displayPjt:\n self._displayPjt.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayPjt.updateim()\n \n elif type==\"VTK\":\n if self._displayerVtk:\n if self._displayVtk:\n self._displayVtk.show()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()", "def IsBottomSnappable(self):\r\n \r\n return self.HasFlag(self.optionBottomSnapped)", "def showRightClickMenu(self,pos):\n\t\tprint('bStackWidget.showRightClickMenu()')\n\t\tmenu = QtWidgets.QMenu()\n\t\t#self.menu = QtWidgets.QMenu()\n\n\t\tnumChannels = self.mySimpleStack.numChannels # number of channels in stack\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\t#actions = ['Channel 1', 'Channel 2', 'Channel 3', 'RGB', 'Channel 1 Mask', 'Channel 2 Mask', 'Channel 3 Mask']\n\t\tprint(' showRightClickMenu() numChannels:', numChannels, 'maxNumChannels:', maxNumChannels)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\t# abb oct 2020, maybe put these back in\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber}')\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(chanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == chanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Mask')\n\t\t\tactualChanNumber = maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Skel')\n\t\t\tactualChanNumber = 2 * maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\t# abb oct 2020, maybe put this back in ???\n\t\t'''\n\t\tif numChannels>1:\n\t\t\tactionsList.append('RGB')\n\t\t\tisEnabledList.append(True)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == 'rgb' # lower case !!!\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = 
isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked = isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\t#\n\t\t# do again for edt\n\t\tedtIdx = 3 # (raw==0, mask==1, skel==2, edt==3)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} EDT')\n\t\t\tactualChanNumber = (maxNumChannels * edtIdx) + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tprint(' edt actualChanNumber:', actualChanNumber, 'isEnabled:', isEnabled)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked = isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\t#\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# view\n\t\t# abb oct 2020, maybe put these back in ???\n\t\t#actions = ['Image', 'Sliding Z', 'Nodes', 'Edges']\n\t\tactions = ['Image']\n\t\tfor actionStr in actions:\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisChecked = False\n\t\t\tif actionStr == 'Image':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showImage']\n\t\t\telif actionStr == 'Sliding Z':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\telif actionStr == 'Nodes':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showNodes']\n\t\t\telif actionStr == 'Edges':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showEdges']\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\tcurrentAction.triggered.connect(self.actionHandler)\n\t\t\t# add to menu\n\t\t\t#menuAction = self.menu.addAction(currentAction)\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# panels\n\n\t\t'''\n\t\tannotationsAction = QtWidgets.QAction('Left Toolbar', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLeftToolbar'])\n\t\t#annotationsAction.setShortcuts('[')\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# nodes\n\t\tannotationsAction = QtWidgets.QAction('Node List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showNodeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# edges\n\t\tannotationsAction = QtWidgets.QAction('Edge List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showEdgeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# search\n\t\tannotationsAction = QtWidgets.QAction('Search List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showSearch'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# 
annotations\n\t\tannotationsAction = QtWidgets.QAction('Annotation List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showAnnotations'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# contrast\n\t\tcontrastAction = QtWidgets.QAction('Contrast Panel', self, checkable=True)\n\t\tcontrastAction.setChecked(self.options['Panels']['showContrast'])\n\t\ttmpMenuAction = menu.addAction(contrastAction)\n\t\t'''\n\n\t\t'''\n\t\t# status toolbar\n\t\tannotationsAction = QtWidgets.QAction('Status Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showStatus'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# line profile toolbar\n\t\tannotationsAction = QtWidgets.QAction('Line Profile Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLineProfile'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t# napari\n\t\tmenu.addSeparator()\n\t\tnapariAction = QtWidgets.QAction('Napari', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(napariAction)\n\n\t\tmenu.addSeparator()\n\t\t# make square\n\t\tmakeSquareAction = QtWidgets.QAction('Square', self, checkable=True)\n\t\tmakeSquareAction.setChecked(False)\n\t\ttmpMenuAction = menu.addAction(makeSquareAction)\n\n\t\tmenu.addSeparator()\n\n\t\t# save image\n\t\tsaveImageAction = QtWidgets.QAction('Save Image', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveImageAction)\n\n\t\t# save movie\n\t\tsaveMovieAction = QtWidgets.QAction('Save Movie', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveMovieAction)\n\n\t\t# options\n\t\t'''\n\t\tmenu.addSeparator()\n\t\toptionsAction = QtWidgets.QAction('Options', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(optionsAction)\n\t\t'''\n\n\t\t# refresh tracing\n\t\tmenu.addSeparator()\n\t\trefeshAction = QtWidgets.QAction('Refresh', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(refeshAction)\n\n\t\t#\n\t\t# edits\n\t\tself.addEditMenu(menu)\n\n\t\t#\n\t\t# get the action selection from user\n\n\t\tprint('=== bStackWidget.showRightClickMenu()')\n\t\t# was this\n\t\tuserAction = menu.exec_(self.mapToGlobal(pos))\n\t\t# now this\n\t\t'''\n\t\tself.menu.move(self.mapToGlobal(pos))\n\t\tself.menu.show()\n\t\t'''\n\n\t\t#userAction = None\n\t\tif userAction is None:\n\t\t\t# abort when no menu selected\n\t\t\treturn\n\t\tuserActionStr = userAction.text()\n\t\tprint(' showRightClickMenu() userActionStr:', userActionStr)\n\t\tsignalName = 'bSignal ' + userActionStr\n\t\tuserSelectedMenu = True\n\n\t\tdoStackRefresh = False\n\n\t\t# image\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\tif userActionStr == 'Channel 1':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 1\n\t\t\t#doStackRefresh = True\n\t\t\tself.optionsChange('Panels', 'displayThisStack', value=1, doEmit=True)\n\t\t\t#self.getStackView().displayStateChange('displayThisStack', value=1)\n\t\telif userActionStr == 'Channel 2':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=2)\n\t\telif userActionStr == 'Channel 3':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 3\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=3)\n\n\t\telif userActionStr == 'Channel 1 
Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4)\n\t\telif userActionStr == 'Channel 2 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+1)\n\t\telif userActionStr == 'Channel 3 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+2)\n\n\t\telif userActionStr == 'Channel 1 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7)\n\t\telif userActionStr == 'Channel 2 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+1)\n\t\telif userActionStr == 'Channel 3 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+2)\n\n\t\t# EDT\n\t\telif userActionStr == 'Channel 1 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10)\n\t\telif userActionStr == 'Channel 2 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+1)\n\t\telif userActionStr == 'Channel 3 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+2)\n\n\n\t\telif userActionStr == 'RGB':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 'rgb'\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value='rgb')\n\n\t\t#\n\t\t# view of tracing\n\t\telif userActionStr == 'Image':\n\t\t\tself.getStackView().displayStateChange('showImage', toggle=True)\n\t\t\tdoStackRefresh = True\n\t\t\t#self.displayStateDict['showImage'] = not self.displayStateDict['showImage']\n\t\telif userActionStr == 'Sliding Z':\n\t\t\t#self.getStackView().displayStateDict['displaySlidingZ'] = not self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displaySlidingZ', toggle=True)\n\t\telif userActionStr == 'Nodes':\n\t\t\t#optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\tself.getStackView().displayStateDict['showNodes'] = not self.getStackView().displayStateDict['showNodes']\n\t\t\tdoStackRefresh = True\n\t\telif userActionStr == 'Edges':\n\t\t\tself.getStackView().displayStateDict['showEdges'] = not self.getStackView().displayStateDict['showEdges']\n\t\t\tdoStackRefresh = True\n\n\t\t#\n\t\t# toolbars\n\t\telif userActionStr == 'Left Toolbar':\n\t\t\tself.optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLeftToolbar'] = not self.options['Panels']['showLeftToolbar']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Contrast Panel':\n\t\t\tself.optionsChange('Panels', 'showContrast', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showContrast'] = not self.options['Panels']['showContrast']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Node List':\n\t\t\tself.optionsChange('Panels', 'showNodeList', toggle=True, 
doEmit=True)\n\t\t\t#self.options['Panels']['showNodeList'] = not self.options['Panels']['showNodeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Edge List':\n\t\t\tself.optionsChange('Panels', 'showEdgeList', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showEdgeList'] = not self.options['Panels']['showEdgeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Search List':\n\t\t\tself.optionsChange('Panels', 'showSearch', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Annotation List':\n\t\t\tself.optionsChange('Panels', 'showAnnotations', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Status Panel':\n\t\t\tself.optionsChange('Panels', 'showStatus', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showStatus'] = not self.options['Panels']['showStatus']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Line Profile Panel':\n\t\t\tself.optionsChange('Panels', 'showLineProfile', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLineProfile'] = not self.options['Panels']['showLineProfile']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Caiman':\n\t\t\tself.optionsChange('Panels', 'showCaiman', toggle=True, doEmit=True)\n\n\t\t# other\n\t\telif userActionStr == 'Options':\n\t\t\toptionsDialog = bimpy.interface.bOptionsDialog(self, self)\n\t\telif userActionStr == 'Napari':\n\t\t\tself.openNapari()\n\t\telif userActionStr == 'Square':\n\t\t\tself.myStackView2.toggleMakeSquare()\n\t\t\t#self.resizeEvent(QtGui.QResizeEvent(self.size(), QtCore.QSize()))\n\t\t\t#self.repaint()\n\t\telif userActionStr == 'Save Image':\n\t\t\tself.saveImage()\n\t\telif userActionStr == 'Save Movie':\n\t\t\tself.saveMovie()\n\t\telif userActionStr == 'Refresh':\n\t\t\tself.getStackView()._preComputeAllMasks()\n\n\t\telse:\n\t\t\tprint(' showRightClickMenu() -->> no action taken for userActionStr:', userActionStr)\n\t\t\tuserSelectedMenu = False\n\n\t\t# emit a signal\n\t\t# todo: this is emitting when self.getStackView().displayStateDict is not changing, e.g. for user action 'Contrast' and 'Annotations'\n\t\t'''\n\t\tif userSelectedMenu:\n\t\t\tself.setSlice() # update\n\t\t\tself.displayStateChangeSignal.emit(signalName, self.getStackView().displayStateDict)\n\t\t'''\n\n\t\tif doStackRefresh:\n\t\t\tself.getStackView().setSlice()\n\n\t\t#return False\n\t\t#print('right click menu return')\n\t\treturn", "def plot_frame(ax=None, left=None, right=None, top=None, bottom=None):\n ax = to_axis(ax)\n if top is not None:\n ax.spines['top'].set_visible(bool(top))\n if right is not None:\n ax.spines['right'].set_visible(bool(right))\n if bottom is not None:\n ax.spines['bottom'].set_visible(bool(bottom))\n if left is not None:\n ax.spines['left'].set_visible(bool(left))\n return ax", "def __init__(self, bottom, top, current):\n self.bottom = bottom\n self.top = top\n self.current = current", "def show_next_frame(self):\n if self.frames:\n self.config(image=next(self.frames))\n self.after(self.delay, self.show_next_frame)" ]
[ "0.55284494", "0.55176115", "0.54155743", "0.5375682", "0.53093606", "0.5295649", "0.5262697", "0.52525777", "0.5098146", "0.5055639", "0.5050628", "0.50497913", "0.50408816", "0.49749383", "0.49492052", "0.49289334", "0.48759848", "0.48495993", "0.4837377", "0.4822591", "0.47991785", "0.47934374", "0.47859353", "0.47826695", "0.4754844", "0.47213313", "0.47011536", "0.4700263", "0.46734428", "0.46664983", "0.46637988", "0.46625102", "0.46623474", "0.46617398", "0.46574315", "0.4649372", "0.46426705", "0.46396795", "0.46318352", "0.4585659", "0.4583719", "0.45775005", "0.45710662", "0.45688483", "0.45667577", "0.45624614", "0.45517927", "0.45459697", "0.45419282", "0.45404038", "0.4527666", "0.45268846", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.4512648", "0.45058894", "0.45007375", "0.450001", "0.4491872", "0.44839776", "0.44799668", "0.4468811", "0.44617274", "0.44546202", "0.4449056", "0.4444578", "0.44425178", "0.44380194", "0.443678", "0.44365022", "0.44316146", "0.44152644", "0.44045863", "0.4404457", "0.44021726", "0.43967426", "0.43955663", "0.43954214", "0.4393803", "0.43902415", "0.4385411", "0.4384558", "0.4383242", "0.43826178", "0.43826178", "0.43820372", "0.43806168", "0.43760693", "0.43730527", "0.43711197", "0.43695608", "0.43682516", "0.43619576" ]
0.0
-1
Switch between top and bottom origin for the channels.
def switch_origin(self):
    self.origin = 'bottom' if self.origin == 'top' else 'top'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reshape(self, bottom, top):\r\n pass", "def reshape(self, bottom, top):\n\t\tpass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self,bottom,top):\n pass", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)", "def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def __init__(self):\r\n #set up pannel in centre of screen, just above the bottom of the screen.\r\n super(Pannel, self).__init__(image = Pannel.pannel,\r\n x = games.screen.width/2,\r\n y = games.screen.height -11)", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def set_zlim(self, bottom=None, top=None):\n if isinstance(self._frame, root.TH1F):\n warnings.warn(\"Attempting to set z-axis limits for 2D axes\")\n return\n\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_zlim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} z-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logz:\n warnings.warn(\n \"Attempting to set non-positive top zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_zlim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_zlim()[0]\n\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)", "def __window_forward(self):\n pass", "def move_north(self):\n self.vertical = (self.vertical * 2)[1:5]\n self.horizontal[1] = self.vertical[0]\n self.horizontal[3] = self.vertical[2]", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def move_east(self):\n self.horizontal = (self.horizontal * 2)[3:7]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def switchPlayer(self):\n\n \n tmp = self.current\n self.current = self.other\n self.other = tmp\n\n 
self.topSelector.toggleActive()\n self.bottomSelector.toggleActive()", "def __init__(self, bottom, top, current):\n self.bottom = bottom\n self.top = top\n self.current = current", "def backward(self, top, propagate_down, bottom):\r\n pass", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def mirror_y(board):\n new_board = board[:]\n new_board.reverse()\n return new_board", "def bottom_option():\n active = get_active_window()\n Width= get_middle_Width(active)\n Height=get_bottom_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def move_west(self):\n self.horizontal = (self.horizontal * 2)[1:5]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def move(self, top=None, left=None, bottom=None, right=None):\r\n rect = self.image.get_rect()\r\n\r\n if bottom != None:\r\n rect.bottom = bottom\r\n if right != None:\r\n rect.right = right\r\n if top != None:\r\n rect.top = top\r\n if left != None:\r\n rect.left = left", "def move_south(self):\n self.vertical = (self.vertical * 2)[3:7]\n self.horizontal[1] = self.vertical[0]\n self.horizontal[3] = self.vertical[2]", "def reset(self):\n self._top = [self.middle, self.middle, self.middle]\n self._left = [self.middle, self.middle, self.middle]\n self._right = [self.middle, self.middle, self.middle]\n self._bottom = [self.middle, self.middle, self.middle]", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def mirror(self):\n self.__mirror = not self.__mirror", "def setup(self, channels):\n self.channels = channels[:]", "def set_cover_position(self, **kwargs: Any) -> None:\n self._multi_level_switch_property.set(kwargs[\"position\"])", "def flip(self):\n self._start, self._end = self._end, self._start", "def __redrawChannels(self):\n self.__channelWin.clear()\n all_chans = self._client.getChannels()\n all_chans.sort(key=lambda c: c.getName())\n count = min(len(all_chans), self.__channelWin.getmaxyx()[0])\n show = all_chans[:count]\n for c in show:\n cur = self._client.currentChannel() == c\n if cur:\n attr = curses.A_REVERSE\n elif c in self._client.getJoined():\n attr = curses.A_BOLD\n else:\n attr = curses.A_DIM\n if c != self._client.getNoneChannel():\n self.__channelWin.addstr(\n \"{chan}\\n\".format(chan=c.getName()),\n attr\n )", "def overlay_up(self, idx):\n if not self.is_top_layer(idx):\n self.overlay_list[idx], self.overlay_list[idx+1] = \\\n self.overlay_list[idx+1], self.overlay_list[idx]", "def bend_connection(self, room_1, room_2):\n bend_point = Point(random.randint(room_1.left, room_1.right),\n random.randint(room_2.top, room_2.bottom))\n if room_1.bottom < room_2.top:\n draw_point = bend_point\n while self.get_tile(draw_point) == '#':\n self.set_tile(draw_point, glyph='|')\n draw_point = 
Point(draw_point.x, draw_point.y-1)\n else:\n draw_point = bend_point\n while self.get_tile(draw_point) == '#':\n self.set_tile(draw_point, glyph='|')\n draw_point = Point(draw_point.x, draw_point.y+1)\n if room_1.left < room_2.right:\n # Move off our starting point, so we start on a wall\n draw_point = Point(bend_point.x+1, bend_point.y)\n while self.get_tile(draw_point) == '#':\n self.set_tile(draw_point, glyph='-')\n draw_point = Point(draw_point.x+1, draw_point.y)\n else:\n # Move off our starting point, so we start on a wall\n draw_point = Point(bend_point.x-1, bend_point.y)\n while self.get_tile(draw_point) == '#':\n self.set_tile(draw_point, glyph='-')\n draw_point = Point(draw_point.x-1, draw_point.y)", "def resize_top(self, new_z, padding=None):\n self.upper_vertex[2] = new_z + padding", "def update_H(self):\n self.grid.H[:, :, -1, :] = self.grid.H[:, :, 0, :]", "def swap_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n grp.layout.cmd_swap_main()\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_shuffle_down()\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)", "def set_board(board):", "def mirror(self):\n \n screen_width = self.game.screen.get_width()\n screen_height = self.game.screen.get_height()\n\n if self.rect.centerx < 0 and self.vx < 0:\n self.rect.centerx = screen_width\n if self.rect.centerx > screen_width:\n self.rect.centerx = 0\n if self.rect.centery < 0 and self.vy < 0:\n self.rect.centery = 0\n if self.rect.centery > screen_height and self.vy > 0:\n self.rect.centery = screen_height", "def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. 
can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom", "def swap_main(self):\n if self.align == self._up:\n self.swap_right()\n elif self.align == self._down:\n self.swap_left()", "def set_window_position(self, left, top, right, bottom, state, is_floating):\n self._set_window_position(left, top, right, bottom, state, is_floating)", "def init_position(self):\n if self.invert_init_angle is False:\n self.theta_i_top = -self.theta_s_top\n self.theta_i_bot = -self.theta_s_bot\n else:\n self.theta_i_top = self.theta_s_top\n self.theta_i_bot = self.theta_s_bot\n\n self.move_mid_block(theta=self.theta_i_bot)\n self.move_top_block(theta=self.theta_i_top)\n\n # Variables used to motion\n self.x_offset = self.block_top.center.x\n self.d_top = np.sin(self.theta_s_top) * self.bars_top.length * 2\n self.d_bot = np.sin(self.theta_s_bot) * self.bars_bot.length * 2", "def yview_moveto(self, fraction):\n self.tk.call(self._w, 'yview', 'moveto', fraction)", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def update_H(self):\n self.grid.H[-1, :, :, :] = self.grid.H[0, :, :, :]", "def clear_top(self):\n background = pygame.Surface((720, 77))\n background.fill((255, 255, 255))\n self.screen.blit(background, (0, 0))\n pygame.display.update((0, 0, 720, 77))", "def move_back(self):\r\n self.center_x, self.center_y = self.save_pos", "def update_H(self):\n self.grid.H[:, -1, :, :] = self.grid.H[:, 0, :, :]", "def update_position(self):\n self.back = self.pos % self.road_len\n self.front = (self.pos + self.length) % self.road_len", "def autostop():", "def move(self, origin=(0, 0), destination=None, axis=None):\n dx, dy = _parse_move(origin, destination, axis)\n self.origin = np.array(self.origin) + np.array((dx, dy))\n\n if self.owner is not None:\n self.owner._bb_valid = False\n return self", "def transfer(self, *args):\n if self.cur != Win.right:\n return\n \n cur_pl = self.rightwin.data\n cur_song = self.rightwin.highlighted()\n if not cur_song:\n return\n\n for pl in 
itertools.filterfalse(lambda a: not a.highlighted, self.leftwin):\n if pl.data is not cur_pl:\n pl.data.insert(cur_song.data['path'])\n \n self.rightwin.down()", "def update(self):\n super().update()\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT\n\n if self.center_x < 250:\n self.change_x = (0.2) * OBJECTS_SPEED\n elif self.center_x > SCREEN_WIDTH - 250:\n self.change_x = (-0.2) * OBJECTS_SPEED", "def update_minimap_position(self):\n\t\tbtn = self.get_active_pane().get_minimap_btn()\n\t\tif btn is not None:\n\t\t\tself.window.minimap.set_relative_to(btn)\n\t\telse:\n\t\t\tself.window.minimap.set_relative_to(self.window.bottom_panes_box)", "def do_south(self, arg):\r\n moveDirection('south')", "def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)", "def bottom(self, bottom):\n self.ptr.bottom(bottom)", "def adjust_tile(self, tile, connection, begin):\r\n if begin:\r\n if tile[1] != connection:\r\n tile.reverse()\r\n elif tile[0] != connection:\r\n tile.reverse()", "def shuffle_up(self):\n self.clients.shuffle_up()\n self.group.layout_all()\n self.group.focus(self.clients.current_client)", "def positioning(self):\n pass", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)", "def swap_main(self):\n self.swap(self.clients.current_client, self.clients[0])", "def __init__(self, from_bottom=0, from_top=0):\n self._x = 7\n self._top = from_top\n self._bottom = from_bottom", "def set_origin(self, origin_x, origin_y):\r\n self.x = origin_x - (self.rect.width / 2)\r\n self.y = origin_y + self.rect.height\r\n self._update_rect()", "def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1", "def bottom_left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def init_position_electrodes_screen(self):\n self.line_shoulder_pos_l.hide()\n self.line_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()", "def _set_object_origin(obj: bpy.types.Object, origin: Vector):\n\twith SelectObjects([obj]), CursorAt(origin):\n\t\tbpy.ops.object.origin_set(type='ORIGIN_CURSOR', center='MEDIAN')", "def forward(self, bottom, top):\n blobs = self._get_next_minibatch() # Get a blob\n\n for blob_id, blob in enumerate(blobs): # blob[0] for image, blob[1] for mask\n # Reshape net's input blobs\n top[blob_id].reshape(*(blob.shape)) # Reshape top tensor/blob\n top[blob_id].data[...] 
= blob.astype(np.float32, copy=False) # Feed corresponding data into top tensor/blob", "def clockwise(self):\n temp = self._top\n self._top = self._left\n self._left = self._bottom\n self._bottom = self._right\n self._right = temp", "def do_north(self, arg):\r\n moveDirection('north')", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def correct_position(self):\n\n width = self.screen.get_width()\n height = self.screen.get_height()\n\n if self.last_screen_dimensions[\"width\"] > width:\n self.x -= self.last_screen_dimensions[\"width\"] - width\n\n if self.last_screen_dimensions[\"height\"] > height:\n self.y -= self.last_screen_dimensions[\"height\"] - height" ]
[ "0.5853456", "0.58374345", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.56316376", "0.56309074", "0.5490777", "0.5398423", "0.5386707", "0.53046316", "0.5291994", "0.5280545", "0.5252932", "0.5172233", "0.5165766", "0.51486814", "0.5148352", "0.512192", "0.50995785", "0.5093663", "0.5070834", "0.50644016", "0.5056251", "0.5050895", "0.5035585", "0.50352585", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.5034911", "0.50162715", "0.49961323", "0.4966976", "0.49523035", "0.49422422", "0.49323687", "0.49263245", "0.49082822", "0.4906515", "0.49037364", "0.4903582", "0.49016082", "0.4901117", "0.48990756", "0.48826647", "0.4879303", "0.4877727", "0.4875969", "0.4875969", "0.4875969", "0.48649585", "0.4855793", "0.4850054", "0.48477027", "0.48470467", "0.48442015", "0.48438695", "0.48336136", "0.48248258", "0.48241758", "0.4818675", "0.48174742", "0.4816637", "0.4815632", "0.48106357", "0.4810261", "0.48069897", "0.48036677", "0.48029137", "0.48001695", "0.47994417", "0.47864634", "0.47811064", "0.47802535", "0.47740483", "0.47667068", "0.47570282", "0.4748268", "0.47469252", "0.47450432", "0.47387695", "0.4726301" ]
0.7755325
0
Time at the center of the window.
def time(self):
    return sum(self._interval) * .5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])", "def center(window):\n window.update_idletasks()\n\n # Find the screen resolution\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # Find new (x, y) coordinates\n size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))\n x = screen_width/2 - 7 * size[0] / 13\n y = screen_height/2 - 6 * size[1] / 11\n\n # Apply new coordinates\n window.geometry(\"+%d+%d\" % (x, y))", "def centre(self):\n self.top.update_idletasks()\n # The horizontal position is calculated as (screenwidth - window_width)/2\n hpos = int((self.top.winfo_screenwidth() - self.top.winfo_width())/2)\n # And vertical position the same, but with the height dimensions\n vpos = int((self.top.winfo_screenheight() - self.top.winfo_height())/2)\n # And the move call repositions the window\n self.top.geometry('+{x}+{y}'.format(x=hpos, y=vpos))", "def center_window(self):\n\n\t\tframe_geo = self.frameGeometry()\n\t\tcursor_pos = QtWidgets.QApplication.desktop().cursor().pos()\n\t\tscreen = QtWidgets.QApplication.desktop().screenNumber(cursor_pos)\n\t\tcenter_point = QtWidgets.QApplication.desktop().screenGeometry(screen).center()\n\t\tframe_geo.moveCenter(center_point)\n\t\tself.move(frame_geo.topLeft())", "def start(self):\n self.timeStart = pygame.time.get_ticks()", "def center(self):\n self.root.update_idletasks()\n w = self.root.winfo_screenwidth()\n h = self.root.winfo_screenheight()\n size = tuple(int(_) for _ in self.root.geometry().split('+')[0].split('x'))\n x = w/2 - size[0]/2\n y = h/2 - size[1]/2\n self.root.geometry(\"240x80+%d+%d\" % (x, y))", "def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery", "def center_peaktime(self):\n ind = np.argmax(abs(self.Et))\n shift = (self.Et.shape[0] / 2 - ind).astype(np.int)\n self.Et = np.roll(self.Et, shift)", "def drawCenter(self):\n pygame.draw.circle(display, self.color, (self.screenx, self.screeny), 1, 0)", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def moving(self):\n self.animation()\n assert(self.rect.x % 32 == 0 or self.rect.y % 32 == 0), \\\n 'Not centered on tile'", "def center(self):\n # get the compute screen's size\n screen = QDesktopWidget().screenGeometry()\n # get the app windows' size\n size = self.geometry()\n self.move(int((screen.width() - size.width()) / 2), int((screen.height() - size.height()) / 2))", "def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)", "def zoomAtCenter(self, f):\n wforms = self.grid.getWaveforms()\n if wforms:\n dur = wforms[0].getWidthSeconds()\n dur2 = dur / f\n a = wforms[0].getBeginSeconds() + (dur-dur2)/2.0\n for wform in wforms:\n wform.display(a, dur2)", "def center(self):\r\n frameGm = self.frameGeometry()\r\n screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())\r\n centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()\r\n frameGm.moveCenter(centerPoint)\r\n self.move(frameGm.topLeft())", "def centerWindow(self):\n framegeo = self.frameGeometry()\n center = QtGui.QDesktopWidget().availableGeometry().center()\n framegeo.moveCenter(center)\n self.move(framegeo.topLeft())", "def auto_moving(self):\n self.animation()\n\n assert(self.rect.x % 32 == 0 or self.rect.y % 32 == 
0), \\\n 'Not centered on tile'", "def tick(self):\n uh.rotation(270)\n while True:\n self.show_time()\n time.sleep(60)\n uh.off()", "def _place_elements(self, dt):\n self.root.size = Window.size\n center = Window.center\n self.rect.pos = center[0] + 100, center[1] + 100\n self.circle.pos = center[0] - 100, center[1] - 100", "def center_on_screen(self):\n window_frame = self.frameGeometry()\n screen_center = QtGui.QDesktopWidget().availableGeometry().center()\n window_frame.moveCenter(screen_center)\n self.move(window_frame.topLeft())", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def simulator_window_center(self, value: str) -> None:\n self.set_capability(SIMULATOR_WINDOW_CENTER, value)", "def CenterZombie(self):\n # Requirement ID: 8.0.1\n\n self.center = self.screen_rect.centerx", "def test_center_window(manager):\n manager.test_window(\"one\")\n\n manager.c.window.set_position_floating(50, 50)\n manager.c.window.set_size_floating(200, 100)\n info = manager.c.window.info()\n assert info[\"x\"] == 50\n assert info[\"y\"] == 50\n assert info[\"width\"] == 200\n assert info[\"height\"] == 100\n\n manager.c.window.center()\n info = manager.c.window.info()\n assert info[\"x\"] == (800 - 200) / 2 # (screen width - window width) / 2\n assert info[\"y\"] == (600 - 100) / 2 # (screen height - window height) / 2\n assert info[\"width\"] == 200\n assert info[\"height\"] == 100", "def center(win):\n win.update_idletasks()\n width = 1120\n frm_width = win.winfo_rootx() - win.winfo_x()\n win_width = width + 2 * frm_width\n height = 630\n titlebar_height = win.winfo_rooty() - win.winfo_y()\n win_height = height + titlebar_height + frm_width\n x = win.winfo_screenwidth() // 2 - win_width // 2\n y = win.winfo_screenheight() // 2 - win_height // 2\n win.geometry(\"{}x{}+{}+{}\".format(width, height, x, y))\n win.deiconify()", "def start_clock(self):\n pass", "def update_time_base(self, event):\n print(\"TimeBase.update_time_base()\")\n print(\"Base de temps : \", self.scale_T.get())\n if not isinstance(self.parent, Tk):\n self.parent.update_time(self.scale_T.get())", "def align_window(self):\n self.parent.update()\n\n # get screen info\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n\n # get window info\n window_width = self.parent.winfo_width()\n window_height = self.parent.winfo_height()\n\n # determine position of the window\n x = screen_width - window_width/2 - 120\n y = screen_height - window_height/2 - 60\n\n # move the window to determined position\n self.parent.geometry('+%d+%d' % (x, y))", "def center_on_mouse(w):\n root=w.get_toplevel().get_root_window()\n (screen, x, y, mod) = root.get_display().get_pointer()\n r = screen.get_monitor_geometry(screen.get_monitor_at_point(x, y))\n\n # Let's try to center the window on the mouse as much as possible.\n width, height = w.get_size()\n\n posx = max(r.x, x - width / 2)\n if posx + width > r.x + r.width:\n posx = r.x + r.width - width\n\n posy = max(r.y, y - height / 2)\n if posy + height > r.y + r.height:\n posy = r.y + r.height - height\n\n w.move(posx, posy)", "def time(self):\n return pygame.time.get_ticks() - self.start_time", "def animation(self, t):\n self.program['u_clock'] = 2*t\n gloo.clear('black')\n self.program.draw('points')\n return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]", "def alarm(self, event):\r\n\r\n # top left corner of top level window\r\n x1_coordinate, y1_coordinate = 
self.winfo_rootx(), self.winfo_rooty()\r\n\r\n # bottom right corner of top level window\r\n x2_coordinate = x1_coordinate + self.winfo_width()\r\n y2_coordinate = y1_coordinate + self.winfo_height()\r\n if not (x1_coordinate < event.x_root < x2_coordinate and\r\n y1_coordinate < event.y_root < y2_coordinate):\r\n self.attributes(\"-alpha\", 0.1)\r\n self.bell()\r\n self.after(100, lambda: self.attributes(\"-alpha\", 1))", "def center_window(top):\n screen_width = top.winfo_screenwidth()\n screen_height = top.winfo_screenheight()\n\n width, height, old_x, old_y = get_geometry(top)\n\n new_x = (screen_width - width) // 2\n new_y = (screen_height - height) // 2\n geom = '{}x{}+{}+{}'.format(width, height, new_x, new_y)\n print(\"new geometry:\", geom)\n top.geometry(geom)", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def update(self): \n super().update()\n if self.center_x < constants.left_limit:\n self.center_x = self.screen_width + constants.offscreen_space\n if self.center_x > self.screen_width + constants.offscreen_space:\n self.center_x = constants.left_limit\n if self.center_y > self.screen_height + constants.offscreen_space:\n self.center_y = constants.bottom_limit\n if self.center_y < constants.bottom_limit:\n self.center_y = self.screen_height + constants.offscreen_space", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)", "def setCurTime(self):\n\t\tself.config.SET_CUT_TIME = True", "def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)", "def start_time(self) -> float:\r\n ...", "def drawTimer(self,screen):\n if(self.frame - self.genStartFrame >= self.nextCheckpointCost):\n self.checkpoint +=1\n self.lastCheckpointCost = self.nextCheckpointCost\n self.nextCheckpointCost = self.maze.checkFuelCost(self.checkpoint)\n angle = 2*np.pi*(self.nextCheckpointCost - self.frame + self.genStartFrame) / (self.nextCheckpointCost - self.lastCheckpointCost)\n temppos = [50 - 20*np.sin(angle),50 - 20*np.cos(angle)]\n tempsize = int(angle * 3)\n pygame.draw.line(screen,(240,240,240),(50,50),temppos,2)\n pygame.draw.circle(screen,(240,240,240),(50,50),max(24-tempsize,1),1)", "def offset_capture():\n Clock()", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def action_to_spawn(self):\n self.scene.center_on_spawn()", "def tick(self):", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def ShowTime():\n ClearDisplay()\n DisplayMsg('{:^16}'.format(\"CLOCK\"), 8)\n strData = GetTime().split(\" \")\n DisplayMsg('{:^16}'.format(strData[0]), 32)\n DisplayMsg('{:^16}'.format(strData[1]), 40)\n display.show()", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n 
self.speak(\"the current time is\")\r\n self.speak(time)", "def tick(self):\r\n new_time = time.strftime('%H:%M:%S')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.config(text=self.time)\r\n self.after(200, self.tick)", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def center(self):\n return self.pos + self.axis / 2.0", "def center(self):\r\n qr = self.frameGeometry()\r\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\r\n qr.moveCenter(cp)\r\n self.move(qr.topLeft())", "def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))", "def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def wall_time(self):", "def wrap(self):\n if self.center.x > SCREEN_WIDTH:\n self.center.x = 0\n if self.center.y > SCREEN_HEIGHT:\n self.center.y = 0\n if self.center.x < 0:\n self.center.x = SCREEN_WIDTH\n if self.center.y < 0:\n self.center.y = SCREEN_HEIGHT", "def hide(self):\r\n self.rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT -2000)", "def center(self):\n qr = self.frameGeometry()\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)", "def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])", "def update(self):\n self.x += 0.1\n self.rect.centerx = self.x\n\n if self.rect.left >= self.screen_rect.right:\n self.x -= self.screen_rect.width + self.rect.width\n self.rect.y = randint(0, self.screen_rect.height)", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def tick(self):\n self.delta = self.clock.tick(50) / 1000.0", "def tick(self):\n self.delta = self.clock.tick(50) / 
1000.0", "def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", \"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])", "def move_center(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n obj.move(dw/2-mw/2, dh/2-mh/2)", "def tick(self):\r\n pass", "def setup_winner(self):\n self.winnertime = FPS*4 \n self.wevegotawinner = self.winnertime#how many seconds it shows the winner\n self.winner_radius = max(self.height,self.width)\n self.now = time.time()", "def move(self, window):\r\n self.save_pos = (self.center_x, self.center_y) # sauvegarde la position avant de bouger\r\n self.center_x = math.cos(self.angle) * self.velocity + self.center_x\r\n self.center_y = math.sin(self.angle) * self.velocity + self.center_y\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius) # update le rectangle\r", "def main():\r\n x = int(input(\"Enter the x coordinate of the center point: \"))\r\n y = int(input(\"Enter the y coordinate of the center point: \"))\r\n radius = int(input(\"Enter the radius: \"))\r\n drawCircle(Turtle(), x, y, radius)\r\n sleep(5)", "def center(self):\n \n geometry = self.frameGeometry()\n center_p = QDesktopWidget().availableGeometry().center()\n geometry.moveCenter(center_p)\n self.move(geometry.topLeft())", "def update_time(self):\n pass # Do nothing", "def tick():\n\n global time1\n # get the current local time from the PC\n time2 = time.strftime(\"%H:%M:%S\")\n # if time string has changed, update it\n if time2 != time1:\n time1 = time2\n timeLabel.config(text=time2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n timeLabel.after(200, tick)", "def tick(self):\n pass", "def tick(self):\n pass", "def display_time(self, time):\n pygame.draw.rect(self.screen, self.font_fgcolor, self.time_rect)\n self.screen.blit(self.small_font.render(\"Elapsed time: %.0f s\" % time, -1, (0, 0, 0)), (5, 720))\n pygame.display.update(self.time_rect)\n return", "def t0(self):\n return self._time_axis.start", "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def center(self):\n qr = self.frameGeometry()\n central_p = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(central_p)\n self.move(qr.topLeft())", "def current_time(cls) -> float:", "def setTime(self, *args):\n return _osgAnimation.Motion_setTime(self, *args)", "def update(self):\n \n # If the countdown timer has not yet hit 0\n if self.__time > 0:\n \n # Displays the grace period time in seconds\n countdown_message = str(self.__time)\n self.image = self.__font1.render(countdown_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # Message is positioned in the center of the screen near the top\n self.rect.centerx, self.rect.centery = 400, 30\n \n # If the countdown timer has hit 0\n elif self.__time <= 0:\n \n # 
Notifies the Player that the Minotaur has escaped from his contained area\n release_message = \"he has been released\"\n self.image = self.__font2.render(release_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # Message is positioned in the center of the screen near the top\n self.rect.centerx, self.rect.centery = 400, 30", "def update(self):\n tic = time.time()\n if self.playing:\n self.show_frame()\n add_delay = np.maximum(1, self.delay - int(1000 * (time.time() - tic)))\n self.window.after(ms=add_delay, func=self.update)", "def night_center(self, date=None):\n sunset = self.sunset(date=date)\n sunrise = self.sunrise(date=sunset)\n center = sunset + timedelta(0, (sunrise - sunset).total_seconds() / 2.0)\n center = self.date_to_local(center)\n return center", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def handle_tick():\n reset()\n hideturtle()\n move_aquarium(aq)\n draw_aquarium(aq)\n update()\n ontimer(handle_tick, 10)", "def _setCursorLocOnTimeLabel(self, waveform, t):\n self.tm.setTime(t)", "def start_time(self):\n pass", "def centre(self):\n\n qr = self.frameGeometry()\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def docked_time(self):\n return self._docked_time", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def showCenter(self, window, color=None, mode=None):\n if not color: color = self.center_color\n if not mode: mode = self.center_mode\n self.center.show(window, mode=mode, color=color)", "def center(self):\n return self._center", "def center(self):\n return self['center']" ]
[ "0.6251276", "0.61837316", "0.6181719", "0.6170674", "0.6131986", "0.6077728", "0.60649556", "0.6055015", "0.6043705", "0.60249746", "0.6022022", "0.6018045", "0.60002804", "0.59981203", "0.5997838", "0.5936975", "0.5931709", "0.59026456", "0.58981544", "0.5848138", "0.58142173", "0.57956165", "0.57956165", "0.57956165", "0.57956165", "0.5788309", "0.5777181", "0.5767182", "0.5764551", "0.57564527", "0.5730806", "0.5716363", "0.56946903", "0.56909144", "0.5662485", "0.5652126", "0.56450236", "0.56436294", "0.5633116", "0.56306344", "0.5627061", "0.56135416", "0.55895823", "0.55696917", "0.55577683", "0.5557029", "0.5554242", "0.55267745", "0.5525003", "0.55234414", "0.55172765", "0.55164623", "0.5511939", "0.55069375", "0.5481115", "0.5480456", "0.54799646", "0.54799646", "0.54743624", "0.54718924", "0.5470638", "0.5469384", "0.5461572", "0.5449155", "0.54448545", "0.54432434", "0.5440275", "0.5440275", "0.5439672", "0.5439652", "0.54386383", "0.5433831", "0.5429863", "0.54250306", "0.5413066", "0.54120624", "0.5398956", "0.53818905", "0.53818905", "0.5380869", "0.5375696", "0.5366644", "0.5366644", "0.5366054", "0.5351616", "0.5349156", "0.5346004", "0.534078", "0.532961", "0.53197885", "0.53192", "0.531533", "0.53099877", "0.5305218", "0.5302519", "0.52995694", "0.5295699", "0.52911365", "0.5281988", "0.52803606", "0.5273303" ]
0.0
-1
Interval as `(tmin, tmax)`.
def interval(self):
    return self._interval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_tmin_tmax(inst, tmin, tmax):\n _check_type(tmin, (None, \"numeric\"), item_name=\"tmin\")\n _check_type(tmax, (None, \"numeric\"), item_name=\"tmax\")\n\n # check positiveness for tmin, tmax\n for name, arg in ((\"tmin\", tmin), (\"tmax\", tmax)):\n if arg is None:\n continue\n if arg < 0:\n raise ValueError(\n f\"Argument '{name}' must be positive. \" f\"Provided '{arg}'.\"\n )\n # check tmax is shorter than instance\n if tmax is not None and inst.times[-1] < tmax:\n raise ValueError(\n \"Argument 'tmax' must be shorter than the instance \"\n f\"length. Provided: '{tmax}', larger than \"\n f\"{inst.times[-1]}s instance.\"\n )\n # check that tmax is larger than tmin\n if tmax is not None and tmin is not None and tmax <= tmin:\n raise ValueError(\n \"Argument 'tmax' must be strictly larger than 'tmin'. \"\n f\"Provided 'tmin' -> '{tmin}' and 'tmax' -> '{tmax}'.\"\n )\n # check that tmin is shorter than instance\n if tmin is not None and inst.times[-1] <= tmin:\n raise ValueError(\n \"Argument 'tmin' must be shorter than the instance \"\n f\"length. Provided: '{tmin}', larger than \"\n f\"{inst.times[-1]}s instance.\"\n )\n return tmin, tmax", "def range(series):\n return min(series), max(series)", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def interval(self):\n return (self.start, S.Infinity)", "def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def heckbert_interval(data_low, data_high, numticks=8, nicefunc=_nice, enclose=False):\n if data_high == data_low:\n return data_high, data_low, 0\n if numticks == 0:\n numticks = 1\n\n range = nicefunc(data_high - data_low)\n if numticks > 1:\n numticks -= 1\n d = nicefunc(range / numticks, round=True)\n if enclose:\n graphmin = ceil(data_low / d) * d\n graphmax = 
floor(data_high / d) * d\n else:\n graphmin = floor(data_low / d) * d\n graphmax = ceil(data_high / d) * d\n return graphmin, graphmax, d", "def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)", "def test_inclusive_intervals(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5.5)\n assert dim.interval() == (-3, 3)", "def map_to_range(val, old_min, old_max, new_min, new_max):\n return new_max - (val - old_min) * (new_max - new_min) / (old_max - old_min)", "def get_range(self):\n return time_to_range(self.get_time())", "def get_interval(array, min_value, max_value, epsilon_1=0, epsilon_2=0):\n min_index = np.min(np.where(array >= min_value + epsilon_1))\n max_index = np.max(np.where(array <= max_value - epsilon_2))\n\n indexes = np.arange(min_index, max_index + 1)\n\n return indexes", "def range(self):\n return self.times[0], self.times[-1]", "def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def min_max(xs):\n return min(xs), max(xs)", "def timeRange(self):\r\n _times = self.getTimes()\r\n return _times[0], _times[-1]", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def test_interval(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.interval(1.0) == (\n -3.0,\n 1.0,\n ) # reminder that `scale` is not upper bound", "def interval_distance(min_a: float, max_a: float, min_b: float, max_b: float):\n return min_b - max_a if min_a < min_b else min_a - max_b", "def timedInterval(self, start, end=False):\n\n assert type(start) == float\n interval1 = min(\n enumerate(self.intervals), key=lambda x: abs(x[1].xmin - start))\n\n if end:\n assert type(end) == float\n interval2 = self.timedInterval(end)\n else:\n interval2 = interval1\n\n return (interval1[0], interval2[0] + 1)", "def eta_range(self):\n\t\tticks = self.eta_details.keys()\n\t\treturn min(ticks), max(ticks)", "def _interval(cls,best,lo,hi):\n return ugali.utils.stats.interval(best,lo,hi)", "def scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )", "def bounds(self) -> Tensor:\n return torch.cat([self.mins, self.mins + self.ranges], dim=-2)", "def intervalle(bMin, bMax):\n\tfor i in range(bMin+1,bMax):\n\t\tyield i", "def interval(self):\n return Intersection(*(a.interval for a in self.args))", "def between(min, max):\n def func(x):\n return min <= x <= max\n return func", "def data_range(x):\n return max(x)-min(x)", "def test_intervals(self):\n x = np.array([-5, -3, -2, -2, 100])\n self.assertEqual(\n npinterval.interval(x, 2/5),\n (-2, -2, 2, 4))\n self.assertEqual(\n npinterval.interval(x, 3/5),\n (-3, -2, 1, 4))\n self.assertEqual(\n npinterval.interval(x, 4/5),\n (-5, -2, 0, 4))", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def range(self):\n return (self._start, self._end)", "def range(self):\n return self.timerange()", "def map_range(x, in_min, in_max, out_min, out_max):\n mapped = (x-in_min) * (out_max - out_min) / (in_max-in_min) + out_min\n if out_min <= out_max:\n return max(min(mapped, out_max), out_min)\n return min(max(mapped, out_max), out_min)", "def interval_MAX_SMT(intervals):\n lower_indices = np.argsort(intervals[:, 0])\n lower_sorted = intervals[lower_indices, 0]\n\n upper_indices = np.argsort(intervals[:, 1])\n upper_sorted = 
intervals[upper_indices, 1]\n\n best_lower, best_upper = 0, 0\n upper_i = 0\n best_met = -1\n n_met = 0\n for lower_i, lower in enumerate(lower_sorted):\n # First, we update upper -- everything in this loop is an interval\n # we were meeting before but not anymore.\n while upper_sorted[upper_i] < lower:\n n_met -= 1\n upper_i += 1\n # We now meet the interval that this lower is from.\n n_met += 1\n if n_met > best_met:\n best_lower, best_upper = lower, upper_sorted[upper_i]\n best_met = n_met\n elif (len(lower_sorted) - lower_i) < (best_met - n_met):\n # Each iteration adds *at most* 1 to n_met. For us to even have\n # a chance of updating best_met, then, we will have to do at\n # least (best_met - n_met) more iterations.\n break\n return best_lower, best_upper, best_met", "def interval(start, end):\n return seconds_since_midnight(end) - seconds_since_midnight(start)", "def timerange(self, t):\n\n t = T(t)\n\n if t.anchored:\n return (t.T, t.T)\n successors = [n for n in self._anchored_successors(t)]\n predecessors = [n for n in self._anchored_predecessors(t)]\n\n earlier_successor = None\n if successors:\n earlier_successor = min(successors)\n\n later_predecessor = None\n if predecessors:\n later_predecessor = max(predecessors)\n\n return (later_predecessor.T, earlier_successor.T)", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def remap_interval(val, in_start, in_end, out_start, out_end):\n in_range = in_end-in_start\n out_range = out_end-out_start\n return (val-in_start)/in_range*out_range+out_start", "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def test_interval(self):\n dim = Real(\"yolo\", \"norm\", 0, 3, low=-3, high=+3)\n assert dim.interval() == (-3, 3)\n\n dim = Real(\"yolo\", \"alpha\", 0.9, low=-3, high=+3)\n assert dim.interval() == (0, 3)\n\n dim = Real(\"yolo\", \"uniform\", -2, 4, low=-3, high=+3)\n assert dim.interval() == (-2.0, 2.0)", "def get_interval(self, start_time):\n end_time = start_time + self.interval\n return start_time, end_time", "def get_interval(self, start_time):\n end_time = start_time + self.interval\n return start_time, end_time", "def test_interval(self):\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=2)\n space.register(dim)\n dim = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim)\n\n assert space.interval() == [categories, (-3, 3), (-np.inf, np.inf)]", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n 
lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def getSliderRange(*args):\n\n #get timeslider range start\n startF = cmds.playbackOptions(query=True, min=True)\n endF = cmds.playbackOptions(query=True, max=True)\n return(startF, endF)", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n nicefunc=self._nice_pow10,\n enclose = True)\n return frange(min, max, delta)", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def min_max_range(s):\n # note np.argmax, np.argmin returns the position of first occurence of global max, min\n sign = np.sign(np.argmax(s) - np.argmin(s))\n if sign == 0:\n return 0.0\n else:\n return sign*(np.max(s) - np.min(s))", "def range_overlap(ranges):\n max_left = 0.0\n min_right = 1.0\n for (left, right) in ranges:\n max_left = max(max_left, left)\n min_right = min(min_right, right)\n return (max_left, min_right)", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def set_period_limits(self): # function[Tmin, Tmax] = setTlim(obj)\n\n x_min = self.tf.minimum_period\n x_max = self.tf.maximum_period\n\n Tmin = 10 ** (np.floor(np.log10(x_min) * 2) / 2)\n if (np.log10(x_min) - np.log10(Tmin)) < 0.15:\n Tmin = 10 ** (np.log10(Tmin) - 0.3)\n\n Tmax = 10 ** (np.ceil(np.log10(x_max) * 2) / 2)\n if (np.log10(Tmax) - np.log10(x_max)) < 0.15:\n Tmax = 10 ** (np.log10(Tmax) + 0.3)\n return Tmin, Tmax", "def get_min_max(ints):\n if not ints:\n return\n max = ints[0]\n min = ints[0]\n\n\n for i in ints:\n if i > max:\n max = i\n if i < min:\n min = i\n return (min, max)", "def selecting_a_representative_for_an_interval(begin, end, the_set):\n save_tuple = None\n the_min = float('inf')\n for t in the_set:\n if begin <= t[0] <= end:\n if t[1] < the_min:\n save_tuple = t\n the_min = t[1]\n\n return save_tuple", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks, enclose=True)\n return frange(min, max, delta)", "def _builtin_between(low, high, value, **k):\n mode = check_mode((low, high, value), ['iii', 'iiv'], functor='between', **k)\n low_v = int(low)\n high_v = int(high)\n if mode == 0: # Check\n value_v = int(value)\n if low_v <= value_v <= high_v:\n return [(low, high, value)]\n else: # Enumerate\n results = []\n for value_v in range(low_v, high_v + 1):\n results.append((low, high, Constant(value_v)))\n return results", "def interval(self):\n return Interval(self._ll_tree.get_left(), self._ll_tree.get_right())", "def _full_value_range(self):\n min_value, max_value = 
self._raw_data.data_range\n return max_value - min_value", "def set_maprange(xmin, ymin, xmax, ymax, epsg_in='epsg:4326'):\n outProj = pyproj.Proj(init='epsg:3857')\n inProj = pyproj.Proj(init=epsg_in)\n xmin,ymin = 75, -55\n xmax,ymax = 175, -5\n x1,y1 = pyproj.transform(inProj,outProj,xmin,ymin)\n x2,y2 = pyproj.transform(inProj,outProj,xmax,ymax)\n return x1, y1, x2, y2", "def ticks(self, domain_min, domain_max):\n raise NotImplementedError()", "def se2interval(a, b):\n\n Iab = (a,neg(b))\n return Iab", "def minima_in_range(r, g_r, r_min, r_max):\n idx = np.where(np.logical_and(np.greater_equal(r, r_min), np.greater_equal(r_max, r)))\n g_r_slice = g_r[idx]\n g_r_min = g_r_slice[g_r_slice.argmin()]\n idx_min, _ = find_nearest(g_r, g_r_min)\n return r[idx_min], g_r[idx_min]", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def maxima_in_range(r, g_r, r_min, r_max):\n idx = np.where(np.logical_and(np.greater_equal(r, r_min), np.greater_equal(r_max, r)))\n g_r_slice = g_r[idx]\n g_r_max = g_r_slice[g_r_slice.argmax()]\n idx_max, _ = find_nearest(g_r, g_r_max)\n return r[idx_max], g_r[idx_max]", "def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)", "def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)", "def simplebounds(cls, val, lower, upper):\n if val < lower:\n val = lower\n if val > upper:\n val = upper\n return val", "def test_interval(self):\n dim = Fidelity(\"epoch\", 1, 10)\n dim.interval() == (1, 10)", "def between(minl:int, maxl:int) -> str:\n return f\"{{{minl},{maxl}}}\"", "def interval(self):\n return self._base_interval", "def tas(tasmin: xr.DataArray, tasmax: xr.DataArray) -> xr.DataArray:\n tasmax = convert_units_to(tasmax, tasmin)\n tas = (tasmax + tasmin) / 2\n tas.attrs[\"units\"] = tasmin.attrs[\"units\"]\n return tas", "def assert_between(value, minval, maxval):\n assert_greater_equal(value, minval)\n assert_less_equal(value, maxval)", "def range_limit(val, minv, maxv):\n\tif (val < minv):\n\t\tval = minv\n\telif (val > maxv):\n\t\tval = maxv\n\treturn val", "def interval(self):\n raise NotImplementedError()", "def range(self):\n return self._upper - self._lower", "def _get_day_limits(self) -> Tuple[datetime, datetime]:\n day_start = min(self.dset.time.datetime)\n day_end = max(self.dset.time.datetime)\n \n return day_start, day_end", "def define_intervals(self):\n i = 5 # a step of increment\n interval_sum = self.min_step\n interval_list = [self.min_step]\n while interval_sum < self.max_step:\n interval_sum += i\n interval_list.append(interval_sum)\n # interval_list.append(self.max_step)\n # print(\"Intervals\", interval_list)\n return interval_list", "def bounds(x, xMin, xMax):\n if (x < xMin):\n x = xMin\n elif (x > xMax):\n x = xMax\n return(x)", "def range_around(goal_val: int, spread: int, min_val: int = 0, max_val: int = math.inf):\n lower = max(min_val, goal_val - spread)\n upper = min(max_val, goal_val + spread)\n return (lower, upper)", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def min_max(items):\n return min(items), max(items)", "def _interval_tuples(self, interval, entries):\n date_first, date_last = getters.get_min_max_dates(entries,\n (Transaction))\n\n if not date_first:\n return []\n\n interval_tuples = []\n while 
date_first <= date_last:\n next_date = get_next_interval(date_first, interval)\n interval_tuples.append((date_first, next_date))\n date_first = next_date\n\n return interval_tuples", "def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def alpha_range(x0, x1, x_min, x_max):\n if x0 == x1:\n raise ValueError('x1 and x2 should be different, get {} and {}'.format(x0, x1))\n alpha_x1 = (x_min - x0) / (x1 - x0)\n alpha_x2 = (x_max - x0) / (x1 - x0)\n alpha_min = max(0, min(alpha_x1, alpha_x2))\n alpha_max = min(1, max(alpha_x1, alpha_x2))\n return alpha_min, alpha_max", "def intervals_between(self, start: dt.datetime, end: dt.datetime) -> List[Interval]:\n relevant_intervals: List[Interval] = []\n\n for interval in self.intervals:\n if start <= interval.start_time < end:\n relevant_intervals.append(interval)\n\n return relevant_intervals", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def mapRange(num, min1, max1, min2, max2, clamp=True):\n if(clamp and num < min1):\n return min2\n if(clamp and num > max1):\n return max2\n\n num1 = (num - min1) / (max1 - min1)\n num2 = (num1 * (max2 - min2)) + min2\n return num2", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin", "def tnuc_range2gnuc_range_(np, tbeg, tend):\n try:\n return min(np[tbeg-1], np[tend-1]), max(np[tbeg-1], np[tend-1])\n except IndexError:\n raise IncompatibleTranscriptError('invalid_cDNA_range_[%d_%d];expect_[0_%d]' % (tbeg, tend, len(np)))", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax" ]
[ "0.68151754", "0.66623974", "0.6629554", "0.6584883", "0.65043294", "0.6474935", "0.6412259", "0.6410299", "0.63806736", "0.63406235", "0.62529576", "0.6237908", "0.62276185", "0.6169634", "0.6162228", "0.6150554", "0.6136628", "0.6109361", "0.61021554", "0.608784", "0.6040315", "0.60171014", "0.60040855", "0.59701246", "0.5963273", "0.59284776", "0.59219295", "0.5912832", "0.59030527", "0.587506", "0.5874606", "0.5872871", "0.58663213", "0.586458", "0.58565134", "0.58353704", "0.5829863", "0.5823994", "0.5814764", "0.5800921", "0.57931143", "0.57916695", "0.5771588", "0.5756668", "0.57403934", "0.57402223", "0.573599", "0.5735104", "0.5730228", "0.5730228", "0.5727912", "0.57252055", "0.5723944", "0.57214546", "0.5720759", "0.5718124", "0.5715894", "0.5714886", "0.5707812", "0.57065433", "0.56949717", "0.56812894", "0.5677475", "0.5675774", "0.56715256", "0.56682545", "0.5664229", "0.5662889", "0.56444347", "0.56295264", "0.56115407", "0.5611112", "0.560223", "0.55981696", "0.55975294", "0.5592851", "0.5591034", "0.55882853", "0.55874306", "0.5586629", "0.55865866", "0.5584597", "0.55825686", "0.5582134", "0.5580692", "0.5579475", "0.55665225", "0.55626404", "0.55571884", "0.55552644", "0.55429286", "0.55424213", "0.5540144", "0.55142945", "0.5505", "0.549822", "0.5495195", "0.54878545", "0.5487343", "0.54834867", "0.5482229" ]
0.0
-1
Half of the duration of the current interval.
def half_duration(self): if self._interval is not None: a, b = self._interval return (b - a) * .5 else: return self.interval_duration * .5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio", "def _get_half_time(self):\n return self.__half_time", "def widen(self):\n t, h = self.time, self.half_duration\n h *= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def narrow(self):\n t, h = self.time, self.half_duration\n h /= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def getDurationReciprocal(self):\n return 1/self.duration", "def duration(self):\r\n return self.t2 - self.t1", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def get_interval(self):\n return self.interval * 1000", "def duration(self):\n return self._end - self._begin", "def duration(self):\r\n return self.stop - self.start", "def time(self):\n return sum(self._interval) * .5", "def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def middle_value(self):\n duration = self.__end.get_midpoint() - self.__begin.get_midpoint()\n return float(self.__begin.get_midpoint()) + float(duration) / 2.", "def period(self) -> int:", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def duration(self):\n self._current_duration = time.perf_counter() - self._duration_start\n return round(self._current_duration, 4)", "def _period( self ):\r\n\treturn 2 * pi * sqrt( self.orbital_elements[0]**3 / self.mu_central_body )\r\n\t# http://en.wikipedia.org/wiki/Orbital_period#Calculation\r", "def duration(self):\n return self.end - self.start", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration(self) -> float:\n return self.delta_t * len(self)", "def update_period(self):\n return 0.1", "def duration(self) -> float:\n return float(len(self.__samples))/float(self.__rate)", "def duration(self) -> float:\n return self._stop - self._start if self._stop is not None else None", "def duration(self) -> float:\n return self.endTime()-self.startTime()", "def duration(self):\n return self.end_abs - self.start", "def period(self):\n return float(self._period) / 1000", "def half_frame(self) -> None:\n pass", "def half_frame(self) -> None:\n pass", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def time_step(self) -> float:\n return self._timestep", "def getDuration(self):\n if self.getDot():\n return self.duration*1.5\n else:\n return self.duration", "def get_duration(f):\n return 0", "def duration(self):\r\n\t\treturn (self.globEnd - self.globStart)", "def duration(self):\n pass", "def duration(self):\n pass", "def fourier_period(self, s):\n return 4 * np.pi * s / (self.w0 + (2 + self.w0 ** 2) ** 0.5)", "def duty_ns(self, duration: Optional[int]):", "def duration(self):\n window_length = self.window_length\n if self.window_length is None:\n warnings.warn(\n \"spectrogram must have window_length attribute to\"\n \" accurately calculate duration. 
Approximating duration.\"\n )\n return self.times[-1]\n else:\n return self.times[-1] + window_length / 2", "def taper_ratio(self) -> float:\n return self.xsecs[-1].chord / self.xsecs[0].chord", "def durationRemain(self, l=None):\n if l is None:\n l = self.rhythm\n full = float(self.time.upper)/self.time.lower\n s = 0\n for i in range(len(l)):\n s += 1.0 / l[i]\n return full - s", "def duration(self):\n return self.end_time - self.start_time", "def interval(self):\n return self.__interval", "def pulse_width_percent(self) -> float:", "def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)", "def bandwidth(self):\n return self.stop_hz - self.start_hz", "def block_period_duration(self):\n return self._safe_value(VAR_BLOCKPERIODDURATION, int)", "def render_constant(duration):\n\n return lambda now: duration", "def get_duration(period, aor, e):\n return 0.25 * period * np.sqrt(1 - e**2) / aor", "def timeScale(self) -> int:\n return int(1 / (1 - self.momentum))", "def interval(self) -> int:\n return pulumi.get(self, \"interval\")", "def get_duration(self):\n\n return self.endtime - self.starttime", "def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur", "def duration(self):\n # duration is the difference between the midpoints\n value = self.__end.get_midpoint() - self.__begin.get_midpoint()\n\n # vagueness of the duration is based on begin/end radius values\n vagueness = 0\n if self.__begin.get_radius() is not None:\n vagueness += self.__begin.get_radius()\n if self.__end.get_radius() is not None:\n vagueness += self.__end.get_radius()\n\n return sppasDuration(value, vagueness)", "def interval(self):\n return self._interval", "def interval(self):\n return self._interval", "def get_duration(self) -> int:\n return int( (self._frame_count / self._fps) * 1000 )", "def pulse_width(self) -> int:", "def to_length_secs(self):\n return (self.bpm / 60.0) / self.period", "def interval(self) -> int:\n return self._interval", "def length(self):\n\t\treturn datetime.now() - self.toggles[0]", "def _isBasisDuration(self):\n invdur = 1/self.getDurationNoDot()\n if invdur % 1 > 0:\n return False\n else:\n return True", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)", "def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0", "def duration(self) -> int:\n return 0", "def model_wave(time, period, width) -> float:\n cur_time = time % period\n half_width = width//2\n if cur_time < half_width:\n return float(cur_time) / half_width\n elif cur_time < width:\n return 1 - float(cur_time - half_width) / half_width\n else:\n return 0", "def block_period_consumption(self):\n return self._safe_value(VAR_BLOCKPERIODCONSUMPTION, float)", "def duration(self):\n if self._exc_end and self._inc_begin:\n return self._exc_end - self._inc_begin\n return 0", "def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] 
\n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur", "def mid(self):\n return LibraryFunctions.per(self.nums(), 0.5)", "def half_bit(self):\n return self._half_bit", "def timespan(self):\n center = self._half_temp_res + self._shifts * self._half_temp_res\n return Timespan(start=center - self._half_temp_res, end=center + self._half_temp_res)", "def get_framerate(self):\n return self._framerate", "def time_interval( self ):\n begin = self.begin; end = self.end\n if end - begin < 600*self.hour_switch:\n return 600\n if end - begin < 86400*self.day_switch:\n return 3600\n elif end - begin < 86400*7*self.week_switch:\n return 86400\n else:\n return 86400*7", "def period(self, value: int, /) -> None:", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def get_bend_length(self):\n # The length of a parametric curve x(t) y(t) is Integral[ sqrt( (dx/dt)^2 + (dy/dt)^2 ), {t,0,t0}], which for a Fresnel curve, simplifies to just t0\n if abs(self.turnby) <= np.pi / 2.0:\n return 2 * self.t * self.scale_factor\n else:\n return 2 * self.t * self.scale_factor + (\n 2 * np.pi * self.wgt.bend_radius\n ) * (self.circle_angle / (2 * np.pi))", "def freq_step(self) -> int:\n f = self.frequency\n return int(f.step)", "def get_bend_length(self):\n # The length of a parametric curve x(t) y(t) is Integral[ sqrt( (dx/dt)^2 + (dy/dt)^2 ), {t,0,t0}], which for a Fresnel curve, simplifies to just t0\n return 4 * self.t * self.scale_factor", "def wavelenstep(self):\n return self._wavelenstep", "def get_interval(self):\n return self._period", "def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)", "def window_duration(self) -> float:\n return min(self.max_duration, pytimeparse.parse(str(self.slo.config.duration)))", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def duration(self):\n return total_seconds(self.timestamp - self.start_timestamp)", "def interval(self):\n return self._base_interval", "def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.", "def duty_cycle(self):\n diff = np.diff(self.lc.time)\n t = np.median(diff)\n std = np.std(diff)\n mask = diff > (t + 3 * std)\n return (1 - np.sum(diff[mask]) / np.sum(diff))", "def _round(self, x):\n return x - x % self.minutes_per_step", "def get_duration(self):\n return self.duration", "def duration(self) -> datetime.timedelta:\n return self._duration", "def _unit_sec(self):\n return self.time_base / 60.0", "def duration(self):\n\n ended = time.time() if self.ended is None else self.ended\n return ended - self.started", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def span(self):\n return self.interval.span", "def step(self):\n if self._step is None:\n return self._n_fft // 2\n else:\n return self._step", "def interval(start, end):\n return seconds_since_midnight(end) - seconds_since_midnight(start)", "def duration(self):\n\t\tif self.status():\n\t\t\t# Currently on, return time since session was started\n\t\t\treturn self.length()\n\t\telse:\n\t\t\t# 
Otherwise return time until last bit of work\n\t\t\t# Check that this isn't an empty session\n\t\t\tif not self.toggles: return timedelta()\n\t\t\treturn self.toggles[-1] - self.toggles[0]" ]
[ "0.72750044", "0.70751405", "0.6361613", "0.6327403", "0.63129026", "0.6184249", "0.61641526", "0.61528426", "0.6123823", "0.6084359", "0.608264", "0.6048802", "0.6041668", "0.603156", "0.60129833", "0.60048217", "0.59908354", "0.5974596", "0.59592074", "0.5935029", "0.5935029", "0.5930361", "0.5929488", "0.58833724", "0.58503926", "0.5845789", "0.58188003", "0.5786204", "0.5786204", "0.57613546", "0.5755652", "0.5746171", "0.57420415", "0.57372344", "0.5716821", "0.57147187", "0.57147187", "0.5710001", "0.567076", "0.56595206", "0.5651754", "0.56454843", "0.56448805", "0.5607913", "0.56056136", "0.5604879", "0.5600097", "0.55978405", "0.5586817", "0.55808264", "0.5575326", "0.5558183", "0.5554388", "0.55521995", "0.55418867", "0.5529558", "0.5529558", "0.55264705", "0.5523201", "0.55189943", "0.55122554", "0.5498723", "0.5487052", "0.5486386", "0.5486386", "0.54801726", "0.5467838", "0.5466787", "0.5463358", "0.54625493", "0.5461147", "0.5451802", "0.5439537", "0.5438733", "0.5437152", "0.54283726", "0.54112595", "0.5408888", "0.54039425", "0.53836876", "0.5383591", "0.53799444", "0.5373502", "0.536776", "0.53534615", "0.53496474", "0.5345196", "0.53422976", "0.53400695", "0.533558", "0.5333661", "0.53322524", "0.5329688", "0.53267413", "0.5320663", "0.53169215", "0.5313308", "0.53069115", "0.530396", "0.53002083" ]
0.8561854
0
Go to a specific time (in seconds).
def go_to(self, time): half_dur = self.half_duration self.set_interval((time - half_dur, time + half_dur))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jump(self, seconds: float) -> None:\n if seconds < 0:\n raise ValueError(\"time can't go backwards\")\n self._virtual_base += seconds", "def set_time(self, sec):\n self.set_timed(round(sec * 10.0))", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(self, seconds):\n time.sleep(seconds)", "def go_then_wait(self, position, seconds):\n self.go(position)\n self.wait(seconds)", "def wait_up_to_second(second, time_template=None):\r\n current_second = datetime.datetime.now().second\r\n target_second = int(second)\r\n\r\n if current_second > target_second:\r\n sleep_time = 60 - (current_second - target_second)\r\n else:\r\n sleep_time = target_second - current_second\r\n\r\n if sleep_time:\r\n print('Waiting {} second(s)'.format(sleep_time))\r\n time.sleep(sleep_time)\r\n\r\n if time_template:\r\n return Utils.get_current_time(time_template)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def setTimeOut(self, sec):\n if (sec is not None) and (sec > 0):\n to = sec\n else:\n to = None\n self._simulator_.update(timeout=to)\n\n return", "def set_sleep_timer(self, option, time):\n params = [\n ('option', option),\n ('sleeptime', int(time)),\n ]\n\n self.get(COMMAND_UIC, 'SetSleepTimer', params)", "def set_sleep_time(self, time):\n self.sleep_time = time", "def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)", "def pause(seconds):\n time.sleep(seconds);", "def pause(seconds: float) -> None:\n time.sleep(cast(float, seconds))", "def set_timeout(self, seconds):\n self._timeout = seconds", "def timer_change(self):\n if self.time < 999:\n self.time += 1\n self.time_lcd.display(self.time)\n else:\n self.timer.stop()", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). 
The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def start_timer(self, secs):\r\n self.secs = secs\r\n self.countdownTimer.start(1000)", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.player.seek(value)", "def process_next_second(self):\n self.check_day_advance()\n rd = self.active_row\n if not rd:\n # Paused when we still have the 'after' method active.\n # Now that it is not active so we do nothing.\n return\n secs = int((datetime.now() - self.start_time).total_seconds())\n time = self.seconds_to_hms(secs)\n rd.time = time\n rd.label.config(text=time)\n rd.frame.after(1000, self.process_next_second)", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "def set_timer_time(self, time: int) -> None:\n current_mode = self.get_mode()\n # Defining the time for the Timer program only has an effect\n # when first the Timer program is selected.\n if current_mode != 'Timer':\n self.set_mode('Timer')\n self.logger.info(f\"Switching program from '{current_mode}' to \"\n \"'Timer'.\")\n\n return self.send(self.cmd.SET_TIMER_TIME, time)", "def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))", "def _change_time(self):\r\n msg = \"Notice! if you don't write hours the time\\nwill be calculated as seconds.\\nEnter new time:\"\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=msg)\r\n\r\n # new_time has to be a digit bigger than 0\r\n while not new_time:\r\n msg = \"Time must have a value. 
For example: 1 hours/ 1.5 hours/ 25 seconds\"\r\n messagebox.showerror(title=\"ERROR\", message=msg)\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=\"Enter new time:\")\r\n if new_time:\r\n self.time.set(\"time: \" + new_time + ''.join(' ' for _ in range(42 - len(new_time))))", "def sleep(secs=1.0):\n time.sleep(secs)", "def set_imeastime(self, time):\n self.itime = time", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def set_time(self, time):\n self._time = time", "def wait(self, seconds):\n time.sleep(seconds)", "def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")", "def jump_to(self):\n\n jt = dialog.JumpTo(self.timeFormat)\n\n if jt.exec_():\n if self.timeFormat == HHMMSS:\n newTime = int(time2seconds(jt.te.time().toString(HHMMSSZZZ)) * 1000)\n else:\n newTime = int(jt.te.value() * 1000)\n\n if self.playerType == VLC:\n if self.playMode == FFMPEG:\n frameDuration = Decimal(1000 / list(self.fps.values())[0])\n currentFrame = round(newTime / frameDuration)\n self.FFmpegGlobalFrame = currentFrame\n\n if self.second_player():\n currentFrame2 = round(newTime / frameDuration)\n self.FFmpegGlobalFrame2 = currentFrame2\n\n if self.FFmpegGlobalFrame > 0:\n self.FFmpegGlobalFrame -= 1\n if self.second_player() and self.FFmpegGlobalFrame2 > 0:\n self.FFmpegGlobalFrame2 -= 1\n self.ffmpegTimerOut()\n\n else: # play mode VLC\n\n if self.media_list.count() == 1:\n\n if newTime < self.mediaplayer.get_length():\n self.mediaplayer.set_time(newTime)\n if self.simultaneousMedia:\n self.mediaplayer2.set_time(int(self.mediaplayer.get_time() -\n self.pj[OBSERVATIONS][self.observationId]\n [TIME_OFFSET_SECOND_PLAYER] * 1000))\n\n else:\n QMessageBox.warning(self, programName,\n \"The indicated position is behind the end of media ({})\".\n format(seconds2time(self.mediaplayer.get_length() / 1000)))\n\n elif self.media_list.count() > 1:\n\n if newTime < sum(self.duration):\n\n # remember if player paused (go previous will start playing)\n flagPaused = self.mediaListPlayer.get_state() == vlc.State.Paused\n\n tot = 0\n for idx, d in enumerate(self.duration):\n if newTime >= tot and newTime < tot + d:\n self.mediaListPlayer.play_item_at_index(idx)\n\n # wait until media is played\n while True:\n if self.mediaListPlayer.get_state() in [vlc.State.Playing, vlc.State.Ended]:\n break\n\n if flagPaused:\n self.mediaListPlayer.pause()\n\n self.mediaplayer.set_time(newTime -\n sum(self.duration[0: self.media_list.index_of_item(\n self.mediaplayer.get_media())]))\n\n break\n tot += d\n else:\n QMessageBox.warning(self, programName,\n \"The indicated position is behind the total media duration ({})\".format(\n seconds2time(sum(self.duration) / 1000)))\n\n self.timer_out()\n self.timer_spectro_out()\n # self.timer_plot_data_out()", "async def time(self, ctx):\n global time_msg\n if timer > 0:\n if time_msg:\n await time_msg.delete()\n time_msg = None\n minutes = timer // 60\n seconds = timer % 60 if timer % 60 > 9 else '0' + str(timer % 60)\n time_msg = await ctx.send(embed=make_time_embed('work'))\n else:\n # await ctx.send(\"No timer active.\")\n await send_msg(ctx, \"❌\", \"No Timer Active\", color='error')\n await ctx.message.delete()", "def _make_time_pass(self, seconds, timeout, time_mock):\n time_mock.return_value = TIMEOUT_EPOCH\n timeout.start_connect()\n time_mock.return_value = TIMEOUT_EPOCH + seconds\n return timeout", "def run(seconds=1):\n time.sleep(seconds)\n print('Slept for ',seconds,' 
seconds')", "def set_time(self, time):\n with self.loopback_guard('time'):\n self.widget().setTime(time)", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.controller.row = self.rps * value", "def set_time_of_last_turn(time: int):\n store.time_of_last_turn = time", "def set_time_in_round(time: int):\n store.round_time = time", "def sleep(seconds):\n\n return Sleep(seconds)", "def elapseTime(self, progress, seconds):\n cursor = connection.cursor()\n newtime = progress.time - datetime.timedelta(seconds=seconds)\n cursor.execute(\"update script_scriptprogress set time = '%s' where id = %d\" %\n (newtime.strftime('%Y-%m-%d %H:%M:%S.%f'), progress.pk))\n try:\n session = ScriptSession.objects.get(connection=progress.connection, end_time=None)\n session.start_time = session.start_time - datetime.timedelta(seconds=seconds)\n session.save()\n except ScriptSession.DoesNotExist:\n pass", "def setTime(self,time):\n self.time = time", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def increment(time, seconds):\n assert valid_time(time)\n seconds += time.time_to_int()\n return int_to_time(seconds)", "def pause(*args, seconds: int=0, **kwargs)->None:\n pass", "def passTime(self, time: int) -> None:\n if self.delayed == True:\n self.delayed = None\n return\n\n if self.enabled == True:\n self.time -= time", "def add_5_seconds(time=datetime.datetime.now()):\n print(time.time())\n print((time + datetime.timedelta(0, 5)).time())", "def time_automation_listener(now):\n action()", "def sleep_until(self, time):\n raise NotImplementedError()", "def set_step_time(self, us):\n if us < 20: # 20 us is the shortest possible for esp8266\n self.step_time = 20\n else:\n self.step_time = us", "def clock_helper(total_seconds):\n seconds_in_minute = total_seconds % 60", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def __timeout(self, seconds, func, *args):\n t = threading.Timer(seconds, func, *args)\n self._timer = t\n t.start()", "def timeout(self, seconds: Optional[float]):\n if (seconds is not None) and (seconds < 0):\n raise ValueError(\"negative\")\n\n self._timeout = seconds", "def time(self, time: float) -> None:\n self._time = time", "def set_timed(self, dsec):\n assert 1 <= dsec <= 9999\n self.send(\"!T%04u\" % dsec)\n assert dsec == self.get_timed()", "def change_time(self, new_time):\r\n self.when = new_time", "def increment(time, seconds):\n assert valid_time(time)\n seconds += time_to_int(time)\n return int_to_time(seconds)", "def timer(self, seconds=80):\n try:\n if self.to_stop or seconds <= 0: # if the time is up or everyone already guessed.\n if not self.to_stop:\n self.server_socket.send('end;'.encode())\n self.to_stop = True\n if self.game_number == 3:\n ending_label = Label(self.root2,\n text=\"thank you for playing!\\nplease register again to\\nplay another game\",\n font=('bubble', 15), bg='white')\n ending_label.place(x=200, y=250)\n ending_label.after(5000, self.root2.destroy)\n else:\n next_round_label = Label(self.root2, text=\"next round starts in a bit\", font=('bubble', 15))\n next_round_label.pack(padx=50, pady=20, side=TOP)\n self.root2.destroy()\n else:\n timer_label = Label(self.root2, text=str(seconds), font=('bubble', 15), bg='white', width=5)\n timer_label.place(x=235, y=40)\n self.root2.after(1000, lambda: self.timer(seconds - 1))\n\n except:\n 
self.timer(0)\n # self.clear_screen()\n # next_round_label = Label(self.root2, text=\"next round starts in a bit\", font=('bubble', 15))\n # next_round_label.pack(padx=50, pady=20, side=TOP)\n # self.to_stop = True\n # self.root2.after(5000, self.restart())", "def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return", "async def paydaytime(self, ctx: commands.Context, seconds: int):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n await self.config.PAYDAY_TIME.set(seconds)\r\n else:\r\n await self.config.guild(guild).PAYDAY_TIME.set(seconds)\r\n await ctx.send(\r\n _(\"Value modified. At least {num} seconds must pass between each payday.\").format(\r\n num=seconds\r\n )\r\n )", "async def slottime(self, ctx: commands.Context, seconds: int):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n await self.config.SLOT_TIME.set(seconds)\r\n else:\r\n await self.config.guild(guild).SLOT_TIME.set(seconds)\r\n await ctx.send(_(\"Cooldown is now {num} seconds.\").format(num=seconds))", "def execute(self, time):\n self._exec_time -= time", "def timer(self):\n self.time_remaining -= 1\n if self.time_remaining > 0:\n Timer(1, self.timer).start()", "async def _wait_setheist(self, ctx, seconds: int):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.config.guild(guild).Theme()\r\n t_crew = theme[\"Crew\"]\r\n\r\n if seconds > 0:\r\n config[\"Wait\"] = seconds\r\n await self.thief.config.guild(guild).Config.set(config)\r\n time_fmt = self.thief.time_format(seconds)\r\n msg = \"Setting {} gather time to {}.\".format(t_crew, time_fmt)\r\n else:\r\n msg = \"Need a number higher than 0.\"\r\n await ctx.send(msg)", "def set_current_time(self, ttime):\n if not isinstance(ttime, Time):\n raise TypeError\n try:\n localtime = ttime.local_repr().split()\n timeSetCmd = 'date -s ' + localtime[3]\n #XXX: here seems a dirty quick way (os.system).\n os.system(timeSetCmd)\n yield WaitDBus(self.rtc.SetCurrentTime, int(ttime.value) )\n except Exception, ex:\n logger.exception(\"Exception : %s\", ex)\n raise", "def pause_game_timer(self):\n self._pause_start_time = datetime.datetime.now()", "def set_time(self, value: float):\n super().set_time(value)\n self.music.set_time(value)", "def sleep_sim_time(world, seconds, state_break=[False]):\n start = world.last_time if world.last_time else Time()\n remain = seconds\n\n while remain > 0 and not state_break[0]:\n yield From(trollius.sleep(0.1))\n now = world.last_time if world.last_time else Time()\n remain = seconds - float(now - start)", "def wait_for_seconds(self, seconds, sleeptime=0.001):\n self.listen_until_return(timeout=seconds, sleeptime=sleeptime)", "def settimeout(self, to):\r\n self._timeout = to", "def sleep_approx(self, seconds):\n upperbound = (seconds+0.2)*10000\n if (seconds >= 1):\n lowerbound = (seconds-0.2)*10000\n else:\n lowerbound = seconds*10000\n\n sleeptime = random.randint(lowerbound, upperbound)\n sleeptime = sleeptime/10000\n sleeptime = sleeptime*.8\n\n if (self.botspeed == 1.25):\n sleeptime = sleeptime*.75\n elif (self.botspeed == 1.5):\n sleeptime = sleeptime*.5\n sleep(sleeptime)", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "def time(self, start_time):\n \n TIME_LIST.append((time.time() - start_time))\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def timer(start_time=None):\r\n if not start_time:\r\n 
start_time = datetime.now()\r\n return start_time\r\n elif start_time:\r\n thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)\r\n tmin, tsec = divmod(temp_sec, 60)\r\n print('Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))", "def seek(self, time: int):\n self._select_interface(self._rc_seek, self._http_seek, time)", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def time_of_day(self, value):\n self.time_of_day_value = value", "def time(self, time):\n # type: (int) -> None\n\n if time is not None:\n if not isinstance(time, int):\n raise TypeError(\"Invalid type for `time`, type has to be `int`\")\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def set_search_time(self, play_time):\n self.get(COMMAND_UIC, 'SetSearchTime', [('playtime', int(play_time))])", "def setTimepoint(self, tp):\n\t\tpass", "def clock( current_time ):\n global D\n number_of_seconds_since_start = int(current_time - D.start_time)\n if D.last_time_printed < number_of_seconds_since_start:\n print \"[Brains] [State:\", D.STATE, \"] time is\", \\\n number_of_seconds_since_start, \"seconds since starting...\"\n D.last_time_printed = number_of_seconds_since_start", "def set_time(self, value: float):\n raise NotImplementedError()", "def sleep(self, amount: float):\n time.sleep(amount)", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n self.speak(\"the current time is\")\r\n self.speak(time)", "def timertick(self):\r\n if self.secs > 120:\r\n self.countdownString.setText(\"%d min.\" % (self.secs / 60 + 1)) # e.g., 5 min\r\n else:\r\n self.countdownString.setText(\"%02d:%02d\" % (self.secs / 60, self.secs % 60)) # e.g., 01:36\r\n\r\n # Flash the screen when there is 1 minute and when there is 30 seconds left\r\n if self.secs == 60 or self.secs == 30:\r\n self.start_flash_timer()\r\n\r\n # In the last 10 seconds, display countdown in red\r\n if self.secs <= 10:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : red; }\")\r\n\r\n self.secs -= 1\r\n if self.secs < 0:\r\n self.stop_timer()\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")", "def sleep(seconds: typing.Union[float, int]):\n if seconds == 0:\n yield\n elif seconds == inf:\n yield from sleepinf()\n else:\n end = monotonic() + seconds\n while end >= monotonic():\n yield", "def seconds(self, seconds):\n\n self._seconds = seconds", "def play_seconds(self,segundos):\n suma_duracion = 0\n posicion_actual = self.tiempos.posicion_actual()\n while True:\n try:\n if not self.tiempos.esta_vacia():\n self._reproducir(self.tiempos.actual())\n suma_duracion += self.tiempos.actual().obtener_duracion()\n if suma_duracion >= segundos:\n break\n self.tiempos.siguiente()\n except StopIteration:\n break\n self.tiempos.volver_al_inicio()\n self.tiempos.actualizar(posicion_actual)", "async def countdown(self, ctx, seconds: int):\r\n\r\n if seconds > 20:\r\n await ctx.send('Error: must be 20 seconds or lower')\r\n return\r\n\r\n if self.chk is True:\r\n await ctx.send('Timer is already running')\r\n return\r\n else:\r\n self.chk = True\r\n\r\n a = await ctx.send('```' +'Server online in ' + str(seconds) + ' seconds' + '```')\r\n\r\n while seconds > 0:\r\n time.sleep(1)\r\n seconds -= 1\r\n 
print(seconds)\r\n await a.edit(content='```' +'Server online in ' + str(seconds) + ' seconds' + '```')\r\n await a.edit(content='```diff' + u\"\\u000A\" + '+ Server is online' + u\"\\u000A\" + '```')\r\n time.sleep(5)\r\n await a.delete()\r\n self.chk = False", "def GAME_TIME_ADVANCE(dt):" ]
[ "0.6700288", "0.66491824", "0.6632575", "0.6577623", "0.65731984", "0.629269", "0.62756586", "0.62237495", "0.6199625", "0.6169536", "0.6169536", "0.6143821", "0.6137382", "0.6088691", "0.60664135", "0.602092", "0.6020566", "0.60139376", "0.6013062", "0.593964", "0.5881879", "0.58608747", "0.58516955", "0.5849226", "0.5840093", "0.58319664", "0.58282197", "0.5811083", "0.5795957", "0.5794308", "0.5784854", "0.5784347", "0.57687265", "0.5766624", "0.57661366", "0.5761149", "0.57551837", "0.574427", "0.57223284", "0.5718997", "0.5718177", "0.57143056", "0.5699782", "0.5684906", "0.5684833", "0.5673695", "0.5664138", "0.5653953", "0.5652787", "0.5645679", "0.5639457", "0.5632435", "0.5622845", "0.5600034", "0.5569554", "0.55659777", "0.5552948", "0.5548454", "0.5546823", "0.5530638", "0.5529701", "0.5524921", "0.5521198", "0.5504834", "0.55037546", "0.55001605", "0.550011", "0.5489174", "0.54821223", "0.5478976", "0.54773176", "0.5476522", "0.5469664", "0.5459119", "0.5457307", "0.5456021", "0.5440072", "0.54394376", "0.54377025", "0.5437549", "0.5434697", "0.5432613", "0.54320294", "0.5421499", "0.5421499", "0.5421499", "0.5421499", "0.5421499", "0.5417522", "0.5405239", "0.5403401", "0.5401109", "0.539493", "0.5393217", "0.53925055", "0.53850347", "0.5379596", "0.53786516", "0.53750074", "0.53677535" ]
0.6876498
0
Shift the interval by a given delay (in seconds).
def shift(self, delay): self.go_to(self.time + delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift(self, delay):\n self.__begin.shift(delay)\n self.__end.shift(delay)", "def delay(interval):\n time.sleep(interval / 1000.0)", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def delay():\r\n time.sleep(2)", "async def sleep(cls, delay: float) -> None:", "def _delay(self):\n if not self.next_scheduled:\n self.next_scheduled = self.clock_func() + self.interval\n return\n while True:\n current = self.clock_func()\n if current >= self.next_scheduled:\n extratime = current - self.next_scheduled\n self.next_scheduled = current + self.interval - extratime\n return\n delay_amt = self.next_scheduled - current\n #Call for 0, because that might be meaningful to sleep_func.\n if self.allow_negative_sleep or delay_amt >= 0: \n self.sleep_func(self.next_scheduled - current)", "def delay(ms: int, /) -> None:", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def set_delay(delay):\r\n inst.write(\"PULS:DEL %f\" %(delay))", "def delay(self, delay=None):\n if delay is None:\n return self._delayvalue\n self._delayvalue = int(delay)", "def delay(self, dt, keep_length=True):\n x = delay(self.fs, self.in_time, dt, keep_length=keep_length)\n return self.from_time(self.fs, x)", "def delay(self, distance, seconds):\n delay = distance/seconds\n return delay", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def delay(dt):\n return dt.total_seconds()", "def __delay(msecs):\n time.sleep(msecs / 1000)", "def set_delay_ns(delay):\r\n inst.write(\"PULS:DEL %f NS\" %(delay))", "def _delay(self, n=None):", "def _delay(self, delay=None):\n return self.screen.delay(delay)", "def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)", "def _stop_after(delay):\n timer = CFRunLoopTimerCreate(\n None, # allocator\n CFAbsoluteTimeGetCurrent() + delay, # fireDate\n 0, # interval\n 0, # flags\n 0, # order\n _c_stop_callback,\n None,\n )\n CFRunLoopAddTimer(\n CFRunLoopGetMain(),\n timer,\n kCFRunLoopCommonModes,\n )", "def fake_delay(self, ha_delay):\n hass_now = dt_util.utcnow()\n shifted_time = hass_now + timedelta(seconds=ha_delay)\n self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})", "def setDelay(self, channel, delay, unitCode=0):\n resp = self.XAPCommand('DELAY', channel, delay, unitCode=unitCode)\n return float(resp)", "def delay(self, interval=None):\n if self._start:\n raise OperationFailError(\"Task is already running.\")\n\n if interval is None:\n self._delay = None\n else:\n if isinstance(interval, timedelta):\n self._start_at = None # Use delay instead of start time.\n self._delay = interval\n elif isinstance(interval, int):\n self._start_at = None # Use delay instead of start time.\n self._delay = timedelta(seconds=interval)\n else:\n time_pattern = r'^([0-1]?\\d|[2][0-3]):[0-5]?\\d:[0-5]?\\d$'\n if re.match(time_pattern, interval):\n self._start_at = None # Use delay instead of start time.\n tsp = interval.split(\":\")\n self._delay = timedelta(hours=int(tsp[0]),\n minutes=int(tsp[1]),\n seconds=int(tsp[2]))\n else:\n raise TimeFormatError\n\n return self", "def run_after_delay(delay_ms: float, callback: Callable[[], None]):\n heapq.heappush(\n _sorted_scheduled_events,\n _ScheduledEvent(\n time=pygame.time.get_ticks() + delay_ms, callback=callback\n ),\n )", "def _delay(self, delay):\n self.cv.after(delay)", "def moving_delay(self, duration):\n start_time = monotonic()\n while (monotonic() - start_time)*1e3 < duration:\n if self.check_movement() == False:\n if 
self.move_state != MOV_ROTATE: # rotate is only valid movement\n print(\"Stopping in moving_delay()\")\n self.move_brake()", "def id_sleep(x, delay=0):\n sleep(delay)\n return x", "def sleep(seconds):\n\n return Sleep(seconds)", "def delay(dt):\n return dt.days * 86400 + dt.seconds + 1e-6 * dt.microseconds", "def udelay(us: int, /) -> None:", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def set_reg_to_delay_timer(self):\n register = (self.opcode & 0xFFF) >> 8\n self.registers[register] = self.delay_timer\n logger.info(\"Set register V{} to delay timer {}\".format(\n register,\n self.registers[register]))", "def sleep_after(self, seconds):\n if self._firmware >= 264:\n self.write(self.ASCII_ESC, '8', seconds, seconds >> 8)\n else:\n self.write(self.ASCII_ESC, '8', seconds)", "def randomized_sleep(duration):\n sleep(duration + duration * random.random())", "def setdelay(self):\n delay=self.inputdelay.getEntry()\n cmd=\"setDelay(\"+self.board+','+self.inpedge+','+delay+')'\n self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")", "def setTimeDelay(*args):\n args[0].TimeState.TimeDelay.time_delay = args[1]", "def sleep(cls, delay, session):\n print(\"Start sleep for [\", delay, \"]s.[\", session['ip_addr'], \"]\")\n cls.log(1, \"Start sleep for [\", delay,\n \"]s.[\", session['ip_addr'], \"]\")\n time.sleep(delay)\n print(\"End sleep of [\", delay, \"]s.[\", session['ip_addr'], \"]\")\n cls.log(1, \"End sleep of [\",\n delay, \"]s.[\", session['ip_addr'], \"]\")", "def delay(delay=0.):\n def wrap(f):\n @wraps(f)\n def delayed(*args, **kwargs):\n timer = threading.Timer(delay, f, args=args, kwargs=kwargs)\n timer.start()\n return delayed\n return wrap", "def sleep_for(self, duration):\n raise NotImplementedError()", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def delay(self, length):\n self.log_info(f\"Browser.delay: Sleeping for {length} seconds\")\n return sleep(length)", "def restart(self, delay=None):\n if self._timer:\n self._timer.cancel()\n if not delay:\n delay = self.delay\n self._timer = Timer(delay, self.callback)\n self._timer.daemon = True\n self._timer.start()", "def sleep(self, seconds):\n time.sleep(seconds)", "def schedule(self, sleep_time, delay):\n self.sleep_time = sleep_time\n self.delay = delay\n self.thread = Thread(target=self.run)\n self.thread.start()", "def click_periodically(self, pos=(0, 0), interval=1, duration=1):\n time_passed = 0\n while time_passed < duration/interval:\n self.click(pos=pos)\n sleep(interval)\n time_passed += interval", "def setDelay(self, *args):\n return _libsbml.Event_setDelay(self, *args)", "def adjustableDelay(delay, ants) :\n antlist = helpers.makeList(ants)\n s.adjustableDelay(delay, antlist)", "def sleep(self, duration):\n active_item = self.stack.pop()\n self.sleeping.sleep(active_item, duration)", "def delayed(data, delay, K=0):\n ts = data.index\n\n if K and data.iloc[0]:\n ts_, data = extend_data(data, ts, delay, max(K, 0.0))\n else:\n ts_ = ts + delay\n\n return pd.Series(np.interp(ts, ts_, data), index=ts)", "def move(self, dt):\n dt = dt", "def circdelay(self, dt):\n x = self.in_time\n n = int(round(dt * self.fs))\n shifted = np.roll(x, n, axis=-1)\n\n return self.from_time(self.fs, shifted)", "def delay(self, delayTime=0.):\n bytes = int(round(delayTime * self.f_s))\n self.data_source.seek(bytes)", "def delay_align(x, y, delay):\n if delay >= 0:\n x = x[delay:]\n y = y[:-delay or None]\n else:\n x = x[:delay]\n y = 
y[abs(delay):]\n return x, y", "def RandomDelay():\r\n sleep(random())", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def TimeDelay (self, delay, cancel = None):\n if self.Disposed:\n return RaisedFuture (FutureCanceled ('Core is stopped'))\n\n return self.timer.Await (time () + delay, cancel)", "def _timeout(delay):\n loop = asyncio.get_running_loop()\n return _Timeout(loop.time() + delay if delay is not None else None)", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "def sleep(self, *args, seconds):\n return deferLater(reactor, seconds, lambda: None)", "def make_timeout(delay_seconds):\n if delay_seconds is None:\n return None\n return time.time() + delay_seconds", "def set_delay_timer_to_reg(self):\n register = (self.opcode & 0xFFF) >> 8\n self.delay_timer = self.registers[register]\n\n logger.info(\"Set delay timer to register V{} = {}\".format(\n register,\n self.registers[register]))", "def delay(self):\n _delay = self.config.get('delay', {\"hours\":0, \"minutes\":10, \"seconds\":0})\n return datetime.timedelta(\n hours=_delay.get('hours', 0), minutes=_delay.get('minutes', 0),\n seconds=_delay.get('seconds', 0))", "def delay(delay, ant, subarray=DEFAULT) :\n if ant < 1 or ant > 23:\n raise Exception, \"Antenna number(%d) must be in range [1-23]\" %ant\n mp = \"Control.Antenna%d.delayOffset3mmRx\" %ant\n try :\n oldDelay = queryDouble(mp, retries=0)\n if False: print \"Previous value of delay for antenna %d was %7.3f nsec\" \\\n %(ant,oldDelay)\n except: pass \n multiSubarray('delay', subarray, delay, ant)", "def test_delay():\n time1 = time.time()\n res = delay(1)(_dummy_func)(2)\n time2 = time.time()\n assert res == (2, 4)\n assert time2 - time1 >= 1", "def call_later(self, delay, callback):\n reactor.callLater(delay, callback)", "def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield", "def _pause(self, delay):\n start = time.time()\n end = start + delay\n while time.time() < end:\n yield", "def sleep(self, seconds):\n\n # We schedule an alarm signal for x=seconds out in the future.\n # noinspection PyUnusedLocal\n def handle_alarm(signal_num, frame):\n pass\n\n signal.signal(signal.SIGALRM, handle_alarm)\n signal.alarm(seconds)\n\n # Wait for either the alarm to go off or for us to receive a SIGINT.\n signal.pause()\n\n # Remove the alarm if it is still pending.\n signal.alarm(0)", "def delayExec(self, delay_ms):\n if not 0 < delay_ms < 30000:\n raise(ValueError('`delay` [{0}] must be between 0 and 40000 ms'\n ''.format(delay_ms)))\n cmd_string = 'M{0}'.format(delay_ms)\n self.cmd_chain += cmd_string", "def Time_Delay():\n Delay=[1]\n return Delay", "async def twisted_sleep(delay, twisted_reactor):\n deferred: Deferred[None] = Deferred()\n twisted_reactor.callLater(delay, deferred.callback, None)\n await deferred", "def sleep(self, item, duration):\n if not isinstance(duration, datetime.timedelta):\n raise TypeError(\n \"timestamp must be a timedelta object (given '{}')\"\n .format(str(type(duration))))\n\n wake_at = datetime.datetime.now() + duration\n self.wake_at(item, wake_at)", "def winner_delay(self, winner_delay):\n\n self._winner_delay = winner_delay", "def call_later(self, delay, callback):\n reactor.callFromThread(reactor.callLater, delay, callback)", "def sleep(secs=1.0):\n time.sleep(secs)", "async def module_delay_event(self, delay: Union[int, float], event: str, ctx: Context, *args, **kwargs):\n self.logger.debug(f\"Delaying event {event} for {delay} seconds\")\n 
await asyncio.sleep(delay)\n await self.module_send_event(event, ctx, *args, **kwargs)", "def sleep(self, amount: float):\n time.sleep(amount)", "def update():\n\twith locked_level(create=False) as level:\n\t\tif level is None:\n\t\t\t# There is no timer\n\t\t\tlogging.info('There is currently no timer')\n\t\t\treturn\n\t\tdelay = pickle.load(level)\n\t\tif delay.source_position <= DIM_FLOOR:\n\t\t\tos.remove(LEVEL_FILE)\n\t\t\treturn\n\t\tnow = datetime.datetime.utcnow()\n\t\tremaining = delay.when - now\n\t\tif remaining >= datetime.timedelta(minutes=1):\n\t\t\tlogging.info('Aborting because the timer still has: %s', remaining)\n\t\t\treturn\n\t\tif remaining.total_seconds() > 0:\n\t\t\tlogging.info('Sleeping because the timer still has: %s', remaining)\n\t\t\tfcntl.lockf(level, fcntl.LOCK_UN)\n\t\t\ttime.sleep(remaining.total_seconds())\n\t\t\tfcntl.lockf(level, fcntl.LOCK_EX)\n\n\t\tif delay.expected_positions:\n\t\t\t# There shouldn't be any expected positions left, so something has interrupted the dimmer\n\t\t\tlogging.info('Expected positions were not consumed, so reverting from %d to %d',\n\t\t\t\tdelay.target_position, delay.source_position)\n\t\t\tposition = delay.source_position\n\n\t\tposition_increment = min(\n\t\t\tdelay.target_position - DIM_FLOOR,\n\t\t\tmax(\n\t\t\t\t1,\n\t\t\t\tint((100 - DIM_FLOOR) * DIM_DURATION_MAX_INCREMENT.total_seconds() / DIM_DURATION_TOTAL.total_seconds())\n\t\t\t)\n\t\t)\n\t\tif position_increment <= 0:\n\t\t\treturn\n\t\tposition = delay.target_position - position_increment\n\t\t# This will be near DIM_DURATION_MAX_INCREMENT but accounts for rounding\n\t\tramp_time = datetime.timedelta(seconds=int(position_increment / (100 - DIM_FLOOR) * DIM_DURATION_TOTAL.total_seconds()))\n\n\t\tif delay.target_position > DIM_FLOOR:\n\t\t\t# The switch reports the old and then the new position when it dims\n\t\t\tnext_delay = delay_record.Delay(now + ramp_time, delay.target_position, position, [delay.target_position, position])\n\t\t\texpect(level, next_delay)\n\t\telse:\n\t\t\tos.remove(LEVEL_FILE)\n\n\tlogging.info('Dimming to %d over %s', position, ramp_time)\n\twith ozwd_util.get_thrift_client() as thrift_client, (\n\t\t\tozwd_util.get_stompy_client()) as stompy_client:\n\t\tozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, ramp_time.total_seconds(), thrift_client)\n\t\ttry:\n\t\t\tozwd_set_value.set_value_connected(DIMMER_VALUE.value, position, thrift_client)\n\t\tfinally:\n\t\t\tozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, 2, thrift_client)", "def delay(self, interval=None):\n for task in self._tasks:\n task.delay(interval)\n\n return self", "def pause(seconds):\n time.sleep(seconds);", "def delayToNextPacket(self):\n delay = -(1.0 / (self.mPacketsPerSecond)) * np.log(1 - np.random.uniform())\n # exponential distribution in seconds\n return round(delay * Constants.TICKS_PER_SECOND)\n #return (Math.round(delay * Main.TICKS_PER_SECOND))", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'", "def sleep(self, seconds):\n ten_ms_steps = int(round(seconds * 100))\n for _i in xrange(0,ten_ms_steps):\n if self._sequence_stop_signal:\n break\n sleep(0.01)", "def _DelayedStop(self, delay):\n blue.synchro.SleepSim(delay)\n if self.playerEffect is not None:\n self.RemoveFromScene(self.playerEffect)\n self.playerEffect = None\n if self.gfx is not None:\n ShipEffect.Stop(self)", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n 
time.sleep(max(-diff/2.0, 0.01))", "def delay_s(\r\n self,\r\n callable,\r\n timeout = None,\r\n immediately = True,\r\n verify = False,\r\n wakeup = True\r\n ):\r\n\r\n # creates the next element tuple that is going to be scheduled according\r\n # to the definition provided to the method\r\n next = (callable, timeout, immediately, verify)\r\n\r\n # acquires the lock that controls the access to the delayed for next\r\n # tick list and then adds the callable to such list, please note that\r\n # the delayed (next) list is only going to be joined/merged with delay\r\n # operations and list on the next tick (through the merge operation)\r\n self._delayed_l.acquire()\r\n try: self._delayed_n.append(next)\r\n finally: self._delayed_l.release()\r\n\r\n # in case the wakeup flag is set this delay operation should have\r\n # been called from a different thread and the event loop should\r\n # awaken as soon as possible to handle the event\r\n if wakeup: self.wakeup()", "async def tormentdelay(self, ctx, delay : int = None):\r\n\t\t\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\r\n\t\t# Only allow owner to change server stats\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\r\n\t\tif delay == None:\r\n\t\t\tif self.waitBetween == 1:\r\n\t\t\t\tawait ctx.message.author.send('Current torment delay is *1 second.*')\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.message.author.send('Current torment delay is *{} seconds.*'.format(self.waitBetween))\r\n\t\t\treturn\r\n\t\t\r\n\t\ttry:\r\n\t\t\tdelay = int(delay)\r\n\t\texcept Exception:\r\n\t\t\tawait ctx.message.author.send('Delay must be an int.')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif delay < 1:\r\n\t\t\tawait ctx.message.author.send('Delay must be at least *1 second*.')\r\n\t\t\treturn\r\n\t\t\r\n\t\tself.waitBetween = delay\r\n\t\tif self.waitBetween == 1:\r\n\t\t\tawait ctx.message.author.send('Current torment delay is now *1 second.*')\r\n\t\telse:\r\n\t\t\tawait ctx.message.author.send('Current torment delay is now *{} seconds.*'.format(self.waitBetween))", "def delay_requests(self, seconds: float):\n delta_since_last_send = time.time() - self._last_send\n self._last_send = (time.time() - delta_since_last_send) + seconds", "def goRight(self, seconds):\n self.change_x = 5", "async def twisted_sleep(delay: float, twisted_reactor: \"SygnalReactor\") -> None:\n deferred: Deferred[None] = Deferred()\n twisted_reactor.callLater(delay, deferred.callback, None)\n await deferred", "def delay(fs, x, dt, keep_length=True, axis=-1):\n dn = int(round(dt * fs))\n x = np.asarray(x)\n n = x.shape[axis]\n\n if dn > 0:\n # delay\n zeros_shape = list(x.shape)\n zeros_shape[axis] = dn\n zeros = np.zeros(zeros_shape)\n\n delayed = np.concatenate((zeros, x), axis=axis)\n\n if keep_length:\n # slice that takes 0 to ntaps samples along axis\n slc = [slice(None)] * len(x.shape)\n slc[axis] = slice(0, n)\n delayed = delayed[tuple(slc)]\n\n elif dn < 0:\n # pre-delay\n slc = [slice(None)] * len(x.shape)\n slc[axis] = slice(-dn, n)\n delayed = x[tuple(slc)]\n\n if keep_length:\n zeros_shape = list(x.shape)\n zeros_shape[axis] = -dn\n zeros = np.zeros(zeros_shape)\n delayed = np.concatenate((delayed, zeros), axis=axis)\n else:\n # no delay\n delayed = x\n\n return delayed", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def _call_later(self, delay, callback):\n self.io_loop.call_later(delay, callback)", "def 
sample_delay(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_sample_delay(self, *args, **kwargs)", "def wait(delay=2):\n time.sleep(delay)", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def pause(seconds: float) -> None:\n time.sleep(cast(float, seconds))", "def delay_func(delay, func, arg=None):\n trig = Trig()\n trig_func = TrigFunc(trig, func, arg=arg)\n trig.play(delay=delay)\n return trig, trig_func" ]
[ "0.7768829", "0.7139381", "0.6876155", "0.6494857", "0.649145", "0.64053226", "0.6397227", "0.6366633", "0.63602847", "0.6356303", "0.6325486", "0.6317313", "0.6315866", "0.62965715", "0.62463284", "0.6218299", "0.61278665", "0.6077595", "0.60770804", "0.60277456", "0.60083115", "0.599665", "0.5984027", "0.5964296", "0.5938278", "0.5935404", "0.5934932", "0.59299296", "0.5928673", "0.5907131", "0.5890198", "0.5871083", "0.5871083", "0.5870603", "0.58590853", "0.5834942", "0.5828275", "0.58256567", "0.58062094", "0.57974076", "0.5773798", "0.5773056", "0.5738618", "0.5734486", "0.5728689", "0.5717382", "0.57113034", "0.5703898", "0.56921184", "0.56746197", "0.5664808", "0.5657196", "0.564802", "0.56392443", "0.56248343", "0.5612734", "0.56053954", "0.55900747", "0.55858105", "0.5560623", "0.5560623", "0.5544584", "0.5509011", "0.54944533", "0.54879135", "0.5486302", "0.5483124", "0.54830974", "0.54830974", "0.5475939", "0.54732347", "0.54621106", "0.5438914", "0.54379034", "0.54377604", "0.5437015", "0.5431211", "0.5408105", "0.5397624", "0.53946865", "0.5391568", "0.53871316", "0.53747463", "0.5372841", "0.53688294", "0.5365051", "0.5363692", "0.5353988", "0.5350962", "0.53504026", "0.533435", "0.5333725", "0.53315425", "0.53103167", "0.53103083", "0.5288324", "0.5269724", "0.526843", "0.5267718", "0.5251764" ]
0.82353926
0
Go to the start of the recording.
def go_to_start(self): self.go_to(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recording_start(self):\n self._post('recording/start')", "def start_recording(self):\n\n\t\tself.eyetribe.start_recording()\n\t\tself.recording = True", "def start(self):\n self.recording = True", "def start(self):\n self.events[0].record()\n self.cur = 1", "def start_recording(self):\n self.start_recording_and_saving_data(self.eeg_file_path)", "def start_recording(self):\n self.flag_event.set()\n self.statusBar().showMessage('Starting the Recording')\n startThread = threading.Thread(name='record', target=self.record)\n startThread.start()\n self.statusBar().showMessage('Recording')", "def start_single_record(self):\r\n self.autoRecordWidget.set_recording(True)\r\n self.autoRecordWidget.set_display_message()\r\n self.autoRecordWidget.start_timer(self.timeUntilEnd)\r\n if self.controller.record_talk_id(self.singleID):\r\n log.debug(\"Auto-recording for the current talk started.\")\r\n self.recorded = True\r\n self.beforeEndTimer.setInterval((self.timeUntilEnd + 1) * 1000)\r\n self.beforeEndTimer.setSingleShot(True)\r\n self.beforeEndTimer.start()", "def __macroStartRecording(self):\n self.activeWindow().macroRecordingStart()", "def startRecording(self):\n self.currentRecordingPath = osp.join(self._path, \"rec{}\".format(datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")))\n makedirs(self.currentRecordingPath)\n self._initFileHandler()\n self._recordInitialMdibState()\n self._bindToObservables()\n self._logger.info(\"Recording started into directory {}\".format(self.currentRecordingPath))", "def start_recording(self) -> None:\n # Clear the internal ring buffer.\n self._buffer.fill(0)\n\n # Start recording using sounddevice's InputStream.\n self._stream.start()", "def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def start_recording(self):\n\n self.stop_recording()\n self.recorder_thread = RecorderThread()\n self.recorder_thread.start()", "def capture_start(self):\n pass", "def start(self, start):\n\n self._start = start", "def start(self, start):\n\n self._start = start", "def start(self, start):\n\n self._start = start", "def start(self, start):\n\n self._start = start", "def handle_record_begin():\n LOG.info(\"Begin Recording...\")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('recognizer_loop:record_begin', context=context))", "def start_loop_recording(self, track):\n pass", "def start_btn_press(self):\n video_name_prefix = 'record_cam_'\n curr_btn_text = self.ids.camera_type_btn.text\n cam_type = None\n if curr_btn_text == 'LEFT camera':\n cam_type = CameraType.LEFT\n video_name_prefix += 'left_'\n elif curr_btn_text == 'RIGHT camera':\n cam_type = CameraType.RIGHT\n video_name_prefix += 'right_'\n elif curr_btn_text == 'RGB camera':\n cam_type = CameraType.RGB\n video_name_prefix += 'rgb_'\n else:\n logger.fatal('Camera type is not supported.')\n exit(1)\n\n saving_dir = self.ids.saving_dir_textbox.text\n if saving_dir == '':\n msg = 'Recording saving path has not been specified.'\n logger.error(msg)\n show_notification(MsgType.ERROR, msg)\n return\n\n video_name_prefix += '{}'.format(int(time.time()))\n\n app = App.get_running_app()\n app.start_recording(cam_type, saving_dir, video_name_prefix)\n\n self.ids.lower_section.remove_widget(self.ids.lower_section.children[0])\n self.ids.lower_section.add_widget(self._stop_section)", "def start_recording(self, *args, **kwargs):\n return self.recorder.start_recording(*args, **kwargs)", "def start_record(cr):\r\n \"\"\"Emulate the 
keyboard \"\"\"\r\n _player = input_playback.InputPlayback()\r\n _player.emulate(input_type='keyboard')\r\n _player.find_connected_inputs()\r\n \"\"\"To get list of UI elements\"\"\"\r\n ui = ui_utils.UI_Handler()\r\n ui.start_ui_root(cr)\r\n list=ui.get_name_role_list()\r\n \"\"\"To Open status tray and click on Screen Recording option\"\"\"\r\n logging.info(\"Opening status tray\")\r\n ui.doDefault_on_obj(STATUS_TRAY_REGEXP, True, role='button')\r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj('/Close/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen capture/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen record/i', True,role='toggleButton')\r\n ui.doDefault_on_obj('/Record full screen/i', True,role='toggleButton')\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_enter')\r\n \"\"\"To open Chrome Page\"\"\"\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_ctrl+t')\r\n time.sleep(WAIT)\r\n logging.info(\"Recording Started\")\r\n return ui", "def start_record_trajectory(self):\r\n return self._arm.start_record_trajectory()", "def testStartRecord(self):\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_START )\n self.mgr.sendGoProCommand.assert_called_with(mavutil.mavlink.GOPRO_COMMAND_SHUTTER, (1, 0, 0, 0))", "def start(self):\n self._state = 'Started'", "def trigger_recording_started(_):\n log_threadsafe(obs.LOG_DEBUG, 'Recording started')\n \n global state\n with mutex_state_sending:\n state = int(time.time())\n pipe_send_state()", "def begin_turn(self):\n pass", "def start(self):\n self.reset()\n self.on_start()", "def enter_play(self, recording=None, offset=0):\n self.app.pingWebSessions()\n\n # look up the next recording\n if not recording:\n if self.queue.isEmpty():\n # if no recording, transfer to conference\n self.agi.finish()\n self.app.transferToConference(self)\n current = \"weareforests-audio/silent\"\n else:\n current = self.queue.pop()\n else:\n current = recording\n\n print \"Playing recording: %s, offset %d\" % (current, offset)\n d = self.agi.streamFile(str(current), chr(self.digit), offset)\n def audioDone(r):\n digit, offset = r\n if digit == self.digit:\n self.setStateAfterSample(\"recording\", \"weareforests-audio/record\", current, offset)\n else:\n self.state.set(\"play\")\n d.addCallback(audioDone)\n d.addErrback(self.catchHangup)", "def start(self):\n\n # Call the protected _turn method to start the game\n self._turn()", "def start_recording(self, capture_duration):\n self.camera = cv2.VideoCapture(0)\n\n # Create timer to enforce how long the camera records for\n self.start_time = time.time()\n self.capture_duration = capture_duration\n\n self.timer.start(0, self)", "def start(self) -> None:", "def start(self) -> None:", "def start(self):\n self.gripper_io.set_signal_value(\"go\", True)", "def testStartRecordAlreadyRecording(self):\n self.mgr.isRecording = True\n self.mgr.captureMode = CAPTURE_MODE_VIDEO\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_START )\n self.assertTrue(self.mgr.isRecording)\n assert not self.mgr.sendGoProCommand.called\n assert not self.mgr.sendState.called", "def on_record(self):\n self.record_label.set(\"Recording...\")\n\n recorder = RecorderThread(self.after_record)\n recorder.start()", "def _start_rec_by_subject(self, subject):\r\n self.recording = True\r\n time = time_to_int(self.time.get())\r\n if subject == 'Audio':\r\n name = short_name(self.audio_name.get())\r\n 
self.recordings.append(record_audio(name=name, max_time=time))\r\n else:\r\n name = short_name(self.video_name.get())\r\n self.recordings.append(record_video(name=name, max_time=time))\r\n self._recoding_beep()\r\n print(\"Started recording \" + subject)", "def StartRecording( self ):\r\n\r\n self._socket.write( 'B' ) \r\n \r\n return self.GetServerResponse()", "def set_to_start(self) -> None:\n start_config = self._path_points[0]\n self._mobile.set_2d_pose(start_config[:3])\n self._path_done = False", "def start(self):\n self.start_time = time.time()", "def start_record_microphone(self):\n if not os.path.exists(self.audio_file_folder):\n os.makedirs(self.audio_file_folder)\n\n self.microphone_handler.start_recording()\n self.current_session.put(self.microphone_handler.current_session)", "def phone_start(self) -> None:", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start(self):\r\n pass", "def go_to_record_home(self, obj_id):\n url = self.cumulusci.org.lightning_base_url\n url = \"{}/lightning/r/{}/view\".format(url, obj_id)\n self.selenium.go_to(url)\n self.wait_until_loading_is_complete(lex_locators[\"actions\"])", "def start_of_game(self):\n pass", "def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n # Print start message\n print(\">> Lab 3B - Depth Camera Cone Parking\")", "def begin(self):\n pass", "def single_auto_record(self):\r\n if self.recorded:\r\n self.controller.stop()\r\n self.recorded = False\r\n log.debug(\"Auto-recording for the current talk stopped.\")\r\n\r\n if self.autoTalks.next():\r\n starttime = QtCore.QTime.fromString(self.autoTalks.value(8).toString())\r\n endtime = QtCore.QTime.fromString(self.autoTalks.value(9).toString())\r\n currenttime = QtCore.QTime.currentTime()\r\n\r\n if currenttime <= starttime:\r\n self.singleID = self.autoTalks.value(0).toString()\r\n title = self.autoTalks.value(1).toString()\r\n speaker = self.autoTalks.value(2).toString()\r\n\r\n # Time (in seconds) until recording for the talk starts\r\n self.timeUntilStart = currenttime.secsTo(starttime)\r\n # Time (in seconds) from the starttime to endtime of this talk\r\n self.timeUntilEnd = starttime.secsTo(endtime)\r\n\r\n # Display fullscreen countdown and talk info until talk starts\r\n self.autoRecordWidget.set_recording(False)\r\n self.autoRecordWidget.set_display_message(title, speaker)\r\n self.autoRecordWidget.start_timer(self.timeUntilStart)\r\n self.autoRecordWidget.showFullScreen()\r\n\r\n # Wait for talk to start, then change display and start recording\r\n self.beforeStartTimer.setInterval((self.timeUntilStart + 1) * 1000)\r\n self.beforeStartTimer.setSingleShot(True)\r\n self.beforeStartTimer.start()\r\n else:\r\n # Start time has already passed, so move on to next talk\r\n self.single_auto_record()\r\n else:\r\n self.stop_auto_record_gui()", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n ...", "def start(self):\n raise NotImplementedError(\"(%s).start\" % self)", "def startDocument(self):\n pass", "def startDocument(self):\n pass", "def resume(self):\n pass", "def resume(self):\n pass", "def resume(self):\n pass", "def start(self):\n assert not self.state is CallState.finished\n\n self.state = CallState.started\n self.start_time = time()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n 
pass", "def start(self):\n pass", "def resume(self):\n\t\tpass", "def start(self, start: pos.Pos) -> None:\n self.__start = start", "def beginStep(self, message=''):\n if not self.initialized:\n self.start(message)", "def start(self):\n self.ids.camera.opacity = 1\n self.ids.camera.play = True\n self.ids.start.text = 'Stop Camera'\n self.ids.camera.texture = self.ids.camera._camera.texture", "def start():\n # Have the car begin at a stop\n rc.drive.stop()\n # Print start message\n print(\">> Lab 4B - LIDAR Wall Following\")", "def begin(self):\n self._logger.debug(\"Begin\")", "def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()", "def start (self):\n pass", "def start (self):\n pass", "def start():", "def start():", "def start():", "def start():", "def start(self) -> global___Pos:", "def StartCapture(self, argin):\n handler = self.get_command_object(\"StartCapture\")\n handler(argin)", "def observe_first(self, timestep: dm_env.TimeStep) -> None:\n self._last_timestep = timestep\n if self._logger is not None:\n self._logger.info('START')\n self._reset_deck()", "def start(self):\n\t\tself.stream.start_stream()", "def _device_start_capture(self):\n\n # TODO: we may want to provide an option to flush the SDRam buffer here before capture stops?\n self._start_capture_to_ram()\n self._start_streaming_ram_to_host()", "def start(self):\r\n self.start_time = time.time()", "def start(self, data):\n log.info(data)\n self.stop()\n self.time_start = time.time() - data.get('time_offset', 0) - self.time_offset\n self.bpm = float(data.get('bpm', self.DEFAULT_BPM))\n self.timesigniture = parse_timesigniture(data.get('timesigniture', DEFAULT_TIMESIGNITURE))\n if data.get('sequence'):\n sequence_name = data.get('sequence')\n assert sequence_name in self.sequences, '{0} is not a known sequence'.format(sequence_name)\n self.sequence = self.sequences[sequence_name]\n if data.get('scene'):\n # Single scene - Fake the sequence list by inserting the name of the single scene required\n self.sequence = (data.get('scene', self.DEFAULT_SCENE_NAME), )\n self.sequence_index = 0", "def _on_key_press(self, key):\n if key is self.TRIGGER_KEY and not self.do_record:\n print(\"Start Recording...\")\n self.do_record = True", "def start(self):\n\t\t# deactivate Go button\n\t\tbutton_go_traj = builder.get_object(\"traj_go_button\")\n\t\tbutton_go_traj.set_sensitive(False)\n\n\t\tGLib.timeout_add(50, self.updateDest)\n\t\tself.updateDest()", "def on_start(self):\n self.state = STARTED", "def start(self):\n # Call the protected _turn method to start the game\n self._end_time = time.time() + 60\n self._turn()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self):\n raise NotImplementedError()", "def start(self, event):\n return", "def enter_recording(self, currentlyPlaying=None, offset=0):\n self.app.pingWebSessions()\n\n start = Time()\n filename = Recording.userRecordingFilename(self.app)\n d = self.agi.recordFile(\"weareforests-recordings/\" + filename, \"gsm\", chr(self.digit), 45)\n\n def save(r):\n digit, tpe, duration = r\n duration = duration / 8000\n rec = Recording(store=self.app.store, filename=unicode(filename), created=start, caller_id=self.callerId, duration=duration, user_recording=True)\n print \"saved!\"\n if tpe == 'hangup':\n print \"user hung up during recording.\"\n self.app.sessionEnded(self.channel)\n\n # add it to everybody's queue\n 
self.app.recordingAdded(self, rec)\n # resume play where we stopped\n self.setStateAfterSample(\"play\", \"weareforests-audio/listen\", currentlyPlaying, offset)\n\n d.addCallback(save)\n d.addErrback(self.catchHangup)", "def start(self):\n self.active = True" ]
[ "0.7811632", "0.748685", "0.74566555", "0.737498", "0.71553403", "0.68922603", "0.6806795", "0.67850584", "0.6769072", "0.67346567", "0.67321515", "0.6717027", "0.6545686", "0.6453229", "0.6453229", "0.6453229", "0.6453229", "0.6446663", "0.643614", "0.6370786", "0.6351596", "0.6301377", "0.62652636", "0.62509763", "0.62475115", "0.6236949", "0.6180073", "0.6175568", "0.61732405", "0.6110522", "0.61085176", "0.61040056", "0.61040056", "0.60974437", "0.60840476", "0.6080652", "0.6074258", "0.60585344", "0.60513115", "0.60347605", "0.600056", "0.59932035", "0.5992349", "0.5992349", "0.59541106", "0.5952913", "0.59520334", "0.594428", "0.5924848", "0.5918508", "0.59109944", "0.59109944", "0.59109944", "0.5896543", "0.5891289", "0.5889209", "0.5889209", "0.5888263", "0.5888263", "0.5888263", "0.5887427", "0.5879824", "0.5879824", "0.5879824", "0.5879824", "0.5879824", "0.5879824", "0.5879824", "0.5879824", "0.58787364", "0.58755946", "0.58713126", "0.5866827", "0.5851156", "0.5849248", "0.58436096", "0.58295554", "0.58295554", "0.58292675", "0.58292675", "0.58292675", "0.58292675", "0.5820291", "0.58152294", "0.58102435", "0.5808446", "0.58056784", "0.57896304", "0.57724744", "0.5771129", "0.57710016", "0.57566696", "0.57503533", "0.5738441", "0.5738441", "0.5738441", "0.5738441", "0.57340646", "0.5733224", "0.5730351" ]
0.7430042
3
Go to end of the recording.
def go_to_end(self): self.go_to(self.duration)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end(self) -> None:", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def handle_record_end():\n LOG.info(\"End Recording...\")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('recognizer_loop:record_end', context=context))", "def end(self):\n ...", "def stop(self):\n self.recording = False", "def end(self) -> None:\n return", "def end(self):\r\n # print(self.send_command('battery?'))\r\n if not self.is_dummy:\r\n self.send_command('land')\r\n if self.background_frame_read is not None:\r\n self.background_frame_read.stop()\r\n # It appears that the VideoCapture destructor releases the capture, hence when \r\n # attempting to release it manually, a segmentation error occurs.\r\n # if self.cap is not None:\r\n # self.cap.release()\r", "def end(self):\n # Stop driving\n self.robot.drivetrain.arcade_drive(0.0, 0.0)", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def goto_end(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.RIGHT))", "def end(self):\n self.kill_flag.value = True\n while (not self.pseye.thread_complete.value) or (not self.saver.saving_complete.value):\n pass", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._servo.end()\n self._mtr.end()\n self._log.debug('done')", "def finish(self):\n pass", "def finish(self):\n pass", "def stop_recording(self):\n\n\t\tself.eyetribe.stop_recording()\n\t\tself.recording = False", "def recording_stop(self):\n self._post('recording/stop')", "def finish():\n pass", "def end(self):\n self.f.close()\n print(\"Macro recorded, filename \" + self.name)", "def end(self) -> None:\n unicurses.endwin()", "def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def end(self):\n if self.flowComponent:\n self.flowComponent.end()\n pass", "def finishTurn(self):\n print \"go\"\n sys.stdout.flush()", "def stop_recording(self):\n self.flag_event.clear()\n self.statusBar().showMessage('Recording Stopped')\n print('boo ya')", "def _on_key_release(self, key):\n if key is self.TRIGGER_KEY:\n print(\"End Recording\")\n self.do_record = False", "def endGame(self):\n pass", "def end(self):\n\n # Close the prediction and\n # release the camera\n self.__predict_start = False\n self.__cap.release()", "def _exit_print(self):\n if self.cur_frame >= self.config.MAX_FRAMES:\n self.stopped = True", "def end(self):\n return self.__end", "def finish(self) -> None:", "def finish(self) -> None:", "def advance(self) -> None:\n pass", "def StopRecording( self ): \r\n\r\n self._socket.write( 'E' ) \r\n \r\n return self.GetServerResponse()", "def _track_finished(self, *_args):\n if not self.loop:\n self.stop()\n else:\n self.seek(0.)\n self.player.play()", "def end_phase():\n pass", "def finish(self):", "def finish(self):", "def on_record(self):\n self.record_label.set(\"Recording...\")\n\n recorder = RecorderThread(self.after_record)\n recorder.start()", "def eof(self):\n self.report_scenario_completed()\n self.report_feature_completed()\n self.report_failures()\n self.stream.flush()\n self.reset()", "def endDocument(self):\n 
self.return_q.put(self.obj_depth[-1])", "def end(self):\n #self.manipulator_restore()\n #self.header_text_restore()\n #self.cursor_modal_restore()\n pass", "def next_step(self):\n if self.time_point + 1 >= len(self.data):\n print(\"Error: at last time point\")\n else:\n self.time_point = self.time_point + 1\n self.load_frame()", "def end(self, won, reason):\n pass\n # replace with your end logic", "def finish():", "def finish():", "def finish():", "def finish():", "def stop_recording(self):\n self.timer.stop()\n self.camera.release()", "def Finish(self):\n pass", "def finish(self):\r\n\r\n self._is_finished = True", "def end(self):\n\t\treturn self._end", "def done_action(self) -> None:\n self.end = datetime.now()", "def EndDraw(self):\r\n\r\n pass", "def end_of_game(self):\n self.log.info('The game has ended')\n #\n end_callout = callout.FinishCallout(\n 'callout',\n 'finish_callout',\n ['exit_button'],\n S['end-game-callout'],\n self.deaths,\n )\n end_callout.show()\n #\n self.objects.append(end_callout)\n #\n while True:\n if end_callout.dismiss_button:\n music.fadeout(2)\n yield 2\n break\n yield 0\n #\n sys.exit(0)", "def reached_end_of_stream(self):\n pass", "def endDocument(self):\n pass", "def endDocument(self):\n pass", "def end(self):\n return self._end", "def end(self):\n return self._end", "def end(self):\n return self._end", "def stop(self):\n self.ctrl_obj.finish = True", "def stop(self):\n self.ctrl_obj.finish = True", "def end(self, time, reward):\n return True", "def endCompetition(self):\n self.robot_exit = True", "def record(self):\n\t\twhile True:\n\t\t\tif not self.recording:\n\t\t\t\tbreak\n\t\t\t#print('hal')\n\t\t\telapsed = (datetime.datetime.now() - self.timestamps[-1]).total_seconds()\n\t\t\tif elapsed < self.timePerFrame:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t#print(len(self.Video))\n\t\t\t\tret,frame = self.vs.read()\n\t\t\t\tif ret:\n\t\t\t\t\tself.timestamps.append(datetime.datetime.now())\n\t\t\t\t\tself.Video.append(frame)\n\t\t\t\t\tself.FPStracker.update()\n\t\t\t\t\tself.newAvailable = True\n\t\t\t\t\tif not self.threaded:\n\n\t\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tprint('error: camera failed to capture image')\n\t\t\t\t\tprint('canceling recording session')\n\t\t\t\t\tself.stop()\n\t\t#print('\\n recording loop ended, returning to main')\n\t\tself.vs.stop()\n\t\treturn", "def __macroStopRecording(self):\n self.activeWindow().macroRecordingStop()", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def finish(self):\n if self.serial:\n self.serial.close()", "def endAVI(self):\n nes_lib.endAVI.argtypes = [c_void_p]\n nes_lib.endAVI.restype = None\n nes_lib.endAVI(self.obj)", "def finish(self):\n self.body.finish()", "def finished(self):\n\t\telog(\"finished\")", "def end(self, end):\n\n self._end = end", "def end(self, end):\n\n self._end = end", "def end(self, end):\n\n self._end = end", "def end_meassuring(self):\n self.enabler = 0\n #self.t.join()\n return 1", "def end(self):\n print('\\nCompleted in {:.2f} seconds\\n'.format(time.time() - self.start_time))", "def finished(self):\n pass", "def end(self, interrupted: bool) -> None:\n self.drive.arcadeDrive(0, 0)", "def _exit_exam(self):\n self.finger.back()\n self._goto(\"exit_exam\")\n self.finger.back()", "def end_turn(self):\n self.history.append({\n 'user': self.user,\n 'system': self.system,\n 'nlu': self.nlu.to_cambridge_da_string() if isinstance(self.nlu, DA) else self.nlu,\n 'action': self.action.to_cambridge_da_string() if isinstance(self.action, DA) else self.action,\n 'state': 
{k: v for k, v in self.state.items()},\n })\n self.user = ''\n self.system = ''\n self.nlu = DA()\n self.action = DA()", "def tellIfEnded(self):\n self.congratulate()", "def end(self):\n self._watch_file = False\n self.experiment.end()\n if self.thread:\n self.thread.join(timeout=self._monitor_thread_timeout)", "def end(self):\n self._bc.close()", "def do_EOF(self, arg):\n\t\tself.finished = True", "def end():\n return say()", "def end_turn(self):\r\n self.turn += 1", "def send_finish_event(self):\n self.status['type'] = '__end__'\n self._send()", "def end(self, obs: AgentObservation):\n self.brain.end(obs)", "def stop(self):\r\n self.terminating = True", "def endRep(self, rep):\n \n pass", "def end(self) -> None:\n self.process_event(\n PipelineEvent(\n PipelineEventType.RUN_END,\n )\n )", "def end(self):\n while self.position < len(self.document.characters\n ) and self.document.characters[\n self.position].character != '\\n':\n self.position += 1", "def StopRecording(self, done=True):\n return _gmat_py.EphemManager_StopRecording(self, done)", "def endPage(self) :\n #self.logdebug(\"FORMFEED %i at %08x\" % (self.pagecount, self.pos-1))\n if not self.hpgl2 :\n # Increments page count only if we are not inside an HPGL2 block\n self.pagecount += 1", "def stop_recording():\n do_command('PlayStop')\n print('Stopped')", "def endMessage(self):" ]
[ "0.71201277", "0.709415", "0.709415", "0.709415", "0.7093299", "0.7023222", "0.6937872", "0.6920976", "0.6730083", "0.6668539", "0.6595177", "0.652316", "0.6494475", "0.6478494", "0.6469474", "0.64187795", "0.6406178", "0.6406178", "0.63864815", "0.63863313", "0.636873", "0.63622594", "0.63096756", "0.62970585", "0.6283105", "0.627556", "0.6257113", "0.62548035", "0.62500185", "0.621994", "0.6204159", "0.62003475", "0.6172002", "0.6170745", "0.6170745", "0.6163541", "0.61477405", "0.6106408", "0.6096189", "0.608146", "0.608146", "0.6081426", "0.60720664", "0.6069982", "0.60683244", "0.6066266", "0.60650426", "0.60635173", "0.60635173", "0.60635173", "0.60635173", "0.6062888", "0.6057741", "0.6046561", "0.6032695", "0.60137594", "0.6003271", "0.599707", "0.5982689", "0.5971862", "0.5971862", "0.59615296", "0.59615296", "0.59615296", "0.5949922", "0.5949922", "0.59459597", "0.59351", "0.5932655", "0.5925317", "0.5923384", "0.5922506", "0.5909407", "0.5900741", "0.5897955", "0.5890301", "0.5890301", "0.5890301", "0.5888248", "0.58776504", "0.5867015", "0.58644474", "0.58629847", "0.5848698", "0.58483315", "0.584819", "0.5845429", "0.5844234", "0.58429766", "0.5842083", "0.5837655", "0.5833376", "0.5821124", "0.5818241", "0.5817967", "0.5814445", "0.58124346", "0.58093274", "0.5808607", "0.5800342" ]
0.7161482
0
Jump to next or previous spike from the selected clusters.
def _jump_to_spike(self, delta=+1): spike_times = self.get_spike_times() if spike_times is not None and len(spike_times): ind = np.searchsorted(spike_times, self.time) n = len(spike_times) self.go_to(spike_times[(ind + delta) % n])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)", "def next(self):\n self.jumpahead(1)", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def goto(self, index):\n raise NotImplementedError", "def cmd_k(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.prev is not None:\n self.cursor = node.prev\n break\n node = node.next\n self.get_text()", "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def go_to_next_state(self):\n pass", "def jump_to_previous(self):\n self.nvim.command('silent! 
wincmd p')", "def jumped_on(self):\r\n pass", "def next(self):\n while not self.is_stable():\n self.step()", "def test_restart(self):\n\n selector = PCovCUR(n_to_select=1)\n selector.fit(self.X, self.y)\n\n for i in range(len(self.idx) - 2):\n selector.n_to_select += 1\n selector.fit(self.X, warm_start=True)\n self.assertEqual(selector.selected_idx_[i], self.idx[i])", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def jumping_on_the_clouds(clouds):\n index_clouds = [index for index, v in enumerate(clouds) if v == 0]\n for cloud in index_clouds:\n if index_clouds.index(cloud) == len(index_clouds) - 1:\n break\n if index_clouds[index_clouds.index(cloud) - 1] == cloud - 1 and \\\n index_clouds[index_clouds.index(cloud) + 1] == cloud + 1:\n index_clouds.remove(cloud)\n\n return len(index_clouds) - 1", "def jump(self):\n\n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(\n self, self.platforms, False)\n self.rect.y -= 2\n\n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= WIN_HEIGHT:\n self.change_y = -10", "def next_step(self):\n self.proceed()\n self.execute_current()", "def _go(self, distance):\n ende = self._position + self._orient * distance\n self._goto(ende)", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def walk_park(self):\n response = input(\"Do you want to see the next animal in the Zoo or go back to the previous?\")\n while response not in ['next', 'previous']:\n response = input(f\"Please enter 'next' or 'previous':\")\n if response == 'next':\n try:\n self.park_location += 1\n print(f\"You´ve arrived at the {self.animals[self.park_location]}.\")\n print(\"\\n\\n\")\n self._view_animal()\n except IndexError:\n self.park_location -= 1\n print(\"done\")\n else:\n try:\n self.park_location -= 1\n print(f\"You went back to the {self.animals[self.park_location]}.\")\n print(\"\\n\\n\")\n self._view_animal()\n except IndexError:\n print(\"done\")\n self.park_location += 1", "def nextRange(self):\r\n if (self.selectedmap < len(self.maplevels)-1):\r\n self.pickMap(self.selectedmap+1)", "def 
middleselectitem(self, pos):\n self._linklist.select(pos)", "def _goto(self, end):\n self._position = end", "def distortion_jump(X, cluster_estimator, k_max=None,\n distortion_meth='sqeuclidean', p=2):\n nb_data, nb_feature = X.shape\n # if no maximum number of clusters set, take datasize divided by 2\n if not k_max:\n k_max = nb_data // 2\n\n Y = - nb_feature / 2\n info_gain = 0\n old_dist = pow(\n distortion(X, np.zeros(nb_data), distortion_meth, p) / nb_feature, Y)\n for k in range(2, k_max + 1):\n cluster_estimator.set_params(n_clusters=k)\n labs = cluster_estimator.fit_predict(X)\n new_dist = pow(\n distortion(X, labs, distortion_meth, p) / nb_feature, Y)\n if new_dist - old_dist >= info_gain:\n k_star = k\n info_gain = new_dist - old_dist\n old_dist = new_dist\n return k_star", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def do_impact(self, car):\r\n\r\n if car is not None:\r\n if self.head_of_jump is None:\r\n self.head_of_jump = Jump()\r\n else:\r\n jj = Jump()\r\n jj.next = self.head_of_jump\r\n self.head_of_jump = jj\r\n\r\n # self.current_jump = self.head_of_jump\r", "def loops_back_to_screen(self):\r\n for segment in self.all_turtles:\r\n if segment.xcor() < -300 or segment.xcor() > 300:\r\n segment.goto(-segment.xcor(), segment.ycor())\r\n\r\n elif segment.ycor() < -300 or segment.ycor() > 300:\r\n segment.goto(segment.xcor(), -segment.ycor())", "def focus_next_cell(self, next):\n x, y = self._cell_input.cursor_coordinates()\n y_new = 0\n next._cell_input.set_cursor_coordinates(x, y_new)\n next.set_focus()\n self.lost_focus(force=True)", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def next(self):\n next_index = self.current_target_index + 1\n self.current_target_index = next_index % self.targets_amount\n updated_pos = self.positions[self.current_target_index]\n self.current_target = updated_pos\n return updated_pos", "def next(self):\n current = self.listbox.curselection()[0]\n if current < self.listbox.size() - 1:\n self.listbox.selection_clear(current)\n self.listbox.activate(current+1)\n self.listbox.select_set(current+1)\n 
self.play()", "def on_mouse_click(self, e):\n if 'Control' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = np.nonzero(self.channel_y_ranks == box_id)[0]\n # Find the spike and cluster closest to the mouse.\n db = self.data_bounds\n # Get the information about the displayed spikes.\n wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch]\n if not wt:\n return\n # Get the time coordinate of the mouse position.\n mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)\n mouse_time = Range(NDC, db).apply(mouse_pos)[0][0]\n # Get the closest spike id.\n times, spike_ids, spike_clusters, channel_ids = zip(*wt)\n i = np.argmin(np.abs(np.array(times) - mouse_time))\n # Raise the select_spike event.\n spike_id = spike_ids[i]\n cluster_id = spike_clusters[i]\n emit('select_spike', self, channel_id=channel_id,\n spike_id=spike_id, cluster_id=cluster_id)\n\n if 'Shift' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0])\n emit('select_channel', self, channel_id=channel_id, button=e.button)", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def set_jump(self, jump):\n self.jump = jump", "def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def jumpto(self, item, offset):\n try:\n self.ret = idc.jumpto(offset)\n except:\n self.ret = False\n\n return self.ret", "def scrollPoint(self):\r\n # productive #onButton\r\n profprint()\r\n self.changeValue()\r\n widget = slicer.modules.NeedleFinderWidget\r\n needle = widget.editNeedleTxtBox.value\r\n # print self.ptNumber\r\n # print needle\r\n coord = [0, 0, 0]\r\n ptName = '.' 
+ str(needle) + '-' + str(self.ptNumber)\r\n # print ptName\r\n modelNode = slicer.util.getNode(ptName)\r\n if modelNode != None:\r\n self.ptNumber = self.ptNumber + 1\r\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\r\n modelNode.GetFiducialCoordinates(coord)\r\n X = coord[0]\r\n Y = coord[1]\r\n Z = coord[2]\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed == None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n\r\n mYellow = sYellow.GetSliceToRAS()\r\n mYellow.SetElement(0, 3, X)\r\n sYellow.Modified()\r\n sYellow.UpdateMatrices()\r\n\r\n mGreen = sGreen.GetSliceToRAS()\r\n mGreen.SetElement(1, 3, Y)\r\n sGreen.Modified()\r\n sGreen.UpdateMatrices()\r\n\r\n mRed = sRed.GetSliceToRAS()\r\n mRed.SetElement(2, 3, Z)\r\n sRed.Modified()\r\n sRed.UpdateMatrices()\r\n elif self.ptNumber != 0:\r\n self.ptNumber = 0\r\n self.scrollPoint()", "def fix_jump(self):\n pass", "def goto(n):\n n = int('{}'.format(n))\n get_controller().step_to(n)", "def goto(self, offset):\n self._vim.command('goto {}'.format(offset))", "def hypermove(self,index):\r\n\r\n if self.hypermovemode == 0:\r\n # MODE ONE randomly jumps to related notes\r\n\r\n if str(index) not in self.indexes():\r\n\r\n index = Index(random.choice(self.indexes()))\r\n keylist_temp = list(self.get_keys_from_note(index))\r\n\r\n if keylist_temp:\r\n key_temp = random.choice(keylist_temp)\r\n else:\r\n return index\r\n if self.key_dict_contains(key_temp):\r\n indexlist_temp = [x_temp for x_temp\r\n in self.get_indexes_for_key(key_temp)\r\n if Index(x_temp) >= Index(0)]\r\n if str(index) in indexlist_temp:\r\n indexlist_temp.remove(str(index))\r\n if not indexlist_temp:\r\n indexlist_temp = [str(index)]\r\n else:\r\n indexlist_temp = [str(index)]\r\n return Index(random.choice(indexlist_temp))\r\n\r\n if self.hypermovemode in [1,2]:\r\n\r\n if self.hypermovemode == 1:\r\n # MODE TWO randomly jumps to hyperlinked indexes\r\n # MODE THREE offers a choice\r\n\r\n func_temp = random.choice\r\n else:\r\n func_temp = self.choose_from\r\n\r\n\r\n\r\n if str(index) not in self.indexes():\r\n index = Index(random.choice(list(self.indexes())))\r\n\r\n keylist_temp = list(self.get_keys_from_note(index))\r\n keylist_temp = transpose_keys(check_hyperlinks(keylist_temp,\r\n purge=True,\r\n display=display,notebook=notebook),\r\n notebook=notebook)\r\n keylist_temp = sorted([x_temp\r\n .replace('<',EMPTYCHAR)\r\n .replace('>',EMPTYCHAR) for x_temp in keylist_temp])\r\n if not keylist_temp:\r\n if isinstance(index,(int,str)):\r\n index = Index(index)\r\n if self.key_dict_contains(str(index)):\r\n return Index(func_temp(list(self.get_indexes_for_key(index))))\r\n return index\r\n elif len(keylist_temp) == 1:\r\n return Index(keylist_temp[0])\r\n else:\r\n return Index(func_temp(keylist_temp))", "def refresh(self):\n self.goto(self.starting_position)", "def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)", "def jump(self):\n if (self.falling or self.rising) and self.doubleJump:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n 
self.fallin = False\n self.rising = True\n self.doubleJump = False\n\n if not self.falling and not self.rising:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n self.rising = True", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def goto(self, speed=1):\n\n self.safe_goto(speed, 0)", "def do_STEP(self, parametros):\n if len(cancion.marks)!=0:\n cancion.moveCursor(1,True)", "def next_point(self, start_pos, goal_pos):\r\n\t\tself.shift = 0\r\n\t\tself.start_pos = start_pos\r\n\t\tself.goal_pos = goal_pos", "def parallelControl(state, powerControl):\n return kickAt(state, state.ball_pos + state.attacking_vector, powerControl)", "def jump(self):\n self.vy = -9", "def next_leaveout(self, force=None):\n if force is not None:\n self.leaveout = force\n\n if self.leaveout == self.NUM_BUCKETS:\n print('Have completed cross-validation')\n # raise CrossValidationComplete\n return None\n\n # Select next bucket to leave out as evaluation\n self.x_eval = self.eval_points = self.x[self.leaveout]\n self.y_eval = self.y[self.leaveout]\n\n # Convert the remaining buckets into one list\n self.x_train = self.traindata = np.concatenate(\n [arr for i, arr in enumerate(self.x) if i != self.leaveout]\n )\n self.y_train = np.concatenate(\n [arr for i, arr in enumerate(self.y) if i != self.leaveout]\n )\n\n self.leaveout += 1\n\n return self.leaveout", "def decide_next_move(self):\n pass", "def _jump(self):\n # can't jump while jump\n if self._player.is_jumping():\n # no hard-code, set jump with 2*max_velocity\n self._player.set_velocity((0, -2*self._max_velocity))\n self._player.set_jumping(False) # means can't jump", "def advance(self):\n self.currentIndex += 1\n self.updateCurrentCommand()", "async def jump(self, ctx, song_index: int):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. 
[{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n for i in range(song_index - 1):\n player.queue.pop(0)\n await player.skip()\n await ctx.message.add_reaction(\"✅\")", "def step_forward(self):", "def start_jumping(self):\n self.remove_action(Action.jump_charge)\n self.update_action(Action.jumping)\n self.image = self.current_sprite_list[-1]", "def focus_next(self):\n self.focus_item()", "def jump(self):\n\t\tself._is_falling = True\n\t\tself._dy = -5", "def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13", "def goto_pt(self, pt):\n curr_xy = [self.state.x, self.state.y]\n target_xy = (pt[0], pt[1])\n dist = math.sqrt((curr_xy[0] - target_xy[0])**2\n + (curr_xy[1] - target_xy[1])**2)\n\n if dist > self.goto_thresh:\n self.controller.target_velocity = self.goto_vel\n steering, velocity = \\\n self.controller.compute_control(self.state, target_xy)\n self.data_handler.update_target(self.controller.target)\n return steering, velocity\n else:\n self.controller.target_velocity = 0.0\n steering = 0.0\n velocity = 0.0\n return steering, velocity", "def next(self):\n \n jump = 0\n \n for event in pudding.process_event():\n if event[0] == sdlconst.KEYDOWN:\n if (event[1] == sdlconst.K_q) or (event[1] == sdlconst.K_ESCAPE):\n tofu.GAME_INTERFACE.end_game() # Quit the game\n \n elif event[1] == sdlconst.K_m:\n print \"trying to change single to multiplayer mode\"\n tofu.GAME_INTERFACE.end_game('client')\n \n elif event[1] == sdlconst.K_LSHIFT:\n # Shift key is for jumping\n # Contrary to other action, jump is only performed once, at the beginning of\n # the jump.\n jump = 1\n \n elif event[1] == sdlconst.K_LEFT: self.left_key_down = 1\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 1\n elif event[1] == sdlconst.K_UP: self.up_key_down = 1\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 1\n \n elif event[0] == sdlconst.KEYUP:\n if event[1] == sdlconst.K_LEFT: self.left_key_down = 0\n elif event[1] == sdlconst.K_RIGHT: self.right_key_down = 0\n elif event[1] == sdlconst.K_UP: self.up_key_down = 0\n elif event[1] == sdlconst.K_DOWN: self.down_key_down = 0\n \n if jump: return Action(ACTION_JUMP)\n \n # People saying that Python doesn't have switch/select case are wrong...\n # Remember this if you are coding a fighting game !\n return Action({\n (0, 0, 1, 0) : ACTION_ADVANCE,\n (1, 0, 1, 0) : ACTION_ADVANCE_LEFT,\n (0, 1, 1, 0) : ACTION_ADVANCE_RIGHT,\n (1, 0, 0, 0) : ACTION_TURN_LEFT,\n (0, 1, 0, 0) : ACTION_TURN_RIGHT,\n (0, 0, 0, 1) : ACTION_GO_BACK,\n (1, 0, 0, 1) : ACTION_GO_BACK_LEFT,\n (0, 1, 0, 1) : ACTION_GO_BACK_RIGHT,\n }.get((self.left_key_down, self.right_key_down, self.up_key_down, self.down_key_down), ACTION_WAIT))", "def seek(self, cutoff):\n while self.step_num < cutoff and self.op_state == Turing_Machine.RUNNING:\n \"\"\"Perform an atomic transition or chain step.\"\"\"\n if self.op_state != Turing_Machine.RUNNING:\n continue\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n continue\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n self.num_loops += 1\n\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, 
self.dir)\n\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n\n # Apply transition\n # Chain move\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def lockAtTarget(self, initial_call):\n if initial_call:\n self.chassis.setBrakeMode()\n if not self.isAligned():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.stop()", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def cycle(self) -> None:\n self.current_option_index = (self.current_option_index + 1) % len(self.options)", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def scrollPoint(self):\n #productive #onButton\n profprint()\n self.changeValue()\n widget = slicer.modules.NeedleFinderWidget\n needle = widget.editNeedleTxtBox.value\n #print self.ptNumber\n #print needle\n coord = [0,0,0]\n ptName = '.'+str(needle)+'-'+str(self.ptNumber)\n #print ptName\n modelNode = slicer.util.getNode(ptName)\n if modelNode != None:\n self.ptNumber = self.ptNumber+1\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\n modelNode.GetFiducialCoordinates(coord)\n X = coord[0]\n Y = coord[1]\n Z = coord[2]\n \n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\n if sRed ==None :\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\n\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n \n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\n if sGreen ==None :\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\") \n\n mYellow= sYellow.GetSliceToRAS()\n mYellow.SetElement(0,3,X)\n sYellow.Modified()\n sYellow.UpdateMatrices()\n\n mGreen= sGreen.GetSliceToRAS()\n mGreen.SetElement(1,3,Y)\n sGreen.Modified()\n sGreen.UpdateMatrices()\n\n mRed= sRed.GetSliceToRAS()\n mRed.SetElement(2,3,Z)\n sRed.Modified()\n sRed.UpdateMatrices()\n elif self.ptNumber!=0:\n self.ptNumber=0\n self.scrollPoint()", "def select(self):\n current = self.root\n actions = []\n while not current.is_leaf:\n # print(\"Selection... 
considering between:\")\n # print([\"{} ({:.2f})\".format(n, n.ucb) for n in current.visited_children])\n # print(\"Node's stats say that best child is {} ({})\".format(current.best_child, current.best_child.ucb))\n current = current.best_child\n actions.append(current.previous_action)\n # print(\"Selected\", current)\n # print()\n\n for a in actions:\n self.sim.play_action(a)\n # print(\"--------------------------------------\")\n return current", "def cg_goto(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(unindent(f\"\"\"\n @{label}\n 0;JMP\n \"\"\"))", "def combinedcontrol(self):\n print('conbinedcontrol\\r')\n persondistance = math.sqrt((self.currentx-self.personx)**2 + (self.currenty-self.persony)**2)\n if persondistance <= .5:\n self.goto_point(self.clearx,self.cleary)\n print 'avoiding\\r'\n else:\n self.goto_point(self.personx,self.persony)\n print 'following\\r'\n self.sendMessage()", "def move_to_position1(self):", "def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)", "def step(self, move):", "def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()", "def moveSelectedSegs(self,dragPosy,source):\n # TODO: check: I think the dict is always in descending order down screen?\n self.segsChanged = True\n # The first line seemed neater, but the verticalSpacing() doesn't update when you rescale the window\n #movetoID = dragPosy//(self.picbuttons[0].size().height()+self.flowLayout.layout.verticalSpacing())\n movetoID = dragPosy//(self.flowLayout.layout.geometry().height()//self.nclasses)\n\n # drags which start and end in the same cluster most likely were just long clicks:\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix] == source:\n if self.segments[ix][-1] == movetoID:\n source.clicked.emit()\n return\n\n # Even if the button that was dragged isn't highlighted, make it so\n source.mark = 'yellow'\n\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n self.segments[ix][-1] = movetoID\n self.picbuttons[ix].mark = 'green'\n\n # update self.clusters, delete clusters with no members\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n self.clearButtons()\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n\n nclasses = self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n # print('[old, new] labels')\n labels = dict(labels)\n print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before move: ', self.clusters)\n self.clusters = clusters\n print('after move: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n self.nclasses = nclasses\n\n # redraw the buttons\n self.updateButtons()\n self.updateClusterNames()\n self.completeChanged.emit()", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = 
self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def train_loop_pre(self, current_step):\r\n pass", "def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def choose_point_command(a):\n global canvas, best_line, list_best_label_distance, label_text_result\n if choose_point[0] != a and choose_point[1] != a: # if a was not be choose\n if choose_point[0] == -1 and choose_point[1] == -1:\n choose_point[0] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\") # Change color of point\n elif choose_point[0] != -1 and choose_point[1] == -1:\n choose_point[1] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n best_line = dijkstra(data, amount_point_var, choose_point[0], choose_point[1]) # Find best line\n if best_line is not None:\n draw_bestline(best_line[\"path\"], canvas, list_position) # Draw best line with difference color\n\n # Draw best distance with difference color\n list_best_label_distance = draw_best_distance(best_line[\"path\"], data, canvas, list_position, 0.1)\n # Draw result\n text = draw_result(canvas, best_line, data)\n label_text_result = Label(canvas, text=text, height=4, wraplength=150, bg='lawn green')\n label_text_result.pack(pady=100, padx=10, anchor=NW)\n\n else:\n messagebox.showwarning(\"Warning\", \"Not exist path from point{} to point{}\"\n .format(choose_point[0]+1, choose_point[1]+1))\n elif choose_point[0] != -1 and choose_point[1] != -1:\n list_point[choose_point[0]].configure(bg=point_color, fg=\"black\")\n list_point[choose_point[1]].configure(bg=point_color, fg=\"black\")\n choose_point[0] = a\n choose_point[1] = -1 # Uncheck\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n canvas.delete(\"best_line_tag\")\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[0] == a:\n if choose_point[1] == -1:\n choose_point[0] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n else:\n choose_point[a] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[1] == a:\n list_point[a].configure(bg=point_color, fg=\"black\")\n choose_point[1] = -1\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()", "def _advance_to_next_stage(self, config_ids, losses):\n rank = nondominated_sort(losses)\n indices 
= np.array(range(len(losses)))\n keep_indices = np.array([], dtype=int)\n\n # nondominance rank-based selection\n i = 0\n while len(keep_indices) + sum(rank == i) <= self.num_configs[self.stage]:\n keep_indices = np.append(keep_indices, indices[rank == i])\n i += 1\n keep_indices = np.append(keep_indices, indices[rank == i])\n\n # hypervolume contribution-based selection\n #ys_r = losses[rank == i]\n #indices_r = indices[rank == i]\n #worst_point = np.max(losses, axis=0)\n #reference_point = np.maximum(\n # np.maximum(\n # 1.1 * worst_point, # case: value > 0\n # 0.9 * worst_point # case: value < 0\n # ),\n # np.full(len(worst_point), eps) # case: value = 0\n #)\n\n #S = []\n #contributions = []\n #for j in range(len(ys_r)):\n # contributions.append(hypervolume([ys_r[j]]).compute(reference_point))\n #while len(keep_indices) + 1 <= self.num_configs[self.stage]:\n # hv_S = 0\n # if len(S) > 0:\n # hv_S = hypervolume(S).compute(reference_point)\n # index = np.argmax(contributions)\n # contributions[index] = -1e9 # mark as already selected\n # for j in range(len(contributions)):\n # if j == index:\n # continue\n # p_q = np.max([ys_r[index], ys_r[j]], axis=0)\n # contributions[j] = contributions[j] - (hypervolume(S + [p_q]).compute(reference_point) - hv_S)\n # S = S + [ys_r[index]]\n # keep_indices = np.append(keep_indices, indices_r[index])\n\n return_stat = np.zeros((len(losses))).astype(bool)\n return_stat[keep_indices] = True\n return return_stat\n\n # ranks = np.argsort(np.argsort(losses))\n # return (ranks < self.num_configs[self.stage])", "def go_to_start(self):\n self.go_to(0)", "def moveToNext(self):\n\t\tif self.G.debug:\n\t\t\ttic=time.clock()\n\t\tself.debugPrint('looks for new spot')\n\t\texceeds=self.m.exceedsAngleLim\t#function\n\t\tinside=self.m.isWithinPlantingBorders\t#function\n\t\tcart=self.m.getCartesian\n\t\tauto=self.m.automatic\n\t\tt=self.m.times\n\t\tcommands=[]\n\t\tif self.autoMoved:\n\t\t\topt=self.pos\n\t\t\tself.autoMoved=False #if this search is unsuccessfull, automove is enabled to next ideal pos.\n\t\telse:\n\t\t\topt=self.getNextOptimal()\n\t\tmoveTo=opt #for so long..\n\t\trTemp=0.1\n\t\tthTemp=0\n\t\tb=0.05 #constant for the spiral\n\t\ta=0.1\n\t\tplant=True #we will plant in this step...\n\t\td2=self.m.plantMinDist**2 #dist^2\n\t\tpossible = False #for so long\n\t\twhile not possible:\n\t\t\ttic=time.clock()\n\t\t\tpossible=True\n\t\t\tobstList=self.G.terrain.GetVisibleObstacles(moveTo, R=self.radius)\n\t\t\ttreeList=self.G.terrain.GetTrees(moveTo, R=self.radius+self.m.plantMinDist)\n\t\t\tobstList+=[tr for tr in treeList if not tr in obstList] #this procedure minimizes R in Getobst\n\t\t\t#[p1, p2]=self.getPHCoord(moveTo)\n\t\t\tphPos=self.getPHCoord(moveTo)\n\t\t\tplantSpots=self.getPlantingCoord(moveTo)\n\t\t\t#[f1,f2]=self.getPlantingCoord(moveTo)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\totherDevPlantCor=self.otherDevice.getPlantingCoord(self.otherDevice.pos)\n\t\t\t\t#check for colissions and similar related to other device\n\t\t\t\tif collide(self, self.otherDevice, o1pos=moveTo): \n\t\t\t\t\tpossible=False\n\t\t\t\telse:\n\t\t\t\t\tfor o in otherDevPlantCor:\n\t\t\t\t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f,o)<d2:#plantingspot of device is closer than allowed to other Device's plantingspot\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\t\t\n\t\t\tif possible:\t#either 1a or angle OK and above check OK\n\t\t\t\tfor obst in 
obstList:\n\t\t\t\t\t#tic=time.clock()\n\t\t\t\t\tif isinstance(obst, Tree):\n\t\t\t\t\t\t#other demands, more than 1.5 m from plantingspot.\n\t\t\t \t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f, obst.pos)<d2 or collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif isinstance(obst, Hole): #hole can be in beetween plantheads... Plantpos can be in hole.\n\t\t\t\t\t\tif len(self.plantHeads)==1: #bracke\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif collide(self.plantHeads[0], obst, o1pos=phPos[0]) or collide(self.plantHeads[1], obst, o1pos=phPos[1]):\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\tbreak\n\t\t\t\tif possible and self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice):\n\t\t\t\t\tpossible=False\t#angle is too big to the other device\n\t\t\t#at this point, all test for \"possibility\" are performed.\n\t\t\tPlantingDevice.timesProf[0]+=time.clock()-tic\n\t\t\tdthini=pi/50.\n\t\t\tif not possible:\n\t\t\t\t#move in a spiral outwards\n\t\t\t\trTemp=a+b*thTemp\n\t\t\t\tdth=(pi/25.)/(rTemp/2.)\n\t\t\t\tthTemp+=dth\n\t\t\t\tthInit=thTemp #used to avoid infinite loop\n\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\t\t\twhile not inside(moveTo) or (self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice)):\n\t\t\t\t\t#outside borders or too big angle.. make above expression shorter..\n\t\t\t\t\t#self.pSpots.append(self.m.getCartesian([rTemp,thTemp], opt))\n\t\t\t\t\trTemp=a+b*thTemp\n\t\t\t\t\tthTemp+=(pi/25.)/(rTemp/2.)\t\t\t\t\t\n\t\t\t\t\t#if abs(thTemp-thInit)>2*pi: #if radius is too big..\n\t\t\t\t\tif abs(thInit-thTemp)>2*pi:\n\t\t\t\t\t\tplant=False #we will not plant this time.\n\t\t\t\t\t\t#move to make it easier for the other head:\n\t\t\t\t\t\tif self.otherDevice is not None and self.lastPos==self.pos and self.struckLastTime:\t\t\t\t\t\t\n\t\t\t\t\t\t\tthIni=self.posCyl[1]-dthini\n\t\t\t\t\t\t\tthTemp=thIni\n\t\t\t\t\t\t\t\"\"\"if exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tnp=cart([self.posCyl[0],thTemp])\"\"\" #old stuff... should be removed, right?\n\t\t\t\t\t\t\twhile inside(cart([self.posCyl[0],thTemp])) and not exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tthTemp-=dthini #moves in order to make more space\n\t\t\t\t\t\t\tif thTemp==thIni: #it wasnt inside or exceeded\n\t\t\t\t\t\t\t\tcommands.extend(self.releaseDriver()) #releases driver, if he is used\n\t\t\t\t\t\t\t\tif exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\t\t#we are struck! 
Wait for other device to move.\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((waitevent, self, self.otherDevice.moveEvent))\n\t\t\t\t\t\t\t\telse: #not inside, we have reached the end of the half circle\n\t\t\t\t\t\t\t\t\tself.debugPrint(\"end of pattern reached, passivates %s device\"%self.mountPoint)\n\t\t\t\t\t\t\t\t\tself.noMoreSpots=True\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((passivate, self))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmoveTo=cart([self.posCyl[0],thTemp+dthini])\n\t\t\t\t\t\t\t\ttraveltime=self.setPos(moveTo)\n\t\t\t\t\t\t\t\tself.debugPrint('clears for other head')\n\t\t\t\t\t\t\t\tcommands=self.cmnd(commands, traveltime,auto=auto['clearForOtherHead'])\n\t\t\t\t\t\tif plant:\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\t\t\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\n\t\t\t\t\t\treturn (commands,plant)\n\t\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\ttravelTime=self.setPos(moveTo)\n\t\tself.debugPrint('traveltime: %f'%travelTime)\n\t\tif plant: #this timeconsumption is only for succesfull...\n\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\t\t\n\t\tcommands=self.cmnd(commands, travelTime,auto=auto['moveToMicro'])\n\t\treturn (commands,plant)", "def goto(x, y):\n turtleTmp.setposition(x, y)", "def puzzle1(offsets):\n return find_jumps_to_exit(offsets, lambda o: o + 1)", "def drawJumper(self):\n for x in self.jumper:\n print(x)", "def go_home(self):\n self.set_all_positions([0]*self.nleaflets)", "def target_nearest_enemy():\n keyboard.send('ctrl+tab')", "def go_to(self, value=None):\n self.go_to_this_line = self.line_number.get()\n self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))\n self.current_area()\n self.my_text.see(INSERT)\n self.searcher.destroy()", "def next_target(self):\n raise NextTargetEvent", "def next( self ):\n next(self)" ]
[ "0.70530874", "0.666981", "0.59389496", "0.58566797", "0.5765864", "0.5735018", "0.56252164", "0.5610634", "0.557462", "0.55167204", "0.54348326", "0.541708", "0.53921545", "0.5388768", "0.536333", "0.5288356", "0.5214645", "0.5199193", "0.5193192", "0.5163095", "0.51492554", "0.51274824", "0.51029634", "0.5092246", "0.50885594", "0.5078407", "0.507124", "0.50507337", "0.5046228", "0.5027033", "0.501879", "0.5007157", "0.5005678", "0.50031227", "0.50011986", "0.4961054", "0.4954235", "0.49517912", "0.49393183", "0.49359563", "0.49318168", "0.49297813", "0.49286917", "0.49218696", "0.49189383", "0.49082947", "0.49077624", "0.49075955", "0.49015298", "0.49005112", "0.48930877", "0.4879356", "0.48519284", "0.48513716", "0.48474157", "0.4839462", "0.48332906", "0.48332635", "0.48289624", "0.48286057", "0.4826403", "0.48251516", "0.48224947", "0.48209506", "0.48143902", "0.4812618", "0.48119602", "0.48097062", "0.48059702", "0.47999462", "0.47986227", "0.47937226", "0.4791732", "0.47891027", "0.47866687", "0.4778524", "0.4777264", "0.4768573", "0.47680554", "0.4762362", "0.47596642", "0.47592235", "0.47480044", "0.47448647", "0.47372815", "0.47368857", "0.4735273", "0.4735188", "0.47306496", "0.47271577", "0.47266182", "0.4715354", "0.47130492", "0.4706409", "0.47043827", "0.47018844", "0.46979183", "0.46973434", "0.46952775", "0.4692809" ]
0.6327784
2
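Each row in this dump closes with the same three fields shown above: a list of relevance scores aligned one-to-one with the preceding negatives, the positive document's own score, and its rank among those negatives (the rank of 2 here is consistent with exactly two of the listed scores exceeding the document's own 0.6327784). A minimal sketch of how such a row could be inspected once loaded as a Python dict — the field names and the dict-per-row loading format are assumptions made for illustration, not something the dump itself specifies:

# Illustrative sketch only: field names ("query", "negatives", "negative_scores",
# "document_score", "document_rank") are assumed; scores appear as strings in the
# dump, so they are converted with float() before comparing.
def inspect_row(row):
    scored = sorted(
        zip((float(s) for s in row["negative_scores"]), row["negatives"]),
        key=lambda pair: pair[0],
        reverse=True,
    )
    doc_score = float(row["document_score"])
    harder = sum(score > doc_score for score, _ in scored)
    print(f"query: {row['query']!r}")
    print(f"document_score={doc_score:.4f}  rank={row['document_rank']}  "
          f"negatives scoring higher: {harder}")
    for score, snippet in scored[:3]:  # three hardest negatives
        first_line = snippet.splitlines()[0] if snippet else ""
        print(f"  {score:.4f}  {first_line[:60]}")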
Jump to the next spike from the first selected cluster.
def go_to_next_spike(self, ):
        self._jump_to_spike(+1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)", "def _jump_to_spike(self, delta=+1):\n spike_times = self.get_spike_times()\n if spike_times is not None and len(spike_times):\n ind = np.searchsorted(spike_times, self.time)\n n = len(spike_times)\n self.go_to(spike_times[(ind + delta) % n])", "def next(self):\n self.jumpahead(1)", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def goto(self, index):\n raise NotImplementedError", "def next(self):\n while not self.is_stable():\n self.step()", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)", "def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def go_to_next_state(self):\n pass", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def go_to_start(self):\n self.go_to(0)", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def next(self):\n next_index = self.current_target_index + 1\n self.current_target_index = next_index % self.targets_amount\n updated_pos = self.positions[self.current_target_index]\n self.current_target = updated_pos\n return updated_pos", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def getNextTarget(self):\r\n\r\n\t\tif self.pathToGoal == []:\r\n#\t\t\tprint \"\\tPath empty, finding a new one.\"\r\n\t\t\tself.decideOnGoal()\r\n\t\t\tself.calculateNewPath()\r\n\t\r\n\t\tself.currentTarget = self.pathToGoal.pop(0)", "def next(self):\n old_candidate = self._candidate\n new_candidate = self._genome_factory.build([old_candidate])\n new_candidate.run()\n if new_candidate.fitness > old_candidate.fitness:\n self._candidate = new_candidate\n\n self._converged = self._convergence_criterion.converged(old_candidate, new_candidate)", "def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()", "def next_point(self, start_pos, goal_pos):\r\n\t\tself.shift = 0\r\n\t\tself.start_pos = start_pos\r\n\t\tself.goal_pos = 
goal_pos", "def goto(self, speed=1):\n\n self.safe_goto(speed, 0)", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def next( self ):\n next(self)", "def FindNext():\r\n return _hiew.HiewGate_FindNext()", "def refresh(self):\n self.goto(self.starting_position)", "def test_restart(self):\n\n selector = PCovCUR(n_to_select=1)\n selector.fit(self.X, self.y)\n\n for i in range(len(self.idx) - 2):\n selector.n_to_select += 1\n selector.fit(self.X, warm_start=True)\n self.assertEqual(selector.selected_idx_[i], self.idx[i])", "def next_step(self):\n self.proceed()\n self.execute_current()", "def next_step(self):\n logging.debug(u\"Moving to next step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step = self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_next_position(self):", "def cmd_k(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.prev is not None:\n self.cursor = node.prev\n break\n node = node.next\n self.get_text()", "def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def move_next_point(self, seconds, nearest_zombie_pos=None):\n direction_vector = self.get_next_moving_direction(nearest_zombie_pos)\n\n magnitude = self.speed * seconds\n if direction_vector[0] == 0:\n next_pos = (self.pos[0], \\\n self.pos[1] + magnitude)\n else:\n theta = math.atan(direction_vector[1]/direction_vector[0])\n next_pos = (self.pos[0] + magnitude*math.cos(theta), \\\n self.pos[1] + magnitude*math.sin(theta))\n\n #Check if that position is allowed\n next_pos = self.check_new_pos(next_pos)\n self.pos = next_pos", "def nextRange(self):\r\n if (self.selectedmap < len(self.maplevels)-1):\r\n self.pickMap(self.selectedmap+1)", "def next_target(self):\n raise NextTargetEvent", "def focus_next_cell(self, next):\n x, y = self._cell_input.cursor_coordinates()\n y_new = 0\n next._cell_input.set_cursor_coordinates(x, y_new)\n next.set_focus()\n self.lost_focus(force=True)", "def index(self) -> None:\n self._nearest_point = kd.Tree(self._points).nearest_point", "def select_next_target(self) -> DriverTarget:\n self.log.log(WORDY, \"Selecting next 
target.\")\n\n self.targetid += 1\n\n target = DriverTarget(\n observing_block=next(iter(self.observing_blocks.values())),\n targetid=self.targetid,\n )\n\n target.num_exp = 2\n target.exp_times = [15.0, 15.0]\n target.num_props = 1\n target.propid_list = [0]\n\n return target", "def moveToNext(self):\n\t\tif self.G.debug:\n\t\t\ttic=time.clock()\n\t\tself.debugPrint('looks for new spot')\n\t\texceeds=self.m.exceedsAngleLim\t#function\n\t\tinside=self.m.isWithinPlantingBorders\t#function\n\t\tcart=self.m.getCartesian\n\t\tauto=self.m.automatic\n\t\tt=self.m.times\n\t\tcommands=[]\n\t\tif self.autoMoved:\n\t\t\topt=self.pos\n\t\t\tself.autoMoved=False #if this search is unsuccessfull, automove is enabled to next ideal pos.\n\t\telse:\n\t\t\topt=self.getNextOptimal()\n\t\tmoveTo=opt #for so long..\n\t\trTemp=0.1\n\t\tthTemp=0\n\t\tb=0.05 #constant for the spiral\n\t\ta=0.1\n\t\tplant=True #we will plant in this step...\n\t\td2=self.m.plantMinDist**2 #dist^2\n\t\tpossible = False #for so long\n\t\twhile not possible:\n\t\t\ttic=time.clock()\n\t\t\tpossible=True\n\t\t\tobstList=self.G.terrain.GetVisibleObstacles(moveTo, R=self.radius)\n\t\t\ttreeList=self.G.terrain.GetTrees(moveTo, R=self.radius+self.m.plantMinDist)\n\t\t\tobstList+=[tr for tr in treeList if not tr in obstList] #this procedure minimizes R in Getobst\n\t\t\t#[p1, p2]=self.getPHCoord(moveTo)\n\t\t\tphPos=self.getPHCoord(moveTo)\n\t\t\tplantSpots=self.getPlantingCoord(moveTo)\n\t\t\t#[f1,f2]=self.getPlantingCoord(moveTo)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\totherDevPlantCor=self.otherDevice.getPlantingCoord(self.otherDevice.pos)\n\t\t\t\t#check for colissions and similar related to other device\n\t\t\t\tif collide(self, self.otherDevice, o1pos=moveTo): \n\t\t\t\t\tpossible=False\n\t\t\t\telse:\n\t\t\t\t\tfor o in otherDevPlantCor:\n\t\t\t\t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f,o)<d2:#plantingspot of device is closer than allowed to other Device's plantingspot\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\t\t\n\t\t\tif possible:\t#either 1a or angle OK and above check OK\n\t\t\t\tfor obst in obstList:\n\t\t\t\t\t#tic=time.clock()\n\t\t\t\t\tif isinstance(obst, Tree):\n\t\t\t\t\t\t#other demands, more than 1.5 m from plantingspot.\n\t\t\t \t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f, obst.pos)<d2 or collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif isinstance(obst, Hole): #hole can be in beetween plantheads... 
Plantpos can be in hole.\n\t\t\t\t\t\tif len(self.plantHeads)==1: #bracke\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif collide(self.plantHeads[0], obst, o1pos=phPos[0]) or collide(self.plantHeads[1], obst, o1pos=phPos[1]):\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\tbreak\n\t\t\t\tif possible and self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice):\n\t\t\t\t\tpossible=False\t#angle is too big to the other device\n\t\t\t#at this point, all test for \"possibility\" are performed.\n\t\t\tPlantingDevice.timesProf[0]+=time.clock()-tic\n\t\t\tdthini=pi/50.\n\t\t\tif not possible:\n\t\t\t\t#move in a spiral outwards\n\t\t\t\trTemp=a+b*thTemp\n\t\t\t\tdth=(pi/25.)/(rTemp/2.)\n\t\t\t\tthTemp+=dth\n\t\t\t\tthInit=thTemp #used to avoid infinite loop\n\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\t\t\twhile not inside(moveTo) or (self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice)):\n\t\t\t\t\t#outside borders or too big angle.. make above expression shorter..\n\t\t\t\t\t#self.pSpots.append(self.m.getCartesian([rTemp,thTemp], opt))\n\t\t\t\t\trTemp=a+b*thTemp\n\t\t\t\t\tthTemp+=(pi/25.)/(rTemp/2.)\t\t\t\t\t\n\t\t\t\t\t#if abs(thTemp-thInit)>2*pi: #if radius is too big..\n\t\t\t\t\tif abs(thInit-thTemp)>2*pi:\n\t\t\t\t\t\tplant=False #we will not plant this time.\n\t\t\t\t\t\t#move to make it easier for the other head:\n\t\t\t\t\t\tif self.otherDevice is not None and self.lastPos==self.pos and self.struckLastTime:\t\t\t\t\t\t\n\t\t\t\t\t\t\tthIni=self.posCyl[1]-dthini\n\t\t\t\t\t\t\tthTemp=thIni\n\t\t\t\t\t\t\t\"\"\"if exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tnp=cart([self.posCyl[0],thTemp])\"\"\" #old stuff... should be removed, right?\n\t\t\t\t\t\t\twhile inside(cart([self.posCyl[0],thTemp])) and not exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tthTemp-=dthini #moves in order to make more space\n\t\t\t\t\t\t\tif thTemp==thIni: #it wasnt inside or exceeded\n\t\t\t\t\t\t\t\tcommands.extend(self.releaseDriver()) #releases driver, if he is used\n\t\t\t\t\t\t\t\tif exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\t\t#we are struck! 
Wait for other device to move.\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((waitevent, self, self.otherDevice.moveEvent))\n\t\t\t\t\t\t\t\telse: #not inside, we have reached the end of the half circle\n\t\t\t\t\t\t\t\t\tself.debugPrint(\"end of pattern reached, passivates %s device\"%self.mountPoint)\n\t\t\t\t\t\t\t\t\tself.noMoreSpots=True\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((passivate, self))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmoveTo=cart([self.posCyl[0],thTemp+dthini])\n\t\t\t\t\t\t\t\ttraveltime=self.setPos(moveTo)\n\t\t\t\t\t\t\t\tself.debugPrint('clears for other head')\n\t\t\t\t\t\t\t\tcommands=self.cmnd(commands, traveltime,auto=auto['clearForOtherHead'])\n\t\t\t\t\t\tif plant:\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\t\t\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\n\t\t\t\t\t\treturn (commands,plant)\n\t\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\ttravelTime=self.setPos(moveTo)\n\t\tself.debugPrint('traveltime: %f'%travelTime)\n\t\tif plant: #this timeconsumption is only for succesfull...\n\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\t\t\n\t\tcommands=self.cmnd(commands, travelTime,auto=auto['moveToMicro'])\n\t\treturn (commands,plant)", "def goto(n):\n n = int('{}'.format(n))\n get_controller().step_to(n)", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def next(self):\n return self.cycle.next()", "def _step_snell(self) -> None:\n self.snell.step()", "def next(self):\n current = self.listbox.curselection()[0]\n if current < self.listbox.size() - 1:\n self.listbox.selection_clear(current)\n self.listbox.activate(current+1)\n self.listbox.select_set(current+1)\n self.play()", "def _goto(self, end):\n self._position = end", "def _go(self, distance):\n ende = self._position + self._orient * distance\n self._goto(ende)", "def jumped_on(self):\r\n pass", "def next(self):\n self.state += 1\n if self.state > 1:\n self.state = 0", "def jumpto(self, item, offset):\n try:\n self.ret = idc.jumpto(offset)\n except:\n self.ret = False\n\n return self.ret", "def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)", "def select_new_current_cup(self):\n\n # \"The crab selects a new current cup: the cup which is immediately clockwise of the current cup.\"\n\n current_position = self.cups.index(self.current)\n if current_position < len(self.cups) - 1: # Current cup is not on the end of the list.\n self.current = self.cups[current_position + 1]\n else:\n self.current = self.cups[0]", "def move_to_position1(self):", "def poke(self):\n self._messaged.emit((\"poke\",None,0,None))", "def next(self, event):\n self.result = 1", "def findTarget(self, initial_call):\n if self.vision.hasTarget():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.setOutput(self.SEARCH_SPEED, -self.SEARCH_SPEED)", "def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()", "def next(self):\r\n pass", "def take_leader(self):", "def middleselectitem(self, pos):\n self._linklist.select(pos)", "def lockAtTarget(self, initial_call):\n if initial_call:\n self.chassis.setBrakeMode()\n if not self.isAligned():\n self.next_state(\"driveToTarget\")\n 
else:\n self.chassis.stop()", "def decide_next_move(self):\n pass", "def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')", "def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None", "def _run_next_state(self):\n if self.state != \"STOP\":\n self.state = self.get_state_info(\"next\")\n self._run_state()", "def next_address():\n\t\tkeylist = vessel_list.keys()\n\t\tcurrentkey = keylist.index(str(node_id))\n\t\treturn vessel_list[keylist[(currentkey+1)%len(keylist)]]", "def first_active(self, k):\n return k - self.p", "def _advance(self):\n self._current += 1", "def getNext(self):", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()", "def _autostepSpoke(self):\n # do we have some spokes to work on ?\n if self._spokesToStepIn:\n # take one of them\n spoke = self._spokesToStepIn.pop()\n\n # increment the number of processed spokes\n self._spokeAutostepIndex += 1\n\n log.debug(\"stepping to spoke %s (%d/%d)\", spoke.__class__.__name__, self._spokeAutostepIndex, len(self._spokes))\n\n # notify the spoke about the upcoming automatic entry and set a callback that will be called\n # once the spoke has been successfully processed\n spoke.automaticEntry = True\n spoke.autostepDoneCallback = lambda x: self._autostepSpoke()\n\n # if this is the last spoke, tell it to return to hub once processed\n if self._spokesToStepIn == []:\n spoke.lastAutostepSpoke = True\n gtk_call_once(self._on_spoke_clicked, None, None, spoke)\n else:\n log.info(\"autostep for hub %s finished\", self.__class__.__name__)\n gtk_call_once(self._doPostAutostep)", "def goto(self, offset):\n self._vim.command('goto {}'.format(offset))", "def scrollPoint(self):\r\n # productive #onButton\r\n profprint()\r\n self.changeValue()\r\n widget = slicer.modules.NeedleFinderWidget\r\n needle = widget.editNeedleTxtBox.value\r\n # print self.ptNumber\r\n # print needle\r\n coord = [0, 0, 0]\r\n ptName = '.' 
+ str(needle) + '-' + str(self.ptNumber)\r\n # print ptName\r\n modelNode = slicer.util.getNode(ptName)\r\n if modelNode != None:\r\n self.ptNumber = self.ptNumber + 1\r\n if modelNode.GetAttribute(\"ValidationNeedle\") == \"1\":\r\n modelNode.GetFiducialCoordinates(coord)\r\n X = coord[0]\r\n Y = coord[1]\r\n Z = coord[2]\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed == None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n\r\n mYellow = sYellow.GetSliceToRAS()\r\n mYellow.SetElement(0, 3, X)\r\n sYellow.Modified()\r\n sYellow.UpdateMatrices()\r\n\r\n mGreen = sGreen.GetSliceToRAS()\r\n mGreen.SetElement(1, 3, Y)\r\n sGreen.Modified()\r\n sGreen.UpdateMatrices()\r\n\r\n mRed = sRed.GetSliceToRAS()\r\n mRed.SetElement(2, 3, Z)\r\n sRed.Modified()\r\n sRed.UpdateMatrices()\r\n elif self.ptNumber != 0:\r\n self.ptNumber = 0\r\n self.scrollPoint()", "def next_task(self):\n self.task_index = self.task_index + 1", "def first_spike_tind(V, startind=0):\n spikes, _ = find_peaks(V, [1, 1000])\n\n if len(spikes) == 0:\n found_spike = False\n else:\n found_spike = True\n\n if found_spike is False:\n raise NoSpikeFoundException\n else:\n return spikes[0]", "def focus_next(self):\n self.focus_item()", "def wait(self):\n time.sleep(self.next())", "def search_next(self):\n # Initiate search if needed\n if self.status == STATUS_IDLE:\n self.last_result = None\n self.agent.start_search()\n self._set_status(STATUS_SEARCH_WAITING)\n # Notify listeners about start of search\n self._notify_listeners_start_operation(listener.OPERATION_SOLVE)\n\n # Check if status is aborted in the mean time (may be caused by listener)\n if self._check_status_aborted():\n return self.last_result\n\n self._check_status(STATUS_SEARCH_WAITING)\n\n # Search next\n stime = time.time()\n self._set_status(STATUS_SEARCH_RUNNING)\n try:\n sres = self.agent.search_next()\n except BaseException as e:\n sys.stdout.flush()\n # Check if aborted in the mean time\n if self._check_status_aborted():\n return self.last_result\n if self.context.log_exceptions:\n traceback.print_exc()\n raise CpoSolverException(\"Exception caught from CP solver: {}\".format(e))\n if self.abort_supported and sres.get_search_status() == SEARCH_STATUS_STOPPED:\n self._set_status(STATUS_IDLE)\n else:\n self._set_status(STATUS_SEARCH_WAITING)\n stime = time.time() - stime\n self.context.solver.log(1, \"Model '\", self.model.get_name(), \"' next solution in \", round(stime, 2), \" sec.\")\n\n # Special case for old solvers where last optimal solution is empty\n if sres.is_solution_optimal and (sres.solution is None or sres.solution.is_empty()) and (self.last_result is not None):\n sres.solution = self.last_result.solution\n\n # Store last solution\n self.last_result = sres\n\n # Notify listeners\n for lstnr in self.listeners:\n lstnr.new_result(self, sres)\n\n # Return solution\n return sres", "def __nextTask(self):\n self.activeWindow().nextTask()", "def next_gene(self):\n pass", "def index_wrap(self, k):\n return (self.first_player + k) % self.num_players", "def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]", "def next_move(self):\n\n # Calculate all paths to 
destination from current location and time.\n solution = self.calculate_best_solution((None, None), self.currentTurn, [self.character.path[-1]],\n self.character.spent)\n\n # Add travel weight to spent.\n if solution[1] is not None and solution[1][0] != solution[1][1]:\n self.character.spent += self.pekingMap.get_vertex(solution[1][0]).weight(solution[1][1])\n\n # Return next point in shortest path to location.\n if solution[1] is not None:\n return solution[1][1]\n\n return None", "def goto_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for taken off message\n\tprint a1, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init1\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1", "def seek(self, cutoff):\n while self.step_num < cutoff and self.op_state == Turing_Machine.RUNNING:\n \"\"\"Perform an atomic transition or chain step.\"\"\"\n if self.op_state != Turing_Machine.RUNNING:\n continue\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n continue\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n self.num_loops += 1\n\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, self.dir)\n\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n\n # Apply transition\n # Chain move\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def face_nearest_block(self):\n try:\n block = self.swarmie.get_nearest_block_location(\n use_targets_buffer=True\n )\n except tf.Exception:\n # The caller should be about to exit with a normal exit code\n # after this call anyway, so the pickup behavior is launched.\n return\n\n if block is not None:\n angle = self.get_angle_to_face_point(block)\n self.swarmie.turn(angle, ignore=Obstacle.IS_VISION, throw=False)\n\n return", "def next_player(self) -> None:\n self.player = (self.player + 1) % len(self.players)", "async def jump(self, ctx, song_index: int):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. 
[{ctx.message.author.mention}]'))\n\n if song_index > len(player.queue) + 1:\n return await ctx.send(embed=self.error_embed(\"There is no such song in the queue.\"))\n\n for i in range(song_index - 1):\n player.queue.pop(0)\n await player.skip()\n await ctx.message.add_reaction(\"✅\")", "def attempt_to_acquire_leader(self, permanent=False):", "def setBestCluster(cluster):\r\n global bestCluster\r\n bestCluster = cluster", "def next(self):\n # The contents of self.bag have already been randomized so the\n # next item is always the item at our current position.\n next_item = self.bag[self.pos]\n\n # Each time an item is selected we decrement our position\n # until we finally arrive back at the beginning of the list.\n if self.pos == 0:\n # We've given out as many items as are in our list, reset\n # our position to the end of the list for the next time\n # the next() method is called.\n self.pos = len(self.bag) - 1\n # Also, shuffle that bag back up. Nobody likes predictable\n # randomness.\n self._shuffle()\n else:\n # We have not yet reached the beginning of the\n # bag. Decrement our position by one.\n self.pos -= 1\n\n # Give back the item. It wasn't 'randomly selected', so much\n # as it was selected sequentially from a randomized list of\n # stuff.\n return next_item", "def _select_next(self, X_pairwise, gain, idx):\n\n\t\tif self.cupy:\n\t\t\tself.current_values = cupy.sum(X_pairwise, self.current_values)\n\t\telif self.sparse:\n\t\t\tself.current_values = numpy.sum(\n\t\t\t\tX_pairwise.toarray()[0], self.current_values)\n\t\telse:\n\t\t\tself.current_values = numpy.sum(X_pairwise, \n\t\t\t\tself.current_values)\n\n\t\tsuper(SaturatedCoverageSelection, self)._select_next(\n\t\t\tX_pairwise, gain, idx)", "def restart(self):\n self.idx = 0", "def _get_next_waypoint(self, tolerance_step):\n print('\\nGetting new nav plan.')\n\n for i in range(4):\n try:\n self.plan = self.swarmie.get_plan(\n self.goal,\n tolerance=self.tolerance,\n use_home_layer=self.avoid_home\n )\n break # plan received\n except rospy.ServiceException:\n print('ServiceException.')\n if i < 3:\n print('Expanding tolerance.')\n self.tolerance += tolerance_step\n else:\n raise # tried 3 times, we give up\n\n print('Received nav plan.')\n pose = self.plan.plan.poses[0]\n\n return Point(x=pose.pose.position.x, y=pose.pose.position.y)", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def goto_sun(self, seconds_ahead = 0, blocking = True):\n assert self.is_initialized\n solar_ephemeris = self.devices['solar_ephemeris']\n tracking_mirror_positioner = self.controllers['tracking_mirror_positioner']\n #self.set_windings('on')\n #start tracking time\n t0 = time.time()\n #get current sun location\n jd_now, el_now, az_now = solar_ephemeris.update()\n #predict where sun will be at next control point\n jd_future, el_future, az_future = solar_ephemeris.predict(seconds_ahead, jd_now)\n #send start event\n info = OrderedDict()\n info['timestamp'] = t0\n info['seconds_ahead'] = seconds_ahead\n info['jd_now'] = jd_now\n info['az_now'] = az_now\n info['el_now'] = el_now\n info['jd_future'] = jd_future\n info['az_future'] = az_future\n info['el_future'] = el_future\n \n self._send_event(\"SOLAR_TRACKER_GOTO_SUN_STARTED\", info)\n if blocking:\n tracking_mirror_positioner.goto(az_target = az_future,\n el_target = el_future,\n blocking = blocking,\n )\n t1 = time.time()\n used_t = t1-t0\n #send end event\n info = OrderedDict()\n info['timestamp'] = t1\n info['az_pos'] = self.az_pos\n info['el_pos'] = self.el_pos\n 
info['used_time'] = used_t\n self._send_event(\"SOLAR_TRACKER_GOTO_SUN_COMPLETED\", info)\n return used_t\n else:\n tracking_mirror_positioner.goto(az_target = az_future,\n el_target = el_future,\n blocking = blocking,\n )", "def next():", "def next():" ]
[ "0.68979293", "0.6640994", "0.6507172", "0.5977406", "0.5866238", "0.5749817", "0.57480246", "0.568603", "0.5647011", "0.5641555", "0.56349427", "0.55913675", "0.5588329", "0.5521469", "0.54015243", "0.5398123", "0.5389734", "0.53794944", "0.5359989", "0.53176546", "0.528841", "0.5274591", "0.522305", "0.5221612", "0.52207553", "0.52070814", "0.5191429", "0.5189402", "0.5177124", "0.51536715", "0.51510906", "0.51424336", "0.5141704", "0.5136775", "0.51204944", "0.51100516", "0.5109621", "0.51007336", "0.50994974", "0.50980204", "0.50863165", "0.5072984", "0.5067189", "0.5066989", "0.50402534", "0.5037923", "0.5034921", "0.50328726", "0.5022649", "0.50101393", "0.5003891", "0.50036573", "0.5003419", "0.5001087", "0.4998873", "0.4985297", "0.49791667", "0.49708578", "0.49706268", "0.4964087", "0.49596235", "0.4956373", "0.49533325", "0.4950926", "0.49496347", "0.4942586", "0.49407995", "0.49308348", "0.49308348", "0.49308348", "0.49308348", "0.49286774", "0.49175453", "0.49160975", "0.4914626", "0.49131426", "0.49095687", "0.49080738", "0.49033448", "0.48996538", "0.4891127", "0.48805544", "0.48765168", "0.4873855", "0.48686507", "0.48670077", "0.48631588", "0.4861567", "0.4860636", "0.4859748", "0.48582363", "0.48551682", "0.4846794", "0.48453832", "0.4842758", "0.483974", "0.4833263", "0.48296407", "0.48248938", "0.48248938" ]
0.7934081
0
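The positive document for this row is a one-line delegation, and the helper it calls, `_jump_to_spike`, appears in full as the second snippet of the negatives list above: it locates the current time within the sorted spike times with `np.searchsorted`, steps by `delta`, and wraps around with a modulo. A self-contained sketch of that behaviour, with the surrounding view reduced to the minimum needed to run it (every name other than `go_to_next_spike` and `_jump_to_spike` is an assumption made for illustration):

import numpy as np

class SpikeNavigator:
    """Minimal stand-in for the view; only what the jump logic needs."""

    def __init__(self, spike_times):
        self.spike_times = np.asarray(spike_times, dtype=float)  # assumed sorted
        self.time = float(self.spike_times[0]) if len(self.spike_times) else 0.0

    def get_spike_times(self):
        return self.spike_times

    def go_to(self, time):
        # The real view would also re-centre the plotted interval here.
        self.time = float(time)

    def _jump_to_spike(self, delta=+1):
        # Same logic as the snippet in the negatives list above.
        spike_times = self.get_spike_times()
        if spike_times is not None and len(spike_times):
            ind = np.searchsorted(spike_times, self.time)
            n = len(spike_times)
            self.go_to(spike_times[(ind + delta) % n])

    def go_to_next_spike(self):
        self._jump_to_spike(+1)

With spike times [0.1, 0.4, 0.9] and the current time at 0.1, np.searchsorted returns 0, so go_to_next_spike() lands on index 1 (0.4); from 0.9 it returns 2 and the modulo wraps the jump back to 0.1.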
Jump to the previous spike from the first selected cluster.
def go_to_previous_spike(self, ):
        self._jump_to_spike(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def _jump_to_spike(self, delta=+1):\n spike_times = self.get_spike_times()\n if spike_times is not None and len(spike_times):\n ind = np.searchsorted(spike_times, self.time)\n n = len(spike_times)\n self.go_to(spike_times[(ind + delta) % n])", "def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def go_to_start(self):\n self.go_to(0)", "def previous(self, event):\n self.result = -1", "def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def next(self):\n self.jumpahead(1)", "def go_to_next_state(self):\n pass", "def refresh(self):\n self.goto(self.starting_position)", "def goto(self, index):\n raise NotImplementedError", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def previous(self):\n self._select_interface(self._rc_previous, self._http_previous)", "def focus_prev(self):\n self.focus_item(forward=False)", "def restart(self):\n self.idx = 0", "def previous(self):\n\n pass", "def jumped_on(self):\r\n pass", "def MoveToPreviousSlide(self, event):\n pass", "def move_to_position1(self):", "def previous_line():\r\n set_point(point().previous_line())", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)", "def cmd_k(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.prev is not None:\n self.cursor = node.prev\n break\n node = node.next\n self.get_text()", "def select_new_current_cup(self):\n\n # \"The crab selects a new current cup: the cup which is immediately clockwise of the current cup.\"\n\n current_position = 
self.cups.index(self.current)\n if current_position < len(self.cups) - 1: # Current cup is not on the end of the list.\n self.current = self.cups[current_position + 1]\n else:\n self.current = self.cups[0]", "def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def go_home(self):\n self.set_all_positions([0]*self.nleaflets)", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def prev_tour_button(self):\r\n if self.paths_gen is None:\r\n SlTrace.lg(\"paths_gen connection has NOT been setup\")\r\n return\r\n \r\n self.paths_gen.prev_tour()", "def goBack(self):\r\n if self.currLoc > 0:\r\n self.currLoc -= 1\r\n return self.history[self.currLoc]", "def test_restart(self):\n\n selector = PCovCUR(n_to_select=1)\n selector.fit(self.X, self.y)\n\n for i in range(len(self.idx) - 2):\n selector.n_to_select += 1\n selector.fit(self.X, warm_start=True)\n self.assertEqual(selector.selected_idx_[i], self.idx[i])", "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)", "def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()", "def goto_start(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def move_previous_position(self, lifting=800, **kwargs):\n\n return self.move_to(\n [self.previous_xloc, self.previous_yloc, self.previous_zloc],\n True,\n lifting,\n **kwargs\n )", "def first_active(self, k):\n return k - self.p", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def lockAtTarget(self, initial_call):\n if initial_call:\n self.chassis.setBrakeMode()\n if not self.isAligned():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.stop()", "def activate_previous_item(self):\n select_ok = self.select_previous_item()\n if select_ok:\n self.controller.display_item(self.selected_item)\n else:\n self.controller.display_message(\"No more message, you're at the top of the list\")", "def prev(self):\n\t\tif not self.play_random:\n\t\t\t# normal\n\t\t\tif self.direction is \"forward\":\n\t\t\t\tself._current_id -= 1\n\t\t\telse:\n\t\t\t\tself._current_id += 1\n\t\t\tself.limit_id_range()\n\n\t\t# random\n\t\telse:\n\t\t\tif not len(self._id_queue_past):\n\t\t\t\treturn # no more previous queue to go to, so don't do anything\n\t\t\telse:\n\t\t\t\tself._current_id = self._id_queue_past.pop()\n\t\t\t\tself._id_queue.append(self.current_id)\n\n\t\tself._dispatch_update()", "def _goto(self, end):\n self._position = end", "def getNextTarget(self):\r\n\r\n\t\tif self.pathToGoal == []:\r\n#\t\t\tprint \"\\tPath empty, finding a new one.\"\r\n\t\t\tself.decideOnGoal()\r\n\t\t\tself.calculateNewPath()\r\n\t\r\n\t\tself.currentTarget = self.pathToGoal.pop(0)", "def movePrev(self):\n parentNode = self.parentNode\n index = parentNode.idevices.index(self)\n if index > 0:\n temp = parentNode.idevices[index - 1]\n 
parentNode.idevices[index - 1] = self\n parentNode.idevices[index] = temp", "def jump(self):\n\t\tself._is_falling = True\n\t\tself._dy = -5", "def __previousTask(self):\n self.activeWindow().previousTask()", "def __previousBookmark(self):\n self.activeWindow().previousBookmark()", "def _restart_attack(self):\n self._stop_attack()\n self._competing_chain_tip_antipast = set(self._honest_dag._antipast)\n self._currently_attacked_block_gid = self._honest_dag._coloring_tip_gid\n self._virtual_competing_chain_block_parents = \\\n self._get_competing_chain_tip_parents(self._currently_attacked_block_gid,\n self._competing_chain_tip_antipast,\n self[self._honest_dag._coloring_tip_gid].get_parents())", "def fix_jump(self):\n pass", "def reset_position(self):\n self.goto(STARTING_POSITION)", "def next(self):\n while not self.is_stable():\n self.step()", "def train_loop_pre(self, current_step):\r\n pass", "def skip_to_prev(self):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSkipToPrev())", "def take_leader(self):", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def go_left(self):\n self.change_x = -6", "def go_left(self):\n self.change_x = -6", "def skip(self):\n self.click_back_button()", "def previous(self, _event):\n self.set_val(self.val - 1)", "def goto_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for taken off message\n\tprint a1, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init1\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1", "def jump(self, seconds: float) -> None:\n if seconds < 0:\n raise ValueError(\"time can't go backwards\")\n self._virtual_base += seconds", "def select_item_prev(self):\n\n loc_diff = self._get_distance_betweenitems(self.page_current.item_selected, self.page_current.item_selected - 1)\n if loc_diff + self.render_offset_item < self.terminal_height:\n self.page_current.item_selected -= 1\n self.render_offset_item = 0\n else:\n self.render_offset_item -= self.terminal_height\n\n self.render() # TODO Why the render function needs to be called for instant update unknown. 
Need to look into.", "def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13", "def back(self, distance):\n self._go(-distance)", "def start_jumping(self):\n self.remove_action(Action.jump_charge)\n self.update_action(Action.jumping)\n self.image = self.current_sprite_list[-1]", "def move_previous():\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n errorcode = self.variables.table.move_to(\n [self.previous_xloc, self.previous_yloc, self.previous_zloc],\n True,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if errorcode:\n # self.variables.message_to_main.put(errorcode)\n self.variables.table.set_axis([True, True, False]) # so z axis is off again\n self.variables.table.set_joystick(True)", "def first_move(self):\n self.play_sound(self.first_key)\n self.make_blink()\n self.wait_second_move()", "def keep_first_iteration(self):\n self.keep_first_iteration_flag = True", "def restart(self):\n self._song_idx = 0\n self._song_position = 0\n try:\n self._cur_song = self.songs[self._song_idx]\n except IndexError:\n self._cur_song = None", "def go_to_exit(self, _: int = 0) -> None:\n self.current_option = self.last_item_index\n self.draw()", "def goLeft(self, seconds):\n self.change_x = -5", "def prev(self, prev):\n\n self._prev = prev", "def previous(self):\n resp = yield from self.command('previous')\n return True", "def go_home(self):\n self.move_wl(0)", "def focus_prev_cell(self, prev):\n x, y = self._cell_input.cursor_coordinates()\n y_new = prev._cell_input.rows() - 1\n prev._cell_input.set_cursor_coordinates(x, y_new)\n prev.set_focus()", "def prev(self):\n if self.signbit.dec_value == 0:\n method = 'prev'\n else:\n method = 'next'\n return self._step(method)", "def goToPrevLink():\n if wikiPageStackTrace[-2].getUrl() != \"\":\n oldpage = wikiPageStackTrace[-2]\n print(\"going back to \", oldpage.getUrl())\n titleStackTrace.append(oldpage.getTitle())\n urlStackTrace.append(oldpage.getUrl())\n del wikiPageStackTrace[-1]\n update()\n else:\n update()", "def moveToPreviousFrame(self):\n\t\tall_ts = [s for t in self.stamps_by_stream.values() for s in t]\n\t\tall_ts.sort()\n\t\tfirst_frame = all_ts[0]\n\n\t\tselected_index = bisect.bisect_right(all_ts, self._timeline.current_pos)-1\n\t\tif selected_index <= 0 or all_ts[selected_index-1] < first_frame:\n\t\t\t# There is no data before, or no frame. 
Do nothing\n\t\t\treturn\n\t\tself._timeline.current_pos = all_ts[selected_index-1]\n\t\tself.objectSelected.emit(\n\t\t self.getFileAtStamp(self._timeline.current_pos)\n\t\t)", "def jump(self):\n self.vy = -9", "def get_previous_step(self):\n return self.get_step_by_index(-2)", "def previous(self):\n current = self.listbox.curselection()[0]\n if current > 0:\n self.listbox.selection_clear(current)\n self.listbox.activate(current-1)\n self.listbox.select_set(current-1)\n selected = self.files[self.listbox.selection_get()]\n pygame.mixer.music.load(selected)\n pygame.mixer.music.play(loops=0)", "def set_previous(self, new_previous):\n self.previous = new_previous", "def go_home(self):\n self.set_jpos(self._home_position, wait=True)", "def previous_board(self):\n pass", "def retarget(self):\n if self.retargetCount < self.retargetGoal:\n self.retargetCount += 1\n else:\n self.retargetCount = 0\n self.setCurrentTarget()\n self.setMode()", "def back(self):\n self.position -= 1", "def home(self):\n self.initial_offset = 0", "def stop(self):\n self.change_x = 0", "def stop(self):\n self.change_x = 0", "def stop(self):\n self.change_x = 0", "def stop(self):\n self.change_x = 0", "def goto(self, speed=1):\n\n self.safe_goto(speed, 0)", "def OldStartingIndex(self) -> int:", "def previous(self):\n return _osgAnimation.SwigPyIterator_previous(self)", "def jumping_on_the_clouds(clouds):\n index_clouds = [index for index, v in enumerate(clouds) if v == 0]\n for cloud in index_clouds:\n if index_clouds.index(cloud) == len(index_clouds) - 1:\n break\n if index_clouds[index_clouds.index(cloud) - 1] == cloud - 1 and \\\n index_clouds[index_clouds.index(cloud) + 1] == cloud + 1:\n index_clouds.remove(cloud)\n\n return len(index_clouds) - 1", "def home(self):\n self.goto(0, 0)", "def set_selected_point(self, i):\n\n if i < len(self.poses):\n self.selected_point = min(len(self.poses), max(0, i))\n self.calibration_changed()" ]
[ "0.714149", "0.63978726", "0.63840806", "0.61748135", "0.60427266", "0.5830913", "0.5813735", "0.5735725", "0.57223296", "0.57025665", "0.57012117", "0.55288374", "0.5477547", "0.547428", "0.5469675", "0.5461348", "0.5453362", "0.5451666", "0.5414452", "0.534706", "0.5315017", "0.53017795", "0.5295359", "0.5289185", "0.52636623", "0.52520895", "0.52414554", "0.523569", "0.5233294", "0.52109677", "0.5199741", "0.5191122", "0.5188316", "0.5185971", "0.51802224", "0.5168637", "0.51639944", "0.51592475", "0.51563686", "0.5155606", "0.5146614", "0.5144298", "0.5128179", "0.5123562", "0.5119698", "0.51176095", "0.5109503", "0.5104563", "0.5103283", "0.5102271", "0.50996804", "0.50967723", "0.509174", "0.50877696", "0.5071178", "0.5058568", "0.5051619", "0.50433284", "0.5028547", "0.5028547", "0.5026018", "0.5023696", "0.50235164", "0.501818", "0.5016355", "0.50085354", "0.50082177", "0.50050306", "0.49937132", "0.4991969", "0.498424", "0.4980969", "0.49805874", "0.49750513", "0.4975006", "0.4973666", "0.49696177", "0.49691772", "0.4967171", "0.49549818", "0.49506822", "0.49473342", "0.4943295", "0.4942898", "0.49428266", "0.4939436", "0.49383694", "0.4932865", "0.49327368", "0.49180567", "0.49142456", "0.49142456", "0.49142456", "0.49142456", "0.49090028", "0.49028635", "0.49019533", "0.48998576", "0.48991728", "0.48981503" ]
0.79967767
0
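This row's positive document is the `delta=-1` twin of the previous one; both directions share the same helper, and it is only the modulo in that helper that keeps jumps safe at either end of the recording. A tiny standalone check of that arithmetic (spike times and values chosen arbitrarily for illustration):

import numpy as np

spike_times = np.array([0.1, 0.4, 0.9])
n = len(spike_times)

def jump(current_time, delta):
    """Return the spike time reached by stepping `delta` spikes from `current_time`."""
    ind = np.searchsorted(spike_times, current_time)
    return spike_times[(ind + delta) % n]

print(jump(0.1, +1))  # 0.4 -- next spike
print(jump(0.1, -1))  # 0.9 -- previous from the first spike wraps to the last
print(jump(0.9, +1))  # 0.1 -- next from the last spike wraps to the first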
Toggle between showing all spikes or selected spikes.
def toggle_highlighted_spikes(self, checked):
        self.show_all_spikes = checked
        self.set_interval()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def hidden_singles(self):\n self.change = True\n while self.change:\n self.hidden_round()", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def toggle(self):", "def toggle(self) -> None:", "def toggle(self) -> None:", "def __toggleAll(self):\n aw = self.activeWindow()\n if aw:\n aw.foldAll()", "def toggle(self) -> None:\n ...", "def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)", "def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def stopAll(self, event=None):\n self.paused = False if self.paused else True\n self.gotData = False\n label = \"Resume all Sensors\" if (~self.paused) else \"Pause all Sensors\"\n self.displayPanel1.paused = False if self.displayPanel1.paused else True\n pass", "def hideallstate(self):\n if self.hideallcheck.isChecked() == True:\n self.field.setOwnRobotsVisibility(False, self.index)\n self.field.setPathVisibility(False, self.index)\n self.field.setBallVisibility(False, self.index)\n self.field.setTeammateVisibility(False, self.index)\n #self.field.setPathVisibility(False, self.index)\n self.field.setOpponentVisibility(False, self.index)\n self.field.setUndefVisibility(False, self.index)\n self.ballcheck.setChecked(False)\n self.teammatecheck.setChecked(False)\n self.opponentcheck.setChecked(False)\n self.undefcheck.setChecked(False)\n self.targetcheck.setChecked(False)\n else:\n self.field.setOwnRobotsVisibility(True, self.index)\n self.field.setPathVisibility(True, self.index)\n self.field.setBallVisibility(True, self.index)\n self.field.setTeammateVisibility(True, self.index)\n #self.field.setPathVisibility(True, self.index)\n self.field.setOpponentVisibility(True, self.index)\n self.field.setUndefVisibility(True, self.index)\n self.ballcheck.setChecked(True)\n self.teammatecheck.setChecked(True)\n self.opponentcheck.setChecked(True)\n self.undefcheck.setChecked(True)\n self.targetcheck.setChecked(True)", "def toggle_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.toggle()", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def set_highlighted_spikes(self, spikes=[]):\n \n if len(spikes) == 0:\n # do update only if there were previously selected spikes\n do_update = len(self.highlighted_spikes) > 0\n 
self.highlight_mask[:] = 0\n else:\n do_update = True\n self.highlight_mask[:] = 0\n if len(spikes) > 0:\n ind = self.find_indices_from_spikes(spikes)\n self.highlight_mask[ind] = 1\n \n if do_update:\n self.paint_manager.set_data(\n highlight=self.highlight_mask,\n visual='waveforms')\n \n self.highlighted_spikes = spikes", "def toggle_refresh(self, event):\n self._continue = not self._continue\n if self._continue:\n self.canvas.itemconfig(\"toggle-text\", text=\"Stop\")\n self.refresh(self._refresh_rate)\n else:\n self.canvas.itemconfig(\"toggle-text\", text=\"Start\")", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def on_gas_filled_toggled(self, checked):\n # TODO: not implemented yet\n if checked:\n self.gas_set = 1\n self.VI_gas_set.setEnabled(True)\n else:\n self.gas_set = 0\n self.VI_gas_set.setEnabled(False)", "def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()", "def toggleShowOnlySelection(self):\r\n\t\tself.showOnlySelection = not self.showOnlySelection", "def select_toggle(self):\n self.selection_toggle(*self.get_children())", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def highlight_spikes(self, spikes):\n spikes = np.intersect1d(self.data_manager.waveform_indices_array, \n spikes)\n if len(spikes) > 0:\n spikes_rel = np.digitize(spikes, \n self.data_manager.waveform_indices_array) - 1\n self.highlighting = True\n self.set_highlighted_spikes(spikes_rel)\n else:\n self.cancel_highlight()", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def _button_sweep_toggled(self, *a):\r\n _debug('GUISignalGenerator: _button_sweep_toggled()', a)\r\n \r\n # Only run the sweep if we have enabled the button\r\n if self.button_sweep.is_checked():\r\n \r\n # Run the \"before sweep\" setup function for the user to overwrite \r\n # (default is just a pause)\r\n self.before_sweep()\r\n \r\n # Set list mode\r\n self.combo_mode.set_index(1)\r\n self.api.set_mode('List') #Set the mode to list !!\r\n # Update the RF button\r\n self.button_rf.set_checked(self.api.get_output(), block_events=True)\r\n \r\n \r\n # Get list length from the generator\r\n ps = self.api.get_list_powers()\r\n fs = self.api.get_list_frequencies()\r\n \r\n # Make sure they match!\r\n if not len(ps) == len(fs): \r\n print(\"ERROR: Lengths of power and frequency lists do not match!\")\r\n return\r\n \r\n \r\n # Update the user\r\n 
self.label_list_status.set_text(str(len(fs)) + ' points in list memory')\r\n \r\n # Loop for the number of iterations\r\n self.number_iteration.set_value(0)\r\n while self.number_iteration.get_value() < self.settings['Sweep/Iterations'] \\\r\n or self.settings['Sweep/Iterations'] <= 0:\r\n \r\n # Break out if canceled\r\n if not self.button_sweep.is_checked(): break\r\n \r\n # Loop\r\n for n in range(self.settings['Sweep/n1'], min(self.settings['Sweep/n2'], len(fs))):\r\n \r\n # Break out if canceled\r\n if not self.button_sweep.is_checked(): break\r\n \r\n # Set the list index, which updates the machine\r\n self.api.set_list_index(n)\r\n #I'm adding these lines to debug the fact that Api doesn't change the frequency of its output. \r\n _debug(self.api.get_list_index(), self.api.get_frequency(), self.api.get_power())\r\n #print(self.api.get_list_frequencies())\r\n \r\n self.number_list_index.set_value(n, block_events=True)\r\n self.number_frequency .set_value(fs[n], block_events=True)\r\n self.number_dbm .set_value(ps[n], block_events=True)\r\n self.window.process_events()\r\n \r\n # This is where you could insert some interesting code.\r\n self.after_sweep_set_list_index()\r\n \r\n # Increase the iteration count\r\n self.number_iteration.increment()\r\n \r\n # Run user code\r\n self.after_single_sweep()\r\n \r\n # Run user code\r\n self.after_all_sweeps()\r\n \r\n # All done with the loop. Disable the sweep button!\r\n # We put this after the user functions so they can tell if\r\n # someone manually quit out of the loop.\r\n self.button_sweep.set_checked(False, block_events=True)", "def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()", "def all_off():\n print(\"Climate is within set parameters; toggling systems off if any are on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n time.sleep(30)", "def segmentNeedle(self):\r\n # productive #event\r\n profprint()\r\n if self.fiducialButton.isEnabled():\r\n print \"new checked state: \", not self.fiducialButton.checked\r\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def select_sweepstakes(self):\n pass", "def set_highlighted_spikes(self, spikes, do_emit=True):\n if len(spikes) == 0:\n # do update only if there were previously selected spikes\n do_update = len(self.highlighted_spikes) > 0\n self.highlight_mask[:] = 0\n else:\n do_update = True\n # from absolute indices to relative indices\n # only keep spikes that are displayed\n spikes = np.intersect1d(spikes, self.spike_ids)\n self.highlight_mask[:] = 0\n if len(spikes) > 0:\n spikes_rel = np.digitize(spikes, self.spike_ids) - 1\n ind = self.find_indices_from_spikes(spikes_rel)\n self.highlight_mask[ind] = 1\n \n if do_update:\n \n # emit the HighlightSpikes signal\n if do_emit:\n ssignals.emit(self.parent, 'HighlightSpikes', spikes)\n # self.spike_ids[np.array(spikes, dtype=np.int32)])\n \n self.paint_manager.set_data(\n highlight=self.highlight_mask,\n visual='waveforms')\n \n self.highlighted_spikes = spikes", "def toggled(self, *args, **kwargs): # real signature unknown\n pass", "def toggle(self):\n if self._state in [STATE_OFF, STATE_IDLE, STATE_STANDBY]:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF", "def toggle_selected(self):\n\n self._selected = not self._selected", "def toggle_active(self):\n with_one_active = 
self.filtered(\n lambda product:len(product.woo_template_id.woo_product_ids) == 1)\n for product in with_one_active:\n product.woo_template_id.toggle_active()\n return super(ProductProductEpt, self - with_one_active).toggle_active()", "def turnOn(self):\n self.off = False\n self.turnOnAnimation()", "def _set_spikes(self, listOfSpikes):\n self._spikes = listOfSpikes", "def segmentNeedle(self):\n #productive #event\n profprint()\n if self.fiducialButton.isEnabled():\n print \"new checked state: \",not self.fiducialButton.checked\n self.onStartStopGivingNeedleTipsToggled(not self.fiducialButton.checked)", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def toggle_exposure(self):\n\n checked1 = self.exp1_radio.isChecked()\n if checked1:\n self.exp2_radio.setChecked(True)\n else:\n self.exp1_radio.setChecked(True)\n self.select_exposure()", "def onStartStopGivingObturatorNeedleTipsToggled(self, checked):\n #productive\n profprint()\n if checked:\n self.fiducialButton.checked = 0\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\n self.startGivingControlPointsButton.checked = 0\n self.start(self.obturatorNeedleTipClicks)\n self.fiducialObturatorButton.text = \"Stop Giving Obturator Needle Tips\" \n else:\n self.stop()\n self.fiducialObturatorButton.text = \"Start Giving Obturator Needle Tips\"", "def smooth_trace_enabled(self):\n if self.ui.smoothData_checkBox.isChecked():\n self.ui.smoothData_spinBox.setEnabled(True)\n else:\n self.ui.smoothData_spinBox.setEnabled(False)", "def toggle(self, layout, item, feats):\n if self.active.isChecked():\n self.fill_active(layout)\n\n self.default_button = QPushButton('set to defaults', feats)\n layout.addWidget(self.default_button)\n self.default_button.clicked.connect(self.rec_default)\n\n item.setForeground(QColor('black'));\n else:\n self.clear_params(layout, item)", "def fullLatticeCheckChanged(self, val):\n if val == QtCore.Qt.Unchecked:\n self.writeFullLattice = False\n else:\n self.writeFullLattice = True", "def selectAll(self):\n for ID in range(len(self.cboxes)):\n if self.cboxes[ID].isChecked():\n for ix in range(len(self.segments)):\n if self.segments[ix][-1] == ID:\n self.picbuttons[ix].mark = 'yellow'\n self.picbuttons[ix].buttonClicked = True\n self.picbuttons[ix].setChecked(True)\n self.picbuttons[ix].repaint()\n else:\n for ix in range(len(self.segments)):\n if self.segments[ix][-1] == ID:\n self.picbuttons[ix].mark = 'green'\n self.picbuttons[ix].buttonClicked = False\n self.picbuttons[ix].setChecked(False)\n self.picbuttons[ix].repaint()", "def onStartStopGivingValidationControlPointsToggled(self, checked):\n #productive\n profprint()\n if checked:\n self.fiducialObturatorButton.checked = 0\n self.fiducialButton.checked = 0\n self.fiducialButton.text = \"2. 
Start Giving Needle Tips [CTRL + ENTER]\"\n self.start(self.needleValidationClicks)\n self.startGivingControlPointsButton.text = \"Stop Giving Control Points\" \n else:\n self.stop()\n self.startGivingControlPointsButton.text = \"Start Giving Control Points\"", "def ToggleDrawingTools(self, event):\n pass", "def ToggleVisible(self, event):\n pass", "def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()", "def toggle_call(self) -> None:", "def toggle(self):\r\n self._variable.set(not self._variable.get()) \r\n self._activate()", "def all_off(self):\n\n for b in self.gamebuttons:\n b.but_off()", "def pressS(self):\n self.myParent.mode.modifyIndustry(self.myParent.mySystemDict['id'], self.currentValue, self.myIndustryData.id)\n self.disableButton('S')", "def entryToggle(self):\n status = \"normal\" if self.optionVar.get() == 4 else \"disabled\"\n for i in range(3):\n self.entry[i].configure(state=status)", "def on_pushButton_toggled(self, checked):\n self.isPause = checked", "def set_visible(self, value):\n for artist in self.artists:\n artist.set_visible(value)", "def set_show_stockfish(self, show_stockfish):\n self.show_stockfish = show_stockfish\n logger.debug(\"Stockfish output is now {0}\".format(\n \"enabled\" if self.show_stockfish else \"disabled\"))\n for halfmove, tag in self.tags.items():\n if self.show_stockfish:\n self.update_info(halfmove)\n else:\n tag.set_property(\n \"foreground-gdk\", Gdk.Color(65535, 65535, 65535))", "def onStartStopGivingValidationControlPointsToggled(self, checked):\r\n # productive\r\n profprint()\r\n if checked:\r\n self.fiducialObturatorButton.checked = 0\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.start(self.needleValidationClicks)\r\n self.startGivingControlPointsButton.text = \"Stop Giving Control Points\"\r\n else:\r\n self.stop()\r\n self.startGivingControlPointsButton.text = \"Start Giving Control Points\"", "def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def onStartStopGivingObturatorNeedleTipsToggled(self, checked):\r\n # deprecated\r\n profprint()\r\n if checked:\r\n self.fiducialButton.checked = 0\r\n self.fiducialButton.text = \"2. 
Start Giving Needle Tips [CTRL + ENTER]\"\r\n self.startGivingControlPointsButton.checked = 0\r\n self.start(self.obturatorNeedleTipClicks)\r\n self.fiducialObturatorButton.text = \"Stop Giving Obturator Needle Tips\"\r\n else:\r\n self.stop()\r\n self.fiducialObturatorButton.text = \"Start Giving Obturator Needle Tips\"", "def clicked_checkbox_model_smoothing(self):\n if self.checkbox_model_smoothing.isChecked():\n self._get_selected_model().metadata[\"smoothing_kernel\"] = True\n self.edit_manual_smoothing.setEnabled(False)\n else:\n self._get_selected_model().metadata[\"smoothing_kernel\"] = False\n self.edit_manual_smoothing.setEnabled(True)\n return None", "def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()", "def toggle_border_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n poly.setBrush(QColor(0, 0, 0, 0))\n\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(True)\n # Enable selection of the edges of the polygon, if the edge has a marker display it\n for edge in self.edge_list:\n edge.childItems()[0].setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(True)", "def toggle_pause(self):\n self.stdin_queue.put(\"toggle\")", "def on_tog_small_method(self, tog_small_class):\n self.txt_small_method.set_sensitive(tog_small_class.get_active())\n self.txt_small_method.set_text(\"0\")", "def toggle(self):\n self.open = not self.open", "def onStartStopGivingNeedleTipsToggled(self, checked=True):\r\n # productive\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n self.fiducialButton.checked = checked\r\n if checked:\r\n self.startGivingControlPointsButton.checked = 0\r\n self.fiducialObturatorButton.checked = 0\r\n self.start()\r\n self.fiducialButton.text = \"2. Stop Giving Needle Tips [CTRL + ENTER]\"\r\n widget.editUtil.setCurrentEffect(\"NeedleFinder\")\r\n else:\r\n self.stop()\r\n self.fiducialButton.text = \"2. Start Giving Needle Tips [CTRL + ENTER]\"\r\n widget.editUtil.setCurrentEffect(\"DefaultTool\")\r\n widget.resetDetectionButton.setEnabled(1)\r\n tempFidNodes = slicer.mrmlScene.GetNodesByName('.temp')\r\n for i in range(tempFidNodes.GetNumberOfItems()):\r\n node = tempFidNodes.GetItemAsObject(i)\r\n if node:\r\n slicer.mrmlScene.RemoveNode(node)\r\n widget.deleteNeedleButton.setEnabled(1)", "def hide_all(self, immediate=True):\n raise NotImplementedError", "def toggle_scattering(self, setting=1):\n if setting not in [0, 1, \"on\", \"off\"]:\n raise ValueError(\n \"The input for the toggle the us of scattering \"\n 'in the model must \"on\" (1) or \"off\" (0)'\n )\n self.use_scat = 1 if setting == \"on\" else 0 if setting == \"off\" else setting", "def onStartStopGivingNeedleTipsToggled(self, checked = True):\n #productive\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n self.fiducialButton.checked = checked\n if checked:\n self.startGivingControlPointsButton.checked = 0\n self.fiducialObturatorButton.checked = 0\n self.start()\n self.fiducialButton.text = \"2. Stop Giving Needle Tips [CTRL + ENTER]\"\n else:\n self.stop()\n self.fiducialButton.text = \"2. 
Start Giving Needle Tips [CTRL + ENTER]\"\n widget.resetDetectionButton.setEnabled(1)\n tempFidNodes = slicer.mrmlScene.GetNodesByName('Temp')\n for i in range(tempFidNodes.GetNumberOfItems()):\n node = tempFidNodes.GetItemAsObject(i)\n if node:\n slicer.mrmlScene.RemoveNode(node)\n widget.deleteNeedleButton.setEnabled(1)", "def on_showpointsToolbutton_toggled(self, button):\n\n self._state( 'showpoints', button.get_active() )\n self._refresh_ui()", "def toggle(self):\n self._variable.set(not self._variable.get())\n self._activate()", "def toggle(self) -> None:\n if bool(self.show.get()):\n self.sub_frame.pack(fill=tk.X, expand=True)\n self.toggle_button.configure(text=self.sep[0])\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text=self.sep[1])", "def _all_labels_false_1(self):\n # get all values of current labels toggles\n all_values = [ww.value for ww in self.labels_box.children]\n # if all of them are False\n if all(item is False for item in all_values):\n for ww in self.labels_box.children:\n # temporarily remove render function\n ww.on_trait_change(self._render_function, 'value', remove=True)\n # set value\n ww.value = True\n # re-add render function\n ww.on_trait_change(self._render_function, 'value')", "def _switch(self):\n self.fill= not self.fill", "def reset_energizer_flag(self): \r\n self.energizer_flag = False", "def plot_spikes(self, show=False, save_path=None, expand = False):\n spikes = np.array(self.spike_history)\n spike_time, e_idx = np.where(spikes)\n spike_time = spike_time.astype('float32')\n spike_time *= self.global_dt\n spike_time_pair = zip(e_idx,spike_time)\n spike_time_pair.sort()\n spike_time_pair = np.array(spike_time_pair)\n spike_time_pair = list(np.split(spike_time_pair, np.where(np.diff(spike_time_pair[:,0]))[0]+1))\n\n if self.enable_spike_dump:\n n = len(self.all_compartments)\n else:\n n = len(self.electrodes)\n\n s = []\n for i in xrange(n):\n s1 = [t[:,1] for t in spike_time_pair if t[0,0] == i]\n s.append(s1)\n\n fig = plt.figure()\n ax = self.raster(s)\n\n if n < 50 or expand:\n ax.set_yticks(np.arange(1, n + 1))\n if self.enable_spike_dump:\n ax.set_yticklabels(tuple(self.all_compartments))\n else:\n ax.set_yticklabels(tuple(self.electrodes))\n else:\n ax.set_yticklabels([])\n\n ax.set_ylabel('Electrode IDX')\n ax.set_xlabel('Time (msec)')\n ax.set_title('CSTMD Electrode Spikes for ' + str(n) + ' compartments')\n\n if not show and expand:\n if n > 40:\n w,h = fig.get_size_inches()\n h *= n / 40\n fig.set_size_inches(w,h)\n\n if save_path is not None:\n #fig.tight_layout()\n plt.savefig(save_path, bbox_inches='tight')\n print \"Saved Cstmd spike train to \" + save_path\n plt.gcf().clear()\n if show:\n plt.show()", "def toggle_pause(self):\n self.m_btn_pause = not self.m_btn_pause", "def toggle_item_starred(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_starred = self.selected_item.starred\n message = 'Starred flag is now ON'\n if was_starred:\n message = 'Starred flag is now OFF'\n self.trigger_item_starred(not was_starred)\n self.controller.display_message(message)", "def none_toggled(self):\n\t\tfor t in self.toggles:\n\t\t\tif t == True: return False\n\t\treturn True", "def toggle_airports(self):\n if self.locations_map.show_airports:\n self.locations_map.show_airports = False\n else:\n if self.locations_map.zoom > 5:\n self.locations_map.show_airports = True\n self.locations_map.start_getting_locations_in_fov()\n else:\n self.btn_toggle_airports.state = 'normal'\n show_message_popup(\"Zoom level must 
be greater than 5.\")", "def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)", "def toggle_viz(self):\n\n return self.hide() if self.viz else self.show()", "def autoExposureChk(self, state):\n if state == Qt.Checked and self.kinect.kinectConnected == True:\n self.kinect.toggleExposure(True)\n else:\n self.kinect.toggleExposure(False)", "def pause(self):\n for item in self.canvas[\"items\"]: item['state']=DGG.DISABLED\n self.ignoreAll()", "def on_show_only_selection(self):\n state = self.selection_btn.isChecked()\n MTTSettings.set_value('onlySelectionState', state)\n self.filterSelectionToggled.emit(state)", "def hide_all_line():\n global canvas, button_off_all_line, best_line, button_off_best_line\n if button_off_all_line['text'] == \"Hide all line\": # If current status is show and want to hide\n canvas.delete(\"all_line_tag\") # Delete all lines from canvas\n button_off_all_line.configure(bg=button_off_color, text=\"Show all line\") # Change status of button\n else: # If current status is hide and want to show\n draw_line(canvas, list_position, data) # Draw new all line\n # If best line is not none, draw best line\n if best_line is not None and button_off_best_line['text'] == 'Hide best line':\n draw_bestline(best_line[\"path\"], canvas, list_position)\n button_off_all_line.configure(bg=button_on_color, text=\"Hide all line\")", "def toggle_layer_visibility(self, check_box_array):\n for check_box in check_box_array.checkboxes:\n if self.map_state.layer == check_box.name:\n check_box.toggle()", "def set_is_watering(valve: Valve, value: bool) -> None:\n valve.is_watering = value", "def toggle_active(self):\n res = super().toggle_active()\n Product = self.env['lunch.product'].with_context(active_test=False)\n all_products = Product.search([('supplier_id', 'in', self.ids)])\n all_products._sync_active_from_related()\n return res", "def toggled(self, b):\n self.group.setVisible(b)\n\n for line in (self.rLine, self.gLine, self.bLine):\n line.setVisible(b)\n\n self.parent.image.timeLine.setVisible(not b)", "def toggle_selector(event): \n if event.key in ['Q', 'q'] and span.visible:\n print '**SpanSelector deactivated.**'\n span.visible = False\n if event.key in ['A', 'a'] and not span.visible:\n print '**SpanSelector activated.**'\n span.visible = True", "def toggle_button(self, button):\n if button.get_sensitive():\n button.set_sensitive(False)\n else:\n button.set_sensitive(True)", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def strike_on(self):\n self._set_print_mode(self.STRIKE_MASK)" ]
[ "0.6208926", "0.6050148", "0.5986689", "0.5931181", "0.5903119", "0.5903119", "0.5695317", "0.56854576", "0.56528676", "0.56528676", "0.56507105", "0.5585108", "0.5578206", "0.557582", "0.5550592", "0.55118", "0.5508104", "0.5504786", "0.5502983", "0.55022866", "0.54935396", "0.54821044", "0.54805285", "0.5471335", "0.54580235", "0.5436714", "0.5390373", "0.5367722", "0.535418", "0.53518206", "0.53432053", "0.53397715", "0.53316164", "0.53305703", "0.5328467", "0.53231966", "0.5313973", "0.5295781", "0.5277373", "0.52755773", "0.5249746", "0.5249145", "0.5248158", "0.524754", "0.52248526", "0.52162176", "0.520146", "0.51856697", "0.51537114", "0.51407754", "0.5135376", "0.5118154", "0.5113756", "0.511124", "0.5111119", "0.5105518", "0.50987095", "0.50960284", "0.5093861", "0.50897944", "0.5086341", "0.5075279", "0.50743586", "0.50700885", "0.50639105", "0.5060077", "0.505895", "0.5056184", "0.504093", "0.5032199", "0.5023562", "0.5015774", "0.5009642", "0.49985173", "0.4994197", "0.49910513", "0.4980286", "0.49529025", "0.49504182", "0.49488834", "0.49443448", "0.49296805", "0.4928296", "0.49227414", "0.49222025", "0.49054846", "0.49046272", "0.49016595", "0.49012443", "0.48871908", "0.4887065", "0.48854542", "0.48839405", "0.48834008", "0.48825008", "0.48757377", "0.4875484", "0.48753253", "0.48753253", "0.48714444" ]
0.69948447
0
Increase the interval size.
def widen(self):
    t, h = self.time, self.half_duration
    h *= self.scaling_coeff_x
    self.set_interval((t - h, t + h))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSize(self):\n self.currentsize = len(self.intervals)\n try:\n assert self.currentsize <= self.size\n except AssertionError:\n print(self.currentsize)\n print(self.size)\n sys.exit('[', sys.arg[0] + ']: Size problem')", "def resize(self, newIntervals):\n self.intervals = []\n for i in newIntervals:\n self.addInterval(i)\n self.size = len(self.intervals)", "def inc_size(self):\r\n self.__length += 1", "def grow(self, delta_width, delta_height):\r\n self.width += delta_width\r\n self.height += delta_height", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height", "def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height", "def changeSize(self, value):\n self.layer.brush_size = value", "def update_step_size(self):\n self.setSingleStep(10 ** self.step_exponent)\n self.update_format_string()", "def updateSize(self, *args):\n return None", "def set_size(self, new_bunch_size):\n self.bunch_size = new_bunch_size", "def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]", "def change_size(self, action):\r\n leftPos, topPos, rightPos, bottomPos = self.canvas.coords(self.ball)\r\n if action == \"larger\":\r\n if leftPos > 0 and rightPos < 400 and topPos > 0 and bottomPos < 400:\r\n self.size += 1\r\n self.canvas.coords(self.ball, leftPos - 1, topPos - 1, rightPos + 1, bottomPos + 1)\r\n else:\r\n if self.size > 1:\r\n self.size -= 1\r\n self.canvas.coords(self.ball, leftPos + 1, topPos + 1, rightPos - 1, bottomPos - 1)", "def extend(self, s):\n newTimeExtent = int(s + 1 - 1e-10)\n if newTimeExtent > self.timeExtent:\n for s in xrange(self.timeExtent , newTimeExtent):\n self.drawSecond(s)\n self.timeExtent = newTimeExtent\n self.resize(newTimeExtent * self.scale, self.height)\n for f in self.resizeCallbacks:\n f()", "def resize(self, old, new):", "def on_size(self, event):\n size = self.GetSize()\n self.SetSize(size)\n gauge_pos, gauge_size = self.get_gauge_dimensions()\n self.gauge.SetSize(gauge_size)\n event.Skip()\n self.Update()", "def grow(self):\n self.mass *= 1.1", "def change_wafer_size(self, size):\n if size not in self.SIZES:\n raise ValueError(\"The wafer must be a valid size: {0}\".format(self.SIZES))\n \n self.size = size * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def _component_size_changed(self):\n self._size_changed()", "def plot_insertsize():", "def increment(self, length):\r\n self.progress_bar.update(length)", "def update_size(self,\r\n entrylist=None,\r\n newsize=60):\r\n if entrylist is None:\r\n entrylist = []\r\n\r\n for i in entrylist:\r\n\r\n if str(i) in self.indexes():\r\n\r\n tempnote = self.get_note(i).change_size(newsize)\r\n self.add_note(i,note=tempnote)", "def size(self, value):\n self.width = value", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def setsize(self, size):\n self.__size = size", "def _resize_list(self, new_size: int):\n for _ in range((new_size + 1) - len(self)):\n self.append(0)", "def size(self, size):\n self._size = size", "def _resize_interval(start, end, size):\n center = int(0.5 * (start + end))\n half_size = int(0.5 * size)\n left = center - half_size\n right = left + size\n return left, right", "def resized(self,size=1.,tol=1.e-5):\n s = self.sizes()\n s[s<tol*s.max()] = 
size\n return self.scale(size/s)", "def appendsize(self, numents):\n self._numents += numents", "def change_size(self, width, height):\n oldw = float(self.size().width())\n oldh = float(self.size().height())\n\n if self.indicator_type == 'session':\n neww = int(oldw + oldw * (width / 100.0))\n if neww > 0:\n self.setFixedSize(neww, oldh)\n elif self.indicator_type == 'unit':\n newh = int(oldh + oldh * (height / 100.0))\n if newh > 0:\n self.setFixedSize(oldw, newh)\n\n self.set_font_size()", "def fixStepSize(self, fixit):\n self.step_size_fixed = fixit", "def appendsize(self, numents):\n pass", "def resize(self):\n pass", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def _grow_main(self, amt):\n self.ratio += amt\n self.ratio = min(self.max_ratio, self.ratio)", "def change_size(self, new_size):\n if not Circle.available_circles.has_key(new_size - 1):\n logging.debug('Circle Cache miss: ' + str(new_size))\n Circle.available_circles[new_size - 1] = AACircle(new_size, color=(0, 0, 0), antialias=2)\n self.image = Circle.available_circles[new_size - 1]\n self.size = new_size", "def resize_view_axis(interval, newsize, image_length):\n if newsize < image_length - interval[0]:\n # Window can be expanded without any shift of image or whitespace\n interval[1] = interval[0] + newsize\n elif newsize < image_length:\n # Window can be expanded without whitespace by moving image\n interval[1] = int(image_length)\n interval[0] = interval[1] - newsize\n else:\n # Set maximum along this length\n interval[0] = 0\n interval[1] = int(image_length)", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def set_node_size(self, new_node_size: float):\n self.node_size = new_node_size", "def grow(self):\n change = self.hsv[_RATE]/15\n self.hsv[_AMOUNT] = min(1, self.hsv[_AMOUNT] + change)\n if self.hsv[2] == 1:\n self.change_list.remove(self)", "def update_size(self, dt):\n if self.cursor_on_button:\n self.size = min(self.SIZE_MAX, self.size + self.SCALING_VEL * dt)\n else:\n self.size = max(self.SIZE_MIN, self.size - self.SCALING_VEL * dt)\n self.surface = pg.transform.scale(self.image, (round(self.size), round(self.size)))", "def _minimum_size_changed(self):\n self.update_minimum_size()", "def set_size(self, size):\n self.dtSize = size", "def resize(self, size):\n assert size >= 0 and size <= self._cap, \\\n \"invalid size[%d] for resize\" % (size)\n\n self._size = size", "def setPointSize(self, size):\n for point in self.points:\n point.size = size", "def resize(self):\n e = self.e\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n e = self.er\n self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)\n self.size *= self.dsize", "def _set_markers_size(self, markers_size, key):\n self.markers_size[key] = markers_size\n self._update_markers(self.markers, key)", "def _assign_sizes(self):", "def resize(self, size):\n assert numpy.issubdtype(type(size), numpy.integer), 
\\\n \"Bug: 'size' must be int, not {}\".format( type(size) )\n\n if self._resizing:\n return\n if self.level == 0:\n raise RuntimeError(\"Can't resize a level-0 slot!\")\n\n oldsize = len(self)\n if size == oldsize:\n return\n\n self._resizing = True\n if self.operator is not None:\n self.logger.debug(\"Resizing slot {} of operator {} to size {}\".format(\n self.name, self.operator.name, size))\n\n # call before resize callbacks\n self._sig_resize(self, oldsize, size)\n\n new_subslots = []\n while size > len(self):\n self.insertSlot(len(self), len(self)+1, propagate=False)\n new_subslots.append( len(self) - 1 )\n\n while size < len(self):\n self.removeSlot(len(self)-1, len(self)-1, propagate=False)\n\n # propagate size change downward\n for c in self.partners:\n if c.level == self.level:\n c.resize(size)\n\n # propagate size change upward\n if (self.partner and len(self.partner) < size and self.partner.level == self.level):\n self.partner.resize(size)\n\n # connect newly added slots\n # We must connect these subslots here, AFTER all resizes have propagated up and down through the graph.\n # Otherwise, our new subslots may lose downstream partners (happens in \"diamond\" shaped graphs.)\n for i in new_subslots:\n self._connectSubSlot(i)\n\n # call after resize callbacks\n self._sig_resized(self, oldsize, size)\n\n self._resizing = False", "def cb_size(self, event):\n if not self.size_timer.IsRunning():\n self.size_timer.StartOnce(2000)\n event.Skip(True)", "def size(self, size: int):\n\n self._size = size", "def set_width(self, w):\n if np.isscalar(w):\n w = np.ones(self._n_parameters) * w\n else:\n w = pints.vector(w)\n if len(w) != self._n_parameters:\n raise ValueError(\n 'Width for interval expansion must a scalar or an array'\n ' of length n_parameters.')\n if np.any(w < 0):\n raise ValueError('Width for interval expansion must be positive.')\n self._w = w", "def update_size(self):\n return 3 + self.memory_unit_size", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def brush_size(self, new_value: int) -> None:\n # get the brush size context and set its value\n with self._brush_size.get_lock():\n # if the brush size is different, queue a cursor update\n if self._brush_size.value != new_value:\n self.is_cursor_change = True\n # set the brush size to the new value\n self._brush_size.value = new_value", "def resize(self, size):\n self.instance.resize_volume(size)\n self.size = size", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def _on_brush_size_change(self, event=None):\n with self.layer.events.brush_size.blocker():\n value = self.layer.brush_size\n value = np.clip(int(value), 1, 40)\n self.brushSizeSlider.setValue(value)", "def sn_size(self, val):\n if isinstance(val, int) and val >= 1:\n if val != self._faux._sn_size:\n self._faux._sn_size = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def verticalScaleIncrease(self):\n scaleFac = float(self.qline4.text())\n self.qline4.setText(str(scaleFac * 2))\n self.model.refreshScreen()", "def pensize(self, width):\n self._penwidth = width", "def set_size(self, w, h):\n\t\tpass", "def grow(self, size):\n # size of the instance\n if size is not None and (type(size) == int or size.isdigit()):\n size = { 
'size': int(size) }\n else:\n # TODO : proper error\n raise Exception()\n\n if self.size > size['size']:\n # TODO : proper error\n raise Exception((\"This instance has a data storage volume of %d GB and cannot \" + \\\n \"be shrunk. (Tried to specify %d GB as new size.)\") % (self.size, size['size']))\n\n self.client.post(self.path+'/action', { 'resize': {'volume': size} })\n return True", "def scale_in(self, count):\n pass", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "def on_body_width_add(self, val):\n val = max(0, int(val))\n self.mdl.cmp.s_add_width = val\n self.refresh_svg_canvas()", "def _increase_size(self) -> None:\n keys_vals_to_move = [item for item in self.HashMap if item]\n self.length = 0\n self.capacity = self.capacity * 2\n self.HashMap = [None] * self.capacity\n for item in keys_vals_to_move:\n while len(item) > 0:\n self.add(item[0], item[1])\n item.pop(0)\n item.pop(0)", "def _changed_size(self, **kw):\n\t\tself._clear_matrix()\n\t\t\n\t\tself._recalc_adjustments()\n\t\t\n\t\tif self.flags() & gtk.REALIZED:\n\t\t\tif kw.get('resize', True): self.queue_resize()\n\t\t\tif kw.get('draw', True): self.queue_draw()", "def calc_size(self):\r\n pass", "def update_layout(self, canvas_origin, canvas_size, *, immediate=False):\n canvas_size = Geometry.IntSize.make(canvas_size)\n canvas_size = Geometry.IntSize(height=self.__calculate_layout_height(), width=canvas_size.width)\n super().update_layout(canvas_origin, canvas_size, immediate=immediate)", "def clRelu(self, size):", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def fits(self, current_count, current_size, max_size, new_span):\n raise NotImplementedError()", "def add_size_fig(cls, quad, obj_temp):\n\n\t\ttype = abs(quad.result) // 1000 # integer division\n\t\tif not cls.fig_can_add_size(type):\n\t\t\tError.wrong_attribute_for_figure_execution(type, \"size\")\n\n\t\tsize = cls.get_address_value(quad.right_operand)\n\t\tobj_temp.setSize(size)", "def size(self, val):\n self.width = val\n self.height = val", "def increment_size(self, amount: int) -> None:\n if amount == 0:\n return\n\n # Do this first, in case it fails\n self.tree.quota.increment(amount)\n\n ancestor_pks = [folder.pk for folder in self.ancestors]\n Folder.objects.filter(pk__in=ancestor_pks).update(size=(models.F('size') + amount))\n\n # Update local model with the new size value\n # Also, discard potential local references to the parent model, as its size is also invalid\n self.refresh_from_db(fields=['size', 'parent'])", "def modifyHeapSizeProperties(self):\n pass", "def set_dayu_size(self, value):\n self._dayu_size = value\n self.tool_button_group.update_size(self._dayu_size)\n self.style().polish(self)", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def update_dimensions(self):\n self.chunk = numpy.full((self.current_height, self.current_width), fill_value=Constants.VALUE_INITIALIZER,\n dtype=\"int16\")", "def resize(self, inc=True, require_val=False, val=None):\n if require_val: # Set to value\n if not val:\n val = self.default_width\n try:\n val = int(val)\n except:\n message = \"Library width must be an integer\"\n self.vimiv.statusbar.err_message(message)\n return\n self.width = val\n 
else: # Grow/shrink by value\n if not val:\n val = 20\n try:\n val = int(val)\n except:\n message = \"Library width must be an integer\"\n self.vimiv.statusbar.err_message(message)\n return\n if inc:\n self.width += val\n else:\n self.width -= val\n # Set some reasonable limits to the library size\n if self.width > self.vimiv.winsize[0] - 200:\n self.width = self.vimiv.winsize[0] - 200\n elif self.width < 100:\n self.width = 100\n self.scrollable_treeview.set_size_request(self.width, 10)\n # Rezoom image\n if not self.vimiv.image.user_zoomed and self.vimiv.paths:\n self.vimiv.image.zoom_to(0)", "def resize(self, width: int, height: int):\n pass", "def resize(self, size):\n self.widget.resize(*size)", "def change_width(self, value):\n self.layer.edge_width = value\n self.widthSpinBox.clearFocus()\n self.setFocus()", "def _grow_secondary(self, amt):\n self._resize_secondary(amt)", "def onSetToFourthSize(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tzf = 1\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\t\n\t\t\tif self.fourthResampleZ.GetValue():\n\t\t\t\tzf = 0.25\n\t\t\tself.currSize = int(0.25 * x), int(0.25 * y), int(zf * z) \n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def onSize(self, event): \n\t\tw, h = self.GetClientSizeTuple()\n\t\tself.tree.SetDimensions(0, 0, w, h)", "def resize(self, size):\n if len(size) != len(self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"length of resize shape is incorrect.\")\n if not np.all(size >= self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"resize shape is too small.\")\n kernel = self._frequency_2_real()\n kernel_pad = self._zero_pad(kernel, size)\n self._Fkernel = self._real_2_frequency(kernel_pad)\n self.basis._axes_shape = kernel_pad.shape[1:-1]", "def numBinsChanged(self, val):\n self.numBins = val", "def setInterval(self, x):\n self._base_interval = x", "def inc_pc(self, size):\n current_pc = self.get_register('PC')\n self.set_pc(current_pc + size)", "def increase_window(self):\n # self.sp_cwnd += MSS\n pass", "def update(self, loss, size):\n self.loss += loss.item() * size\n self.cnt += 1", "def onSetToHalfSize(self, evt):\n\t\tself.halfResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\tzf = 1\n\t\t\t\n\t\t\tif self.halfResampleZ.GetValue():\n\t\t\t\tzf = 0.5\n\t\t\tself.currSize = int(0.5 * x), int(0.5 * y), int(zf * z)\n\t\tself.fourthResampleZ.Enable(0)\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def set_dayu_size(self, value):\n self._dayu_size = value\n self.style().polish(self)" ]
[ "0.7556731", "0.70139605", "0.6836615", "0.66695124", "0.6534693", "0.6534693", "0.6534693", "0.646582", "0.6381011", "0.63793176", "0.6339734", "0.628063", "0.62720335", "0.626589", "0.61986613", "0.61749625", "0.617216", "0.6102264", "0.60876834", "0.6084448", "0.6077518", "0.6053831", "0.6046496", "0.6037302", "0.6034817", "0.6008417", "0.60065264", "0.600196", "0.5997645", "0.59857386", "0.5965961", "0.5938035", "0.5936141", "0.59334975", "0.5909108", "0.5908247", "0.5908247", "0.5908247", "0.5908247", "0.58972794", "0.58785987", "0.58589196", "0.5855403", "0.58439755", "0.5840567", "0.5836494", "0.5828909", "0.5814595", "0.58107656", "0.57980675", "0.5794138", "0.5793416", "0.5788571", "0.5767628", "0.5762267", "0.57622623", "0.5754825", "0.5753321", "0.5745703", "0.5736005", "0.57139444", "0.57133", "0.5695697", "0.5694493", "0.5688398", "0.56713706", "0.5661023", "0.5656774", "0.56525743", "0.56498694", "0.5643894", "0.5636608", "0.56221795", "0.56220466", "0.5615358", "0.5614985", "0.5608194", "0.5587394", "0.556191", "0.5561416", "0.55606323", "0.5560296", "0.5559448", "0.5553636", "0.55443", "0.5543331", "0.5538197", "0.5533438", "0.55314195", "0.55281126", "0.5521608", "0.5510669", "0.5506344", "0.54907095", "0.54906654", "0.548071", "0.54805255", "0.5476492", "0.54735464", "0.54522425" ]
0.6189907
15
Decrease the interval size.
def narrow(self):
    t, h = self.time, self.half_duration
    h /= self.scaling_coeff_x
    self.set_interval((t - h, t + h))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dec_size(self):\r\n self.__length -= 1", "def updateSize(self):\n self.currentsize = len(self.intervals)\n try:\n assert self.currentsize <= self.size\n except AssertionError:\n print(self.currentsize)\n print(self.size)\n sys.exit('[', sys.arg[0] + ']: Size problem')", "def shrink(self):\r\n # first we need to decide how to shrink\r\n choice = rand.choice([0, 1, 2, 3])\r\n # now do it\r\n if ((choice == 0) and (self.xspan > mparam.min_s_xspan)):\r\n # delete first row\r\n self.cells = np.delete(self.cells, (0), axis=0) \r\n elif ((choice == 1) and (self.xspan > mparam.min_s_xspan)):\r\n # delete last row\r\n self.cells = np.delete(self.cells, (-1), axis=0) \r\n elif ((choice == 2) and (self.yspan > mparam.min_s_yspan)):\r\n # delete first column\r\n self.cells = np.delete(self.cells, (0), axis=1) \r\n elif ((choice == 3) and (self.yspan > mparam.min_s_yspan)):\r\n # delete last column\r\n self.cells = np.delete(self.cells, (-1), axis=1) \r\n # now let's update xspan and yspan to the new size\r\n self.xspan = self.cells.shape[0]\r\n self.yspan = self.cells.shape[1]\r\n #\r", "def resize(self, newIntervals):\n self.intervals = []\n for i in newIntervals:\n self.addInterval(i)\n self.size = len(self.intervals)", "def verticalScaleDecrease(self):\n scaleFac = float(self.qline4.text())\n self.qline4.setText(str(scaleFac / 2))\n self.model.refreshScreen()", "def _shrink(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def shrink(self):\n self.mass *= 0.8", "def shrink(self):\r\n # Shrink the size by half but not below the default capacity\r\n # and remove those garbage cells from the underlying list\r\n newSize = max(self._capacity, len(self) // 2)\r\n for count in range(len(self) - newSize):\r\n self._items.pop()", "def widen(self):\n t, h = self.time, self.half_duration\n h *= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def resize(self, old, new):", "def change_size(self, action):\r\n leftPos, topPos, rightPos, bottomPos = self.canvas.coords(self.ball)\r\n if action == \"larger\":\r\n if leftPos > 0 and rightPos < 400 and topPos > 0 and bottomPos < 400:\r\n self.size += 1\r\n self.canvas.coords(self.ball, leftPos - 1, topPos - 1, rightPos + 1, bottomPos + 1)\r\n else:\r\n if self.size > 1:\r\n self.size -= 1\r\n self.canvas.coords(self.ball, leftPos + 1, topPos + 1, rightPos - 1, bottomPos - 1)", "def adaptive_example_deletion(self):\n self.example_wise_shrink(Length)", "def changeSize(self, value):\n self.layer.brush_size = value", "def unsetSize(self):\n return _libsbml.Compartment_unsetSize(self)", "def resize(self):\n pass", "def _shrink_main(self, amt):\n self.ratio -= amt\n self.ratio = max(self.min_ratio, self.ratio)", "def shrink(self):\r\n\r\n old = self._data\r\n self._capacity = self._capacity // 2\r\n self._data = [0] * self._capacity\r\n\r\n if len(old) >= 1:\r\n for i in range(self._size):\r\n\r\n self._data[i] = old[i]", "def decrease_newly(self, quantitiy):\n self._newly = self._newly - quantitiy", "def on_size(self, event):\n size = self.GetSize()\n self.SetSize(size)\n gauge_pos, gauge_size = self.get_gauge_dimensions()\n self.gauge.SetSize(gauge_size)\n event.Skip()\n self.Update()", "def resized(self,size=1.,tol=1.e-5):\n s = self.sizes()\n s[s<tol*s.max()] = size\n return self.scale(size/s)", "def shrink(self):\n for i in range(1, len(self.vertices)):\n self.vertices[i] = self.vertices[0] + self.sigma*(self.vertices[i]-self.vertices[0])", "def truncate(self, size=None):\n raise NotImplementedError(\"truncate() not 
supported\")", "def grow(self):\n change = self.hsv[_RATE]/15\n self.hsv[_AMOUNT] = min(1, self.hsv[_AMOUNT] + change)\n if self.hsv[2] == 1:\n self.change_list.remove(self)", "def deal_size(self, deal_size):\n\n self._deal_size = deal_size", "def resize_view_axis(interval, newsize, image_length):\n if newsize < image_length - interval[0]:\n # Window can be expanded without any shift of image or whitespace\n interval[1] = interval[0] + newsize\n elif newsize < image_length:\n # Window can be expanded without whitespace by moving image\n interval[1] = int(image_length)\n interval[0] = interval[1] - newsize\n else:\n # Set maximum along this length\n interval[0] = 0\n interval[1] = int(image_length)", "def _resize_interval(start, end, size):\n center = int(0.5 * (start + end))\n half_size = int(0.5 * size)\n left = center - half_size\n right = left + size\n return left, right", "def _shrink_arr(self):\n self._resize_arr(self._capacity // self._growth_factor)", "def resize(self):\n e = self.e\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n e = self.er\n self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)\n self.size *= self.dsize", "def _resize(self, cap): # nonpublic utitity\n B = self._make_array(cap) # new (bigger) array\n for k in range(self._size): # for each existing value\n B[k] = self._Array[k]\n self._Array = B # use the bigger array\n self._capacity = cap", "def shrink(value):\n return (1 + value) / 2", "def _shrink_secondary(self, amt):\n self._resize_secondary(-amt)", "def resize(self, size):\n assert size >= 0 and size <= self._cap, \\\n \"invalid size[%d] for resize\" % (size)\n\n self._size = size", "def shrink_interval(shrinking_factor: float, interval: Interval, shrinking_anchor: float) -> Interval:\n neighborhood = shrinking_factor * (interval[1] - interval[0])\n return shrinking_anchor - neighborhood / 2, shrinking_anchor + neighborhood / 2", "def remove_sizegroup(self, sizegroup):\n self.sizegroups.remove(sizegroup)\n self.emit('remove-sizegroup', sizegroup)", "def decrease(self):\n self.score -= self.score", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def __exit__(self, type, value, traceback):\n plt.rcParams[\"font.size\"] = self.old_size", "def change_size(self, new_size):\n if not Circle.available_circles.has_key(new_size - 1):\n logging.debug('Circle Cache miss: ' + str(new_size))\n Circle.available_circles[new_size - 1] = AACircle(new_size, color=(0, 0, 0), antialias=2)\n self.image = Circle.available_circles[new_size - 1]\n self.size = new_size", "def fixStepSize(self, fixit):\n self.step_size_fixed = fixit", "def updateSize(self, *args):\n return None", "def update_size(self, dt):\n if self.cursor_on_button:\n self.size = min(self.SIZE_MAX, self.size + self.SCALING_VEL * dt)\n else:\n self.size = max(self.SIZE_MIN, self.size - self.SCALING_VEL * dt)\n self.surface = pg.transform.scale(self.image, (round(self.size), round(self.size)))", "def resize(self, width: int, height: int):\n pass", "def decrease_font_size(window, delta=2):\n\n increase_font_size(window, delta=-2)\n \n return", "def shrink(self):\n old = self.data # keep track of existing list\n self.capacity = self.capacity // 2\n self.data = [None] * (self.capacity) # allocate list with new capacity\n walk = self.head\n for k in range(self.size): # only consider existing elements\n self.data[k] = old[walk] # intentionally shift indices\n walk = (1 + walk) % len(old) # use old size as modulus\n self.head 
= 0 # front has been realigned\n self.tail = self.size", "def resize(self, size):\n assert numpy.issubdtype(type(size), numpy.integer), \\\n \"Bug: 'size' must be int, not {}\".format( type(size) )\n\n if self._resizing:\n return\n if self.level == 0:\n raise RuntimeError(\"Can't resize a level-0 slot!\")\n\n oldsize = len(self)\n if size == oldsize:\n return\n\n self._resizing = True\n if self.operator is not None:\n self.logger.debug(\"Resizing slot {} of operator {} to size {}\".format(\n self.name, self.operator.name, size))\n\n # call before resize callbacks\n self._sig_resize(self, oldsize, size)\n\n new_subslots = []\n while size > len(self):\n self.insertSlot(len(self), len(self)+1, propagate=False)\n new_subslots.append( len(self) - 1 )\n\n while size < len(self):\n self.removeSlot(len(self)-1, len(self)-1, propagate=False)\n\n # propagate size change downward\n for c in self.partners:\n if c.level == self.level:\n c.resize(size)\n\n # propagate size change upward\n if (self.partner and len(self.partner) < size and self.partner.level == self.level):\n self.partner.resize(size)\n\n # connect newly added slots\n # We must connect these subslots here, AFTER all resizes have propagated up and down through the graph.\n # Otherwise, our new subslots may lose downstream partners (happens in \"diamond\" shaped graphs.)\n for i in new_subslots:\n self._connectSubSlot(i)\n\n # call after resize callbacks\n self._sig_resized(self, oldsize, size)\n\n self._resizing = False", "def _resize(self, cap):\n old = self._data\n self._data = [None] * cap\n walk = self._front\n for i in range(self._size):\n self._data[i] = old[walk]\n walk = (walk + 1) % len(old)\n self._front = 0", "def change_wafer_size(self, size):\n if size not in self.SIZES:\n raise ValueError(\"The wafer must be a valid size: {0}\".format(self.SIZES))\n \n self.size = size * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def resize(self, size):\n self.instance.resize_volume(size)\n self.size = size", "def _on_brush_size_change(self, event=None):\n with self.layer.events.brush_size.blocker():\n value = self.layer.brush_size\n value = np.clip(int(value), 1, 40)\n self.brushSizeSlider.setValue(value)", "def _zoomCamera(self, sizeChange):\n self.camSize -= sizeChange", "def del_max(self):\r\n maxVal = self.find_max()\r\n if maxVal is not None:\r\n self.items[1] = self.items[self.size]\r\n self.items[self.size] = None\r\n self.size -= 1\r\n self.perc_down(1)", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def resize(self, size):\n if len(size) != len(self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"length of resize shape is incorrect.\")\n if not np.all(size >= self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"resize shape is too small.\")\n kernel = self._frequency_2_real()\n kernel_pad = self._zero_pad(kernel, size)\n self._Fkernel = self._real_2_frequency(kernel_pad)\n self.basis._axes_shape = kernel_pad.shape[1:-1]", "def deside_figure_size(self):\n # HEIGHT >\n self.figure_height = 
FIGURE_HEIGHT\n\n av = self.canvas.height() / FIGURE_HEIGHT\n left_over = self.canvas.height() - (FIGURE_HEIGHT * math.floor(av))\n\n if left_over > av:\n self.figure_height += math.floor(left_over / math.floor(av))\n self.figure_height = int(self.figure_height)\n\n self.figure_height -= 3 # gives geometry.height() breathing room\n\n # WIDTH >\n self.figure_width = self.figure_height * 0.6\n av = math.floor(self.canvas.width() / self.figure_width)\n left_over = self.canvas.width() - (self.figure_width * math.floor(av))\n if left_over > av:\n self.figure_width += math.floor(left_over / math.floor(av))\n self.figure_width = int(self.figure_width)\n\n self.figure_width -= 3 # gives geometry.width() breathing room", "def set_size(self, new_bunch_size):\n self.bunch_size = new_bunch_size", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def truncate(self):\n for i in range(self.dimension):\n self.components[i] = int(self.components[i])", "def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")", "def _grow_secondary(self, amt):\n self._resize_secondary(amt)", "def rescale(self, factor):\n scaled_size = (int(self.width * factor), int(self.height * factor))\n return self.resize(scaled_size)", "def _changed_size(self, **kw):\n\t\tself._clear_matrix()\n\t\t\n\t\tself._recalc_adjustments()\n\t\t\n\t\tif self.flags() & gtk.REALIZED:\n\t\t\tif kw.get('resize', True): self.queue_resize()\n\t\t\tif kw.get('draw', True): self.queue_draw()", "def onSetToHalfSize(self, evt):\n\t\tself.halfResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\tzf = 1\n\t\t\t\n\t\t\tif self.halfResampleZ.GetValue():\n\t\t\t\tzf = 0.5\n\t\t\tself.currSize = int(0.5 * x), int(0.5 * y), int(zf * z)\n\t\tself.fourthResampleZ.Enable(0)\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def clRelu(self, size):", "def decrement(self):\n self.data[self.pointer] -= 1\n self.data[self.pointer] %= 256", "def scale(self):", "def restore_peak_size(self):\n if self.left_peak_size > 0 and self.peak_size < self.size:\n # Account for the left_peak_size which might be less than peak_size\n diff = min(self.size - self.peak_size, self.left_peak_size)\n self.peak_size += diff\n self.left_peak_size -= diff", "def _shrink(self, cidx, amt):\n # get max resizable amount\n margin = self.get_shrink_margin(cidx)\n if amt > margin: # too much\n self.relative_sizes[cidx] -= self._get_relative_size_from_absolute(margin)\n return amt - margin\n else:\n self.relative_sizes[cidx] -= self._get_relative_size_from_absolute(amt)\n return 0", "def _shrink(self):\n self.capacity = round(self.capacity / self.factor)\n temp = [None] * self.capacity\n for i in range(self.capacity):\n temp[i] = self.store[i]\n self.store = temp", "def change_size(self, width, 
height):\n oldw = float(self.size().width())\n oldh = float(self.size().height())\n\n if self.indicator_type == 'session':\n neww = int(oldw + oldw * (width / 100.0))\n if neww > 0:\n self.setFixedSize(neww, oldh)\n elif self.indicator_type == 'unit':\n newh = int(oldh + oldh * (height / 100.0))\n if newh > 0:\n self.setFixedSize(oldw, newh)\n\n self.set_font_size()", "def truncate(self):\n\n self.population = self.population[:self.max_number_trees]", "def _resize_list(self, new_size: int):\n for _ in range((new_size + 1) - len(self)):\n self.append(0)", "def inc_size(self):\r\n self.__length += 1", "def brush_size(self, new_value: int) -> None:\n # get the brush size context and set its value\n with self._brush_size.get_lock():\n # if the brush size is different, queue a cursor update\n if self._brush_size.value != new_value:\n self.is_cursor_change = True\n # set the brush size to the new value\n self._brush_size.value = new_value", "def subtract(self, interval):\n first, last = self._intersect(interval)\n if last - first == 1:\n new_intervals = self.intervals[first].subtract(interval)\n del self.intervals[first]\n self.intervals.update(new_intervals)\n elif last - first != 0:\n if self.intervals[first].lower != interval.lower:\n self.intervals[first].upper = interval.lower\n if self.intervals[first].length > 0:\n first = min(first + 1, len(self.intervals))\n if self.intervals[last - 1].upper != interval.upper:\n self.intervals[last - 1].lower = interval.upper\n if self.intervals[last - 1].length > 0:\n last = max(last - 1, 0)\n del self.intervals[first:last]", "def cb_size(self, event):\n if not self.size_timer.IsRunning():\n self.size_timer.StartOnce(2000)\n event.Skip(True)", "def AdjustFontSize(self):\r\n self.sh.Columns(\"A\").Delete()", "def fix_size(value):\n try:\n obj_size = int(float(value) * wx.GetApp().settings.size_coeff)\n except AttributeError:\n obj_size = int(value)\n return obj_size", "def re_size_unit(self):\n detach = self.army.detachments[self._get_user_detachment()]\n battlefield_role = self._get_user_battlefield_role()\n unit = self._get_user_unit(detach, battlefield_role)\n size = self._get_user_size(unit)\n unit.re_size(*size)\n return", "def resize_memory(self, new_size=None):\n\n self.capacity = new_size\n\n # self.push() takes care of decreasing the memory.\n # # Oldest experiences are discarded. 
For Ever.\n # # TODO: Check for a more efficient way of cleaning the memory.\n # while len(self.memory) > self.capacity:\n # _ = self.pop()", "def resize(self, size):\n self.widget.resize(*size)", "def removeFromBack(self):\n if self.size <= 0:\n raise IndexError(\"the array is empty\")\n self._shrinkCheck()\n super().removeFromBack()", "def _component_size_changed(self):\n self._size_changed()", "def grow(self, delta_width, delta_height):\r\n self.width += delta_width\r\n self.height += delta_height", "def grow(self):\n self.mass *= 1.1", "def maxsize(self, maxsize):\n self.shape = (int(maxsize), ) + self.shape[1:]\n self.clear()", "def removeScale(*args):\n return _libsbml.Unit_removeScale(*args)", "def plot_insertsize():", "def remove(self, pos, length):\n if pos in self.removals:\n self.removals[pos] += length\n else:\n self.removals[pos] = length", "def modifyHeapSizeProperties(self):\n pass", "def rangeSliderSize(self):\n return float(self.width() - self.bar_width)", "def updateIntervals(self):\n for job in self.scheduler.get_jobs():\n job.remove()\n self.addJobs()", "def _resize(self, size: Tuple[int, int], axis: int = None) -> None:\n\t\tif self.name == \"\":\n\t\t\tself.ds._file['/matrix'].resize(size, axis)\n\t\telse:\n\t\t\tself.ds._file['/layers/' + self.name].resize(size, axis)", "def setsize(self, size):\n self.__size = size", "def get_range(self):\n if self.size == 75:\n return 260\n elif self.size == 100:\n return 315", "def unsetScale(self):\n return _libsbml.Unit_unsetScale(self)", "def shrink(self):\n if self.focused == 0:\n self._shrink_main(self.change_ratio)\n elif len(self.clients) == 2:\n self._shrink_solo_secondary(self.change_ratio)\n else:\n self._shrink_secondary(self.change_size)\n self.group.layout_all()", "def onSetToFourthSize(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tzf = 1\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\t\n\t\t\tif self.fourthResampleZ.GetValue():\n\t\t\t\tzf = 0.25\n\t\t\tself.currSize = int(0.25 * x), int(0.25 * y), int(zf * z) \n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)" ]
[ "0.7060343", "0.68583477", "0.64114755", "0.6321924", "0.6098751", "0.607889", "0.6018984", "0.599691", "0.5946714", "0.59390116", "0.5916328", "0.5848298", "0.58161247", "0.57695884", "0.5745519", "0.5734558", "0.5694274", "0.5650514", "0.563735", "0.56304765", "0.56119967", "0.5606902", "0.55642813", "0.5544015", "0.5541415", "0.5516267", "0.5513826", "0.54785186", "0.546443", "0.54294324", "0.5428425", "0.54265016", "0.5425601", "0.5384976", "0.53700006", "0.53510565", "0.53409076", "0.5329259", "0.53288084", "0.53285754", "0.53229254", "0.5299994", "0.52939135", "0.52885145", "0.52844983", "0.5283153", "0.5279463", "0.5267605", "0.52510625", "0.5250337", "0.5248996", "0.52382755", "0.52346385", "0.52305955", "0.5228702", "0.5216856", "0.52142614", "0.5211737", "0.5206362", "0.52034235", "0.5181921", "0.5178484", "0.51768655", "0.5169957", "0.5153287", "0.5145713", "0.514286", "0.51325893", "0.5131727", "0.5127969", "0.51234776", "0.51230156", "0.5093672", "0.5080598", "0.507275", "0.5067523", "0.50529873", "0.50428855", "0.504005", "0.50274014", "0.5022682", "0.5006664", "0.5004656", "0.49956685", "0.4994715", "0.49833038", "0.49797037", "0.49656934", "0.49652705", "0.4956661", "0.49552032", "0.4948813", "0.49448103", "0.49402332", "0.49400473", "0.4934021", "0.49337605", "0.49306288", "0.49304572", "0.49281633" ]
0.563252
19
Toggle the display of the channel ids.
def toggle_show_labels(self, checked): logger.debug("Set show labels to %s.", checked) self.do_show_labels = checked self.text_visual.toggle() self.canvas.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def display(self, channel1 = False, channel2 = False, channel3 = False, channel4 = False):\t\t\n\t\tself.scope.write(\":CHANnel1:DISPlay %s\"%bool2ONOFF(channel1))\n\t\tself.scope.write(\":CHANnel2:DISPlay %s\"%bool2ONOFF(channel2))\n\t\tself.scope.write(\":CHANnel3:DISPlay %s\"%bool2ONOFF(channel3))\n\t\tself.scope.write(\":CHANnel4:DISPlay %s\"%bool2ONOFF(channel4))", "async def _toggle(self, ctx, id: int = None):\n if id is None:\n id = ctx.channel.id\n if id in self.etrigs['channels']:\n self.etrigs['channels'].remove(id)\n else:\n self.etrigs['channels'].append(id)\n self.write_config()\n await ctx.message.add_reaction('\\u2705')", "def __redrawChannels(self):\n self.__channelWin.clear()\n all_chans = self._client.getChannels()\n all_chans.sort(key=lambda c: c.getName())\n count = min(len(all_chans), self.__channelWin.getmaxyx()[0])\n show = all_chans[:count]\n for c in show:\n cur = self._client.currentChannel() == c\n if cur:\n attr = curses.A_REVERSE\n elif c in self._client.getJoined():\n attr = curses.A_BOLD\n else:\n attr = curses.A_DIM\n if c != self._client.getNoneChannel():\n self.__channelWin.addstr(\n \"{chan}\\n\".format(chan=c.getName()),\n attr\n )", "def _channelList_changed(self):\n self.oscilloscope.visibleChannels = self.channelList", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def ToggleVisible(self, event):\n pass", "async def cmd_galtogglechannel(self, ctx, channel):\n\n # ===== GET CHANNEL ID\n try:\n ch_id = int(channel.lower().replace('<').replace('>').replace('#').strip())\n\n except ValueError:\n ctx.send_help('galtogglechannel', delete_after=Gallery.delete_after)\n \n ret_msg=\"\"\n\n # ===== REMOVE CHANNEL ID FROM LIST\n if ch_id in self.cogset['channel_ids']:\n self.cogset['channel_ids'].remove(ch_id)\n\n ret_msg = f\"<#{ch_id}> is no longer a gallery channel.\"\n\n ###=== DELETE LOGGED MESSAGES FROM DATABASE\n await self.db.execute(pgCmds.DEL_GALL_MSGS_FROM_CH, ch_id, self.cogset['guild_id'])\n\n # ===== ADD CHANNEL ID TO LIST\n else:\n self.cogset['channel_ids'] = list(set(self.cogset['channel_ids']) + {ch_id})\n ret_msg = f\"<#{ch_id}> has been made a gallery channel.\"\n\n # ===== SAVE SETTINGS \n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== END\n await ctx.channel.send(content=ret_msg, delete_after=Gallery.delete_after)\n return", "def action_togglevisible(self, ids):\n # Load all populations by the set of IDs\n target_populations = self.get_query().filter(self.model.id.in_(ids)).all()\n\n # Build a list of all the results\n results = []\n\n if len(target_populations) > 0:\n\n for population in target_populations:\n # Build a helpful message string to use for messages.\n population_str = 'population #' + str(population.id) + ' (' + population.name + ')'\n visible_status = ''\n try:\n if not population.visible:\n population.visible = True\n 
visible_status = ' as visible'\n else:\n population.visible = False\n visible_status = ' as not visible'\n except Exception as ex:\n results.append('Error changing ' + population_str + ': ' + str(ex))\n else:\n results.append('Marked ' + population_str + visible_status + '.')\n\n # Save our changes.\n self.session.commit()\n\n else:\n results.append('No populations were selected.')\n\n # Flash the results of everything\n flash(\"\\n\".join(msg for msg in results))", "def set_visible(self, value):\n for artist in self.artists:\n artist.set_visible(value)", "async def hidden(self, ctx: commands.Context, true_or_false: Optional[bool] = True):\n data = await self.config.guild(ctx.guild).pchannels()\n try:\n for key in data:\n if data[key] == ctx.author.voice.channel.id:\n ov = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n view_channel=False, connect=False\n ),\n ctx.author: discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True, manage_channels=True\n ),\n }\n if self.invoiceConfig:\n ov[\n ctx.guild.get_role(\n await self.invoiceConfig.channel(ctx.author.voice.channel).role()\n )\n ] = discord.PermissionOverwrite(\n view_channel=True, connect=True, speak=True\n )\n await ctx.author.voice.channel.edit(overwrites=ov)\n await ctx.tick()\n await ctx.send(_(\"VC has been hidden successfully.\"))\n except AttributeError:\n return await ctx.send(_(\"You need to be in a VC to do this.\"))", "def toggle(self):", "def toggle_show_frame_number(self):\n if self.show_frame_num:\n self.show_frame_num = False\n self.btn_toggle_frame_num.config(text='Show frame num')\n else:\n self.show_frame_num = True\n self.btn_toggle_frame_num.config(text='Hide frame num')", "def channels():\n channels = db.session.query(Channel).all()\n return render_template(\"admin/channels.html\", channels=channels)", "def toggle(self) -> None:", "def toggle(self) -> None:", "def set_entire_display_on(enable):\n if enable:\n send_command(0xA5)\n else:\n send_command(0xA4)", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def toggle(self) -> None:\n ...", "def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')", "def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()", "def switch_frequency_plot_channel_two(self):\n if self.plot_channel_key_booleans[1]:\n self.plot_channel_key_booleans[1] = False\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[1] = True\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[1]))", "async def togglegames(self, ctx, *, channel: discord.TextChannel = None):\n channel = channel or ctx.channel\n user = await self.ex.get_user(ctx.author.id)\n if not channel:\n log.console(f\"Could not find text channel. 
-> User: {user.id} - Moderator.togglegames\")\n msg = await self.ex.get_msg(user, \"moderator\", \"channel_not_found\")\n return await ctx.send(msg)\n\n enabled_msg = \"enabled\" if await self.ex.u_moderator.toggle_games(channel.id) else \"disabled\"\n\n msg = await self.ex.get_msg(user, \"moderator\", \"channel_toggled\", [\n [\"name\", ctx.author.display_name], [\"text_channel\", channel.name], [\"result\", enabled_msg]\n ])\n\n return await ctx.send(msg)", "def switch_frequency_plot_channel_one(self):\n if self.plot_channel_key_booleans[0]:\n self.plot_channel_key_booleans[0] = False\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[0] = True\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[0]))", "def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()", "def turn_display_off(turn_off):\n if turn_off:\n send_command(0xAE)\n else:\n send_command(0xAF)", "def toggle_visibility(self):\n if self.is_visible():\n self.hide()\n else:\n self.show()", "def set_invert_display(enable):\n if enable:\n send_command(0xA7)\n else:\n send_command(0xA6)", "def show(self):\n # Disable IRQ to improve speed\n with NoIRQ():\n for chip in range(NB_CHIPS):\n self._select(chip)\n row = 0 if chip in (0, 1) else 1\n col = 0 if chip in (0, 2) else 1\n data = self.get_ht1632_data(row, col)\n green = (is_green(value) for value in data)\n red = (is_red(value) for value in data)\n self._write_data(green, red)", "def toggle(self) -> None:\n if bool(self.show.get()):\n self.sub_frame.pack(fill=tk.X, expand=True)\n self.toggle_button.configure(text=self.sep[0])\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text=self.sep[1])", "def unhide(self):\n self.course.quick_action(self.id, 'show')", "def gridDisplay(self):\n\n if self.griddButton.isCheckable():\n self.photo_grid.setVisible(False)\n self.griddButton.setCheckable(False)\n self.griddButton.setDown(False)\n self.statustext.setText(\"Hide Grid\")\n else:\n self.griddButton.setCheckable(True)\n self.photo_grid.setVisible(True)\n self.griddButton.setDown(True)\n self.statustext.setText(\"Display Grid - Rule of thirds\")", "def _turn_on(self):\n self._turn_display('ON')", "def switch_frequency_plot_channel_six(self):\n if self.plot_channel_key_booleans[5]:\n self.plot_channel_key_booleans[5] = False\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[5] = True\n self.parent_widget.graph_channel_six_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[5]))", "def switch_frequency_plot_channel_four(self):\n if self.plot_channel_key_booleans[3]:\n self.plot_channel_key_booleans[3] = False\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[3] = True\n self.parent_widget.graph_channel_four_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[3]))", "def visible(self, show):", "def hide(self, indices):\n traj_ids = set(traj.id for traj in self._trajlist)\n\n for index in indices:\n comp_id = self._ngl_component_ids[index]\n if comp_id in traj_ids:\n traj = self._get_traj_by_id(comp_id)\n traj.shown = False\n 
self._remote_call(\n \"setVisibility\",\n target='compList',\n args=[\n False,\n ],\n kwargs={'component_index': index})", "async def togglechannel(self, ctx, channel):\r\n\r\n user = ctx.message.author\r\n channel = await commands.clean_content().convert(ctx, channel)\r\n await ctx.message.delete()\r\n\r\n if channel == \"nsfw\":\r\n\r\n if self.bot.nsfw_role in user.roles:\r\n await user.remove_roles(self.bot.nsfw_role)\r\n await user.send(\"Access to NSFW channels revoked.\")\r\n else:\r\n await user.add_roles(self.bot.nsfw_role)\r\n await user.send(\"Access to NSFW channels granted.\")\r\n else:\r\n await user.send(\"{} is not a togglable channel.\".format(channel))", "async def listchannels(self, ctx: commands.Context):\n db_session = self.bot.create_db_session()\n channels_query = db_session.query(Channel).filter(Channel.joinable == True).order_by(Channel.name)\n db_session.close()\n\n header_message = \"Here is a list of the joinable channels\"\n channel_list = \"\\n\".join(channel.name for channel in channels_query)\n footer_messge = (\"To join or leave one of these channels, use the !joinchannel and !leavechannel commands.\\n\"\n \"To join multiple channels, separate them with a space.\")\n\n message = discord.Embed()\n message.title = \"Joinable Channels\"\n message.description = channel_list\n message.set_footer(text=footer_messge)\n\n await ctx.send(embed=message)", "def set_visible(self, visible):\n self._visible = visible\n for artist in self.artists:\n artist.set_visible(visible)", "async def managechannels(self, ctx:commands.Context):", "def displayGrid(self, toggled):\n self.scene.setGridVisible(visible=toggled)", "def watch_show_ac(self, show_ac: bool) -> None:\n self.query_one(\"#c\").display = not show_ac\n self.query_one(\"#ac\").display = show_ac", "def switch_frequency_plot_channel_five(self):\n if self.plot_channel_key_booleans[4]:\n self.plot_channel_key_booleans[4] = False\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[4] = True\n self.parent_widget.graph_channel_five_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[4]))", "def toggleShowInvisibles(self: Self, event: Event = None) -> None:\n c = self\n colorizer = c.frame.body.getColorizer()\n showInvisiblesHelper(c, not colorizer.showInvisibles)", "def toggle_cont(self):\n if self.cont.isChecked():\n self.cont_dir_label.show()\n self.cont_dir_button.show()\n else:\n self.cont_dir_label.hide()\n self.cont_dir_button.hide()", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return", "def func(self):\n from evennia.comms.models import ChannelDB\n\n caller = self.caller\n if self.args not in (\"on\", \"off\"):\n return super(CmdArxAllCom, self).func()\n if self.args == \"on\":\n # get names of all channels available to listen to\n # and activate them all\n channels = [\n chan\n for chan in ChannelDB.objects.get_all_channels()\n if chan.access(caller, \"listen\")\n ]\n for channel in channels:\n unmuted = 
channel.unmute(caller)\n if unmuted:\n self.msg(\"You unmute channel %s.\" % channel)\n else:\n caller.execute_cmd(\"addcom %s\" % channel.key)\n return\n channels = ChannelDB.objects.get_subscriptions(caller)\n for channel in channels:\n if channel.mute(caller):\n self.msg(\"You mute channel %s.\" % channel)", "async def togglechannel(self, ctx, channel):\n\n user = ctx.message.author\n await ctx.message.delete()\n\n if channel == \"nsfw\":\n\n if self.bot.nsfw_role in user.roles:\n await user.remove_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels revoked.\")\n else:\n await user.add_roles(self.bot.nsfw_role)\n await user.send(\"Access to NSFW channels granted.\")\n else:\n await user.send(\"{} is not a togglable channel.\".format(channel))", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def toggle(self):\n if bool(self.show.get()):\n self.sub_frame.pack(fill=\"x\", expand=1)\n self.toggle_button.configure(text='-')\n else:\n self.sub_frame.forget()\n self.toggle_button.configure(text='+')", "def print_all(self):\r\n for e in self.channels:\r\n e.print()", "async def clear(self, ctx):\n await self.config.guild(ctx.guild).channels.clear()\n await ctx.send(\"Spoiler channel list cleared.\")", "def set_multiplex_shown(self, c, channel, on):\n try:\n states = self.binding.get_switcher_signal_states(channel)\n states['show'] = on\n self.binding.set_switcher_signal_states(channel, states)\n return True\n except Exception, e:\n self.handle_wavemeter_error(e)\n return False", "def switch_frequency_plot_channel_seven(self):\n if self.plot_channel_key_booleans[6]:\n self.plot_channel_key_booleans[6] = False\n self.parent_widget.graph_channel_seven_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[6] = True\n self.parent_widget.graph_channel_seven_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[6]))", "def switch_frequency_plot_channel_eight(self):\n if self.plot_channel_key_booleans[7]:\n self.plot_channel_key_booleans[7] = False\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[7] = True\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[7]))", "def switch_rawhide(self, key, rows):\n self.controller.set_context('rawhide')", "def _fcn_cbar_display_grp(self):\n viz = self.cbqt.cbui._cbar_grp.isChecked()\n self.menuDispCbar.setChecked(viz)\n self._fcn_menu_disp_cbar()", "def toggle(self):\n self.open = not self.open", "def toggle(self):\n self._show = not self._show\n if self._show:\n self._sub_frame.pack(fill=tk.X, expand=1)\n self._toggle_button.configure(text='-')\n else:\n self._sub_frame.forget()\n self._toggle_button.configure(text='+')", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})", "def show_only(self, indices='all'):\n traj_ids = set(traj.id for traj in self._trajlist)\n\n if indices == 'all':\n indices_ = 
set(range(self.n_components))\n else:\n indices_ = set(indices)\n\n for index, comp_id in enumerate(self._ngl_component_ids):\n if comp_id in traj_ids:\n traj = self._get_traj_by_id(comp_id)\n else:\n traj = None\n if index in indices_:\n args = [\n True,\n ]\n if traj is not None:\n traj.shown = True\n else:\n args = [\n False,\n ]\n if traj is not None:\n traj.shown = False\n\n self._remote_call(\n \"setVisibility\",\n target='compList',\n args=args,\n kwargs={'component_index': index})", "def toggle_autorun(self, event):\n self.lnp.toggle_autorun(self.proglist.item(self.proglist.identify(\n 'row', event.x, event.y), 'text'))\n self.update_autorun_list()", "def switch_frequency_plot_channel_three(self):\n if self.plot_channel_key_booleans[2]:\n self.plot_channel_key_booleans[2] = False\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[2] = True\n self.parent_widget.graph_channel_three_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[2]))", "def _syncDisplayMenu(ned, menu):\n pass", "async def list_channel(self, ctx: MyContext):\n channels = self.db_get_channels(ctx.guild.id)\n if not channels: # we can't send an empty list\n await ctx.send(\n await self.bot._(\n ctx.guild.id, \"wormhole.error.no-channels\", p=ctx.prefix\n )\n )\n return\n txt = \"\\n\".join([c.to_str() for c in channels])\n await ctx.send(txt)", "def display(self):\r\n if not self.visible:\r\n return # Skip if invisible\r\n \r\n SlTrace.lg(\"display %s: %s\" % (self.get_tag_list(), self), \"display\")\r\n for comp in self.comps:\r\n comp.display()\r\n self.task_update()", "def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)", "def showSettings(self):\n self.c.show()", "def ChannelSelect(self):\n self.active_mode = 'default'\n self.reset_buttons()\n # Dialog to choose channels from specific brain regions\n w = SelectChannelsDialog(\n stringlist=self.model.all_regions,\n checked=self.model.regions_mask\n )\n all_locs = self.model.electrodes_table['location'][self.model.electrical_series_channel_ids]\n self.model.channels_mask = np.zeros(len(all_locs))\n for loc in w.choices:\n self.model.channels_mask += all_locs == np.array(loc)\n # Indices of channels from chosen regions\n self.model.channels_mask_ind = np.where(self.model.channels_mask)[0]\n self.model.n_channels_total = len(self.model.channels_mask_ind)\n # Reset channels span control\n self.model.lastCh = np.minimum(16, self.model.n_channels_total)\n self.model.firstCh = 1\n self.model.nChToShow = self.model.lastCh - self.model.firstCh + 1\n self.qline0.setText(str(self.model.lastCh))\n self.qline1.setText(str(self.model.firstCh))\n # Update signals plot\n self.model.selectedChannels = self.model.channels_mask_ind[self.model.firstCh - 1:self.model.lastCh]\n self.model.refreshScreen()", "def toggle(self, color='all'):\n if color in ['all', 'r']:\n self.__send('r', 'toggle')\n\n if color in ['all', 'g']:\n self.__send('g', 'toggle')\n\n if color in ['all', 'b']:\n self.__send('b', 'toggle')", "def display(self):\n self.displaycontrol |= self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)", "def 
print(self):\r\n for e in self.channels:\r\n print(e)", "def _showhide_scope(self, widget):\n\t\tself.host_list.toggle_scope()\n\t\tself.services_list.toggle_scope()\n\n\t\tself._sync() #reset=True)", "async def votechannel_list(self, ctx):\n channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id, voting_type FROM voting_channel WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n )\n if not channels:\n raise exceptions.Info(\"There are no voting channels on this server yet!\")\n\n rows = []\n for channel_id, voting_type in channels:\n rows.append(f\"<#{channel_id}> - `{voting_type}`\")\n\n content = discord.Embed(\n title=f\":1234: Voting channels in {ctx.guild.name}\", color=int(\"3b88c3\", 16)\n )\n await util.send_as_pages(ctx, content, rows)", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def _set_show_hide_products(self):\n \n visible_count = 0\n\n for (counter, product) in enumerate(self.product_displays):\n\n if (counter < self.product_displays.top_index):\n # Hide all the products above the list product top\n product.set_visible(False)\n elif visible_count < self.limits.screen_products:\n # Show screen products based on their quantity\n product.visible = True\n visible_count += 1\n else:\n # Hide products below list bottom\n product.set_visible(False)", "def update_visible(self, immediate=False):\n raise NotImplementedError", "def toggle_color(self, index):\n if self.get_state(index):\n self.canvas.itemconfigure(self.cells[index], state=HIDDEN)\n else:\n self.canvas.itemconfigure(self.cells[index], state=NORMAL)", "def set_visible(self):\n\t\tself.hide()\n\t\tself.__sys_tray_icon.setVisible(True)", "def toggle_show_variable_identifier(self, show_ident):\n\n self._show_variable_ident = show_ident\n\n self.current_graph.refresh()", "def togglePulseUI():\n if isPulseUIShowing():\n hidePulseUI()\n else:\n showPulseUI()", "def set_toggle_devices_enabled(self, track, xclip, ident, value = None):\n for device in track.devices:\n if(hasattr(device, 'parameters')):\n self._parent._device_actions.set_device_on_off(device, track, xclip, ident);", "def display_groups(self, display_groups):\n\n self._display_groups = display_groups", "async def async_toggle(self):\n await self.async_mute_volume(not self._muted)", "def collapse_all_tracks(self):\n self.command(\"collapse\")", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def hide(self):\n self.visible = False" ]
[ "0.67623276", "0.670535", "0.63494956", "0.63484985", "0.61224884", "0.60341847", "0.5839348", "0.5775318", "0.57307273", "0.57006425", "0.5679107", "0.564858", "0.5595665", "0.55708903", "0.55689216", "0.55689216", "0.5560915", "0.5559971", "0.55417454", "0.55388993", "0.5520554", "0.5499055", "0.548649", "0.5484991", "0.5466075", "0.54581743", "0.5449103", "0.54337084", "0.539835", "0.53809506", "0.53727263", "0.5367805", "0.5352981", "0.535154", "0.5350411", "0.5326765", "0.53015095", "0.52920836", "0.5291906", "0.5288067", "0.5286671", "0.52840906", "0.5269556", "0.5250319", "0.5240698", "0.5236868", "0.5236393", "0.5181252", "0.5175506", "0.51726454", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51682526", "0.51555574", "0.51470745", "0.5146645", "0.5132743", "0.513241", "0.5120155", "0.51195467", "0.5118085", "0.5093975", "0.50915414", "0.508245", "0.50694436", "0.50527465", "0.504639", "0.5044523", "0.5041644", "0.50416416", "0.5018485", "0.50173473", "0.50148505", "0.5013953", "0.5009629", "0.5008075", "0.500347", "0.4999347", "0.49896485", "0.49822754", "0.49756232", "0.4975463", "0.49712804", "0.49648342", "0.4957352", "0.49557066", "0.49528387", "0.49357235", "0.49315736", "0.49307328", "0.49284387", "0.4926314" ]
0.49804327
88
Toggle automatic scaling of the traces.
def toggle_auto_scale(self, checked): logger.debug("Set auto scale to %s.", checked) self.auto_scale = checked
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()", "def clickAutoscale(self, event):\n self.axes.autoscale_view()", "def scaling_enabled(self):\n return False", "def on_scale (self):\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_foreground()\n\t\t\tself.redraw_background()\n\n\t\tif self.expand2 == _('Use a scrollbar'):\n\t\t\tself.width = int((self.icon_size * 2 * self.rows + ((self.border_size+self.shadow_size)*2)+15 ) + 24/self.scale)\n\t\t\tself.update_scrollbar()", "def reset_scale(self) -> None:\n self._scale.set(self._start_val)", "def is_scale_enabled(self) -> bool:\r\n ...", "def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def setScaleMode(self, mode):\n if mode != self.__scale_mode and mode in (self.ScaleModeGlobal, self.ScaleModeLocal):\n self.__scale_mode = mode\n self.__scaled_datasets = None\n self.__axisDomains = None\n self.dataChanged.emit()", "def scale(self):", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def linux_zoomer_minus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def setscaling(self, scaling):\n\n self.__scaling = scaling", "def reset_limits(self):\n self.autoscale = True\n self.camera.autoscale()", "def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)", "def change_scaling(self, scales=None, offsets=None) -> None:\n self.points.change_scaling(scales, offsets)\n\n self.header.scales = scales\n self.header.offsets = offsets", "def scale_invert(self):", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def setDoRescale(self, value):\n return self._set(doRescale=value)", "def toggle_maximized(self):\n if self.isMaximized():\n self.showNormal()\n else:\n self.showMaximized()", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def autoScale(self):\n\t\tif self.autoscaleToggle:\n\t\t\tif not self.fullscreenToggle:\n\t\t\t\tmaxSize = (self.get_screen().get_width() - 100, self.get_screen().get_height() - 100)\n\t\t\telse:\n\t\t\t\tmaxSize = (self.get_screen().get_width(), self.get_screen().get_height())\n\t\t\timgSize = [self.currentPixbuf.get_width(), self.currentPixbuf.get_height()]\n\n\t\t\tif imgSize[0] > maxSize[0] or imgSize[1] > maxSize[1]:\n\t\t\t\tscaleFactor = 1.0 * maxSize[0] / imgSize[0]\n\t\t\t\tif imgSize[1] * scaleFactor > maxSize[1]:\n\t\t\t\t\tscaleFactor = 1.0 * maxSize[1] / imgSize[1]\n\t\t\t\tself.scaleFactor = scaleFactor\n\t\t\t\timgSize[0] = int(imgSize[0] * scaleFactor)\n\t\t\t\timgSize[1] = int(imgSize[1] * scaleFactor)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, 
event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def toggle(self):\n if not self.hidden and not self.vimiv.commandline.entry.is_visible():\n self.bar.hide()\n else:\n self.bar.show()\n self.hidden = not self.hidden\n # Resize the image if necessary\n if not self.vimiv.image.user_zoomed and self.vimiv.paths and \\\n not self.vimiv.thumbnail.toggled:\n self.vimiv.image.zoom_to(0)", "def autorange(self):\n self._checkfigure()\n self.axes.autoscale_view(True)", "def showscale(self):\n return self['showscale']", "def set_scale_control(self, scale_ctl=3):\n self._scale_ctl = scale_ctl", "def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)", "def use_fscale(self,use_fscale):\n if type(use_fscale).__name__ == 'bool':\n self._use_fscale = use_fscale\n else:\n raise KINSOL_Exception(\"The variable sent to 'use_fscale' must be a boolean.\")", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def onScales(self):\n # Ensure that we can work\n plt = Plot.getPlot()\n if not plt:\n self.updateUI()\n return\n # Get again all the subwidgets (to avoid PySide Pitfalls)\n mw = self.getMainWindow()\n form = mw.findChild(QtGui.QWidget, \"TaskPanel\")\n form.all = self.widget(QtGui.QCheckBox, \"allAxes\")\n form.xAuto = self.widget(QtGui.QCheckBox, \"xAuto\")\n form.yAuto = self.widget(QtGui.QCheckBox, \"yAuto\")\n form.xSMin = self.widget(QtGui.QLineEdit, \"xMin\")\n form.xSMax = self.widget(QtGui.QLineEdit, \"xMax\")\n form.ySMin = self.widget(QtGui.QLineEdit, \"yMin\")\n form.ySMax = self.widget(QtGui.QLineEdit, \"yMax\")\n\n axesList = [plt.axes]\n if form.all.isChecked():\n axesList = plt.axesList\n if not self.skip:\n self.skip = True\n # X axis\n if form.xAuto.isChecked():\n for ax in axesList:\n ax.set_autoscalex_on(True)\n form.xSMin.setEnabled(False)\n form.xSMax.setEnabled(False)\n lim = plt.axes.get_xlim()\n form.xSMin.setText(str(lim[0]))\n form.xSMax.setText(str(lim[1]))\n else:\n form.xSMin.setEnabled(True)\n form.xSMax.setEnabled(True)\n try:\n xMin = float(form.xSMin.text())\n except:\n xMin = plt.axes.get_xlim()[0]\n form.xSMin.setText(str(xMin))\n try:\n xMax = float(form.xSMax.text())\n except:\n xMax = plt.axes.get_xlim()[1]\n form.xSMax.setText(str(xMax))\n for ax in axesList:\n ax.set_xlim((xMin, xMax))\n # Y axis\n if form.yAuto.isChecked():\n for ax in axesList:\n ax.set_autoscaley_on(True)\n form.ySMin.setEnabled(False)\n form.ySMax.setEnabled(False)\n lim = plt.axes.get_ylim()\n form.ySMin.setText(str(lim[0]))\n form.ySMax.setText(str(lim[1]))\n else:\n form.ySMin.setEnabled(True)\n form.ySMax.setEnabled(True)\n try:\n yMin = float(form.ySMin.text())\n except:\n yMin = plt.axes.get_ylim()[0]\n form.ySMin.setText(str(yMin))\n try:\n yMax = float(form.ySMax.text())\n except:\n yMax = plt.axes.get_ylim()[1]\n form.ySMax.setText(str(yMax))\n for ax in axesList:\n ax.set_ylim((yMin, yMax))\n plt.update()\n self.skip = False", "def showscale(self):\n return self[\"showscale\"]", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def fix_auto(self):\n if self.share_x:\n self.rescale_axes(x=True, y=False)\n self.fix_axes_ticks(axis='x')\n if self.share_y:\n self.rescale_axes(x=False, y=True)\n self.fix_axes_ticks(axis='y')", "def scale_out(self, *args, **kwargs):\n pass", "def update_zoom_plot(self):\n 
self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def set_allow_upscaling(self, allow):\n self.widget.setAllowUpscaling(allow)", "def _force_rescale(self, setpoint_x, setpoint_y):", "def shell_allow_upscaling_changed(self, allow):\n self.set_allow_upscaling(allow)", "def shell_allow_upscaling_changed(self, allow):\n self.set_allow_upscaling(allow)", "def shell_scale_to_fit_changed(self, scale_to_fit):\n self.set_scale_to_fit(scale_to_fit)", "def shell_scale_to_fit_changed(self, scale_to_fit):\n self.set_scale_to_fit(scale_to_fit)", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def scale(self, _: Application) -> bool:\n return False", "def toggle_y_zoom(self, b):\n if self.y_crop_slider.disabled is True:\n self.y_crop_slider.disabled = False\n elif self.y_crop_slider.disabled is False:\n self.y_crop_slider.disabled = True", "def unsetScale(self):\n return _libsbml.Unit_unsetScale(self)", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def yscale(self, newscale, linthreshy=1.e-4):\n self._checkfigure()\n if newscale == 'symlog':\n self.axes.set_yscale(newscale, linthreshy=linthreshy)\n else:\n self.axes.set_yscale(newscale)", "def verticalScale(self):\n self.model.refreshScreen()", "def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def process_zoom(self, status):\n log.debug(\"Zoom tool clicked %s\", status)\n if status == \"True\":\n self.auto_scale = False", "def toggle_span_grid(self, x):\r\n self.konfig.span.set_grid(x)\r\n self.spanGraf.toggle_grid(x)", "def enableZoomIn(self):\n self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)\n self.master.config(cursor = \"cross\")", "def set_allow_upscaling(self, allow):\n self.widget.SetAllowUpscaling(allow)", "def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale", "def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf, size).convert_alpha()", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def normalize_plot(self):\n\n kwargs = dict(stretch = self.stretch,\n vmin = self.vmin_button.get_value(),\n vmax = self.vmax_button.get_value())\n norm = aplpy.normalize.APLpyNormalize(**kwargs)\n self.parent.aplpy_plot.image.set_norm(norm)\n self.parent.aplpy_plot.refresh()", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! 
Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def zoom(self, scaleChange):\n\t\tself.scaleFactor += scaleChange\n\t\t\n\t\t# don't allow smaller then 10%\n\t\tif self.scaleFactor < 0.1:\n\t\t\tself.scaleFactor = 0.1\n\t\t\n\t\tif scaleChange > 0:\n\t\t\tself.setTitle(\"Zoom +\")\n\t\telse:\n\t\t\tself.setTitle(\"Zoom -\")\n\t\t\n\t\tself.display()\n\t\tself.autoResize()\n\t\tgobject.timeout_add(10, self.display)", "def turn_squeeze_image_on(self):\n self.squeeze_image = True", "def scale(self, scale):\n\t\tself._current_score *= scale", "def SetLogicalScale(*args, **kwargs):\n return _gdi_.DC_SetLogicalScale(*args, **kwargs)", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def update_rescale_entry(self):\n if self.var_rescale_frame.get() == 0:\n self.checkbox_rescale_frame[\"text\"] = \"Rescale Frames\"\n self.rescale_factor_entry.config(state=\"disabled\")\n elif self.var_rescale_frame.get() == 1:\n self.checkbox_rescale_frame[\"text\"] = \"By a factor of: \"\n self.rescale_factor_entry.config(state=\"normal\")", "def verticalScaleDecrease(self):\n scaleFac = float(self.qline4.text())\n self.qline4.setText(str(scaleFac / 2))\n self.model.refreshScreen()", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()", "def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def yscale(self, value='linear'):\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()", "def scale(self, app: Application) -> bool:\n pass", "def __zoomOut(self):\n if QApplication.focusWidget() == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").zoomOut()\n else:\n aw = self.activeWindow()\n if aw:\n aw.zoomOut()\n self.sbZoom.setValue(aw.getZoom())", "def onClick(self, event):\n\t\tif (event.button == 1):\n\t\t\tfor loop in range(1,4):\n\t\t\t\tself.limits[loop,:] *= 0.5\n\n\t\tif (event.button == 3):\n\t\t\tfor loop in range(1,4):\n\t\t\t\tself.limits[loop,:] *= 2.0\n\n\t\tif (event.button == 2):\n\t\t\tself.mapsFig.canvas.mpl_disconnect(self.motionEvent)\n\t\t\tself.mapsFig.canvas.mpl_disconnect(self.clickEvent)\n\t\t\tpl.close('all')\n\n\t\tfor loop in range(4):\t\t\t\n\t\t\tself.axStokes[loop].set_ylim(self.limits[loop,:])", "def reset_graph(self):\n log.debug(\"reset graph\")\n self.auto_scale = True\n self.select_tool.action.setChecked(True)\n\n dgplot = self.main_curve_dialog.get_plot()\n dgplot.do_autoscale()\n\n dgimage = self.main_curve_dialog.get_plot()\n dgimage.do_autoscale()", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def toggle_satni_grid(self, x):\r\n self.konfig.satni.set_grid(x)\r\n self.satniGraf.toggle_grid(x)", "def __init__(self, scale=False):\n self.scale = scale", "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 
1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "def resetMinZoomVisibility(self):\n self._min_zoom = None", "def set_equal(ax):\n scaling = array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n ax.auto_scale_xyz(*[[min(scaling), max(scaling)]]*3)", "def xscale(self, newscale, linthreshx=1.e-4):\n self._checkfigure()\n if newscale == 'symlog':\n self.axes.set_xscale(newscale, linthreshx=linthreshx)\n else:\n self.axes.set_xscale(newscale)", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def setAxisScaling(scalingtype='linear', axes='XYZ'):\n scalingdict = {'linear':'LIN', 'log':'LOG'} \n dislin.axsscl(scalingdict[scalingtype],axes)", "def shrink(self):\n if self.focused == 0:\n self._shrink_main(self.change_ratio)\n elif len(self.clients) == 2:\n self._shrink_solo_secondary(self.change_ratio)\n else:\n self._shrink_secondary(self.change_size)\n self.group.layout_all()", "def _need_rescale(self, fieldname, scale):\n cropped = IAnnotations(self.context).get(PAI_STORAGE_KEY)\n if cropped and '%s_%s' % (fieldname, scale) in cropped:\n self._allow_rescale = False\n else:\n self._allow_rescale = True", "def zoomReset(self):\n self.viewNP.setScale(0.5)\n self.nodeMgr.updateConnections()", "def force_rescale(self,rescaleFactor):\n if not self.built:\n raise Exception(\"model should be built before calling this function\")\n for l in self.layerList:\n l.rescale(rescaleFactor)\n 
self.rescaleFactor.assign(rescaleFactor)", "def toggle_scattering(self, setting=1):\n if setting not in [0, 1, \"on\", \"off\"]:\n raise ValueError(\n \"The input for the toggle the us of scattering \"\n 'in the model must \"on\" (1) or \"off\" (0)'\n )\n self.use_scat = 1 if setting == \"on\" else 0 if setting == \"off\" else setting", "def scale(self, sf):\n self.scale(sf, sf)", "def _zoomCamera(self, sizeChange):\n self.camSize -= sizeChange", "def scale(self, sx, sy):\n frameWidth *= sx\n frameHeight *= sy\n repaint()", "def shouldAutoScale(self):\n if self.autoscale is not None:\n return self.autoscale\n # preserve backwards compatability for zenpacks\n for dp in self.graphPoints():\n if dp.meta_type == 'DataPointGraphPoint' and dp.shouldAutoScale():\n return True\n return False", "def scale_smaller(self):\n new_factor = self._zoom_factor - 0.1\n if 0 < float(new_factor) < self._MAX_ZOOM:\n self._zoom_factor = new_factor" ]
[ "0.7329849", "0.7177367", "0.65722066", "0.61756915", "0.61486137", "0.6119862", "0.6056649", "0.6035681", "0.6035681", "0.6006283", "0.5996011", "0.5986425", "0.597488", "0.5941395", "0.5927027", "0.5925209", "0.5909223", "0.5884189", "0.5856268", "0.5849367", "0.5846804", "0.58342034", "0.5830538", "0.582898", "0.57837635", "0.57739615", "0.576577", "0.5754124", "0.573523", "0.5723398", "0.5693992", "0.5684643", "0.5653009", "0.5646694", "0.5641598", "0.56320804", "0.5602191", "0.5601616", "0.5585844", "0.5576437", "0.5566577", "0.5551435", "0.5547139", "0.5547139", "0.55323786", "0.55323786", "0.553111", "0.5524825", "0.55083156", "0.5486301", "0.54772925", "0.5458524", "0.5452016", "0.54514647", "0.5446853", "0.5442449", "0.54311997", "0.5429982", "0.54294187", "0.5428762", "0.54280984", "0.54225004", "0.54158276", "0.5412313", "0.5399703", "0.5397996", "0.5390719", "0.5390413", "0.53795713", "0.53762627", "0.53707445", "0.53470623", "0.5341218", "0.53367895", "0.53367895", "0.5333416", "0.5333394", "0.53310066", "0.5320556", "0.5317208", "0.5314829", "0.5288109", "0.52809507", "0.5280519", "0.5280201", "0.527975", "0.5274071", "0.5273231", "0.5270558", "0.526735", "0.52670467", "0.52571833", "0.5248485", "0.524732", "0.52458835", "0.524508", "0.52398366", "0.52329266", "0.5228206", "0.52171075" ]
0.72248846
1
Update the view when the color scheme changes.
def update_color(self): self.plot(update_traces=False, update_waveforms=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def plot_color_changed(self):\n self.plot_color = self.plot_color_button.color()", "def changeColor(self):\n self.layer.new_colormap()", "def _update_color(self, color):\n self.color = color", "def _color_change_mode(self):\r\n self.dlg.exec_()\r\n self.color = self.dlg.currentColor().name()\r\n self.colorPlate.setStyleSheet(\"background-color: %s;\" % self.color)\r\n self.input_scene.get_stk_color(self.color)\r\n return", "def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)", "def _update_color(self, *args):\n\n if self._variable and 'w' in self._mode and not self._dnd_started:\n self._internal_color_change = True\n self.color_var.set(self._variable)", "def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )", "def color(self, color_value):\n self.app.color = color_value", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def set_color(self):\n new_color = QColorDialog.getColor(QColor(self.config['color']))\n if not new_color.isValid():\n return\n self.config['color'] = new_color.rgb()\n self.paint()", "def OnSysColourChanged(self, event):\r\n \r\n # This event is probably triggered by a theme change \r\n # so we have to re-init the art provider.\r\n if self._art:\r\n self._art.Init()\r\n\r\n if self._frame:\r\n self.Update()\r\n self._frame.Refresh()", "def _update_color(self):\n self._vertex_list.colors[:] = self._rgba * self._num_verts", "def _update(self):\n\n if self.rgb:\n self._canvas['bg'] = tks.color_funcs.rgb_to_hex_string(self.rgb)\n self._text['text'] = self._color_info_text()\n else:\n self._canvas['bg'] = self._blank_label_color\n self._text['text'] = ''", "def _color_var_changed(self, *args):\n\n if not self._internal_color_change:\n self._variable = self.color_var.get()\n self._update()\n self._internal_color_change = False", "def refresh_color(self):\n self.color = max(0, int(math.sqrt(self.vx ** 2\n + self.vy ** 2)) + 100)", "def update(self):\n try:\n if not self._light.connected:\n self._light.connect()\n # pylint: disable=invalid-name\n r, g, b, w = self._light.get_color()\n except pykulersky.PykulerskyException as exc:\n if self._available:\n _LOGGER.warning(\"Unable to connect to %s: %s\", self._light.address, exc)\n self._available = False\n return\n if not self._available:\n _LOGGER.info(\"Reconnected to %s\", self.entity_id)\n self._available = True\n\n hsv = color_util.color_RGB_to_hsv(r, g, b)\n self._hs_color = hsv[:2]\n self._brightness = int(round((hsv[2] / 100) * 255))\n self._white_value = w", "def update_style(self):\n pass", "def _on_edge_color_change(self, event=None):\n with self.layer.events.edge_color.blocker():\n index = self.edgeComboBox.findText(\n self.layer.edge_color, Qt.MatchFixedString\n )\n self.edgeComboBox.setCurrentIndex(index)\n color = Color(self.layer.edge_color).hex\n self.edgeColorSwatch.setStyleSheet(\"background-color: \" + color)", 
"def update_background(self):\n color = QColorDialog().getColor()\n self.model.set('Look', 'background', str(color.name(QColor.HexRgb)))\n self.model.announce_update()", "def set_color(self, new_color):\n self.color = new_color", "def change_color(self, color):\n self.color = color", "def updateTheme(self):\n self.myUpdate(stateDict=None)", "def shell_fgcolor_changed(self, color):\n self.set_fgcolor(color)", "def on_material_color_btn_color_set(self,button,data=None):\n self.app.reload_job()", "def shell_bgcolor_changed(self, color):\n self.set_bgcolor(color)", "def set_window_colour(self, event):\n rgb_triplet, rgb_string = tkColorChooser.askcolor()\n self.canvas.config(bg = rgb_string)", "def updateColors(self):\n self.negativeColor = (int(self.negativeRedTextField.get(\"1.0\", tk.END)),\n int(self.negativeGreenTextField.get(\"1.0\", tk.END)),\n int(self.negativeBlueTextField.get(\"1.0\", tk.END)))\n self.positiveColor = (int(self.positiveRedTextField.get(\"1.0\", tk.END)),\n int(self.positiveGreenTextField.get(\"1.0\", tk.END)),\n int(self.positiveBlueTextField.get(\"1.0\", tk.END)))\n # Update the positive and negative labels\n self.negativeLabel.config(background=self.negativeColorHex())\n self.positiveLabel.config(background=self.positiveColorHex())\n\n print(f\"Negative: {self.negativeColor}\")\n print(f\"Positive: {self.positiveColor}\")", "def changeColorPanel():\n panel = nuke.Panel('Change Colorspace')\n\n #add pulldown for choice of colorspace\n luts = ' '.join(lutList())\n spaces = panel.addEnumerationPulldown(\"new colorspace\", luts)\n\n for each in nuke.allNodes(\"Read\"):\n readFileName = each[\"file\"].value().split('/').pop().split('.').pop(0)\n panel.addBooleanCheckBox(\"%s :[%s]\" % (readFileName, each[\"colorspace\"].value()), False)\n\n ret = panel.show()\n\n for each in nuke.allNodes(\"Read\"):\n readFileName = each[\"file\"].value().split('/').pop().split('.').pop(0)\n if panel.value(\"%s :[%s]\" % (readFileName, each[\"colorspace\"].value())):\n each[\"colorspace\"].setValue(panel.value(\"new colorspace\"))", "def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0", "def magic_colors(self,parameter_s = ''):\n \n new_scheme = parameter_s.strip()\n if not new_scheme:\n print 'You must specify a color scheme.'\n return\n try:\n self.shell.outputcache.set_colors(new_scheme)\n except:\n warn('Error changing prompt color schemes.\\n'\n + str(sys.exc_info()[1]))\n else:\n self.shell.rc.colors 
= \\\n self.shell.outputcache.color_table.active_scheme_name\n try:\n self.shell.InteractiveTB.set_colors(scheme = new_scheme)\n self.shell.SyntaxTB.set_colors(scheme = new_scheme)\n except:\n warn('Error changing exception color schemes.\\n'\n + str(sys.exc_info()[1]))\n if self.shell.rc.color_info:\n try:\n self.shell.inspector.set_active_scheme(new_scheme)\n except:\n warn('Error changing object inspector color schemes.\\n'\n + str(sys.exc_info()[1]))\n else:\n self.shell.inspector.set_active_scheme('NoColor')", "def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val", "def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if len(primitive.children) != 0:\n primitive.children[0].setAttribute('color', color)", "def register(self):\n active = True\n self.rgb = colormodel.RGB(0, 255, 0)\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()", "def on_show_view(self):\n self.window.background_color = arcade.color.BLACK", "def update_model_color(self, model_name, car_color):\n visuals = self.get_model_visuals(model_name)\n\n self.update_color(visuals=visuals,\n car_color=car_color)", "def change_colors(self, interval):\n for shape in self.shapes:\n shape.set_pen_color(choice(Color.PALETTE)).set_fill_color(\n choice(Color.PALETTE)\n )", "def activate_color_selector(self, event):\n\n color = colorchooser.askcolor()[1]\n self.variables.foreground_color = color\n self.change_shape_color(self.variables.current_shape_id, color)", "def _on_change(self, *_):\n colour = self.on_colour if self.value else self.off_colour\n self.configure(bg=colour)\n if self.label:\n self.label.configure(bg=colour)", "def _update(self):\n\n self.color = self.qcolor.getRgb()[0:3]\n self.setStyleSheet(\n \"\"\"\n QToolButton\n {\n background-color: qlineargradient(spread:pad, x1:0, y1:1, x2:0, y2:0, stop:0 rgb(%d, %d, %d), stop:1 rgb(%d, %d, %d))\n };\n \"\"\" % (self.color[0]*.45, self.color[1]*.45, self.color[2]*.45, self.color[0], self.color[1], self.color[2])\n )", "def _on_color_by_change(self, event=None):\n with self.layer.events.color_by.blocker():\n color_by = self.layer.color_by\n\n idx = self.color_by_combobox.findText(\n color_by, Qt.MatchFixedString\n )\n self.color_by_combobox.setCurrentIndex(idx)", "def _setColor(self, index):\n\n self.colorLabel.setStyleSheet(\"border: 1px solid black; background-color:rgb(%s, %s, %s);\" % (\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.rgbColorDlg.setCurrentColor(QColor.fromRgb(\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.colorSlider.setValue(index)", "def on_show_view(self):\n self.window.background_color = arcade.color.WHITE", "def color(self, color):\n\n self.container['color'] = color", "def update(self, rgb, cmyk, hsv):\n compRGB = a3.complement_rgb(rgb)\n if (compRGB is None):\n compRGB = rgb\n \n rgb_str = rgb_to_str(rgb)\n cmyk_str = '' if cmyk is None else str5_cmyk(cmyk) \n hsv_str = '' if hsv is None else str5_hsv(hsv)\n \n self.main.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n 
\"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\")\n self.main.background = rgb.glColor()\n self.main.foreground = compRGB.glColor()\n self.comp.text = (\"Color\\nRGB: \" + rgb_str +\n \"\\nCMYK: \" + cmyk_str +\n \"\\nHSV: \" + hsv_str + \"\\n \\n\" +\n \"R,G,B sliders in: 0..255\\n\" +\n \"C,M,Y,K sliders: 0 to 100%\\n\" +\n \"H slider: 0 <= H < 360 degrees\\n\" +\n \"S,V sliders: 0 <= S,V <= 1\" )\n self.comp.background = compRGB.glColor()\n self.comp.foreground = rgb.glColor()\n \n # set the sliders\n self.rSlider.value = rgb.red*100\n self.gSlider.value = rgb.green*100\n self.bSlider.value = rgb.blue*100\n self.cSlider.value = 0 if cmyk is None else cmyk.cyan*100 \n self.mSlider.value = 0 if cmyk is None else cmyk.magenta*100\n self.ySlider.value = 0 if cmyk is None else cmyk.yellow*100\n self.kSlider.value = 0 if cmyk is None else cmyk.black*100\n self.hSlider.value = 0 if hsv is None else hsv.hue*100\n self.sSlider.value = 0 if hsv is None else hsv.saturation*100\n self.vSlider.value = 0 if hsv is None else hsv.value*100", "def onColorMenu(self, item):\n self.canvas.color = item.color\n return 1", "def on_show_view(self):\n\n # Makes the background darker\n arcade.set_background_color([rgb - 50 for rgb in arcade.color.DARK_BLUE_GRAY])\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()", "def output_entry_changed(self, event):\n value = self.output.get_text().lstrip(\"#\")\n\n if len(value) == 6:\n rgb = hex_to_rgb(value)\n self.change_color(rgb)", "def update(self, grid, colRamp = ['white', 'blue']):\n \n # update the cell colors\n for y in range(len(grid)):\n yl = y + 1\n for x in range(len(grid[y])):\n xl = x + 1\n color = colRamp[int(grid[y][x])]\n self.displayWindow.update((xl, yl), color)\n\n # refresh the window\n self.displayWindow.tkupdate()", "def update_view(self, w: Wrapper) -> None:\n\n w.setStyleSheet(\"/* */\") # forces visual update", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def redraw(self):\r\n self.c.update()", "def set_color_list(self, new_list):\n self.__clr_list = itertools.cycle(new_list)", "def _updateColor(self, color):\n primitive = self._getScenePrimitive()\n if (len(primitive.children) != 0 and\n isinstance(primitive.children[0], primitives.ColormapMesh3D)):\n primitive.children[0].alpha = self._color[3]\n else:\n super(ComplexIsosurface, self)._updateColor(color)", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def color(self, color):\n #self._color = color\n new_color = \"{0}{1}{2}\".format(hex(int(color[0]))[2:].zfill(2),\n hex(int(color[1]))[2:].zfill(2),\n hex(int(color[2]))[2:].zfill(2))\n #self.log.info(\"RASPLes.color(%s : %s -> %s)\" % (self.number, color, new_color))\n #print(\"color(%s -> %s)\" % (self.number, new_color))\n try:\n self.current_color = new_color\n #self.strip.setPixelColor(int(self.number), self.current_color)\n self.strip.setPixelColorRGB(int(self.number), color[0], color[1], color[2])\n\n self.strip.updated = True\n except Exception as e:\n self.log.error(\"led update error\" + str(e))", "def set_color(self, color):\n with doc_ctrl.open_command():\n doc_ctrl.set_color(self.lbl, 
color)\n std_events.document_modified.emit()", "def _update_visual(self):\n\n # Check if the options are opened\n if self._is_opened:\n\n self._option_border.set_color(self.border_press)\n\n else:\n\n self._option_border.set_color((0, 0, 0, 0))", "def colors(self, parameter_s=''):\n def color_switch_err(name):\n warn('Error changing %s color schemes.\\n%s' %\n (name, sys.exc_info()[1]), stacklevel=2)\n\n\n new_scheme = parameter_s.strip()\n if not new_scheme:\n raise UsageError(\n \"%colors: you must specify a color scheme. See '%colors?'\")\n # local shortcut\n shell = self.shell\n\n # Set shell colour scheme\n try:\n shell.colors = new_scheme\n shell.refresh_style()\n except:\n color_switch_err('shell')\n\n # Set exception colors\n try:\n shell.InteractiveTB.set_colors(scheme = new_scheme)\n shell.SyntaxTB.set_colors(scheme = new_scheme)\n except:\n color_switch_err('exception')\n\n # Set info (for 'object?') colors\n if shell.color_info:\n try:\n shell.inspector.set_active_scheme(new_scheme)\n except:\n color_switch_err('object inspector')\n else:\n shell.inspector.set_active_scheme('NoColor')", "def update(self, rgb, cmyk, hsv):\n # RGB Fields\n self.rField.text = `rgb.red`\n self.gField.text = `rgb.green`\n self.bField.text = `rgb.blue`\n # CMYK fields\n self.cField.text = \"\" if cmyk is None else `round(cmyk.cyan,2)`\n self.mField.text = \"\" if cmyk is None else `round(cmyk.magenta,2)`\n self.yField.text = \"\" if cmyk is None else `round(cmyk.yellow,2)`\n self.kField.text = \"\" if cmyk is None else `round(cmyk.black,2)`\n # HSV fields\n self.hField.text = \"\" if hsv is None else `round(hsv.hue,1)`\n self.sField.text = \"\" if hsv is None else `round(hsv.saturation,3)`\n self.vField.text = \"\" if hsv is None else `round(hsv.value,3)`", "def set_color(self, color):\n self.light_color = color\n for f in self.color_change_cb:\n f(self)", "def _confirm_color(self, event = None):\n color = self._entry.get().strip()\n if color != \"\":\n self._color = color\n self._window.destroy()", "def set_color(self, background_color, color):\n self.background_color = background_color\n self.tile_color = color\n self.controller.refresh_board()", "def on_meta_colors(self, color_array):\n\n # print(self.analysis.landmark_orig_indexes)\n analysis_colors = color_array[self.analysis.landmark_orig_indexes]\n # print(analysis_colors)\n self.embedding_viewer.set_face_colors(analysis_colors)", "def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)", "def update_color(self, visuals, car_color):\n link_names = []\n visual_names = []\n ambients, diffuses, speculars, emissives = [], [], [], []\n\n for visual_name, link_name in zip(visuals.visual_names, visuals.link_names):\n if \"car_body_link\" in visual_name:\n visual_names.append(visual_name)\n link_names.append(link_name)\n ambient = ColorRGBA(const.COLOR_MAP[car_color].r * 0.1,\n const.COLOR_MAP[car_color].g * 0.1,\n const.COLOR_MAP[car_color].b * 0.1,\n const.COLOR_MAP[car_color].a)\n diffuse = ColorRGBA(const.COLOR_MAP[car_color].r * 0.35,\n const.COLOR_MAP[car_color].g * 0.35,\n const.COLOR_MAP[car_color].b * 0.35,\n const.COLOR_MAP[car_color].a)\n\n ambients.append(ambient)\n diffuses.append(diffuse)\n speculars.append(const.DEFAULT_COLOR)\n emissives.append(const.DEFAULT_COLOR)\n if len(visual_names) > 0:\n req = SetVisualColorsRequest()\n req.visual_names = visual_names\n req.link_names = link_names\n req.ambients = ambients\n req.diffuses = diffuses\n req.speculars = speculars\n 
req.emissives = emissives\n self._set_visual_colors(req)", "def set_color(self, color):\n\t\tpass", "def clickDarkReference(self, event):\n if self.darkReference is None:\n self.darkReference = self.spectrometer.getSpectrum()\n self.darkBtn.color = '0.99'\n else:\n self.darkReference = None\n self.darkBtn.color = '0.85'\n plt.pause(0.3)\n self.axes.autoscale_view()", "def process_color(self, color):\n self.controller.game.receive_color(color)\n self.parent.parent.update_stat_frame()\n self.parent.parent.update_table_frame()\n self.parent.parent.end_turn()", "def _itemChanged(self, event):\n if event == items.ItemChangedType.COLORMAP:\n self._sigColormapChanged.emit()\n if self._colormap is not None:\n self._colormap.sigChanged.disconnect(self._colormapChanged)\n\n item = self.item()\n if item is not None:\n self._colormap = item.getColormap()\n self._colormap.sigChanged.connect(self._colormapChanged)\n else:\n self._colormap = None\n\n elif event == items.ItemChangedType.DATA:\n self._sigColormapChanged.emit()", "def _update_color(self, rgb_tuple):\n for color in rgb_tuple._fields:\n pin = getattr(PINS, color)\n value = getattr(rgb_tuple, color)\n # Ensure color between 0 and 255\n value = max(min(value, 255), 0)\n # print(pin, value)\n self.pi.set_PWM_dutycycle(pin, value)", "def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)", "def update_visualization(self) -> None:\n pass", "def change_color(self, rgb):\n\n rgba = Gdk.RGBA()\n rgba.parse(\"rgb({},{},{})\".format(*rgb))\n self.square.override_background_color(Gtk.StateType.NORMAL, rgba)\n\n GObject.signal_handler_block(self.spinbutton_r, self.red_sb_id)\n self.spinbutton_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.spinbutton_r, self.red_sb_id)\n GObject.signal_handler_block(self.slider_r, self.red_s_id)\n self.slider_r.set_value(rgb[0])\n GObject.signal_handler_unblock(self.slider_r, self.red_s_id)\n\n GObject.signal_handler_block(self.spinbutton_g, self.green_sb_id)\n self.spinbutton_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.spinbutton_g, self.green_sb_id)\n GObject.signal_handler_block(self.slider_g, self.green_s_id)\n self.slider_g.set_value(rgb[1])\n GObject.signal_handler_unblock(self.slider_g, self.green_s_id)\n\n GObject.signal_handler_block(self.spinbutton_b, self.blue_sb_id)\n self.spinbutton_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.spinbutton_b, self.blue_sb_id)\n GObject.signal_handler_block(self.slider_b, self.blue_s_id)\n self.slider_b.set_value(rgb[2])\n GObject.signal_handler_unblock(self.slider_b, self.blue_s_id)\n\n GObject.signal_handler_block(self.output, self.output_id)\n self.output.set_text(rgb_to_hex(rgb))\n GObject.signal_handler_unblock(self.output, self.output_id)\n\n self.rgb_color = rgb\n self.change_output()", "def updateFromRgb ( self ):\n hsl = self.rgbToHsl( self.r, self.g, self.b )\n self.h = hsl[0]\n self.s = hsl[1]\n self.l = hsl[2]\n self.hsl = hsl\n self.hsla = [ hsl[0], hsl[1], hsl[2], self.a ]", "def _update_color(self, txt):\n color = self.valid_color\n if not self.hasAcceptableInput():\n color = self.invalid_color\n self.setStyleSheet(\"background-color: %s\" % color)", "def __button_routes_marker_color_clicked(self):\n color = QColorDialog.getColor()\n if color.isValid():\n 
self.vis.change_route_marker_color(color.name())", "def setColor(self):\n\n sel = cmds.ls(selection=True, type=['shape', 'transform'])\n if len(sel) > 0:\n for obj in sel:\n if cmds.nodeType(obj) == 'transform':\n shapes = cmds.listRelatives(obj, type='shape')\n if len(shapes) > 0 and self.shapeTypeCbx.isChecked():\n for shape in shapes:\n if cmds.attributeQuery('overrideEnabled', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=shape, exists=True):\n cmds.setAttr(shape + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=shape, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(shape + '.overrideColorRGB', color.red()/255.0, color.green()/255.0, color.blue()/255.0)\n\n if self.transformTypeCbx.isChecked():\n if cmds.attributeQuery('overrideEnabled', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideEnabled', True)\n if self.colorsTab.currentIndex() == 0:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', False)\n if cmds.attributeQuery('overrideColor', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideColor', self.colorSlider.value())\n else:\n if cmds.attributeQuery('overrideRGBColors', node=obj, exists=True):\n cmds.setAttr(obj + '.overrideRGBColors', True)\n if cmds.attributeQuery('overrideColorRGB', node=obj, exists=True):\n color = self.rgbColorDlg.currentColor()\n cmds.setAttr(obj + '.overrideColorRGB', color.red() / 255.0,\n color.green() / 255.0, color.blue() / 255.0)", "def on_rgb_slide(self,r,g,b):\n if not self.active:\n return\n red = int(round(r / 100.0))\n green = int(round(g / 100.0))\n blue = int(round(b / 100.0))\n self.rgb = colormodel.RGB(red, green, blue)\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.cmyk = a3.rgb_to_cmyk(self.rgb)\n assert (self.cmyk == None or type(self.cmyk) == colormodel.CMYK), 'rgb_to_cmyk does not return a CMYK object'\n self.update()", "def the_user_changes_the_color_of_the_device(color):\n web_app.change_property_softassert(\"color\",color)", "def set_colors(self, ):\n try:\n odd = self._parent.settings.get_key('interface.odd_color')\n even = self._parent.settings.get_key('interface.even_color')\n self.dialog.instruments.set_odd_color(odd)\n self.dialog.accounts.set_odd_color(odd)\n self.dialog.instruments.set_even_color(even)\n self.dialog.accounts.set_even_color(even)\n except od_exception_config_key_error:\n pass", "def setNewColor(self, color: QColor):\n self.drawNewColor = color", "def choose_rgb_color(self, b, name):\n section, option = name\n cur = b.background_color[:3]\n picker = RGBColorPicker(cur)\n self.subview_open = True\n rgb = picker.get_color()\n self.subview_open = False\n _stash.config.set(section, option, str(rgb))\n self.table.reload_data()\n self.save()", "def set_color(self, color):\n pass", "def update_courses_colors() -> None:\n last_modified = get_last_modified()\n table = get_table_content()\n\n if last_modified > table[\"lastUpdated\"]:\n print(\"getting file changes...\")\n\n service = 
get_authenticated_service(\"sheets\", \"v4\")\n\n response = (\n service.spreadsheets()\n .values()\n .get(spreadsheetId=SPREADSHEET_ID, range=RANGE_NAME, majorDimension=\"ROWS\")\n .execute()\n )\n values = response[\"values\"]\n\n for row in values:\n course = row[0].lower()\n try:\n color = row[1]\n except IndexError:\n color = None\n table[\"courses\"][course][\"color\"] = color\n\n table[\"lastUpdated\"] = last_modified\n\n with VALID_COURSES.open(\"w\", encoding=\"utf-8\") as outfile:\n json.dump(table, outfile, ensure_ascii=False)\n\n print(\"table updated\")\n else:\n print(\"table is up do date\")", "def initialize_colors(self) -> None:\n curses.init_pair(ColorPair.black_on_white.value, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(ColorPair.red_on_black.value, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.blue_on_black.value, curses.COLOR_BLUE, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.green_on_black.value, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(ColorPair.white_on_black.value, curses.COLOR_WHITE, curses.COLOR_BLACK)", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def _onAdd(self, event):\n dialog = wx.ColourDialog(self)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors.append(tup)\n self._tupleListToStrings()\n self._updateButtons(None)", "def on_autocolor_switch(self, action, value):\n\t\taction.set_state(value)\n\t\tautocolor = value.get_boolean()\n\t\tself.config[\"color\"][\"auto\"] = autocolor\n\n\t\tcolor = self.config[\"color\"][\"autofg\"] if autocolor else self.config[\"color\"][\"fg\"]\n\t\tself.gui[\"fg-colorbutton\"].set_rgba(color)\n\t\tself._mainapp.draw.color_update()", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def visual_attr_changed(self):\n if self.component:\n self.component.invalidate_draw()\n self.component.request_redraw()\n else:\n self.invalidate_draw()\n self.request_redraw()", "def _style_colours(self):\n\n pass", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def __button_routes_line_color_clicked(self):\n color = QColorDialog.getColor()\n if color.isValid():\n self.vis.change_route_line_color(color.name())", "def refresh(self):\n self.getWindow().getDecorView().postInvalidate()", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def slider_action(self, sender):\n self.r = self.rslider.value\n self.g = self.gslider.value\n self.b = self.bslider.value\n self.preview.background_color = self.rgb\n self.colorlabel.text = self.hexcode", "def visualAppearanceChanged(event, obj):\n\n if _currentPresentationManager >= 0:\n _PRESENTATION_MANAGERS[_currentPresentationManager].\\\n visualAppearanceChanged(event, obj)" ]
[ "0.6837024", "0.67573655", "0.67464536", "0.6674691", "0.6582233", "0.6487157", "0.6407548", "0.6334135", "0.62867904", "0.6270955", "0.6240912", "0.62101436", "0.62077475", "0.6205991", "0.61959213", "0.6193557", "0.6117789", "0.6114603", "0.61016107", "0.608556", "0.6044547", "0.5986738", "0.5953682", "0.5948146", "0.5940888", "0.5934156", "0.59273386", "0.5865338", "0.58461636", "0.58403015", "0.5828251", "0.5828249", "0.58150196", "0.5798064", "0.57686645", "0.57627136", "0.574454", "0.572702", "0.5705246", "0.5704471", "0.56950724", "0.56935364", "0.56820637", "0.56715095", "0.5671243", "0.5662111", "0.5659255", "0.565887", "0.56533617", "0.56514364", "0.56501585", "0.56253725", "0.5619638", "0.56188536", "0.5616549", "0.5598327", "0.5566128", "0.55518", "0.55458", "0.5543265", "0.5530235", "0.5504775", "0.5500107", "0.54997134", "0.5490214", "0.54858524", "0.54857385", "0.54794663", "0.5468379", "0.546746", "0.5463581", "0.54627216", "0.54599196", "0.5456444", "0.545007", "0.54476106", "0.5441663", "0.5436648", "0.54361427", "0.54345334", "0.54303306", "0.54159325", "0.5415179", "0.53959256", "0.5395253", "0.5394856", "0.5393877", "0.53858495", "0.53802854", "0.53770095", "0.53737766", "0.536869", "0.5367349", "0.53642005", "0.53642005", "0.5348591", "0.534672", "0.53318965", "0.5331587", "0.53270096" ]
0.6732327
3
Scaling of the channel boxes.
def scaling(self): return self.stacked._box_scaling[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):", "def __scale_bboxes(self, bboxes, scale_x, scale_y):\n with tf.variable_scope('scale_bboxes'):\n return tf.multiply(bboxes, tf.tile([[scale_y, scale_x, scale_y,\n scale_x]],\n [tf.shape(bboxes)[0], 1]))", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def scalebox(self, b):\n return [int(b[0]*self.video_w/self.detection_image_size[0]),\n int(b[1]*self.video_h/self.detection_image_size[1]),\n int(b[2]*self.video_w/self.detection_image_size[0]),\n int(b[3]*self.video_h/self.detection_image_size[1])]", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)", "def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def get_scale():\r\n\r\n \r\n return 0.5", "def rescale_box(box, img_size_orig, img_size_new):\n orig_w, orig_h = img_size_orig\n new_w, new_h = img_size_new\n scale_x = new_w / orig_w\n scale_y = new_h / orig_h\n sx, sy, ex, ey = box\n return [sx * scale_x, sy * scale_y, ex * scale_x, ey * scale_y]", "def scaleAll(self, scale):\n center = [self.width/2, self.height/2, 0, 0]\n matrix = self.scaleMatrix(scale, scale, scale)\n\n for wireframe in self.wireframes.values():\n wireframe.scale(center, matrix)", "def box_scale(k, m, s_min=0.1, s_max=0.9):\n\n # equation 4 from paper\n return s_min + (s_max - s_min) * (k - 1) / (m - 1)", "def scale(self, scale=1):\n self.x *= scale\n self.y *= scale\n self.width *= scale\n self.height *= scale\n\n # Always update the corners after operation\n self.update_corners()\n return", "def scale_positions_and_cell(self):\n\n taupscl = self.dt / self.taup\n stress = self.atoms.get_stress()\n old_pressure = self.atoms.get_isotropic_pressure(stress)\n scl_pressure = 1.0 - taupscl * self.compressibility / 3.0 * \\\n (self.pressure - old_pressure)\n\n #print \"old_pressure\", old_pressure\n #print \"volume scaling by:\", scl_pressure\n\n cell = self.atoms.get_cell()\n cell = scl_pressure * cell\n self.atoms.set_cell(cell, scale_atoms=True)", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def scaling(self):\n return self.__scaling", "def GetScale(self):\n ...", "def compute_scale(self, box, plane):\n 
center, normal = plane\n vertex_dots = [np.dot(vertex, normal) for vertex in box[1:]]\n vertex_dots = np.sort(vertex_dots)\n center_dot = np.dot(center, normal)\n scales = center_dot / vertex_dots[:4]\n return np.mean(scales)", "def scale(self):\n return self._scale", "def scale(self, sx, sy):\n frameWidth *= sx\n frameHeight *= sy\n repaint()", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def verticalScale(self):\n self.model.refreshScreen()", "def _resize_bboxes(self, ori_bboxes, scale_factor):\n bboxes = ori_bboxes * scale_factor\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.img_shape[0])\n return bboxes", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def scale(self, s):\n for n in range(len(self.mV)):\n self.mV[n] *= s\n return self", "def scale(self, from_min, from_max, to_min, to_max):\n for i in range(len(self.poses)):\n self.poses[i].position.scale(from_min[:3], from_max[:3], to_min[:3], to_max[:3])\n self.wrenches[i].scale(from_min[3:], from_max[3:], to_min[3:], to_max[3:])", "def scale(self):\n return self.distribution.scale", "def calculate_scaling_factors(blk):\n\n def cs(blk2):\n \"\"\"Recursive function for to do subblocks first\"\"\"\n for b in blk2.component_data_objects(pyo.Block, descend_into=False):\n cs(b)\n if hasattr(blk2, \"calculate_scaling_factors\"):\n blk2.calculate_scaling_factors()\n\n # Call recursive function to run calculate_scaling_factors on blocks from\n # the bottom up.\n cs(blk)\n # If a scale factor is set for an indexed component, propagate it to the\n # component data if a scale factor hasn't already been explicitly set\n propagate_indexed_component_scaling_factors(blk)\n # Use the variable scaling factors to scale the arc constraints.\n scale_arc_constraints(blk)", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def _convert_to_square(self, bboxes):\n\n square_bboxes = torch.zeros_like(bboxes, 
device=self.device, dtype=torch.float32)\n x1, y1, x2, y2 = [bboxes[:, i].float() for i in range(4)]\n h = y2 - y1 + 1.0\n w = x2 - x1 + 1.0\n max_side = torch.max(h, w)\n square_bboxes[:, 0] = x1 + w*0.5 - max_side*0.5\n square_bboxes[:, 1] = y1 + h*0.5 - max_side*0.5\n square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0\n square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0\n\n square_bboxes = torch.ceil(square_bboxes + 1).int()\n return square_bboxes", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def scale(self, x, y, z) -> None:\n ...", "def scale(self):\n return self._gev_bijector.scale", "def rescale_boxes(boxes, current_dim, original_shape):\n orig_h, orig_w = original_shape\n\n # The amount of padding that was added\n pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))\n pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))\n\n # Image height and width after padding is removed\n unpad_h = current_dim - pad_y\n unpad_w = current_dim - pad_x\n\n # Rescale bounding boxes to dimension of original image\n boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h\n boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h\n return boxes", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "def getScale(self):\n return self.factor**self.turnOn", "def scale_in(self, count):\n pass", "def scale_configuration(trj, scale, system='W'):\n trj['box'][0] = trj['box'][0]*scale\n trj['box0'] = trj['box0']*scale\n trj['energy'][0] = universal_eos(scale, system)*len(trj['xyz'][0])\n trj['free_energy'][0] = universal_eos(scale, system)*len(trj['xyz'][0])\n trj['total_energy'][0] = universal_eos(scale, system)*len(trj['xyz'][0])\n trj['forces'][0] = np.zeros_like(trj['forces'][0])\n\n return trj", "def scale(self, sx : float, sy : float, sz : float):\n answ = self.clone()\n for i in range(len(self._elements)):\n answ._elements[i]._element = self._elements[i].element.scale(sx, sy, sz)\n\n return answ", "def set_scales(self):\r\n self.canvas.update()\r\n self.dxmin = self.dmargin\r\n self.dymin = self.dmargin\r\n self.dxmax = self.canvas.winfo_width() - self.dmargin - 1\r\n self.dymax = self.canvas.winfo_height() - self.dmargin - 1\r\n\r\n # Flip the Y coordinates to invert the result.\r\n if self.y_is_flipped:\r\n self.dymin, self.dymax = self.dymax, self.dymin\r\n\r\n self.xscale = (self.dxmax - self.dxmin) / (self.wxmax - self.wxmin)\r\n self.yscale = (self.dymax - self.dymin) / (self.wymax - self.wymin)\r\n\r\n # Calculate 1 pixel in world coordinates.\r\n self.xpix = 1 / self.xscale\r\n self.ypix = 1 / self.yscale", "def update_size(self, dt):\n if self.cursor_on_button:\n self.size = min(self.SIZE_MAX, self.size + self.SCALING_VEL * dt)\n else:\n self.size = max(self.SIZE_MIN, self.size - self.SCALING_VEL * dt)\n self.surface = pg.transform.scale(self.image, (round(self.size), round(self.size)))", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n 
impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def box2cs(box):\r\n input_size = (256, 256)\r\n\r\n x, y, w, h = box[:4]\r\n aspect_ratio = input_size[0] / input_size[1]\r\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\r\n\r\n if w > aspect_ratio * h:\r\n h = w * 1.0 / aspect_ratio\r\n elif w < aspect_ratio * h:\r\n w = h * aspect_ratio\r\n\r\n # pixel std is 200.0\r\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\r\n\r\n scale = scale * 1.25\r\n\r\n return center, scale", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def scale(self, sc):\n daskD.wait(self.client.map(_call_scale, self.vecDask, sc=sc, pure=False))\n return self", "def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def __init__(self,scale):\n self.scale = scale", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor", "def updatemaxbombs(self):\n tiles: int = int(self.widthbox.get()) * int(self.heightbox.get())\n self.bombsbox.configure(to=tiles/2)", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def __resizeBox(self, x, y):\n # Implement the correct behavior for dragging a side\n # of the box: Only change one dimension.\n if not self.aspectRatio:\n if self.__currentCursor == wx.CURSOR_SIZENS:\n x = None\n elif self.__currentCursor == wx.CURSOR_SIZEWE:\n y = None\n\n x0,y0,w0,h0 = self.currentBox\n currentExtent = boxToExtent(self.currentBox)\n if x == None:\n if w0 < 1:\n w0 += 1\n else:\n w0 -= 1\n x = x0 + w0\n if y == None:\n if h0 < 1:\n h0 += 1\n else:\n h0 -= 1\n y = y0 + h0\n x1,y1 = x, y\n w, h = abs(x1-x0)+1, abs(y1-y0)+1\n if self.aspectRatio:\n w = max(w, int(h * self.aspectRatio))\n h = int(w / self.aspectRatio)\n w *= [1,-1][isNegative(x1-x0)]\n h *= [1,-1][isNegative(y1-y0)]\n newbox = (x0, y0, w, h)\n self.__drawAndErase(boxToDraw=normalizeBox(newbox), boxToErase=normalizeBox(self.currentBox))\n self.currentBox = (x0, y0, w, h)", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def scale(self, size=128):\n scale_factor = size / max(self.voxels.shape)\n self.voxels = ndimage.zoom(self.voxels, scale_factor)\n self.point_position = self.point_position * scale_factor\n self.voxel_size = False # To ignore this\n \n return(self)", "def scale(self):\n return self._a", "def GetScaleBlocks(width):\n\n 
rord=numpy.log10(abs(width)/2.0)\n nrord=rord % 1\n\n if nrord < numpy.log10(2):\n spc=0.2*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n elif nrord < numpy.log10(5):\n spc=0.5*pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=5*spc\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4]\n else:\n spc=pow(10,numpy.floor(rord))\n smallspc=spc\n bigspc=spc*5\n newspc=[0,smallspc,smallspc*2,smallspc*3,smallspc*4,smallspc*5]\n\n if len(newspc) == 5:\n #labels=['0',None,\"%g\" % smallspc*2,None,\"%g\" % (smallspc*4)]\n labels=['0',None,None,None,\"%g\" % (smallspc*4)]\n else:\n labels=['0',None,None,None,None,\"%g\" % (smallspc*5)]\n\n temp_max=newspc[len(newspc)-1]\n start=temp_max\n for temp in numpy.arange(start,width-bigspc/2,bigspc):\n temp_max=temp_max+bigspc\n newspc.append(temp_max)\n labels.append(\"%g\" % temp_max)\n\n #start=temp_max\n #for temp in Numeric.arange(start,width-smallspc/2,smallspc):\n # labels.append(None)\n # temp_max=temp_max+smallspc \n # newspc.append(temp_max) \n\n return (numpy.array(newspc,numpy.float32),labels)", "def __init__(self, incoming, size, method='BILINEAR', align_corners=False, name='ScalingLayer'):\n super(ScalingLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n if len(self.incoming_shape) > 4:\n self.output_shape = self.incoming_shape[:2] + list(size) + self.incoming_shape[4:]\n else:\n self.output_shape = self.incoming_shape[:1] + list(size) + self.incoming_shape[3:]\n \n self.scale_size = size\n self.method_name = method\n self.method = getattr(tf.image.ResizeMethod, method)\n self.align_corners = align_corners\n \n self.out = None\n self.name = name", "def generate_scales(self, height, width):\n min_hw = min(height, width)\n m_scale = 12.0 / self.min_size\n min_hw = int(min_hw * m_scale)\n scales = []\n factor_count = 0\n while min_hw >= 50:\n scales.append(m_scale * pow(self.face_factor, factor_count))\n min_hw = int(min_hw * self.face_factor)\n factor_count += 1\n return scales", "def rescale(self, event: tkinter.Event) -> None:\n # the properties which are linked to the event of reconfiguration\n # contain all the new sizes of the panel :\n self.width, self.height = event.width - 4, event.height - 4\n # The subtraction of 4 pixels is here to compensate the width\n # of the 'highlight bordure' rolling the canvas)\n self.draw_board()", "def scale_image(self, pixels, size):\n x_min, x_max = np.amin(pixels[:,0]), np.amax(pixels[:,0])\n y_min, y_max = np.amin(pixels[:,1]), np.amax(pixels[:,1])\n z_min, z_max = np.amin(pixels[:,2]), np.amax(pixels[:,2])\n \n pixels[:,0] -= x_min \n pixels[:,1] -= y_min\n pixels[:,2] -= z_min\n \n x_max -= x_min\n y_max -= y_min\n z_max -= z_min\n \n scale_factor = size / max(x_max, y_max, z_max) \n # All points are now between [0..max]\n\n pixels *= scale_factor\n return pixels", "def verticalScaleIncrease(self):\n scaleFac = float(self.qline4.text())\n self.qline4.setText(str(scaleFac * 2))\n self.model.refreshScreen()", "def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf, size).convert_alpha()", "def scale(self, factor):\n self.b = factor * self.b", "def transform(self, interval):\n c = self.physics_canvas.canvas\n c.scale(self.canvas_id, 0,1, 1.01, 1.01)", "def scaling(self):\n \n if self.colindex == self.rowsize: # last chart in row\n self.colindex = 0\n self.rowindex += 1 \n xorigin = self.indent + (self.colindex * 
self.xscale) \n yorigin = self.rowindex * self.yscale\n xscale = self.xscale # to fulfil % formatting below\n yscale = self.yscale \n self.colindex += 1\n\n res = \"origin(%(xorigin)s%%, %(yorigin)s%%), scale(%(xscale)s%%, %(yscale)s%%)\" % locals()\n return res", "def auto_convert(boxes: Type[Union[Tensor, np.ndarray]], w: int, h: int):\n\n if boxes.max() < 2:\n # to pixel coordinates\n boxes[:, 0::2] *= w\n boxes[:, 1::2] *= h\n else:\n # to normalized 0-1\n boxes[:, 0::2] /= w\n boxes[:, 1::2] /= h\n return boxes", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def scale(self, size):\n self._surf = pygame.transform.smoothscale(self._surf,\n size).convert_alpha()\n self._version += 1\n return self", "def scale(self, scale_x: float, scale_y: float) -> None:\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y", "def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]", "def rel_boxes_resize_square(boxes, old_shape):\n h0, w0 = old_shape\n\n dw0, dh0 = max(h0, w0) - w0, max(w0, h0) - h0\n w1, h1 = w0 + dw0, h0 + dh0\n\n box_abs = boxes * np.tile(old_shape[::-1], 2)\n box_abs[:, 0::2] += dw0 / 2\n box_abs[:, 1::2] += dh0 / 2\n\n box_abs[:, 0::2] /= w1\n box_abs[:, 1::2] /= h1\n\n return box_abs", "def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new", "def get_scaling(self):\n if self.constrain_navigation:\n self.activate_navigation_constrain()\n return self.sx, self.sy", "def resize_to_box(im, size):\n #mx = np.max(im.shape[:2])\n\n factors = [size[i]/im.shape[i] for i in range(2)]\n\n f = np.min(factors)\n if f < 1.0:\n return resize_with_factor_new(im, f)\n else:\n return im", "def scale(self,bvp):\n\n sol = bvp.solution\n # Additional aux entries for initial and terminal BCs\n extras = [{'type':'initial','vars':self.problem_data['state_list']},\n {'type':'terminal','vars':self.problem_data['state_list']}]\n\n # Scale the states and costates\n for idx,state in enumerate(self.problem_data['state_list']):\n sol.y[idx,:] /= self.scale_vals['states'][state]\n\n # Scale auxiliary variables\n for aux in (self.problem_data['aux_list']+extras):\n if aux['type'] not in Scaling.excluded_aux:\n for var in aux['vars']:\n sol.aux[aux['type']][var] /= self.scale_vals[aux['type']][var]\n\n # Scale parameters\n for idx, param in enumerate(self.problem_data['parameter_list']):\n sol.parameters[idx] /= self.scale_vals['parameters'][param]", "def on_scale (self):\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_foreground()\n\t\t\tself.redraw_background()\n\n\t\tif self.expand2 == _('Use a scrollbar'):\n\t\t\tself.width = int((self.icon_size * 2 * self.rows + ((self.border_size+self.shadow_size)*2)+15 ) + 24/self.scale)\n\t\t\tself.update_scrollbar()", "def setScaling(factor=1.0):\n dislin.sclfac(factor)", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! 
Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def resize_and_crop_box(self):\n box = self.scale_box(self._box, self._scaled_width, self._scaled_height)\n box = self.offset_box(box, self._crop_offset_x, self._crop_offset_y)\n box = self.clip_boxes(box)\n return box", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def scale2x(self) -> 'BaseImage':\n self._surface = pygame.transform.scale2x(self._surface)\n return self", "def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)", "def scale_channel(channel):\r\n # A possibly cheaper version can be done using cumsum/unique_with_counts\r\n # over the histogram values, rather than iterating over the entire image.\r\n # to compute mins and maxes.\r\n lo = tf.cast(tf.reduce_min(channel), tf.float32)\r\n hi = tf.cast(tf.reduce_max(channel), tf.float32)\r\n\r\n # Scale the image, making the lowest value 0 and the highest value 255.\r\n def scale_values(im):\r\n scale = 255.0 / (hi - lo)\r\n offset = -lo * scale\r\n im = tf.cast(im, tf.float32) * scale + offset\r\n return tf.saturate_cast(im, tf.uint8)\r\n\r\n result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)\r\n return result", "def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self", "def __normalizeBox(self):\n self.currentBox = normalizeBox(self.currentBox)", "def Draw_Scale( self ):\r\n self.canvas_scale.delete(ALL)\r\n if(cb.longx != 0):\r\n value = str( round( cb.longx, 3 ) )\r\n self.canvas_scale.create_line( cb.xorigin,5,cb.xorigin + cb.xtotal,5 )\r\n splits = 10.0\r\n increment = cb.xtotal/splits\r\n for i in range(int(splits + 1)):\r\n self.canvas_scale.create_line( int(cb.xorigin+i*increment),1,int(cb.xorigin+i*increment),9 )\r\n if( self.filter_distance > cb.longx ):\r\n self.filter_distance = cb.longx\r\n x = cb.xtotal - self.filter_distance*cb.xtotal/cb.longx + cb.xorigin\r\n top = str(round(self.filter_distance,3))\r\n \r\n while len(top) < 5:\r\n top = top + \"0\"\r\n self.scale_text = self.canvas_scale.create_text( cb.xorigin + cb.xtotal + 10,1,anchor = \"nw\",text = top + \"/\" + value)\r\n self.scale_marker = self.canvas_scale.create_polygon( x,7, x+4,3, x-4,3, fill=self.highlight_color,outline=self.highlight_color )\r\n if( self.filter_line_on ):\r\n if(self.filter_line != 0 ):\r\n self.canvas_one.delete( self.filter_line )\r\n self.filter_line = self.canvas_one.create_line( x,0,x,self.ys, fill=self.highlight_color)", "def scale_actions(self, actions):", "def rescale(self, factor):\n scaled_size = (int(self.width * factor), int(self.height * factor))\n return self.resize(scaled_size)", "def cs(blk2):\n for b in blk2.component_data_objects(pyo.Block, descend_into=False):\n cs(b)\n if hasattr(blk2, \"calculate_scaling_factors\"):\n blk2.calculate_scaling_factors()", "def extractScale(self,groups):\n self.scaleX = float(groups[0])\n self.scaleY = float(groups[0])\n if len(groups) == 2 and groups[1]:\n self.scaleY = float(groups[1])\n self.matrix = [[self.scaleX, 0.0, 0.0], \\\n [0.0, self.scaleY, 0.0]]", "def scale4x(self) -> 'BaseImage':\n return self.scale2x().scale2x()", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if 
self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def center_size(boxes):\n wh = boxes[:, 2:] - boxes[:, :2] + 1.0\n if isinstance(boxes, np.ndarray):\n return np.column_stack((boxes[:, :2] + 0.5 * wh, wh))\n return torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)" ]
[ "0.6791516", "0.66905487", "0.6362399", "0.6291339", "0.6284861", "0.6226618", "0.62150896", "0.61979586", "0.6165425", "0.60886663", "0.6006513", "0.60016114", "0.5972433", "0.5896266", "0.5881052", "0.5877557", "0.58730024", "0.58693355", "0.58449113", "0.58054405", "0.5792033", "0.5792033", "0.57887053", "0.5782439", "0.5773249", "0.5772994", "0.57594764", "0.5751153", "0.57501566", "0.57501197", "0.5731197", "0.57300615", "0.5694439", "0.5685178", "0.5673715", "0.5673304", "0.565479", "0.565024", "0.56491584", "0.564897", "0.56206393", "0.562011", "0.5614345", "0.56116956", "0.5597208", "0.55896676", "0.558107", "0.5564933", "0.55614895", "0.55546236", "0.55458", "0.5543632", "0.55407596", "0.5528764", "0.55280346", "0.55221045", "0.55203754", "0.5513696", "0.5503279", "0.5489196", "0.54758096", "0.5475316", "0.5464532", "0.54633766", "0.54592484", "0.5445801", "0.54374474", "0.543298", "0.5425154", "0.5419447", "0.5407815", "0.5399975", "0.539724", "0.5376944", "0.53756464", "0.5375484", "0.53665936", "0.5365016", "0.5364462", "0.5359171", "0.53575027", "0.53554887", "0.53549653", "0.5349501", "0.53386796", "0.53330374", "0.5332821", "0.53326744", "0.5328786", "0.53216827", "0.5320566", "0.5320466", "0.53198045", "0.53141636", "0.5311906", "0.5309847", "0.5309488", "0.5309488", "0.5304464", "0.5303752" ]
0.6452094
2
Select a cluster by clicking on a spike.
def on_mouse_click(self, e): if 'Control' in e.modifiers: # Get mouse position in NDC. box_id, _ = self.canvas.stacked.box_map(e.pos) channel_id = np.nonzero(self.channel_y_ranks == box_id)[0] # Find the spike and cluster closest to the mouse. db = self.data_bounds # Get the information about the displayed spikes. wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch] if not wt: return # Get the time coordinate of the mouse position. mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos) mouse_time = Range(NDC, db).apply(mouse_pos)[0][0] # Get the closest spike id. times, spike_ids, spike_clusters, channel_ids = zip(*wt) i = np.argmin(np.abs(np.array(times) - mouse_time)) # Raise the select_spike event. spike_id = spike_ids[i] cluster_id = spike_clusters[i] emit('select_spike', self, channel_id=channel_id, spike_id=spike_id, cluster_id=cluster_id) if 'Shift' in e.modifiers: # Get mouse position in NDC. box_id, _ = self.canvas.stacked.box_map(e.pos) channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0]) emit('select_channel', self, channel_id=channel_id, button=e.button)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def find_cluster(self, id):\n raise NotImplementedError", "def selectVertex(self, addToSelection: bool) -> None:\n ...", "def mk_station_selector(on_select,\n stations=None,\n dst_map=None,\n **kw):\n import ipyleaflet as L\n\n if stations is None:\n stations = get_stations()\n\n stations = [st for st in stations if st.pos is not None]\n pos2st = {st.pos: st for st in stations}\n\n def on_click(event='', type='', coordinates=None):\n pos = tuple(coordinates)\n st = pos2st.get(pos)\n if st is None:\n # should probably log warning here\n print(\"Can't map click to station\")\n return\n\n on_select(st)\n\n markers = [L.Marker(location=st.pos,\n draggable=False,\n title=st.name)\n for st in stations]\n\n cluster = L.MarkerCluster(markers=markers)\n\n if dst_map is None:\n dst_map = L.Map(**kw)\n\n dst_map.add_layer(cluster)\n cluster.on_click(on_click)\n\n return dst_map, cluster", "def test_selecting_nodes_clicking_them_discovered(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def select(self):\r\n pass", "def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def select_cell(self, event):\n # Get row and symbols.\n row = event.GetRow()\n symbol1 = self.grid_correlations.GetCellValue(row, self.COLUMN_SYMBOL1)\n symbol2 = self.grid_correlations.GetCellValue(row, 
self.COLUMN_SYMBOL2)\n self.__selected_correlation = [symbol1, symbol2]\n\n self.show_graph(symbol1, symbol2)", "def cluster(self,\n clustering=None,\n algorithm='klustakwik',\n spike_ids=None,\n **kwargs):\n if clustering is None:\n clustering = 'main'\n\n kk2_dir = op.join(self.settings.exp_settings_dir, 'klustakwik2')\n _ensure_dir_exists(kk2_dir)\n\n # Take KK2's default parameters.\n from klustakwik2.default_parameters import default_parameters\n params = default_parameters.copy()\n # Update the PRM ones, by filtering them.\n params.update({k: v for k, v in self.model.metadata.items()\n if k in default_parameters})\n # Update the ones passed to the function.\n params.update(kwargs)\n\n # Original spike_clusters array.\n if self.model.spike_clusters is None:\n n_spikes = (len(spike_ids) if spike_ids is not None\n else self.model.n_spikes)\n spike_clusters_orig = np.zeros(n_spikes, dtype=np.int32)\n else:\n spike_clusters_orig = self.model.spike_clusters.copy()\n\n # HACK: there needs to be one clustering.\n if 'empty' not in self.model.clusterings:\n self.model.add_clustering('empty', spike_clusters_orig)\n\n # Instantiate the KlustaKwik instance.\n kk = KlustaKwik(**params)\n\n # Save the current clustering in the Kwik file.\n @kk.connect\n def on_iter(sc):\n # Update the original spike clusters.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n # Save to a text file.\n path = op.join(kk2_dir, 'spike_clusters.txt')\n # Backup.\n if op.exists(path):\n shutil.copy(path, path + '~')\n np.savetxt(path, spike_clusters, fmt='%d')\n\n info(\"Running {}...\".format(algorithm))\n # Run KK.\n sc = kk.cluster(model=self.model, spike_ids=spike_ids)\n info(\"The automatic clustering process has finished.\")\n\n # Save the results in the Kwik file.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n\n # Add a new clustering and switch to it.\n if clustering in self.model.clusterings:\n self.change_clustering('empty')\n self.model.delete_clustering(clustering)\n self.model.add_clustering(clustering, spike_clusters)\n\n # Copy the main clustering to original (only if this is the very\n # first run of the clustering algorithm).\n if clustering == 'main':\n self.model.copy_clustering('main', 'original')\n self.change_clustering(clustering)\n\n # Set the new clustering metadata.\n params = kk.params\n params['version'] = kk.version\n metadata = {'{}_{}'.format(algorithm, name): value\n for name, value in params.items()}\n self.model.clustering_metadata.update(metadata)\n self.save()\n info(\"The clustering has been saved in the \"\n \"`{}` clustering in the `.kwik` file.\".format(clustering))\n self.model.delete_clustering('empty')\n return sc", "def select(self):\n pass", "def select(self):\n pass", "def on_cell_clicked(self, modelIndex):\n self.catalogue_map.select([self.catalogue_model.event_at(modelIndex)])", "def assignSpikes(clusts, df, show=False, force=True):\n if 'clust_inds' in df.columns and force is False:\n print('Data frame already contains clust_inds')\n return\n \n def assignTms(clusts, tms):\n # Assign a delta_tms to cluster1 or cluster2\n assns = [abs(np.mean(clusts[c])-tms) for c in range(len(clusts))]\n return assns.index(min(assns))\n \n # Assign each spike time to a cluster\n clust_tms = [ [] for c in clusts]\n for t in range(len(df.times)-1):\n t_clust = assignTms(clusts, df.times.values[t+1]-df.times.values[t])\n clust_tms[t_clust].append(df.times.values[t])\n \n # Group spikes from same spike type together\n type_tms = 
[]\n for c in range(len(clust_tms)):\n for t in clust_tms[c]:\n type_tms.append([t, c]) # [spk tms, clust index]\n \n # Group these together \n clust_id = []\n for i in range(df.shape[0]):\n if df.iloc[i].times in [k[0] for k in type_tms]:\n clust_id.append(type_tms[[k[0] for k in type_tms].index(df.iloc[i].times)][1])\n else: # Not matching spike found -- happens w/ isolated spikes\n clust_id.append(np.nan)\n \n df['clust_inds'] = clust_id\n print([clust_id.count(j) for j in list(set(clust_id))], list(set(clust_id)))\n if show: # Show the cluter spikes\n for c in range(max(clust_id)+1): # Plot cluster spikes individually\n temp_spikes = df[df['clust_inds']==c]['times']\n plt.plot(temp_spikes, [c+1 for i in temp_spikes], '|', \n color=['blue', 'red'][c])\n plt.ylim([0,3])\n plt.show()\n \n return df", "def change_clustering(self, clustering):\n self._clustering = clustering\n self.model.clustering = clustering\n info(\"Switched to `{}` clustering.\".format(clustering))\n self.emit('open')", "def pressSCV(self):\n\t\t\t\n\t\tm_name = self.ui.findChild(QWidget, \"m_name\")\n\t\tm_name.setText(\"Sensorinen neurografia\")\n\t\t\n\t\t\n\t\tprint \"SCV button pressed\"\n\t\t# Make a database query and draw a graph and distribution\n\t\t\n\t\t# set every checkbox back to the initial state\n\t\t\n\t\tif self.patient_chosen:\n\t\t\t# Make a database query which fetches the patient's SCV data.\n\t\t\tprint \"showing patient SCV data\"\n\t\t\n\t\tself.current_measurement = \"SCV\"\t\n\t\treturn", "def select_cluster(self, clusters):\n min_sim = float(\"inf\")\n min_cluster = None\n \n for cluster in clusters:\n sim = 0.0\n for index, value in cluster.centroid.items():\n sim += value * value\n \n if sim < min_sim:\n min_sim = sim\n min_cluster = cluster\n \n return min_cluster", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def createNewcluster(self):\n self.segsChanged = True\n\n # There should be at least one segment selected to proceed\n proceed = False\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n proceed = True\n break\n\n if proceed:\n # User to enter new cluster name\n #newLabel, ok = QInputDialog.getText(self, 'Cluster name', 'Enter unique Cluster Name\\t\\t\\t')\n #if not ok:\n #self.completeChanged.emit()\n #return\n names = [self.tboxes[ID].text() for ID in range(self.nclasses)]\n nextNumber = 0\n newLabel = 'Cluster_'+str(nextNumber)\n names.append(newLabel)\n while len(names) != len(set(names)):\n del(names[-1])\n nextNumber += 1\n newLabel = 'Cluster_'+str(nextNumber)\n names.append(newLabel)\n\n # create new cluster ID, label\n newID = len(self.clusters)\n self.clusters[newID] = newLabel\n self.nclasses += 1\n print('after adding new cluster: ', self.clusters)\n\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n self.segments[ix][-1] = newID\n self.picbuttons[ix].mark = 'green'\n\n # Delete clusters with no members left and update self.clusters before adding the new cluster\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n nclasses 
= self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n # print('[old, new] labels')\n labels = dict(labels)\n print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before: ', self.clusters)\n self.clusters = clusters\n self.nclasses = nclasses\n print('after: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n # redraw the buttons\n self.clearButtons()\n self.updateButtons()\n #self.cmbUpdateSeg.addItem(newLabel)\n self.completeChanged.emit()\n else:\n msg = SupportClasses_GUI.MessagePopup(\"t\", \"Select\", \"Select calls to make the new cluster\")\n msg.exec_()\n self.completeChanged.emit()\n return", "def select_sweepstakes(self):\n pass", "def select(self, target):", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def ksel(self, k: int) -> Status:\n result = self._read_inline(f\"ksel({k})\")\n return Status(result)", "def poll_cluster(self, server, obj, name):\n\n return self._poll_group('cluster', server, obj, name)", "def select_desired_index_from_the_list(self, index_name):\n select_index = \"(//*[name()='svg'][@class='css-8mmkcg'])\"\n select_index_sitem = self.locator_finder_by_xpath(select_index)\n select_index_sitem.click()\n time.sleep(1)\n\n element = self.locator_finder_by_xpath(f\"//*[contains(text(), '{index_name}')]\")\n actions = ActionChains(self.driver)\n # Move the mouse pointer to the element containing the text\n actions.move_to_element(element)\n # Perform a click action\n actions.click().perform()", "def select_critical_for_failover_group_select_2(driver):\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__Critical\"]').click()\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Failover Group\"]').click()\n wait_on_element(driver, 0.5, 5, '//mat-option[@ix-auto=\"option__Failover Group_2\"]')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Failover Group_2\"]').click()", "def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def choose_box(group, plot_data):\n if group.shape[0] == 1:\n return group\n else:\n #Find centroid\n individual_id = group.individual.unique()[0]\n stem_location = plot_data[plot_data[\"individual\"]==individual_id].geometry.iloc[0]\n closest_stem = group.centroid.distance(stem_location).sort_values().index[0]\n return group.loc[[closest_stem]]", "def choose_box(group, plot_data):\n if group.shape[0] == 1:\n return group\n else:\n #Find centroid\n individual_id = group.individual.unique()[0]\n stem_location = plot_data[plot_data[\"individual\"]==individual_id].geometry.iloc[0]\n closest_stem = group.centroid.distance(stem_location).sort_values().index[0]\n return group.loc[[closest_stem]]", "def select_settings_tab(self, is_cluster, check=False):\n self.click_submenu_entry(\"Settings\")\n if check:\n if not is_cluster:\n select_settings_name_textbox_sitem = self.locator_finder_by_xpath(self.select_settings_name_textbox_id)\n select_settings_name_textbox_sitem.click()\n select_settings_name_textbox_sitem.clear()\n select_settings_name_textbox_sitem.send_keys(\"testDocRenamed\")\n 
self.locator_finder_by_select(self.select_settings_wait_type_id, 0)\n select_new_settings_save_btn_sitem = None\n try:\n select_new_settings_save_btn_sitem = self.locator_finder_by_id(self.select_newer_settings_save_btn_id)\n if select_new_settings_save_btn_sitem.text != \"Save\":\n select_new_settings_save_btn_sitem = self.locator_finder_by_id(self.select_new_settings_save_btn_id)\n except TimeoutException:\n select_new_settings_save_btn_sitem = self.locator_finder_by_id(self.select_new_settings_save_btn_id)\n\n select_new_settings_save_btn_sitem.click()\n time.sleep(2)\n print(\"Loading Index into memory\\n\")\n select_load_index_into_memory_sitem = self.locator_finder_by_xpath(self.select_load_index_into_memory_id)\n select_load_index_into_memory_sitem.click()\n time.sleep(2)\n self.wait_for_ajax()", "def on_middle_click(self, client, game) -> None:\n pass", "def click(self, selector):\n el = self.locate_element(selector)\n el.click()", "def pick(layer, event):\n # on press\n layer.selected_label = layer._value or 0", "def select_entry(self):\n logging.debug(\"element selected\")\n if len(self.contents) > 0:\n self.to_background()\n self.contents[self.pointer][1]()\n self.to_foreground()\n if self.path_chosen:\n self.deactivate()\n else:\n self.to_foreground()", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def _find_cluster(clusters, label):\n for clst in clusters:\n if clst.label == label: return clst\n return None", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def cluster(self, cluster):\n\n self._cluster = cluster", "def click(self, event):\n if self.segs == []:\n startCircle = self.findInter(event.x, event.y)\n if startCircle:\n xa, ya, xb, yb = self.can.coords(startCircle)\n self.firstCoords = ((xa + xb)/2, (ya + yb)/2)\n if not self.helpShown:\n self.showHelp()", "def select_cluster_numbers():\n\n cluster_list = []\n\n while True:\n cluster_number = input('Specify clusters whose motifs should be searched against the sequence dataset (press Enter when done): ')\n if cluster_number == '':\n break\n else:\n cluster_list.append(cluster_number)\n\n return cluster_list", "def EditCluster(self, event = None):\n self.UpdateData()\n clusterWindow = ClusterWindow(self, self.state)\n clusterWindow.ShowModal()\n self.React()", "def event_node_selected(self, node):\n # TODO\n print(\"selected node:\", node)", "def show_vsan_cluster(self, cluster_id):\n url = \"clusters/%s\" % str(cluster_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def test_selecting_nodes_clicking_them_offline(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Offline node is not selected')", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with 
open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def selectitem_double_click(a):\n\n view_thumbnail_main(treeview)", "def SelectClusters(image, background_prediction, result_clustering,\n n_clusters, bands_thresholds=[\"B2\", \"B3\", \"B4\"],\n region_of_interest=None,\n tileScale=PARAMS_CLOUDCLUSTERSCORE_DEFAULT['tileScale']): \n bands_norm_difference = [b + \"_difference\" for b in bands_thresholds]\n\n img_joined = image.subtract(background_prediction)\\\n .select(bands_thresholds, bands_norm_difference)\\\n .addBands(image.select(bands_thresholds))\n\n bands_and_difference_bands = bands_thresholds + bands_norm_difference\n\n multitemporal_score = None\n reflectance_score = None\n\n for i in range(n_clusters):\n img_diff_clus = img_joined.updateMask(\n result_clustering.eq(i)).select(bands_and_difference_bands)\n\n clusteri = img_diff_clus.reduceRegion(ee.Reducer.mean(),\n geometry=region_of_interest,\n bestEffort=True,\n scale=30,\n tileScale=tileScale\n )\n \n clusteri_diff = clusteri.toArray(bands_norm_difference)\n clusteri_refl = clusteri.toArray(bands_thresholds)\n \n clusteri_refl_norm = clusteri_refl.multiply(clusteri_refl).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n clusteridiff_mean = clusteri_diff.reduce(ee.Reducer.mean(), axes=[0]).get([0])\n clusteridiff_norm = clusteri_diff.multiply(clusteri_diff).reduce(ee.Reducer.mean(),\n axes=[0]).sqrt().get([0])\n\n multitemporal_score_clusteri = ee.Algorithms.If(clusteridiff_mean.gt(0),\n clusteridiff_norm,\n clusteridiff_norm.multiply(-1))\n\n multitemporal_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(multitemporal_score_clusteri))\n reflectance_score_clusteri = result_clustering.eq(\n i).toFloat().multiply(ee.Number(clusteri_refl_norm))\n\n if multitemporal_score is None:\n multitemporal_score = multitemporal_score_clusteri\n reflectance_score = reflectance_score_clusteri\n else:\n multitemporal_score = multitemporal_score.add(\n multitemporal_score_clusteri)\n reflectance_score = reflectance_score.add(\n reflectance_score_clusteri)\n\n return multitemporal_score, reflectance_score", "def click(self, selector, index=0):\n self.find_css(selector).nth(index).click()", "def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)", "def onclick_pick(self, click):\n from ..backend.util import _annot\n from ..backend.viz_raw import _plot_single_psd\n\n if self.plotType == 'All PSD':\n _annot(self, click, self.annot)\n # If double click, we plot the PSD\n if click.mouseevent.dblclick:\n ch = str(click.artist.get_label())\n index = self.psd.info['ch_names'].index(ch)\n index = self.psd.picks.index(index)\n _plot_single_psd(self, index + 1)", "def setBestCluster(cluster):\r\n global bestCluster\r\n bestCluster = 
cluster", "def middleselectitem(self, pos):\n self._linklist.select(pos)", "def select_random_node(cluster_ips):\n return random.choice(cluster_ips)", "def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True", "def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)", "def choose_point_command(a):\n global canvas, best_line, list_best_label_distance, label_text_result\n if choose_point[0] != a and choose_point[1] != a: # if a was not be choose\n if choose_point[0] == -1 and choose_point[1] == -1:\n choose_point[0] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\") # Change color of point\n elif choose_point[0] != -1 and choose_point[1] == -1:\n choose_point[1] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n best_line = dijkstra(data, amount_point_var, choose_point[0], choose_point[1]) # Find best line\n if best_line is not None:\n draw_bestline(best_line[\"path\"], canvas, list_position) # Draw best line with difference color\n\n # Draw best distance with difference color\n list_best_label_distance = draw_best_distance(best_line[\"path\"], data, canvas, list_position, 0.1)\n # Draw result\n text = draw_result(canvas, best_line, data)\n label_text_result = Label(canvas, text=text, height=4, wraplength=150, bg='lawn green')\n label_text_result.pack(pady=100, padx=10, anchor=NW)\n\n else:\n messagebox.showwarning(\"Warning\", \"Not exist path from point{} to point{}\"\n .format(choose_point[0]+1, choose_point[1]+1))\n elif choose_point[0] != -1 and choose_point[1] != -1:\n list_point[choose_point[0]].configure(bg=point_color, fg=\"black\")\n list_point[choose_point[1]].configure(bg=point_color, fg=\"black\")\n choose_point[0] = a\n choose_point[1] = -1 # Uncheck\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n canvas.delete(\"best_line_tag\")\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[0] == a:\n if choose_point[1] == -1:\n choose_point[0] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n else:\n choose_point[a] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[1] == a:\n 
list_point[a].configure(bg=point_color, fg=\"black\")\n choose_point[1] = -1\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()", "def cluster_select(arrayName, x0, y0, type_stack, w, cc_stack, ncor, Tmin, \\\n Tmax, RMSmin, RMSmax, xmin, xmax, ymin, ymax, typecluster, nc, \\\n palette, amp, n1, n2, draw_scatter=True, draw_hist=True, \\\n envelope=True, draw_cc=True, draw_ac=True, draw_colored_cc=True, \\\n draw_colored_ac=True):\n # Read file containing data from stack_ccorr_tremor\n filename = 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \\\n arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \\\n type_stack)\n data = pickle.load(open(filename, 'rb'))\n EW_UD = data[6]\n NS_UD = data[7]\n # Read file containing data from stack_acorr_tremor\n# filename = 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \\\n# arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \\\n# type_stack)\n# data = pickle.load(open(filename, 'rb'))\n# EW = data[6]\n# NS = data[7]\n# UD = data[8]\n # Stack over all tremor windows\n if (cc_stack == 'lin'):\n EW_UD_stack = linstack([EW_UD], normalize=False)[0]\n NS_UD_stack = linstack([NS_UD], normalize=False)[0]\n# EW_stack = linstack([EW], normalize=False)[0]\n# NS_stack = linstack([NS], normalize=False)[0]\n# UD_stack = linstack([UD], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EW_UD_stack = powstack([EW_UD], w, normalize=False)[0]\n NS_UD_stack = powstack([NS_UD], w, normalize=False)[0]\n# EW_stack = powstack([EW], w, normalize=False)[0]\n# NS_stack = powstack([NS], w, normalize=False)[0]\n# UD_stack = powstack([UD], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EW_UD_stack = PWstack([EW_UD], w, normalize=False)[0]\n NS_UD_stack = PWstack([NS_UD], w, normalize=False)[0]\n# EW_stack = PWstack([EW], w, normalize=False)[0]\n# NS_stack = PWstack([NS], w, normalize=False)[0]\n# UD_stack = PWstack([UD], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Initialize indicators of cross correlation fit\n nt = len(EW_UD)\n ccmaxEW = np.zeros(nt)\n cc0EW = np.zeros(nt)\n timedelayEW = np.zeros(nt)\n rmsEW = np.zeros(nt)\n ccmaxNS = np.zeros(nt)\n cc0NS = np.zeros(nt)\n timedelayNS = np.zeros(nt)\n rmsNS = np.zeros(nt)\n # Windows of the cross correlation to look at\n i0 = int((len(EW_UD_stack) - 1) / 2)\n ibegin = i0 + int(Tmin / EW_UD_stack.stats.delta)\n iend = i0 + int(Tmax / EW_UD_stack.stats.delta) + 1\n rmsb = i0 + int(RMSmin / EW_UD_stack.stats.delta)\n rmse = i0 + int(RMSmax / EW_UD_stack.stats.delta) + 1\n # Time function\n dt = EW_UD_stack.stats.delta\n imax = int((EW_UD_stack.stats.npts - 1) / 2)\n t = dt * np.arange(- imax, imax + 1)\n for i in range(0, nt):\n rmsEW[i] = np.max(np.abs(EW_UD[i][ibegin:iend])) / \\\n np.sqrt(np.mean(np.square(EW_UD[i][rmsb:rmse])))\n rmsNS[i] = np.max(np.abs(NS_UD[i][ibegin:iend])) / \\\n np.sqrt(np.mean(np.square(NS_UD[i][rmsb:rmse])))\n # Cross correlate cc for EW with stack \n cc_EW = correlate(EW_UD[i][ibegin : iend], \\\n EW_UD_stack[ibegin : iend], ncor)\n ccmaxEW[i] = np.max(cc_EW)\n cc0EW[i] = cc_EW[ncor]\n timedelayEW[i] = (np.argmax(cc_EW) - ncor) * EW_UD_stack.stats.delta\n # Cross correlate cc for NS with stack\n cc_NS = correlate(NS_UD[i][ibegin : iend], \\\n NS_UD_stack[ibegin : iend], ncor)\n ccmaxNS[i] = np.max(cc_NS)\n cc0NS[i] = 
cc_NS[ncor]\n timedelayNS[i] = (np.argmax(cc_NS) - ncor) * NS_UD_stack.stats.delta\n # Clustering\n df = pd.DataFrame({'ccmaxEW' : ccmaxEW, 'ccmaxNS' : ccmaxNS, \\\n 'cc0EW' : cc0EW, 'cc0NS' : cc0NS, 'timedelayEW' : timedelayEW, \\\n 'timedelayNS' : timedelayNS, 'rmsEW' : rmsEW, 'rmsNS' : rmsNS})\n df = preprocessing.scale(df)\n df = pd.DataFrame(df)\n df.columns = ['ccmaxEW', 'ccmaxNS', 'cc0EW', 'cc0NS', 'timedelayEW', \\\n 'timedelayNS', 'rmsEW', 'rmsNS']\n if (typecluster == 'kmeans'):\n clusters = KMeans(n_clusters=nc, random_state=0).fit_predict(df)\n elif (typecluster == 'agglo'):\n clustering = AgglomerativeClustering(n_clusters=nc).fit(df)\n clusters = clustering.labels_\n else:\n raise ValueError( \\\n 'Type of clustering must be kmeans or agglo')\n # Scatter plot\n if (draw_scatter == True):\n colors = [palette[c] for c in clusters]\n pd.plotting.scatter_matrix(df, c=colors, figsize=(20, 20))\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_scatter.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close()\n # Compute time lags\n timelagEW = np.zeros(nt)\n timelagNS = np.zeros(nt)\n for i in range(0, nt):\n # Time lags\n EWenvelope = obspy.signal.filter.envelope(EW_UD[i].data)\n i0 = np.argmax(EWenvelope[ibegin:iend])\n timelagEW[i] = t[ibegin:iend][i0]\n NSenvelope = obspy.signal.filter.envelope(NS_UD[i].data)\n i0 = np.argmax(NSenvelope[ibegin:iend])\n timelagNS[i] = t[ibegin:iend][i0]\n # Compute width of timelags distribution\n timelags = pd.DataFrame({'timelagEW' : timelagEW, 'timelagNS' : timelagNS})\n width_clust_EW = []\n width_clust_NS = []\n timelag_clust_EW = []\n timelag_clust_NS = []\n for j in range(0, nc):\n times = timelags['timelagEW'].iloc[clusters == j]\n width_clust_EW.append(np.std(times))\n timelag_clust_EW.append(times)\n times = timelags['timelagNS'].iloc[clusters == j]\n width_clust_NS.append(np.std(times))\n timelag_clust_NS.append(times)\n # Save timelags into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_cluster_timelags.pkl'. 
\\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([timelag_clust_EW, timelag_clust_NS], open(filename, 'wb'))\n # Plot histogram of timelags\n if (draw_hist == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(1, figsize=(10 * nc, 16))\n tlag_min = min(np.min(timelags['timelagEW']), np.min(timelags['timelagNS']))\n tlag_max = max(np.max(timelags['timelagEW']), np.max(timelags['timelagNS']))\n # EW / Vertical\n for j in range(0, nc):\n plt.subplot2grid((2, nc), (0, j))\n times = timelags['timelagEW'].iloc[clusters == j]\n m = np.mean(times)\n s = np.std(times)\n plt.hist(times, range=(tlag_min, tlag_max))\n plt.axvline(m + s, color='grey', linestyle='--')\n plt.axvline(m - s, color='grey', linestyle='--')\n plt.title('EW / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(times)), fontsize=24)\n plt.xlabel('Time lag (s)', fontsize=24)\n # NS / Vertical\n for j in range(0, nc):\n plt.subplot2grid((2, nc), (1, j))\n times = timelags['timelagNS'].iloc[clusters == j]\n m = np.mean(times)\n s = np.std(times)\n plt.hist(times, range=(tlag_min, tlag_max))\n plt.title('NS / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(times)), fontsize=24)\n plt.axvline(m + s, color='grey', linestyle='--')\n plt.axvline(m - s, color='grey', linestyle='--')\n plt.xlabel('Time lag (s)', fontsize=24)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_timelags.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(1)\n # Plot stacked cross correlation\n if (draw_cc == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(2, figsize=(10 * nc, 16))\n # Time function\n npts = int((EW_UD_stack.stats.npts - 1) / 2)\n dt = EW_UD_stack.stats.delta\n t = dt * np.arange(- npts, npts + 1)\n # EW / Vertical\n cc_clust_EW = []\n t_clust_EW = []\n ratio_clust_EW = []\n EW_UD_stacks = Stream()\n EW_ntremor = []\n for j in range(0, nc):\n # Stack over selected tremor windows\n EWselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n if envelope == True:\n EW_UD[i].data = obspy.signal.filter.envelope(EW_UD[i].data)\n EWselect.append(EW_UD[i])\n EW_ntremor.append(len(EWselect))\n if (cc_stack == 'lin'):\n EWselect_stack = linstack([EWselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EWselect_stack = powstack([EWselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EWselect_stack = PWstack([EWselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Max cc and ratio with RMS\n cc_clust_EW.append(np.max(np.abs(EWselect_stack.data[ibegin:iend])))\n i0 = np.argmax(np.abs(EWselect_stack.data[ibegin:iend]))\n t_clust_EW.append(t[ibegin:iend][i0])\n RMS = np.sqrt(np.mean(np.square(EWselect_stack.data[rmsb:rmse])))\n ratio_clust_EW.append(np.max(np.abs(EWselect_stack.data[ibegin:iend])) / RMS)\n # Plot\n if (draw_cc == True):\n plt.subplot2grid((2, nc), (0, j))\n plt.axvline(Tmin, color='grey', linestyle='--')\n plt.axvline(Tmax, color='grey', linestyle='--')\n plt.plot(t, EW_UD_stack.data, 'k-', label='All')\n plt.plot(t, EWselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.title('EW / UD - Cluster {:d} ({:d} 
tremor windows)'.format(j, \\\n len(EWselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # Save into stream\n EW_UD_stacks.append(EWselect_stack)\n # Get the best stack\n i0_EW = cc_clust_EW.index(max(cc_clust_EW))\n t_EW = t_clust_EW[i0_EW]\n cc_EW = max(cc_clust_EW)\n ratio_EW = ratio_clust_EW[i0_EW]\n width_EW = width_clust_EW[i0_EW]\n stack_EW = EW_UD_stacks[i0_EW]\n ntremor = EW_ntremor[i0_EW]\n # NS / Vertical\n cc_clust_NS = []\n t_clust_NS = []\n ratio_clust_NS = []\n NS_UD_stacks = Stream()\n NS_ntremor = []\n for j in range(0, nc):\n # Stack over selected tremor windows\n NSselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n if envelope == True:\n NS_UD[i].data = obspy.signal.filter.envelope(NS_UD[i].data)\n NSselect.append(NS_UD[i])\n NS_ntremor.append(len(NSselect))\n if (cc_stack == 'lin'):\n NSselect_stack = linstack([NSselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n NSselect_stack = powstack([NSselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n NSselect_stack = PWstack([NSselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n # Max cc and ratio with RMS\n cc_clust_NS.append(np.max(np.abs(NSselect_stack[ibegin:iend])))\n i0 = np.argmax(np.abs(NSselect_stack[ibegin:iend]))\n t_clust_NS.append(t[ibegin:iend][i0])\n RMS = np.sqrt(np.mean(np.square(NSselect_stack[rmsb:rmse])))\n ratio_clust_NS.append(np.max(np.abs(NSselect_stack[ibegin:iend])) \\\n / RMS) \n # Plot\n if (draw_cc == True):\n plt.subplot2grid((2, nc), (1, j))\n plt.axvline(Tmin, color='grey', linestyle='--')\n plt.axvline(Tmax, color='grey', linestyle='--')\n plt.plot(t, NS_UD_stack.data, 'k-', label='All')\n plt.plot(t, NSselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j, ))\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.title('NS / UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(NSselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # Save into stream\n NS_UD_stacks.append(NSselect_stack)\n # Get the best stack\n i0_NS = cc_clust_NS.index(max(cc_clust_NS))\n t_NS = t_clust_NS[i0_NS]\n cc_NS = max(cc_clust_NS)\n ratio_NS = ratio_clust_NS[i0_NS]\n width_NS = width_clust_NS[i0_NS]\n stack_NS = NS_UD_stacks[i0_NS]\n ntremor = NS_ntremor[i0_NS]\n # End figure\n if (draw_cc == True):\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_stackcc.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(2)\n # Save clusters into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_clusters.pkl'. \\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([data[0], data[1], data[2], data[3], data[4], data[5], \\\n clusters, i0_EW, i0_NS], open(filename, 'wb'))\n # Save best stacks into file\n filename = 'cc/{}/{}_{:03d}_{:03d}/'.format(arrayName, arrayName, \\\n int(x0), int(y0)) + '{}_{:03d}_{:03d}_{}_{}_cluster_stacks.pkl'. 
\\\n format(arrayName, int(x0), int(y0), type_stack, cc_stack)\n pickle.dump([stack_EW, stack_NS], open(filename, 'wb'))\n # Plot stacked autocorrelation\n if (draw_ac == True):\n plt.figure(3, figsize=(10 * nc, 24))\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n npts = int((EW_stack.stats.npts - 1) / 2)\n dt = EW_stack.stats.delta\n t = dt * np.arange(- npts, npts + 1)\n # EW\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (0, j))\n plt.plot(t, EW_stack.data, 'k-', label='All')\n EWselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n EWselect.append(EW[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n EWselect_stack = linstack([EWselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n EWselect_stack = powstack([EWselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n EWselect_stack = PWstack([EWselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, EWselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('EW - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(EWselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # NS\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (1, j))\n plt.plot(t, NS_stack.data, 'k-', label='All')\n NSselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n NSselect.append(NS[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n NSselect_stack = linstack([NSselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n NSselect_stack = powstack([NSselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n NSselect_stack = PWstack([NSselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, NSselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('NS - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(NSselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # UD\n for j in range(0, nc):\n plt.subplot2grid((3, nc), (2, j))\n plt.plot(t, UD_stack.data, 'k-', label='All')\n UDselect = Stream()\n for i in range(0, nt):\n if (clusters[i] == j):\n UDselect.append(UD[i])\n # Stack over selected tremor windows\n if (cc_stack == 'lin'):\n UDselect_stack = linstack([UDselect], normalize=False)[0]\n elif (cc_stack == 'pow'):\n UDselect_stack = powstack([UDselect], w, normalize=False)[0]\n elif (cc_stack == 'PWS'):\n UDselect_stack = PWstack([UDselect], w, normalize=False)[0]\n else:\n raise ValueError( \\\n 'Type of stack must be lin, pow, or PWS')\n plt.plot(t, UDselect_stack.data, color=palette[j], \\\n label='Cluster {:d}'.format(j))\n plt.xlim(0, xmax)\n plt.ylim(- ymax, ymax)\n plt.title('UD - Cluster {:d} ({:d} tremor windows)'.format(j, \\\n len(UDselect)), fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.legend(loc=1)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_stackac.eps'. 
\\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n plt.close(3)\n # Plot colored cross correlation windows\n if (draw_colored_cc == True):\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n plt.figure(4, figsize=(20, 16))\n # EW - UD cross correlation\n ax1 = plt.subplot(121)\n index = 0\n for j in range(0, nc):\n for i in range(n1, n2):\n if (clusters[i] == j):\n dt = EW_UD[i].stats.delta\n ncor = int((EW_UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * index + 1) + amp * EW_UD[i].data, \\\n color=palette[j])\n index = index + 1\n plt.xlim(xmin, xmax)\n plt.ylim(0.0, 2.0 * index)\n plt.title('East / Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Cross correlation', fontsize=24)\n ax1.set_yticklabels([])\n ax1.tick_params(labelsize=20)\n # NS - UD cross correlation\n ax2 = plt.subplot(122)\n index = 0\n for j in range(0, nc):\n for i in range(n1, n2):\n if (clusters[i] == j):\n dt = NS_UD[i].stats.delta\n ncor = int((NS_UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * index + 1) + amp * NS_UD[i].data, \\\n color=palette[j])\n index = index + 1\n plt.xlim(xmin, xmax)\n plt.ylim(0.0, 2.0 * index)\n plt.title('North / Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Cross correlation', fontsize=24)\n ax2.set_yticklabels([])\n ax2.tick_params(labelsize=20)\n # End figure\n plt.tight_layout()\n plt.savefig( \\\n 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_ccwin.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n ax1.clear()\n ax2.clear()\n plt.close(4)\n # Plot colored autocorrelation windows\n if (draw_colored_ac == True):\n plt.figure(5, figsize=(20, 24))\n params = {'legend.fontsize': 24, \\\n 'xtick.labelsize': 24, \\\n 'ytick.labelsize': 24}\n pylab.rcParams.update(params)\n # EW autocorrelation\n ax1 = plt.subplot(131)\n for i in range(n1, n2):\n dt = EW[i].stats.delta\n ncor = int((EW[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('East component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n ax1.set_yticklabels([])\n ax1.tick_params(labelsize=20)\n # NS autocorrelation\n ax2 = plt.subplot(132)\n for i in range(n1, n2):\n dt = NS[i].stats.delta\n ncor = int((NS[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('North component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n ax2.set_yticklabels([])\n ax2.tick_params(labelsize=20)\n # UD autocorrelation\n ax3 = plt.subplot(133)\n for i in range(n1, n2):\n dt = UD[i].stats.delta\n ncor = int((UD[i].stats.npts - 1) / 2)\n t = dt * np.arange(- ncor, ncor + 1)\n plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * UD[i].data, color=colors[i])\n plt.xlim(0, xmax)\n plt.ylim(0.0, 2.0 * (n2 - n1))\n plt.title('Vertical component', fontsize=24)\n plt.xlabel('Lag time (s)', fontsize=24)\n plt.ylabel('Autocorrelation', fontsize=24)\n 
ax3.set_yticklabels([])\n ax3.tick_params(labelsize=20)\n # End figure and plot\n plt.tight_layout()\n plt.savefig( \\\n 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_cluster_acwin.eps'. \\\n format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \\\n int(y0), type_stack, cc_stack), format='eps')\n ax1.clear()\n ax2.clear()\n ax3.clear()\n plt.close(5)\n return (clusters, t_EW, t_NS, cc_EW, cc_NS, ratio_EW, ratio_NS, \\\n width_EW, width_NS, ntremor)", "def click_on_hero():\n mouseclick(coords_hero_button[0], coords_hero_button[1])", "def test_call_low_cluster_identity(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should only get 6 clusters\r\n exp_otu_ids = [str(x) for x in range(6)]\r\n\r\n exp_clusters =\\\r\n [['uclust_test_seqs_0', 'uclust_test_seqs_6',\r\n 'uclust_test_seqs_9'], ['uclust_test_seqs_1'], ['uclust_test_seqs_2'],\r\n ['uclust_test_seqs_3',\r\n 'uclust_test_seqs_5',\r\n 'uclust_test_seqs_8'],\r\n ['uclust_test_seqs_4'], ['uclust_test_seqs_7']]\r\n\r\n app = UsearchOtuPicker(params={'save_intermediate_files': False,\r\n 'db_filepath': self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection': False,\r\n 'de_novo_chimera_detection': False,\r\n 'cluster_size_filtering': False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.80,\r\n 'percent_id_err': 0.97\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath1)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def select_server(self):\n pass", "def show_cluster(self, cluster):\n for patch in cluster:\n patch.config(fill=\"red\")", "def select_create_collection(self):\n select_create_collection_sitem = self.locator_finder_by_id(self.select_create_collection_id)\n select_create_collection_sitem.click()\n time.sleep(1)", "def changeSelection(self, value):\n self.layer.selected_label = value\n self.selectionSpinBox.clearFocus()\n self.setFocus()", "def select(self):\n if not self._selected:\n \tself._selected = True\n\t\tself.log(\"device {} is now selected\".format(self._secondary_address))", "def onpick(cls, event):\n if cls.rate_limiting():\n return True\n\n if len(event.ind) != 1:\n print(\"Two or more points are too close! 
Please zoom in.\")\n print(\"Showing the one with higher fitness score\")\n\n cloud_plot = gs.canvas2cloud_plot[event.canvas]\n artist = event.artist\n ind = event.ind[-1]\n button = event.mouseevent.button\n\n if button == 1:\n cls.button_1(cloud_plot, artist, ind)\n elif button == 3:\n cls.button_3(cloud_plot, artist, ind)", "def select_action(self, state):", "def select(self):\n return", "def select_action(self):\n pass", "def _select_classifier_from_sk_search(estimator, X, A):\n estimator.fit(X, A)\n best_estimator = clone(estimator.best_estimator_)\n return best_estimator", "def for_failover_vhid_select_30(driver):\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Failover VHID\"]').click()\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Failover VHID_30\"]').click()", "def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def _set_selection(self, new_sel_index):\r\n if new_sel_index >= 0 and new_sel_index <= len(self.points) -1:\r\n iid = self._tree.get_children()[new_sel_index]\r\n self._tree.selection_set(iid)", "def _onclick(self,event):\r\n if self.NumCells > 0:\r\n ShapeMask = np.shape(self.Mask)\r\n # get coorinates at selected location in image coordinates\r\n if event.xdata == None or event.ydata == None:\r\n return\r\n xcoor = min(max(int(event.xdata),0),ShapeMask[1])\r\n ycoor = min(max(int(event.ydata),0),ShapeMask[0])\r\n \r\n # search for the mask coresponding to the selected cell\r\n for EachCell in range(self.NumCells):\r\n if self.Mask[ycoor,xcoor,EachCell]:\r\n self.SelectedCellIndex = EachCell\r\n break\r\n \r\n # highlight selected cell\r\n if self.SelectedCellIndex not in self.selected_ML_Index:\r\n # Get the selected cell's contour coordinates and mask patch\r\n self.contour_verts, self.Cell_patch = self.get_cell_polygon(self.Mask[:,:,self.SelectedCellIndex])\r\n \r\n self.Matdisplay_Figure_axis.add_patch(self.Cell_patch)\r\n self.Matdisplay_Canvas.draw()\r\n \r\n self.selected_ML_Index.append(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict['cell{}_verts'.format(str(self.SelectedCellIndex))] = self.contour_verts\r\n else:\r\n # If click on the same cell\r\n self.Cell_patch.remove()\r\n self.Matdisplay_Canvas.draw()\r\n self.selected_ML_Index.remove(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict.pop('cell{}_verts'.format(str(self.SelectedCellIndex)))", "def mousePressEvent(self, event):\n #sw = self.spw.windows['Sort']\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #sw.on_actionSelectRandomSpikes_triggered()\n #sw.spykewindow.plotButton.click() # same as hitting ENTER in nslist\n self.selecting = True\n self.setMouseTracking(True) # while selecting\n self.selectPointsUnderCursor()\n self.lastPressPos = QtCore.QPoint(event.pos())\n self.lastPos = QtCore.QPoint(event.pos())", "def test_call_low_cluster_identity(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n # Should only get 6 
clusters\r\n exp_otu_ids = [str(x) for x in range(10)]\r\n\r\n exp_clusters = [['uclust_test_seqs_0'],\r\n ['uclust_test_seqs_1'],\r\n ['uclust_test_seqs_2'],\r\n ['uclust_test_seqs_3'],\r\n ['uclust_test_seqs_4'],\r\n ['uclust_test_seqs_5'],\r\n ['uclust_test_seqs_6'],\r\n ['uclust_test_seqs_7'],\r\n ['uclust_test_seqs_8'],\r\n ['uclust_test_seqs_9']]\r\n\r\n app = UsearchReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'db_filepath':\r\n self.tmp_ref_database,\r\n 'output_dir': self.temp_dir,\r\n 'remove_usearch_logs': True,\r\n 'reference_chimera_detection':\r\n False,\r\n 'de_novo_chimera_detection':\r\n False,\r\n 'cluster_size_filtering':\r\n False,\r\n 'minlen': 12,\r\n 'w': 12,\r\n 'minsize': 1,\r\n 'percent_id': 0.80,\r\n 'percent_id_err': 0.97\r\n })\r\n\r\n obs = app(self.tmp_seq_filepath1, self.tmp_otu_ref_database)\r\n\r\n obs_otu_ids = sorted(obs.keys())\r\n obs_clusters = sorted(obs.values())\r\n # The relation between otu ids and clusters is abitrary, and\r\n # is not stable due to use of dicts when parsing clusters -- therefore\r\n # just checks that we have the expected group of each\r\n self.assertEqual(obs_otu_ids, exp_otu_ids)\r\n self.assertEqual(obs_clusters, exp_clusters)", "def clickCell(self, row, col):\n self.clicked[row, col] = 1", "def game_click(coord):\n mouseclick(coord[0], coord[1])\n time.sleep(0.5)", "def cluster_id(self, cluster_id):\n self._cluster_id = cluster_id", "def click(self):\n return self.selected.click()", "def toggle_cluster_javascript(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n # Alias to reduce verbosity.\n pargs = ctx.obj.groups[project.id].clusters[cluster_name].processArgs\n\n initial_process_args = pargs.get()\n target_js_value = not initial_process_args.data.javascriptEnabled\n\n cluster = pargs.patch(javascriptEnabled=target_js_value)\n pprint(cluster.data)", "def select_number_of_shards(self, shard_value):\n shards = \"new-collection-shards\"\n shards_sitem = self.locator_finder_by_id(shards)\n shards_sitem.click()\n shards_sitem.clear()\n shards_sitem.send_keys(shard_value)\n time.sleep(2)", "def select_collection_settings(self):\n select_collection_settings_sitem = self.locator_finder_by_id(self.select_collection_settings_id)\n select_collection_settings_sitem.click()\n time.sleep(2)", "def delete_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n ctx.obj.groups[project.id].clusters[cluster_name].delete().data\n click.echo(\"DONE!\")", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def clicked_glycan(self, event):\n #tab = self.tab_control.tab(self.tab_control.select(), \"text\")\n tab = self.tab_control.index(self.tab_control.select())\n item = event.widget.find_closest(event.x, event.y)\n idx = int(event.widget.gettags(item)[0])\n \n if self.selected_canvas:\n self.selected_canvas.delete(self.selection)\n\n if tab == 0:\n self.selected_canvas = self.common_canvas[idx]\n self.selected_glycan = self.common_glycans.items()[idx] \n elif tab == 1:\n self.selected_canvas = self.user_canvas[idx]\n self.selected_glycan = self.user_glycans.items()[idx] \n self.selection = self.selected_canvas.create_rectangle(0, 0, 100, 100, outline='red', width=6)", "def select_collection_type(self, value):\n self.locator_finder_by_select(self.select_collection_type_id, value)\n time.sleep(1)", "def select_dispenser(id=1, timeout=default_timeout):\n return 
click_key(controls['Fuel']['prepay_dispenser_by_id'] % id, timeout=timeout)", "def partition_selection():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['actor','element']:\n warning(\"You need to pick actors or elements.\")\n return\n for A in GD.canvas.actors:\n if not A.atype() == 'TriSurface':\n warning(\"Currently I can only partition TriSurfaces.\" )\n return\n partitionCollection(selection)\n highlightPartitions(selection)", "def spiketrainselectaroundevent(spiketrain, t_event, t_pre, t_post, shifttime=1):\n if not shifttime:\n spiketrainsel, spiketimesel = spiketraintimesel(spiketrain, t_event-t_pre, t_event+t_post)\n else:\n _, spiketimesel = spiketraintimesel(spiketrain, t_event-t_pre, t_event+t_post)\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel-t_event, units=spiketrain.units, t_start=-t_pre,\n t_stop=t_post, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel", "def selChgCmd(self, *args):\n self.tDisp.selId = self.tDisp.selection()\n self.tDisp.selIdx = self.tDisp.index(self.tDisp.selId)\n self.event_generate('<<SelItem>>', x=self.tDisp.selIdx)", "def on_NormalNode_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n print(\"Select Normal Node,not need others attend in! Best Wish\")\n self.select_actor = \"NormalNode\"" ]
[ "0.64221257", "0.58902055", "0.56319547", "0.55411613", "0.5464253", "0.5434376", "0.5432982", "0.5413814", "0.54018533", "0.5387682", "0.5354226", "0.5342046", "0.5314084", "0.5284342", "0.5284342", "0.5258224", "0.52542984", "0.5254069", "0.5253677", "0.5247113", "0.5224413", "0.5219483", "0.52075285", "0.520349", "0.52015686", "0.5191905", "0.5186228", "0.5178217", "0.51689595", "0.5162985", "0.51588863", "0.51588863", "0.5149684", "0.51398283", "0.5130325", "0.51175463", "0.511752", "0.5113369", "0.51112926", "0.5097929", "0.5097929", "0.5097929", "0.5097929", "0.5097929", "0.5097929", "0.50701493", "0.5045363", "0.5045098", "0.5043444", "0.50413364", "0.50376505", "0.5019692", "0.50176257", "0.5010343", "0.50000024", "0.49967304", "0.49834362", "0.49670124", "0.49651766", "0.49627855", "0.49507192", "0.49451706", "0.49401018", "0.49290478", "0.49270767", "0.49194935", "0.49041748", "0.49030274", "0.48949486", "0.48889285", "0.48887196", "0.48855096", "0.48759857", "0.48753908", "0.48753473", "0.48573756", "0.48507014", "0.48486066", "0.4840603", "0.48376125", "0.48358133", "0.48319483", "0.48299924", "0.48271963", "0.48202118", "0.4811847", "0.48103938", "0.4809983", "0.47961497", "0.47941273", "0.47935933", "0.47837973", "0.4783402", "0.47755766", "0.4769624", "0.47692963", "0.47614804", "0.47605464", "0.47542295", "0.47513154" ]
0.6448263
0
Scroll through the data with alt+wheel.
def on_mouse_wheel(self, e):  # pragma: no cover
        super(TraceView, self).on_mouse_wheel(e)
        if e.modifiers == ('Alt',):
            start, end = self._interval
            delay = e.delta * (end - start) * .1
            self.shift(-delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scroll(*args):", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def mouse_wheel(self, event):\n\n if event.num == 5 or event.delta == -120:\n event.widget.yview_scroll(1, UNITS)\n self.tablerowheader.yview_scroll(1, UNITS)\n if event.num == 4 or event.delta == 120:\n if self.canvasy(0) < 0:\n return\n event.widget.yview_scroll(-1, UNITS)\n self.tablerowheader.yview_scroll(-1, UNITS)\n self.redrawVisible()\n return", "def Scroll(self, steps):\n self._EnsureHIDValueInRange(steps)\n self._kit.MouseScroll(steps)\n time.sleep(self.send_delay)", "def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def ev_mousewheel(self, event: MouseWheel) -> None:", "def scroll(self, direction):\n # Handle the specific keys\n if direction == \"h\": # Behave like ranger\n self.remember_pos(os.getcwd(),\n self.vimiv.get_pos(force_widget=\"lib\"))\n self.move_up()\n elif direction == \"l\":\n self.file_select(self.treeview, self.treeview.get_cursor()[0],\n None, False)\n else:\n # Scroll the tree checking for a user step\n if self.vimiv.keyhandler.num_str:\n step = int(self.vimiv.keyhandler.num_str)\n else:\n step = 1\n if direction == \"j\":\n new_pos = self.vimiv.get_pos(force_widget=\"lib\") + step\n if new_pos >= len(self.file_liststore):\n new_pos = len(self.file_liststore) - 1\n else:\n new_pos = self.vimiv.get_pos(force_widget=\"lib\") - step\n if new_pos < 0:\n new_pos = 0\n self.move_pos(True, new_pos)\n return True # Deactivates default bindings (here for Arrows)", "def handle_scrollwheel(self, event):\n delta_x, delta_y, delta_z = self._get_deltas(event)\n if delta_x:\n self.events.append(\n self.emulate_wheel(delta_x, 'x', self.timeval))\n if delta_y:\n self.events.append(\n self.emulate_wheel(delta_y, 'y', self.timeval))\n if delta_z:\n self.events.append(\n self.emulate_wheel(delta_z, 'z', self.timeval))", "def start_scroll():\n send_command(0x2F)", "def idle_loop(self):\n sleep(0.1)\n self.scroll()", "def _wheel_scroll(self, event):\n # For some unknown reason, when using a single scrollbar to control two listboxes they get out of sync by\n # exactly four listbox rows, with the one being hovered over while scrolling being ahead of the other.\n # Therefore, below we have some (seemingly) effective albeit strange logic to make sure both scrollbars stay in\n # sync.\n\n lower_scroll, upper_scroll = self.scrollbar.get()\n # Only make any changes to _curr_scroll_row if the given scroll event would actually make any change to the\n # listboxs (i.e. if we're not at the top of the listboxes and scrolling up nor at the bottom of the listboxes\n # and scrolling down).\n if (lower_scroll != 0 and event.delta > 0) or (upper_scroll != 1 and event.delta < 0):\n # Increment or decrement _curr_scroll_row according to the direction of the scroll event.\n self._curr_scroll_row += int(math.copysign(1, -event.delta))\n # diff is the difference in rows between the \"ahead\" listbox and the \"behind\" one. 
It always (seemingly\n # arbitrarily) has magnitude 4.\n diff = int(math.copysign(4, -event.delta))\n # Set the yviews of the listboxes, adding the difference to the correct one.\n self.key_listbox.yview(self._curr_scroll_row + (diff if self.key_listbox is not event.widget else 0))\n self.value_listbox.yview(self._curr_scroll_row + (diff if self.value_listbox is not event.widget else 0))", "def _get_scroll(self, event):\n raise NotImplementedError", "def mouse_wheel_down(self):\n if not self.scroll_element is None:\n self.scroll_element.mouse_wheel_down()", "def scroll(self, axis, value):\n\n\t\tself._interface.scroll(axis, value)", "def ev_mousewheel(self, event: tcod.event.MouseWheel) -> T | None:", "def scroll_event(self, widget, event):\n x, y = event.x, event.y\n num_degrees = 0\n direction = 0\n\n # x, y = coordinates of mouse\n self.last_win_x, self.last_win_y = x, y\n\n # calculate number of degrees of scroll and direction of scroll\n # both floats in the 0-359.999 range\n # num_degrees =\n # direction =\n self.logger.debug(\"scroll deg=%f direction=%f\" % (\n num_degrees, direction))\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('scroll', direction, num_degrees,\n data_x, data_y)", "def _on_scroll(self, event):", "def scroll(self, dir):\n try:\n self.scrool = dir\n except:\n raise ReferenceError", "def do_auto_scroll( self, auto = True ):\n print( \"do_auto_scroll fix !!\" )", "def handle_scrollwheel(self, event):\n # relative Scrollwheel\n scroll_x, scroll_y = self._get_scroll(event)\n\n if scroll_x:\n self.events.append(\n self.emulate_wheel(scroll_x, 'x', self.timeval))\n\n if scroll_y:\n self.events.append(\n self.emulate_wheel(scroll_y, 'y', self.timeval))", "def __navigate_scroll(self):\n try:\n _title = self.browser.title\n _body = self.browser.find_element_by_tag_name('body')\n\n i = 0\n while i < 3:\n _html = str(self.browser.page_source)\n _content = Content(_html, _title)\n _attrs = _content.last_divs\n\n scroll_items = []\n for _attr in _attrs:\n xpath_string = '//div'\n\n for k, v in _attr.items():\n if not v:\n xpath_string = xpath_string + \"[@\" + str(k) + \"]\"\n else:\n if isinstance(v, list):\n _vstring = [\"contains(@\" + str(k) + \", '\" + str(_v) + \"')\" for _v in v]\n vstring = \" and \".join(_vstring)\n\n xpath_string = xpath_string + \"[\" + vstring + \"]\"\n\n div = self.browser.find_elements_by_xpath(xpath_string)\n\n for d in div: scroll_items.append(d)\n\n if len(scroll_items) > 10:\n j = 0\n while j < 10:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[j])\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n time.sleep(1)\n j += 1\n except Exception as e:\n print(e)\n j += 1\n continue\n \n else:\n for item in scroll_items:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", item)\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n _body.send_keys(Keys.HOME)\n time.sleep(1)\n except Exception as e:\n print(e)\n continue\n\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n new_html = str(self.driver.page_source)\n new_content = Content(new_html, _title)\n new_attrs = new_content.last_divs\n\n i += 1\n if new_attrs == _attrs:\n break\n else:\n continue\n\n return self.browser.page_source\n\n except:\n return None", "def __window_scroll(self, x, y):\n pass", "def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n 
self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()", "def autoscroll(self):\n self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)", "def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)", "def emulate_wheel(self, data, direction, timeval):\n if direction == 'x':\n code = 0x06\n elif direction == 'z':\n # Not enitely sure if this exists\n code = 0x07\n else:\n code = 0x08\n\n if WIN:\n data = data // 120\n\n return self.create_event_object(\n \"Relative\",\n code,\n data,\n timeval)", "def _on_scrollbar(self, *args) -> None:\r\n for textbox in self.textboxes:\r\n textbox.yview(*args)", "def __window_scrollBy(self, xDelta, yDelta):\n pass", "def xview_scroll(self, number, what):\n self.tk.call(self._w, 'xview', 'scroll', number, what)", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def __window_scrollTo(self, x, y):\n pass", "def scroll(self, direction):\n # next cursor position after scrolling\n next_line = self.line + direction\n\n # Up direction scroll overflow\n # current cursor position is 0, but top position is greater than 0\n if (direction == self.UP) and (self.top > 0 and self.line == 0):\n self.top += direction\n \n # Down direction scroll overflow\n # next cursor position touch the max lines, but absolute position of max lines could not touch the bottom\n elif (direction == self.DOWN) and (next_line == self.max_lines -1) and (self.top + self.max_lines < self.bottom):\n self.top += direction\n \n # Scroll up\n # current cursor position or top position is greater than 0\n elif (direction == self.UP) and (self.top > 0 or self.line > 0):\n self.line = next_line\n \n # Scroll down\n # next cursor position is above max lines, and absolute position of next cursor could not touch the bottom\n elif (direction == self.DOWN) and (next_line < self.max_lines) and (self.top + next_line < self.bottom):\n self.line = next_line", "def scroll_page(self):\n scroll_down = self.driver.find_element_by_tag_name(\"html\")\n scroll_down.send_keys(Keys.END)\n sleep(TestData.DELAY)\n scroll_down.send_keys(Keys.CONTROL + Keys.HOME)\n sleep(TestData.DELAY)\n return True", "def mouse_wheel_up(self):\n if not self.scroll_element is None:\n self.scroll_element.mouse_wheel_up()", "def scroll(self):\r\n SCROLL_PAUSE_TIME = 2\r\n current_scrolls = 0\r\n\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n while True:\r\n try:\r\n if current_scrolls == total_scroll:\r\n return\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(SCROLL_PAUSE_TIME)\r\n\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n except TimeoutException:\r\n break\r\n return", "def scroll(self, relative):\n if self.ui.browser and self.ui.browser.main_column:\n self.ui.browser.main_column.scroll(relative)\n self.thisfile = self.thisdir.pointed_obj", "def do_scroll_event(self, event):\n\t\tif event.state & gtk.gdk.CONTROL_MASK:\n\t\t\tif event.direction == gtk.gdk.SCROLL_UP:\n\t\t\t\tself.zoom *= 
1.1\n\t\t\telif event.direction == gtk.gdk.SCROLL_DOWN:\n\t\t\t\tself.zoom /= 1.1", "def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def _scroll (self):\n if self.policy is not None:\n self._scroll_fn(self, *self._policy_args)", "def cycle_text(self, **kwargs):\n self.scroll(**kwargs) # Temporary, will be replaced", "def _on_scroll(self, event):\n self._zoom(event.step, draw=True)", "def control_scrollbar(self):\n _scrollTop = 0\n # 渐进下拉,避免大幅度页面偏移,导致的textarea获取失败...\n for i in range(20):\n _scrollTop += 400\n js = \"var q=document.documentElement.scrollTop={}\".format(_scrollTop)\n self.driver.execute_script(js)\n time.sleep(0.2)\n # 简书AJax刷新3次后,必须点击一次查看更多,才能继续刷新...\n try:\n self.driver.find_element_by_class_name('load-more').click()\n except NoSuchElementException:\n pass\n except ElementNotInteractableException:\n pass", "def activeScrollingEventLogs(self):\n self.logsView.textualViewer.activeAutoscrolling()", "def scroll_to(self):\n\n if self:\n pass", "def wheel(self):\n if self.__enabled and self.__indicator.isVisible():\n self.__stopScrolling()\n return True\n \n return False", "def mouse_scroll(self, x, y, scroll_x, scroll_y):\n # Check if in the menu.\n if x > self.menu_start:\n # Scroll the menu.\n self.menu.scrollMenu(scroll_y)\n # Otherwise scroll the waveforms\n else:\n self.waveform_offset += 4 * scroll_y\n if self.waveform_offset > 0:\n self.waveform_offset = 0\n # Avoid going too far down.\n max_view = self.max_viewable + self.win.geometry.time_scale\n if self.current_view_span - self.waveform_offset > max_view:\n if self.current_view_span > max_view:\n self.waveform_offset = 0\n else:\n self.waveform_offset = -((10 + max_view) - \\\n self.current_view_span)\n # Update the scroll_bar.\n self.scroll_bar.changePosition()", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def scrollStep(self, stepCount):\n self['value'] = self.guiItem.getValue() + self.guiItem.getScrollSize() * stepCount", "def scroll(self, direction):\n\n self.counter += direction # Counter of 'up' and 'down'\n do_redraw = self.counter == self.content_size - self.h\n\n if self.size > 0:\n self.count += direction\n pos = self.pos\n if math.fabs(self.count) == math.floor(self.content_size / self.h):\n pos += direction\n self.count = 0\n\n pos = max(0, pos) # Top limit\n pos = min(pos, self.h - self.size) # Bottom limit\n do_redraw = pos != self.pos # Redraw if pos has changed\n self.pos = pos\n\n if do_redraw:\n self._create()", "def lulz(self):\n self.reset()\n self.scrollproc = threading.Thread(target=self.lulzloop)\n self.killedevent.wait()\n self.scrollproc.start()", "def scroll_page(self, where: str, direction: ScrollEnum):\n\n element = self.find_element_by_xpath(where)\n if element:\n if direction == ScrollEnum.UP:\n element.send_keys(Keys.HOME)\n elif direction == ScrollEnum.DOWN:\n element.send_keys(Keys.END)", "def on_key(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_PageUp:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_Down:\n self.model.channel_Scroll_Down('page')\n elif event.key() == 
QtCore.Qt.Key_PageDown:\n self.model.channel_Scroll_Down('page')\n elif event.key() == QtCore.Qt.Key_Left:\n self.model.time_scroll(scroll=-1 / 3)\n elif event.key() == QtCore.Qt.Key_Right:\n self.model.time_scroll(scroll=1 / 3)\n event.accept()", "def mouse_wheelEvent(self, e):\n if self.image is not None:\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n if modifiers == QtCore.Qt.ControlModifier:\n wheel_counter = e.angleDelta()\n if wheel_counter.y() / 120 == -1:\n if self.width_result_image == 1000:\n pass\n else:\n self.width_result_image -= 100\n\n if wheel_counter.y() / 120 == 1:\n if self.width_result_image == 4000:\n pass\n else:\n self.width_result_image += 100\n self.show_to_window()", "def scroll_display( self, direction=LCD_MOVELEFT ):\n\t\tassert direction in (LCD_MOVELEFT,LCD_MOVERIGHT), \"Invalid direction %s value\" % direction\n\t\tself.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)", "def stop_scroll():\n send_command(0x2E)", "def __scroll(self, result, item, index=1, containerObject=None, relatedAreaEnd=None):\r\n defaultSideWidth=150\r\n counter=0\r\n initialDump = None\r\n\r\n itemCommented = self._getCommented(item) # commented/translated version for test step run\r\n\r\n if not self.isItemScrollable(item,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd):\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item, item %s (related to %s) is not scrollable' % (self._getCommented(item),self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item, item is not scrollable %s' %self._getCommented(item))\r\n\r\n maximumDuration = 240000\r\n startTime=time.time()\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n containerX, containerY, containerW, containerH = [int(c) for c in result[-1].getAttribute('container-area').split(\",\")]\r\n\r\n screenWidth = min([self.getScreenWidth(),containerX + containerW])\r\n screenHeight = min([self.getScreenHeight(),containerY + containerH])\r\n\r\n screenTop = max(0,containerY)\r\n\r\n while result[0]==self.phone.uiState.HIDDEN:\r\n initialDump = self.currentState.toxml('utf-8')\r\n\r\n # Check if item is outside of screen at right\r\n if result[1][0]>=screenWidth:\r\n yCoordinate = 20\r\n\r\n distance=result[1][0] #Distance from end of screen to coordinate\r\n\r\n #If y coordinates are bigger than screenwith then set them to 0\r\n if screenWidth-distance<0:\r\n x_move=0\r\n else:\r\n x_move=screenWidth-distance\r\n self.phone._touch.drawLine((screenWidth,yCoordinate),(x_move,yCoordinate))\r\n self.phone._run('Scrolling left \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Checking if item is outside of screen at bottom of screen\r\n if result[1][1]>=screenHeight:\r\n scrollEndY=screenHeight-result[1][1] #Distance from end of screen to coordinate\r\n distanceToScroll = scrollEndY\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = scrollEndY\r\n elif previousScrollValue == scrollEndY:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if scrollEndY<screenTop:\r\n 
scrollEndY=screenTop\r\n\r\n # -60 so that we won't grab the option list from the bottom of the screen\r\n # scrollModStep is used when for adjusting y coordinate\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore = screenHeight-result[1][1]\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n\r\n # Checking if item is outside of screen at up of screen\r\n if result[1][1]<=max(screenTop,(self.phone.uiState.statusbarHeight*2)):# Item must be scrolled lower than status bar\r\n #distance=abs(result[1][1])+self.phone.uiState.statusbarHeight #Distance from top of the screen to coordinate which is now negative\r\n distance=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight)) #Distance from top of the screen to coordinate which is now negative\r\n distance += ((screenHeight-screenTop)/2)\r\n distanceToScroll = distance\r\n\r\n # y_start must be min. 
20 pixels from screenTop to ensure that ntf-drawer is not opened\r\n y_start = max(screenTop,(self.phone.uiState.statusbarHeight*3), 20)\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = distance\r\n elif previousScrollValue == distance:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if screenTop==0:\r\n y_move = distance+(self.phone.uiState.statusbarHeight*3)\r\n else:\r\n y_move = distance+screenTop\r\n\r\n if y_move>=screenHeight:\r\n y_move = screenHeight-1\r\n\r\n # scrollModStep is used when for adjusting y coordinate\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight))\r\n distanceToScrollMore += ((screenHeight-screenTop)/2)\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # if phone UI has changed, let's not increase the counter\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n counter=counter+1\r\n\r\n # give up when counter has maximum value or maximum time is up\r\n if counter == 10 or time.time() > startTime + maximumDuration/1000.0:\r\n self.phone.capture('Failed to scroll to item')\r\n #if initial dump and current dump are identical, phone UI is frozen -> fail testcase\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n self.phone.comment('KBD_KEY_KEYLOCK_TOGGLE pressed to check if phone UI is freezed or not')\r\n self.phone._pressKey('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone._run('Press KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone.delay(500, False)\r\n self.getCurrentState(refresh = True)\r\n #if initial and current dumps are identical after pressing KBD_KEY_BACK then UI is frozen\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n errorString = 'Phone UI freeze detected, unable to scroll'\r\n self.phone.fail(errorString)\r\n\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item %s (related to %s)' % (self._getCommented(item), self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item %s' %self._getCommented(item))\r\n\r\n return result", "def scroll_half_page_down(event):\n 
scroll_forward(event, half=True)", "def mover_scroll(self, x, y):\n self.scrollx += x\n self.scrolly += y", "def onscroll(self, event):\n if self.out_graph is False:\n self.zoom += 10*event.step\n\n if self.zoom >= self.axe_X/2/self.FOV_img*self.FOV_img_Y:\n self.zoom = self.axe_X/2/self.FOV_img*self.FOV_img_Y\n\n if self.zoom <= 0:\n self.zoom = 0\n\n self.draw()", "def __window_scrollByPages(self, pages):\n pass", "def scroll_obfuscate(self, y):\n l_stepCount = random.randint(5, 15)\n self.m_logger.info('Steps: {0}'.format(l_stepCount))\n\n for i in range(l_stepCount, 0, -1):\n d = l_stepCount * 10\n l_yTarget = y + random.randint(-d / 2, d / 2)\n self.m_driver.execute_script('window.scrollTo(0, {0});'.format(l_yTarget))\n time.sleep(.01)\n\n self.m_driver.execute_script('window.scrollTo(0, {0});'.format(y))", "def scrollY(self,yrel):\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # scroll vertically\n self.scroll += yrel\n\n # limit scrolling\n if self.scroll <= 0:\n self.scroll = 0\n if self.scroll+disph.value >= (len(self.itemList.items)+1)*150+178:\n self.scroll = (len(self.itemList.items)+1)*150+178-disph.value", "def scrollDown(self, messages=1):\n if self.scrollOffset < 1:\n self.scrollOffset += messages\n self._recalculateCoordinates()", "def _scrollEvent(self, widget, event, adj):\n if event.direction in (gtk.gdk.SCROLL_UP, gtk.gdk.SCROLL_LEFT):\n inc = -adj.step_increment\n elif event.direction in (gtk.gdk.SCROLL_DOWN, gtk.gdk.SCROLL_RIGHT):\n inc = adj.step_increment\n else:\n inc = 0\n adj.set_value(min(adj.upper - adj.page_size, adj.value + inc))\n return False", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])", "def wheel_click(coords=(0, 0)):\n _perform_click_input(button='middle', coords=coords)", "def driver_scroll(driver, max_scroll, walkthrough_mode=True):\n\n if walkthrough_mode:\n time.sleep(3)\n # scroll smoothly to bottom of the page (sees all that the user is allowing)\n scheight = 1\n while scheight < max_scroll:\n driver.execute_script(f\"window.scrollTo(0, {scheight})\")\n scheight += 1\n\n time.sleep(3)\n else:\n time.sleep(0.2)\n # scroll to access details & confirm authorisation\n driver.execute_script(f\"window.scrollTo(0, {max_scroll})\")\n time.sleep(0.2)", "def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def SetScrollRateSmart(self, newstep=None, printinfo=False):\n oldstep = self.GetScrollPixelsPerUnit()[0]\n oldscrollx = self.GetScrollPos(wx.HORIZONTAL)\n oldscrolly = self.GetScrollPos(wx.VERTICAL)\n oldvirtx = self.GetVirtualSize()[0]\n oldvirty = self.GetVirtualSize()[1]\n # rot = event.GetWheelRotation()\n if printinfo:\n print(f\"\\nIN step {oldstep} newstep {newstep} old scroll {oldscrollx}, {oldscrolly} virt {oldvirtx}, {oldvirty}\")\n\n if newstep is not None:\n if oldstep == newstep:\n if printinfo:\n print(f\"Nothing to do, step of {newstep} already set.\")\n else:\n q = newstep / oldstep # min(1, newstep)\n newscrollx = int(oldscrollx / q)\n newscrolly = int(oldscrolly / q)\n # newvirtx = oldvirtx / q\n # newvirty = oldvirty / q\n # Aha - image size * step => virtual bounds\n newvirtx = int(self.maxWidth / newstep * self.zoomscale)\n newvirty = 
int(self.maxHeight / newstep * self.zoomscale)\n if printinfo:\n print(f\"OUT step {newstep} new scroll {newscrollx}, {newscrolly} virt {newvirtx}, {newvirty} q {q}\")\n\n self.SetScrollbars(\n int(newstep), int(newstep),\n int(newvirtx), int(newvirty), # new virtual size\n int(newscrollx), int(newscrolly), # new scroll positions\n noRefresh=True)\n # self.Refresh()\n if printinfo:\n print(self.GetVirtualSize())", "def _on_textscroll(self, *args) -> None:\r\n self.scrollbar.set(*args)\r\n self._on_scrollbar('moveto', args[0])", "def autoscroll(self):\n return self.getY() == float(1.0)\n #return self.autoscroll", "def set_zooming_wheel(self):\n # Zooming: wheel\n self.set('Wheel', 'Zoom',\n param_getter=lambda p: (\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][0],\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][1]))", "def wheelEvent(self, event):\r\n\t\t\r\n\t\t# If a spritesheet has not been loaded, do nothing\r\n\t\tif self.animation_data and self.animation_data.active_frame is not None:\r\n\t\t\t\r\n\t\t\tif QtWidgets.QApplication.keyboardModifiers() == Qt.ControlModifier:\r\n\t\t\t\t\r\n\t\t\t\t# Next / Previous frame\r\n\t\t\t\tif event.angleDelta().y() > 0:\r\n\t\t\t\t\tself.current_index += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.current_index -= 1\r\n\r\n\t\t\t\tself.update()\r\n\t\t\t\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# Zoom in / Out of the spritesheet\r\n\t\t\t\tif event.angleDelta().y() > 0:\r\n\t\t\t\t\tself.set_scale(min(round(self.scale + self.scale_inc, 1), self.scale_max))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.set_scale(max(round(self.scale - self.scale_inc, 1), self.scale_min))", "def scroll_to(self):\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", self._element)", "def scroll_function(self, _, longest_item_length):\r\n\r\n # The width of the scrollable area is calculated as follows:\r\n # - we assume that every ASCII character is 7-pixels wide\r\n # - the width of the buttons appended to each media file is around 250 pixels\r\n # The total width is calculated by multiplying the width of the longest media item by 7, adding the width of\r\n # the buttons to the result\r\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"), width=longest_item_length * 7 + 250, height=200)", "def wheelEvent(self, ev):\n\n # Check if we're in auto Zoom mode\n if self.__zooming:\n # we're zooming\n if (ev.angleDelta().y() > 0):\n self.zoom(ev.pos(), 1)\n else:\n self.zoom(ev.pos(), -1)\n\n else:\n # not zooming - pass wheel event on\n self.mouseWheel.emit(self, ev)", "def on_scroll(event):\n if event.step > 0:\n if plot_mode == 'time_cut':\n sld['time'].set_val( min( sld['time'].val+1, sld['time'].valmax ) )\n else:\n sld['freq'].set_val( min(sld['freq'].val + scale_freq, sld['freq'].valmax) )\n else:\n if plot_mode == 'time_cut':\n sld['time'].set_val( max( sld['time'].val-1, sld['time'].valmin ) )\n else:\n sld['freq'].set_val( max(sld['freq'].val - scale_freq, sld['freq'].valmin) )", "def scroll_for_sessions(self, callback):\n \n print('%s Scrolling for sessions in %r...' 
% (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.index))\n \n response = ElasticConnection._es.search(\n index = self.index,\n scroll = '5m',\n body = {\n \"size\": 2500,\n \"_source\": {\n \"excludes\": [ \"requests.extended-information\", \"requests.flags\" ]\n },\n \"query\": self._query(),\n \"sort\": [\n {\n \"start-micros\": { \"order\": \"asc\" }\n }\n ]\n }\n )\n \n sid = response['_scroll_id']\n scroll_size = len(response['hits']['hits'])\n scroll_num = 1\n \n while scroll_size > 0:\n print('%s Scroll %r: Processing %r elements...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scroll_num, scroll_size))\n \n callback([ d['_source'] for d in response['hits']['hits']])\n \n print('%s Scroll %r: Processing done. Retrieving next documents...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scroll_num))\n \n response = ElasticConnection._es.scroll(scroll_id = sid, scroll = '5m')\n \n sid = response['_scroll_id']\n scroll_size = len(response['hits']['hits'])\n scroll_num += 1\n \n print('%s Reached the end of the scroll.' % datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\n ElasticConnection._es.clear_scroll(scroll_id = sid)", "def scroll_down(self):\r\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\r\n sleep(self.wait)", "def on_mouse_wheel(self, event):\n self.translate -= event.delta[1]\n self.game_program['u_view'] = self.view\n\n self.yaw, self.pitch = 0, 0\n\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def on_scroll(self, event):\n if event.button == 'up':\n self.generations += 4000\n elif event.button == 'down':\n if self.generations >= 4000:\n self.generations -= 4000\n self.redraw()", "def __scroll_element_into_view__(self, element):\n y = element.location['y']\n self.driver.execute_script('window.scrollTo(0, {0})'.format(y))", "def scroll_page_down(event):\n w = _current_window_for_event(event)\n b = event.cli.current_buffer\n\n if w and w.render_info:\n # Scroll down one page.\n line_index = max(w.render_info.last_visible_line(), w.vertical_scroll + 1)\n w.vertical_scroll = line_index\n\n b.cursor_position = b.document.translate_row_col_to_index(line_index, 0)\n b.cursor_position += b.document.get_start_of_line_position(after_whitespace=True)", "def wheelEvent(self, event: QWheelEvent):\n # zoom only when CTRL key pressed\n if (event.modifiers() & Qt.ControlModifier) == Qt.ControlModifier:\n steps = event.angleDelta().y() / 15 / 8\n\n if steps == 0:\n event.ignore()\n return\n\n # scale factor 1.25\n sc = pow(1.25, steps)\n self.scale(sc, sc)\n self.centerOn(self.mapToScene(event.pos()))\n event.accept()\n #  act normally on scrollbar\n else:\n # transmit event to parent class wheelevent\n super(QGraphicsView, self).wheelEvent(event)", "def _scrolling_request(self, path, method='GET', body=None, headers=None):\n assert 'pagination' in 
body\n paginated_view = body\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n\n scrolling = True\n while scrolling:\n response, content = super(DSBaseService, self)._request(url,\n method,\n body=str(paginated_view).replace(\"'\", '\"'),\n headers=headers)\n\n if int(response['status']) == 200:\n data = json.loads(content)\n offset = data['currentPage']['offset']\n size = data['currentPage']['size']\n total = data['total']\n if offset + size < total:\n paginated_view['pagination']['offset'] = offset + size\n else:\n scrolling = False\n yield data\n elif int(response['status']) == 429:\n # rate limited, wait before resuming scroll requests\n time.sleep(1)\n else:\n scrolling = False", "def scrollPage(self, pageCount):\n self['value'] = self.guiItem.getValue() + self.guiItem.getPageSize() * pageCount", "def scroll_half_page_up(event):\n scroll_backward(event, half=True)", "def __window_scrollByLines(self, lines):\n pass", "def wheel(self, start=0, end=0):\n\t\tif end == 0: end = self.leds\n\t\tsize = end - start\n\t\tself.wheelOffset += 1\n\t\tif self.wheelOffset == 384: self.wheelOffset = 0;\n\t\tfor i in range(size):\n\t\t\tcolor = (i * (384 / size) + self.wheelOffset) % 384;\n\t\t\tif color < 128:\n\t\t\t\tr = 127 - color % 128\n\t\t\t\tg = color % 128\n\t\t\t\tb = 0\n\t\t\telif color < 256:\n\t\t\t\tg = 127 - color % 128\n\t\t\t\tb = color % 128\n\t\t\t\tr = 0\n\t\t\telse:\n\t\t\t\tb = 127 - color % 128\n\t\t\t\tr = color % 128\n\t\t\t\tg = 0\n\t\t\tself.set(start + i, r, g, b)\n\t\t\tprint r,',',g,',',b\n\t\tself.update()", "def scroll():\n \n SCROLL_PAUSE_TIME = 0.5\n \n last_height = driver.execute_script(\"return window.scrollY\") \n \n tries = 0\n while True:\n down_height = last_height + 1000\n driver.execute_script(\"window.scrollTo(0,\" + str(down_height) + \")\")\n \n time.sleep(SCROLL_PAUSE_TIME)\n \n new_height = driver.execute_script(\"return window.scrollY\")\n if new_height == last_height:\n tries += 1\n if tries == 10:\n break\n else:\n tries = 0\n last_height = new_height", "def cycle(self, event=None):\n if not event:\n self.viewing += 1\n else:\n self.viewing += (event.keysym == 'Right')*2 - 1\n self.intext.set(self.viewlist[self.viewing % len(self.viewlist)])", "def _scroll(self, ui_content: UIContent, width: int, height: int) -> None:\n if self.wrap_lines():\n func = self._scroll_when_linewrapping\n else:\n func = self._scroll_without_linewrapping\n\n func(ui_content, width, height)", "def ScenarioBGEffectExcelAddScroll(builder, Scroll):\n return AddScroll(builder, Scroll)", "def scroll_down(fBody, driver):\n\toverflow = 0\n\textracted = 0\n\tdetection = 0\n\twhile True:\n\t\tdetection = extracted\n\t\tdriver.execute_script('arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;', fBody)\n\t\ttime.sleep(0.3)\n\t\textracted = len(driver.find_elements_by_xpath(\"//div[@class='isgrP']//li\"))\n\t\tif extracted == detection:\n\t\t\toverflow += 1\n\t\t\tif overflow >= 10: # break\n\t\t\t\tbreak\n\t\telse:\n\t\t\toverflow = 0\n\treturn extracted" ]
[ "0.67423654", "0.66140634", "0.6610619", "0.65981364", "0.6562769", "0.6438472", "0.6420957", "0.6374915", "0.6302239", "0.62492704", "0.6228947", "0.6200425", "0.6184353", "0.6166804", "0.61381143", "0.6054333", "0.6047623", "0.60413134", "0.6029681", "0.6023016", "0.5998706", "0.59753054", "0.59577155", "0.592704", "0.5912652", "0.5909403", "0.58726656", "0.58439034", "0.5830612", "0.58227456", "0.58169", "0.5794069", "0.57588637", "0.5754114", "0.5753528", "0.5752656", "0.5751614", "0.5736051", "0.5713861", "0.5712987", "0.56842065", "0.56753695", "0.5667802", "0.56616306", "0.55952525", "0.5567501", "0.5563978", "0.5557228", "0.5552543", "0.5544193", "0.552741", "0.5524741", "0.5516763", "0.5492853", "0.5487746", "0.5482194", "0.54417807", "0.5434444", "0.54286027", "0.542076", "0.54120743", "0.54086924", "0.53929764", "0.5375841", "0.53626037", "0.53617704", "0.5329415", "0.5325596", "0.5325596", "0.528015", "0.52617395", "0.52542734", "0.525277", "0.5247064", "0.5242951", "0.5241704", "0.5235395", "0.52267534", "0.52201265", "0.5214328", "0.52116936", "0.521098", "0.5192459", "0.5182008", "0.5174727", "0.51732975", "0.51551586", "0.51546043", "0.51529115", "0.5149449", "0.51449406", "0.5144341", "0.5143031", "0.5140224", "0.512904", "0.51149887", "0.51027584", "0.5084858", "0.50677633", "0.5045081" ]
0.62142295
11
Attach the view to the GUI.
def attach(self, gui): ManualClusteringView.attach(self, gui) # ScalingMixin.attach(self, gui) # self.actions.add(self.toggle_show_labels, checkable=True, checked=self.do_show_labels) # self.actions.add(self.toggle_auto_scale, checkable=True, checked=self.auto_scale) self.actions.add(self.switch_origin) self.actions.separator() self.actions.add(self.go_to, prompt=True, prompt_default=lambda: str(self.time)) self.actions.separator() self.actions.add(self.go_to_start) self.actions.add(self.go_to_end) self.actions.separator() self.actions.add(self.shift, prompt=True) self.actions.add(self.go_right) self.actions.add(self.go_left) self.actions.add(self.jump_right) self.actions.add(self.jump_left) self.actions.separator() self.actions.add(self.widen) self.actions.add(self.narrow) self.actions.separator() self.set_interval()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def _add_view(self, window, view):\r\n\r\n # If no 'relative_to' is specified then the view is positioned\r\n # relative to the editor area.\r\n if len(view.relative_to) > 0:\r\n relative_to = window.get_view_by_id(view.relative_to)\r\n \r\n else:\r\n relative_to = None\r\n\r\n # Add the view to the window.\r\n window.add_view(\r\n view, view.position, relative_to, (view.width, view.height)\r\n )\r\n\r\n return", "def attach(self, gui):\n super(TraceView, self).attach(gui)\n\n self.actions.add(self.toggle_show_labels, checkable=True, checked=self.do_show_labels)\n self.actions.add(\n self.toggle_highlighted_spikes, checkable=True, checked=self.show_all_spikes)\n self.actions.add(self.toggle_auto_scale, checkable=True, checked=self.auto_scale)\n self.actions.add(self.switch_origin)\n self.actions.separator()\n\n self.actions.add(\n self.go_to, prompt=True, prompt_default=lambda: str(self.time))\n self.actions.separator()\n\n self.actions.add(self.go_to_start)\n self.actions.add(self.go_to_end)\n self.actions.separator()\n\n self.actions.add(self.shift, prompt=True)\n self.actions.add(self.go_right)\n self.actions.add(self.go_left)\n self.actions.add(self.jump_right)\n self.actions.add(self.jump_left)\n self.actions.separator()\n\n self.actions.add(self.widen)\n self.actions.add(self.narrow)\n self.actions.separator()\n\n self.actions.add(self.go_to_next_spike)\n self.actions.add(self.go_to_previous_spike)\n self.actions.separator()\n\n self.set_interval()", "def __init__(self):\n self.view = GuiView(self)\n return", "def attach_edgework_view(self, view: \"EdgeworkView\") -> None:\n self._edgework_view = view", "def on_show_view(self):\n arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()", "def debug_view(self):\n\n self.view.show()", "def show(self) -> None:\n show(self._layout)", "def show(self):\n self.Show()", "def setup(self):\n self.ui.setup_window()", "def on_show_view(self):\n\n # Makes the background darker\n arcade.set_background_color([rgb - 50 for rgb in arcade.color.DARK_BLUE_GRAY])\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def setup_gui(self):\n # if there are more than 1 visualizer we need to assure that there\n # will not be tag conflicts\n BaseRealTimeVisualizer.setup_gui_lock.acquire()\n # look for valid tag\n dpg.create_context()\n\n self.id = 0\n while dpg.does_item_exist(f'main_window_{self.id}'):\n self.id += 1\n\n with dpg.texture_registry(show=False):\n dpg.add_dynamic_texture(\n width=self.width,\n height=self.height,\n default_value=np.zeros((self.width, self.height, 3)),\n tag=f'input_image_texture_{self.id}',\n )\n\n with dpg.window(\n tag=f'main_window_{self.id}',\n no_title_bar=True,\n autosize=True\n ):\n dpg.add_image(\n texture_tag=f'input_image_texture_{self.id}',\n tag=f'image_render_{self.id}',\n pos=(_PADDING, _PADDING)\n )\n\n dpg.set_global_font_scale(_FONT_SCALE)\n\n if self.id == 0:\n dpg.set_primary_window(f'main_window_{self.id}', True)\n dpg.create_viewport(\n title=self.title,\n 
width=self.width + _PADDING*2,\n height=self.height + _PADDING*2,\n resizable=True\n )\n dpg.setup_dearpygui()\n dpg.show_viewport()\n elif self.id == 1:\n dpg.set_primary_window('main_window_0', False)\n\n BaseRealTimeVisualizer.setup_gui_lock.release()", "def _connectView(self):\n self._view.select_asset = self.select_asset\n self._view.add_assets = self.add_assets\n self._view.remove_assets = self.remove_assets\n self._view.update_assets = self.update_assets\n self._view.commit = self.commit", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)", "def start_ui(self):\n\t\tself.start_animation()\n\t\tself.app.exec()", "def show_gui():\n pass", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)", "def show(self, window):\r\n\r\n return", "def gui(self):\n return gui", "def visualise(self):\n self.w = VisualizeSetupBox(self.master, self._df)\n self.master.wait_window(self.w.top)", "def create_view(self):\n title_label = Label(self, text='Upload, Preview, Describe and Visualize',\n fg='blue', font=('Arial', 16))\n title_label.pack(fill=BOTH, expand=True)\n select_file_button = Button(self, background='White', text='Select Data File [.csv, .xlsx, .xls, .json, .txt]',\n command=self.start_upload)\n select_file_button.pack(padx=5, pady=10)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def __call__(self):\n self.show()", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def start(self) -> Gui:\n self.show()\n self.app.exec_()\n\n return self", "def show(self):\n self.wid.show()", "def iniciaUI(self):\n\n self.setGeometry(100,100, 300, 200)\n self.setWindowTitle(\"Formulario\")\n self.displayWidgets()\n\n self.show()", "def show(self):\n self.driver.send(self.canvas)", "def viewWidgetCreated(self, view, plot):\n return", "def on_show_view(self):\r\n self.setup()\r\n arcade.set_background_color(BACKGROUND_COLOR)", "def show_window(self):\n self.show()", "def show(self):\n self.scene().show()", "def show(self, parent=None):\n # Some Gui's don't like to process all events from a single \n # call to process events (Qt), and pumping the loop is not\n # reliable. Instead, we just schedule the call to set_visible \n # to occur after we start the event loop and with a priority \n # that is less than any relayouts the may be triggered by \n # pending events. 
This means that the layout queue should \n # finish processing, and then the window will be shown.\n self._prep_window()\n app = self.toolkit.app\n app.schedule(self.set_visible, (True,), priority=75)\n app.start_event_loop()", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def add_views_widget(self):\n axial_view = QtWidgets.QPushButton(\"Axial\")\n coronal_view = QtWidgets.QPushButton(\"Coronal\")\n sagittal_view = QtWidgets.QPushButton(\"Sagittal\")\n views_box = QtWidgets.QGroupBox(\"Views\")\n views_box_layout = QtWidgets.QVBoxLayout()\n views_box_layout.addWidget(axial_view)\n views_box_layout.addWidget(coronal_view)\n views_box_layout.addWidget(sagittal_view)\n views_box.setLayout(views_box_layout)\n self.grid.addWidget(views_box, 3, 0, 2, 2)\n axial_view.clicked.connect(self.set_axial_view)\n coronal_view.clicked.connect(self.set_coronal_view)\n sagittal_view.clicked.connect(self.set_sagittal_view)", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def __newDocumentView(self):\n aw = self.activeWindow()\n if aw:\n self.newEditorView(aw.getFileName(), aw, aw.getFileType())", "def __init__(self):\n\n # GUI constructor\n super().__init__()\n\n # graphics scene\n self.scene = QGraphicsScene(0, 0, 400, 200)\n self.scene.addItem(RectangleRoi(50, 10, 50, 40))\n self.scene.addItem(RectangleRoi(100, 50, 100, 20))\n self.scene.addItem(EllipseRoi(75, 20, 60, 20))\n self.scene.addItem(EllipseRoi(120, 70, 8, 8))\n\n # graphics view\n self.viewer = QGraphicsView(self.scene)\n self.viewer.setSceneRect(0, 0, self.scene.width(), self.scene.height())\n self.viewer.setInteractive(True)\n self.viewer.show()\n\n # layout\n layout = QVBoxLayout()\n layout.addWidget(self.viewer)\n self.setLayout(layout)\n self.resize(self.scene.width(), self.scene.height())", "def add_view_pl_button(self):\n self.view_pl = QPushButton(\"View Playlist\")\n self.view_pl.clicked.connect(self.view_pl_btn_push)\n self.hbtnbox.addWidget(self.view_pl)", "def show(self):\r\n\t\tself.frame.Show(True)", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def ShowMe(self, event):\n self.Show(True)", "def main(self: object) -> None:\n print(\"[View] main\")\n self.mainloop()", "def create(self, parent):\n self.widget = QFrame(parent)", "def attachViewToGlasses(self,visNode):\n\t\tself.head_tracker = viz.link(visNode,viz.NullLinkable,srcFlag=viz.ABS_PARENT)\n\t\t\n\t\t\"\"\"\n\t\tCreate CaveView object for manipulating the virtual viewpoint.\n\t\tcave_origin is a node that controls the position of the cave within the virtual world.\n\t\tFor example, if you wanted to simulate the cave user flying through an environment,\n\t\tyou would apply the transformation to the cave_origin 
node.\n\t\t\"\"\"\n\t\tcave_origin = vizcave.CaveView(self.head_tracker)\n\n\t\t\"\"\"\n\t\tThe cave_origin node is a standard Vizard node that you can apply any position/rotation to.\n\t\tIn this example we will create a keyboard/mouse tracker (using arrow keys) and link it to\n\t\tthe cave_origin node, allowing us to fly the cave user through the virtual environment.\n\t\t\"\"\"\n\n\t\torigin_tracker = viztracker.KeyboardMouse6DOF()\n\t\torigin_link = viz.link(origin_tracker, cave_origin)\n\t\torigin_link.setMask(viz.LINK_POS)\n\t\t\n\t\t\n\t\t#head_tracker.setMask(viz.LINK_POS)\n\n\t\t\n\t\t\"\"\"\n\t\tPass the head tracker to the cave object so it can automatically update the\n\t\tview frustums every frame based on the current head position relative to each wall.\n\t\t\"\"\"\n\t\tself.cave.setTracker(self.head_tracker)", "def on_show_view(self):\n self.window.background_color = arcade.color.BLACK", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def __init__(self, parent: View):\n self.parent = parent\n self.root = self.parent.root\n # Content frame\n self.frame = tk.Frame(self.parent.frame)\n # Reference\n self.visible = False", "def double_clicked_to_view(self):\n\n # TODO need this method? 
better in init to go to view_file\n self.view_file()", "def initUI(self) -> None:\n ratio = 70\n width_to_set = (ratio * self.get_current_window_info()[0]) / 100.0\n height_to_set = (ratio * self.get_current_window_info()[1]) / 100.0\n self.setGeometry(200, 100, width_to_set, height_to_set)\n self.createTable()\n # Add box layout, add table to box layout and add box layout to widget\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.tableWidget)\n self.setLayout(self.layout)\n self.setWindowTitle('View files')\n self.show()", "def show_add_actor(self):\n\t\tformulario = view_form_actor.Form(self)\n\t\tformulario.exec_()\n\t\tself.load_data()", "def show(self):\n self.frame.grid()\n self.visible = True", "def on_show_view(self):\n self.window.background_color = arcade.color.WHITE", "def show(self):\n self._impl.show()", "def do_activate(self, *args, **kwargs):\n self.register_signals()\n self.perform_setup()\n assert self.main_window\n self.main_window.show()\n self.hold()", "def update_view(self): \n raise NotImplementedError(\"Widget descendents MUST implement the update_view() method!\")", "def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def initializeUI(self):\n self.setGeometry(100, 100, 300, 200)\n self.setWindowTitle('Event Handling Example')\n\n self.show()", "def add_tree_view(self):\n self.data_view = QTreeView()\n self.data_view.setRootIsDecorated(False)\n self.data_view.setAlternatingRowColors(True)\n self.mbox.addWidget(self.data_view)\n\n self.data_layout = QHBoxLayout()\n self.data_layout.addWidget(self.data_view)\n\n self.model = self.create_track_model(self)\n self.data_view.setModel(self.model)", "def initGui(self):\n from p4_view import Gui\n self.updateStatus(\"Launching GUI...\")\n self.gui = Gui(self, self.lmap)\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.gui.setPossGoals(self.cfg[\"POSS_GOALS\"])\n #GHD\n self.gui.setMapName(self.cfg[\"MAP_FILE\"])\n self.updateStatus(\"OK\")\n self.gui.mainloop()", "def ui(self, ui):\n\n self._ui = ui", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def menu_design_a_gui_with_wxglade(self, event=None):\n self.parentPanel.design_a_gui_with_wxglade()", "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def show(self):\n self._window.show()", "def do_activate(self):\n\n Gtk.Application.do_activate(self)\n self.initiate_plugins()\n self.other[\"menu_button\"].set_menu_model(self.prepare_menu())\n self.output_window.show_all()\n self.window.show_all()", "def register_plugin(self):\n self.create_toggle_view_action()\n\n self.main.add_dockwidget(self)", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n 
self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def setViewComponent(self, viewComponent):\n self.viewComponent = viewComponent", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def _init_ui(self):\n self.setWindowTitle(\"HB Havens: resultaten\")\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.setLayout(QtWidgets.QVBoxLayout())\n\n # Create figure\n self.figure = Figure(figsize=(4,4))\n self.ax = self.figure.add_subplot()\n\n self.ax.grid()\n self.ax.spines['right'].set_visible(False)\n self.ax.spines['top'].set_visible(False)\n self.ax.tick_params(axis='y', color='0.75')\n self.ax.tick_params(axis='x', color='0.75')\n self.ax.set_aspect(1)\n\n # Add canvas\n self.canvas = FigureCanvasQTAgg(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.layout().addWidget(self.canvas)\n\n # Add location selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Locatie:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.location_combobox = QtWidgets.QComboBox()\n self.location_combobox.addItems(self.result_locations)\n self.location_combobox.setCurrentIndex(self.locid)\n self.location_combobox.currentIndexChanged.connect(self._set_location)\n hbox.addWidget(self.location_combobox)\n self.layout().addLayout(hbox)\n\n # Add parameter selection\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel('Parameter:')\n label.setFixedWidth(80)\n hbox.addWidget(label)\n self.parameter_combobox = QtWidgets.QComboBox()\n self.input_parameters = self.modelunctab.mainmodel.hydraulic_loads.result_columns[:]\n self.parameter_combobox.addItems(self.input_parameters)\n self.parameter_combobox.currentIndexChanged.connect(self._set_parameter)\n self.parameter_combobox.setCurrentIndex(0)\n self._set_parameter()\n self.figure.tight_layout()\n hbox.addWidget(self.parameter_combobox)\n self.layout().addLayout(hbox)\n\n # Line\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n\n self.layout().addWidget(line)\n\n # Add ok/close\n self.closebutton = QtWidgets.QPushButton('Sluiten')\n self.closebutton.clicked.connect(self.close)\n self.layout().addWidget(self.closebutton, 0, QtCore.Qt.AlignRight)\n\n self.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)", "def createView(self):\n logging.debug(\"ShortestPathUI.createView function started\")\n formLayout = QFormLayout()\n\n self.fromLineEdit = 
QLineEdit()\n self.fromLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.fromLineEdit))\n formLayout.addRow(\"From: \", self.fromLineEdit)\n\n self.toLineEdit = QLineEdit()\n self.toLineEdit.textChanged.connect(partial(self.__clearErrorInfo,\n self.toLineEdit))\n formLayout.addRow(\"To: \", self.toLineEdit)\n\n self.pathLineEdit = QLineEdit()\n self.pathLineEdit.setReadOnly(True)\n formLayout.addRow(\"Path: \", self.pathLineEdit)\n\n self.lengthLabel = QLabel()\n formLayout.addRow(\"Length: \", self.lengthLabel)\n self.__generalLayout.addLayout(formLayout, 0, 0)\n\n self.OkButton = QPushButton(\"Ok\")\n self.OkButton.setFixedWidth(50)\n self.OkButton.clicked.connect(self.updatePath)\n self.__generalLayout.addWidget(self.OkButton, 0, 1, alignment=Qt.AlignTop)\n\n logging.debug(\"ShortestPathUI.createView function ended\\n\")", "def __init__(self):\n # Root window\n self.root = tk.Tk()\n self.root.title(\"Crossword\")\n # Padding frame\n self.frame = tk.Frame(self.root)\n self.frame.pack(fill=\"both\", padx=PAD, pady=PAD)\n # Initialize widget groups\n self.header = HeaderView(self)\n self.puzzle = PuzzleView(self)\n self.clues = CluesView(self)\n # Show widgets\n self.header.show()\n self.puzzle.show()\n self.clues.show()", "def onInsert(self):\n self.mainWindow.insert()", "def initGUI(self):\n\n\t\t# Set main frame's location \n\t\tself.grid(row=0, column=0, sticky=\"nsew\")\n\n\t\t# Set path entry frame and its location\n\t\tself.entryFrame = Frame(self, relief = RAISED, borderwidth = 1)\n\t\tself.entryFrame.pack(fill = BOTH, expand = False)\n\t\t# Make label\n\t\tif self.message:\n\t\t\tmessageLabel = Label(self.entryFrame, text = self.message, font=(\"Bradley\", 10))\n\t\t\tmessageLabel.pack(anchor=W, padx=0, pady=0)\n\n\t\t# Set path entry and its location\n\t\tself.filePathEntry = Entry(self.entryFrame, bd = 4, width = 50)\n\t\tself.filePathEntry.pack(side = LEFT, padx=2, pady=1)", "def __create_ui(self):\n vbox = gtk.VBox()\n\n # Create the viewable area of the file browser\n self.__view_port = gtk.ScrolledWindow()\n self.__view_port.set_policy(gtk.POLICY_AUTOMATIC,\n gtk.POLICY_AUTOMATIC)\n # Create the tree view and add it to the viewable area\n self.__tree_view = ProjectTreeView()\n self.__project_explorer = ProjectExplorer(self.window, self.__tree_view)\n self.__tree_view.connect('button_press_event',\n self.__on_treeview_button_press_event)\n self.__project_explorer.set_repository()\n self.__view_port.add(self.__tree_view)\n # Create the toolbar\n hbox = gtk.HBox()\n toolbar = gtk.Toolbar()\n toolbar.set_style(gtk.TOOLBAR_ICONS)\n toolbar.set_icon_size(gtk.ICON_SIZE_MENU)\n back = gtk.ToolButton(gtk.STOCK_GO_UP)\n back.connect('clicked', self.__on_back_clicked)\n toolbar.insert(back, 0)\n toolbar.insert(gtk.SeparatorToolItem(), 1)\n refresh = gtk.ToolButton(gtk.STOCK_REFRESH)\n refresh.connect('clicked', self.__on_refresh_clicked)\n toolbar.insert(refresh, 2)\n hbox.pack_start(toolbar, True, True, 0)\n vbox.pack_start(hbox, False, False, 0)\n vbox.pack_start(self.__view_port, True, True, 0)\n\n # Setup the create the buttons for:\n # New File, New Folder\n # ----------------------------------------------------------------------\n hbox1 = gtk.VBox()\n toolbar_actions = gtk.Toolbar()\n toolbar_actions.set_style(gtk.TOOLBAR_ICONS)\n toolbar_actions.set_icon_size(gtk.ICON_SIZE_MENU)\n new_file = gtk.ToolButton(gtk.STOCK_NEW)\n new_file.connect('clicked', self.__on_new_file_clicked_cb)\n toolbar_actions.insert(new_file, 0)\n new_dir = gtk.ToolButton(gtk.STOCK_OPEN) # 
TODO: use a custom icon\n new_dir.connect('clicked', self.__on_new_dir_clicked_cb)\n toolbar_actions.insert(new_dir, 1)\n hbox1.pack_start(gtk.HSeparator(), True, True, 0)\n hbox1.pack_start(toolbar_actions, True, True, 0)\n vbox.pack_end(hbox1, False, False, 0)\n # ----------------------------------------------------------------------\n vbox.show_all()\n # Attach the project explorer to GMate's side panel\n self.__side_panel = self.window.get_side_panel()\n self.__side_panel.add_tab(vbox, msg0005, gtk.STOCK_HARDDISK)", "def Show(self):\r\n return Control.Show(self)", "def add_to(self, main_lay):\n main_lay.addWidget(self._tab)\n self.setParent(main_lay.parentWidget())", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def show_frame(self, container):\r\n\r\n frame = self.frames[container]\r\n\r\n frame.tkraise()", "def vp_start_gui():\n global val, w, root\n root = tk.Tk()\n plot_support.set_Tk_var()\n top = Toplevel1(root)\n plot_support.init(root, top)\n root.mainloop()", "def setup_ui(self):\n self.setLayout(self.main_layout)\n\n self.pv_layout.addWidget(self.pv_protocol_cmb)\n self.pv_layout.addWidget(self.pv_name_line_edt)\n self.pv_layout.addWidget(self.pv_connect_push_btn)\n QTimer.singleShot(0, self.pv_name_line_edt.setFocus)\n\n self.curve_settings_tab.setLayout(self.curves_tab_layout)\n self.chart_settings_tab.setLayout(self.chart_settings_layout)\n self.setup_chart_settings_layout()\n\n self.tab_panel.addTab(self.curve_settings_tab, \"Curves\")\n self.tab_panel.addTab(self.chart_settings_tab, \"Chart\")\n self.tab_panel.hide()\n\n self.crosshair_settings_layout.addWidget(self.enable_crosshair_chk)\n self.crosshair_settings_layout.addWidget(self.cross_hair_coord_lbl)\n\n self.chart_control_layout.addWidget(self.auto_scale_btn)\n self.chart_control_layout.addWidget(self.view_all_btn)\n self.chart_control_layout.addWidget(self.reset_chart_btn)\n self.chart_control_layout.addWidget(self.pause_chart_btn)\n self.chart_control_layout.addLayout(self.crosshair_settings_layout)\n self.chart_control_layout.addWidget(self.import_data_btn)\n self.chart_control_layout.addWidget(self.export_data_btn)\n\n self.chart_control_layout.setStretch(4, 15)\n self.chart_control_layout.insertSpacing(5, 350)\n\n self.chart_layout.addWidget(self.chart)\n self.chart_layout.addLayout(self.chart_control_layout)\n\n self.chart_panel.setLayout(self.chart_layout)\n\n self.splitter.addWidget(self.chart_panel)\n self.splitter.addWidget(self.tab_panel)\n self.splitter.setStretchFactor(0, 0)\n self.splitter.setStretchFactor(1, 1)\n\n self.charting_layout.addWidget(self.splitter)\n\n self.body_layout.addLayout(self.pv_layout)\n self.body_layout.addLayout(self.charting_layout)\n self.body_layout.addLayout(self.chart_control_layout)\n self.main_layout.addLayout(self.body_layout)\n\n self.enable_chart_control_buttons(False)", "def open_gui():\n guiController.main()", "def launch_gui(instance=None):\n app = Controller(instance)\n app.RunGui()", "def createUI(self):\n self.widget = QWidget(self)\n self.setCentralWidget(self.widget)\n\n # In this widget, the video will be drawn\n if sys.platform == \"darwin\": # for MacOS\n from PyQt5.QtWidgets import QMacCocoaViewContainer\n self.videoframe = QMacCocoaViewContainer(0)\n else:\n self.videoframe = QFrame()\n self.palette = self.videoframe.palette()\n self.palette.setColor (QPalette.Window,\n QColor(0,0,0))\n self.videoframe.setPalette(self.palette)\n self.videoframe.setAutoFillBackground(True)\n\n self.hbuttonbox = QHBoxLayout()\n self.playbutton 
= QPushButton(\"Run my program\")\n self.hbuttonbox.addWidget(self.playbutton)\n self.playbutton.clicked.connect(partial(self.drone_vision.run_user_code, self.playbutton))\n\n self.landbutton = QPushButton(\"Land NOW\")\n self.hbuttonbox.addWidget(self.landbutton)\n self.landbutton.clicked.connect(self.drone_vision.land)\n\n self.stopbutton = QPushButton(\"Quit\")\n self.hbuttonbox.addWidget(self.stopbutton)\n self.stopbutton.clicked.connect(self.drone_vision.close_exit)\n\n self.vboxlayout = QVBoxLayout()\n self.vboxlayout.addWidget(self.videoframe)\n self.vboxlayout.addLayout(self.hbuttonbox)\n\n self.widget.setLayout(self.vboxlayout)\n\n # the media player has to be 'connected' to the QFrame\n # (otherwise a video would be displayed in it's own window)\n # this is platform specific!\n # you have to give the id of the QFrame (or similar object) to\n # vlc, different platforms have different functions for this\n if sys.platform.startswith('linux'): # for Linux using the X Server\n self.mediaplayer.set_xwindow(self.videoframe.winId())\n elif sys.platform == \"win32\": # for Windows\n self.mediaplayer.set_hwnd(self.videoframe.winId())\n elif sys.platform == \"darwin\": # for MacOS\n self.mediaplayer.set_nsobject(int(self.videoframe.winId()))", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def show(self):\n # This function has to be placed here (and not in the user.py script)\n self.showMaximized()\n visapp.run()", "def view():\r\n # collect figures in list\r\n figures = list(map(plt.figure, plt.get_fignums()))\r\n # start app\r\n app = QtWidgets.QApplication(sys.argv)\r\n main = Main()\r\n\r\n if figures:\r\n for count, figure in enumerate(figures):\r\n # main names for figures\r\n name = f\"{figure.number}\"\r\n # aliases for figures\r\n titles = [figure.axes[0].get_title(loc=i) for i in [\r\n \"left\", \"center\", \"right\"]]\r\n titles = [i for i in titles if i]\r\n title = f\"{count+1}- {titles[0]}\" if titles else \"\"\r\n axes_labels = f\"{count+1}- {figure.axes[0].get_ylabel()} vs {figure.axes[0].get_xlabel()} \"\r\n fignum = f\"Figure {figure.number}\"\r\n # Append figure to App\r\n main.append_fig(title, axes_labels, fignum, name, figure)\r\n\r\n main.show()\r\n sys.exit(app.exec_())", "def iniciaUI(self):\n\n self.setGeometry(100,100, 250, 250)\n self.setWindowTitle(\"Login\")\n self.displayWidgets()\n\n self.show()", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def sync_view(self):\n new_callbacks = []\n for c in self._ngl_displayed_callbacks_after_loaded:\n if (c._method_name == 'loadFile' and\n 'defaultRepresentation' in c._ngl_msg['kwargs']):\n # set to False to avoid autoView\n # so subsequent display of `self` won't reset view orientation.\n c._ngl_msg['kwargs']['defaultRepresentation'] = False\n msg = c._ngl_msg\n msg['last_child'] = True\n def callback(widget, msg=msg):\n widget.send(msg)\n callback._method_name = msg['methodName']\n callback._ngl_msg = msg\n new_callbacks.append(callback)\n\n msg = {}\n msg['target'] = 'Widget'\n msg['type'] = 'call_method'\n msg['methodName'] = 'set_representation_from_backend'\n msg['args'] = []\n msg['kwargs'] = {}\n msg['last_child'] = True\n\n def callback(widget, msg=msg):\n widget.send(msg)\n callback._method_name = msg['methodName']\n callback._ngl_msg = msg\n\n 
new_callbacks.append(callback)\n self._fire_callbacks(new_callbacks)", "def initView(self):\n #Draw the Session View\n self._sessionView = SessionView(self._app)\n leftDockWidget = QtGui.QDockWidget(\"Session\", self)\n leftDockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n leftDockWidget.setWidget(self._sessionView)\n leftDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetMovable | QtGui.QDockWidget.DockWidgetClosable)\n\n #temporary !\n titleBar = QtGui.QWidget()\n leftDockWidget.setTitleBarWidget(titleBar)\n\n self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, leftDockWidget)\n\n #Draw the central widget\n self.mdiArea = QtGui.QMdiArea()\n self.setCentralWidget(self.mdiArea)\n\n #Draw the Player View\n #rightDockWidget = QtGui.QDockWidget(\"Player\", self)\n #rightDockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea | QtCore.Qt.RightDockWidgetArea)\n ##rightDockWidget.setWidget(self.player)\n #rightDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetMovable | QtGui.QDockWidget.DockWidgetClosable)\n #self.addDockWidget(QtCore.Qt.RightDockWidgetArea, rightDockWidget)", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def show(self):\n\n self.serial = self.parent.board.serial\n self.deiconify() # Show window\n self.visible = True\n\n self.input_entry.focus()\n\n self.start_repl()" ]
[ "0.66884124", "0.66884124", "0.66884124", "0.66009265", "0.6533588", "0.6503647", "0.6470148", "0.6323154", "0.62344205", "0.6183854", "0.6170608", "0.6120538", "0.60880315", "0.60534596", "0.604596", "0.6022283", "0.5969684", "0.5946267", "0.5931756", "0.58913046", "0.58907974", "0.5853351", "0.585087", "0.5800141", "0.57991755", "0.5796315", "0.57949257", "0.5784987", "0.5778917", "0.5769542", "0.5746932", "0.5740254", "0.5734591", "0.57080895", "0.57065356", "0.5693382", "0.568715", "0.56771994", "0.567686", "0.5670405", "0.56616807", "0.56495243", "0.56469584", "0.56265676", "0.5624919", "0.5603306", "0.55783856", "0.55741584", "0.5573626", "0.5565059", "0.5563092", "0.5562166", "0.55614114", "0.5552535", "0.55517936", "0.55488163", "0.5546853", "0.55457973", "0.55269676", "0.5521505", "0.5518317", "0.5506793", "0.55010587", "0.54962957", "0.5493887", "0.5489668", "0.54862726", "0.5485788", "0.5485017", "0.54840213", "0.5483172", "0.5479271", "0.54767317", "0.54728967", "0.5469265", "0.54574794", "0.54548055", "0.5444485", "0.54185516", "0.5417486", "0.5405626", "0.53975874", "0.5396227", "0.53934354", "0.53897274", "0.5387121", "0.5382321", "0.53773755", "0.5372333", "0.53633296", "0.5360154", "0.53569543", "0.5355183", "0.534426", "0.53434926", "0.53365225", "0.53271925", "0.5323053", "0.5321606", "0.53143716" ]
0.6581134
4
Whether to show the channels from top to bottom (`top` option, the default), or from bottom to top (`bottom`).
def origin(self): return self._origin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __redrawChannels(self):\n self.__channelWin.clear()\n all_chans = self._client.getChannels()\n all_chans.sort(key=lambda c: c.getName())\n count = min(len(all_chans), self.__channelWin.getmaxyx()[0])\n show = all_chans[:count]\n for c in show:\n cur = self._client.currentChannel() == c\n if cur:\n attr = curses.A_REVERSE\n elif c in self._client.getJoined():\n attr = curses.A_BOLD\n else:\n attr = curses.A_DIM\n if c != self._client.getNoneChannel():\n self.__channelWin.addstr(\n \"{chan}\\n\".format(chan=c.getName()),\n attr\n )", "def showTopView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showTopView()\r\n self.midsagittalView = True\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False", "def bottom_option():\n active = get_active_window()\n Width= get_middle_Width(active)\n Height=get_bottom_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def showBottomView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showBottomView()\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = True", "def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)", "def keep_top_or_bottom(self):\n return self._keep_top_or_bottom", "def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def showChannels(img, ypos = 0, wait=False):\n num_channels = img.shape[2] if len(img.shape) == 3 else 1\n if num_channels == 1:\n label = 'One channel'\n cv2.imshow(label, img)\n cv2.moveWindow(label, 0, ypos)\n else:\n for i in range(num_channels):\n label = 'Channel ' + str(i)\n cv2.imshow(label, img[:,:,i])\n cv2.moveWindow(label, i * img.shape[1], ypos)\n if wait:\n if cv2.waitKey() & 0xFF == ord('q'):\n sys.exit(0)", "def showChannels(self):\n print(\"Channels:\")\n for c in self.channels:\n if c.role != channel_pb2.Channel.Role.DISABLED:\n cStr = stripnl(MessageToJson(c.settings))\n print(\n f\" {channel_pb2.Channel.Role.Name(c.role)} psk={pskToString(c.settings.psk)} {cStr}\")\n publicURL = self.getURL(includeAll=False)\n adminURL = self.getURL(includeAll=True)\n print(f\"\\nPrimary channel URL: {publicURL}\")\n if adminURL != publicURL:\n print(f\"Complete URL (includes all channels): {adminURL}\")", "def draw_top(self):\n return group()", "def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))", "def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def display(self, channel1 = False, channel2 = False, channel3 = False, channel4 = False):\t\t\n\t\tself.scope.write(\":CHANnel1:DISPlay %s\"%bool2ONOFF(channel1))\n\t\tself.scope.write(\":CHANnel2:DISPlay %s\"%bool2ONOFF(channel2))\n\t\tself.scope.write(\":CHANnel3:DISPlay %s\"%bool2ONOFF(channel3))\n\t\tself.scope.write(\":CHANnel4:DISPlay %s\"%bool2ONOFF(channel4))", "def switch_origin(self):\n self.origin = 'bottom' if self.origin == 'top' else 'top'", "def show_grid(self, **kwargs):\n kwargs.setdefault('grid', 
'back')\n kwargs.setdefault('location', 'outer')\n kwargs.setdefault('ticks', 'both')\n return self.show_bounds(**kwargs)", "def top_visible(self) -> bool:\n return self.vertical_scroll == 0", "def show_filters(self):\n w_mat = np.transpose(self.sess.run(self.W_fc1))\n\n plt.figure(figsize=(10,10), facecolor='w', edgecolor='w')\n plot_positions = [(0,0),(0,1),(1,0),(1,1)]\n for ch in range(self.n_input_channels):\n grid,_ = ia.image_grid_RGB( w_mat,\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res), n_x=6, n_y=6,\n channel_order=(ch,ch,ch), amplitude_scaling=(1,1,1),\n line_color=1, auto_scale=True, return_borders=False )\n colormax = np.abs(grid).max()\n with sns.axes_style(\"white\"):\n ax = plt.subplot2grid( (2,2), plot_positions[ch] )\n ax.imshow( grid[:,:,0], interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Hidden units, channel {}\".format(ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def use_config_backorders(self):\n return self._use_config_backorders", "def config_independent_frames(self):\n return {'standard': 'dispname','bias': None, 'dark': None}", "def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))", "def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")", "def show_trunk(height=2):\n for k in range(height):\n print(\"|\".center(GROUND_WIDTH))", "def bottom_right_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_right_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def getDefaultDisplayMode(self):\n return \"Wireframe\"", "def show_board(self):\n board_vis = f\"\\n{'*' * 22}Board state{'*' * 23}\\n\"\n str_p2_store=\" \"+str(self.p2_store()) if self.p2_store()<10 else str(self.p2_store())\n board_vis += (f\" {str_p2_store} - | \" +\n \" || \".join(\n [i if len(i) == 2 else ' ' + i for i in list(map(str, self.p2_pits()[::-1]))]) + \" | \\n\")\n board_vis += f\"{'-------' * (self.M + 2)}\\n\"\n board_vis += (\" | \" + \" || \".join(\n [i if len(i) == 2 else ' ' + i for i in list(map(str, self.p1_pits()))]) +\n f\" | - {self.p1_store()}\\n\")\n board_vis += f\"{'*' * 56}\\n\"\n print(board_vis)", "def showHidden(*args, above: bool=True, allObjects: bool=True, below: bool=True, lastHidden:\n bool=True, **kwargs)->None:\n pass", "def hits_top_or_bottom(self):\n if self.y >= self.scene.screen.get_height() - self.image.get_height() or self.y <= 0:\n return True\n else:\n return False", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def show_sequence(data, ordering='channel_last'):\n xb, yb = data\n batch_size = xb.shape[0]\n stacked_size = xb.shape[1]\n \n fig = plt.figure(figsize=(5 * stacked_size, 5 * 2 * batch_size))\n for i in range(batch_size):\n x = xb[i]\n for j in range(stacked_size):\n fig.add_subplot(2 * batch_size, stacked_size, stacked_size * (2 * i) + j + 1) \n show_image(x[j])\n \n if yb[i] is not None:\n y = yb[i]\n else:\n y = np.zeros_like(xb[i])\n \n for j in range(stacked_size):\n fig.add_subplot(2 * batch_size, stacked_size, stacked_size * (2 * i + 1) + j + 1)\n if ordering == 'channel_first':\n y[j] = np.moveaxis(y[j], 0, -1)\n \n if y.shape[-1] == 1:\n show_image(y[j])\n 
else:\n show_label(y[j])\n\n return fig", "def __str__(self):\n return \"Bottom -> \" + repr(self._items) + \" <- Top\"", "def IsTopDockable(self):\r\n \r\n return self.HasFlag(self.optionTopDockable)", "def canStack(bottom, top):\n bw, bh, bd = bottom\n tw, th, td = top\n return (bw < tw) and (bh < th) and (bd < td)", "def config_independent_frames(self):\n return {'standard': 'dispname', 'bias': None, 'dark': None}", "def visible(self, show):", "def isTop(self):\n return self.top", "def updateChannels(self):\n self.__redrawChannels()\n self.__update()", "def bottom_left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_bottom_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_bottom_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def setSurfaceVisibility(visible='both'):\n vdict = {'both':'BOTH','top':'TOP','bottom':'BOTTOM'}\n dislin.survis(vdict[visible])", "def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)", "def DrawTop(screen, top_x, top_y, top_len, top_width):\n pygame.draw.rect(screen, (255,0,0),(top_x, top_y, top_len*2, top_width*2), 4)", "def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1", "def middlemakevisible(self, pos):\n pass", "def get_active_end_b(self, orientation):\r\n if orientation == \"height\":\r\n # first button is not displayed, second is\r\n if self.number % 2 == 1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n # first button is displayed, second is not\r\n if self.number % 2 == 0:\r\n return True\r\n else:\r\n return False", "def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")", "def display_cli(conversations, alt_speaker, human_speaker):\n for speaker, speech in conversations:\n if speaker == END_OF_CONVO:\n print(\"-\" * 20 + \"END OF CONVERSATION\" + \"-\" * 20)\n elif speaker == alt_speaker:\n print(\"%-15s: %s\" % (speaker[:15], speech))\n else:\n prBlueBG(\"%-15s: %s\" % (speaker[:15], speech))", "def top_right_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_top_Height()\n PosX = get_right_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def setDisplayMode(self):\n self.step = (self.max_step + int(self.include))\n self.display = Fourier.inverseTransform(\n self.coefficients, self.display_number)", "def getDefaultDisplayMode(self):\n return \"Shaded\"", "def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True", "def switch_frequency_plot_channel_two(self):\n if self.plot_channel_key_booleans[1]:\n self.plot_channel_key_booleans[1] = False\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[1] = True\n self.parent_widget.graph_channel_two_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[1]))", "def gridDisplay(self):\n\n if self.griddButton.isCheckable():\n self.photo_grid.setVisible(False)\n self.griddButton.setCheckable(False)\n self.griddButton.setDown(False)\n self.statustext.setText(\"Hide Grid\")\n else:\n self.griddButton.setCheckable(True)\n self.photo_grid.setVisible(True)\n 
self.griddButton.setDown(True)\n self.statustext.setText(\"Display Grid - Rule of thirds\")", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def setVisible(*args):", "def print_backward(self):\n print(\"[\", end=\" \")\n if self.__head is not None:\n self.__head.print_backward()\n print(\"]\")", "def show_bottom_status(self):\n editor = self.app.get_editor()\n size = self.size()\n cur = editor.cursor()\n data = \"@ \"+str(cur[0])+\",\"+str(cur[1])+\" \"+\\\n \"cur:\"+str(len(editor.cursors))+\" \"+\\\n \"buf:\"+str(len(editor.buffer))\n if self.app.config[\"display\"][\"show_last_key\"]:\n data += \" key:\"+str(self.app.last_input)\n #if self.app.config[\"display\"][\"show_term_size\"]:\n # data += \" [\"+str(size[0])+\"x\"+str(size[1])+\"]\"\n if self.app.config[\"app\"][\"debug\"]:\n data += \" cs:\"+str(editor.current_state)+\" hist:\"+str(len(editor.history)) # Undo / Redo debug\n #if editor.last_find:\n # find = editor.last_find\n # if len(find) > 10:find = find[:10]+\"...\"\n # data = \"find:'\"+find+\"' \" + data\n\n # Add module statuses to the status bar\n for name in self.app.modules.modules.keys():\n module = self.app.modules.modules[name]\n if module.options[\"status\"] == \"bottom\":\n data += \" \" + module.get_status();\n\n self.status_win.clear()\n status = self.app.get_status()\n extra = size[0] - len(status+data) - 1\n line = status+(\" \"*extra)+data\n\n if len(line) >= size[0]:\n line = line[:size[0]-1]\n\n self.status_win.addstr(0,0, line, curses.color_pair(0) | curses.A_REVERSE)\n self.status_win.refresh()", "def test_config(self):\n\n p = SyncProto(packet_port, None)\n\n d = make_axes(500, .1, usteps=16, steps_per_rotation=200)\n p.config(4, 18, 32, False, False, axes=d['axes1']);\n p.info()\n\n d = make_axes(1000, .2, usteps=16, steps_per_rotation=200,\n output_mode=OutMode.OUTPUT_OPENDRAIN, highval=OutVal.LOW)\n p.config(4, 7, 9, False, False, axes=d['axes1']);\n p.info()", "def expandColorBarScaling(direction='none'):\n ddict = {'none':'NONE','down':'FIRST','both':'BOTH'}\n dislin.expzlb(ddict[direction])", "def setDisplayMode(self, mode):\n return \"Wireframe\"", "def fullscreen(self):\n self.port_edit.setVisible(False)\n self.ip_edit.setVisible(False)\n self.connect_btn.setVisible(False)\n self.setup_btn.setVisible(False)\n self.play_btn.setVisible(False)\n self.pause_btn.setVisible(False)\n self.teardown_btn.setVisible(False)\n self.fullscreen_btn.setVisible(False)\n self.video_slider.setVisible(False)\n self.rtp_port_edit.setVisible(False)\n self.rtp_label.setVisible(False)\n self.rtcp_port_edit.setVisible(False)\n self.rtcp_port_label.setVisible(False)\n self.movie_name_edit.setVisible(False)\n self.movie_name_label.setVisible(False)\n self.rate_select.setVisible(False)\n self.time_label.setVisible(False)\n self.low_level_video.setVisible(False)\n self.high_level_video.setVisible(False)\n self.video_list.setVisible(False)\n self.video_label.setGeometry(0, 0, self.screen_width, self.screen_height)\n self.showFullScreen()", "def get_channels(self):\n return [self.afos, \"%s...\" % (self.afos[:3], )]", "def bottom_visible(self) -> bool:\n return self.last_visible_line() == self.content_height - 1", "def show_board(self):\n for i in range(self.num_rows):\n print(' ----'*8)\n s = \"\"\n for j in range(self.num_cols):\n s += '| {} 
'.format(self._show_piece(i, j))\n print(\"{}|\".format(s))\n print(' ----'*8)", "def reshape(self, bottom, top):\r\n pass", "def TopSnappable(self, b=True):\r\n \r\n return self.SetFlag(self.optionTopSnapped, b)", "def set_invert_display(enable):\n if enable:\n send_command(0xA7)\n else:\n send_command(0xA6)", "def reshape(self, bottom, top):\n\t\tpass", "def set_view_options(self):\n active_panel = self.get_active_panel()\n # turn all show/hide display options off except for polygons and\n # surfaces\n pm.modelEditor(active_panel, e=1, allObjects=False)\n pm.modelEditor(active_panel, e=1, manipulators=False)\n pm.modelEditor(active_panel, e=1, grid=False)\n\n pm.modelEditor(active_panel, e=1, polymeshes=True)\n pm.modelEditor(active_panel, e=1, nurbsSurfaces=True)\n pm.modelEditor(active_panel, e=1, subdivSurfaces=True)\n pm.modelEditor(active_panel, e=1,\n pluginObjects=('gpuCacheDisplayFilter', True))\n pm.modelEditor(active_panel, e=1, planes=True)\n\n # turn all hud displays off\n hud_flags = pm.headsUpDisplay(lh=1)\n for flag in hud_flags:\n pm.headsUpDisplay(flag, e=1, vis=0)\n\n # set camera options for playblast\n for camera in pm.ls(type='camera'):\n camera.setAttr('overscan', 1)\n camera.setAttr('filmFit', 1)\n camera.setAttr('displayFilmGate', 1)\n camera.setAttr('displayResolution', 0)", "def autostop():", "def vis2TopDown(points):\n if points is None or points.size == 0:\n return None\n\n i = np.array(points)\n i = i.astype(float) - VIS_RADIUS\n i[:, 0] = np.negative(i[:, 0]) # invert y axis\n return np.array([i[:, 1], i[:, 0]])", "def __init__(self):\r\n #set up pannel in centre of screen, just above the bottom of the screen.\r\n super(Pannel, self).__init__(image = Pannel.pannel,\r\n x = games.screen.width/2,\r\n y = games.screen.height -11)", "def _set_view_slice(self):\n nd = self.dims.not_displayed\n\n if self.multichannel:\n # if multichannel need to keep the final axis fixed during the\n # transpose. 
The index of the final axis depends on how many\n # axes are displayed.\n order = self.dims.displayed_order + (self.dims.ndisplay,)\n else:\n order = self.dims.displayed_order\n\n # Slice thumbnail\n indices = np.array(self.dims.indices)\n downsampled = indices[nd] / self.level_downsamples[-1, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[-1, nd] - 1)\n indices[nd] = downsampled\n\n image = np.asarray(self.data[-1][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_thumbnail = np.clip(image, 0, 1)\n else:\n self._data_thumbnail = image\n\n # Slice currently viewed level\n indices = np.array(self.dims.indices)\n level = self.data_level\n downsampled = indices[nd] / self.level_downsamples[level, nd]\n downsampled = np.round(downsampled.astype(float)).astype(int)\n downsampled = np.clip(downsampled, 0, self.level_shapes[level, nd] - 1)\n indices[nd] = downsampled\n\n disp_shape = self.level_shapes[level, self.dims.displayed]\n scale = np.ones(self.ndim)\n for d in self.dims.displayed:\n scale[d] = self.level_downsamples[self.data_level][d]\n self._scale = scale\n self.events.scale()\n\n if np.any(disp_shape > self._max_tile_shape):\n for d in self.dims.displayed:\n indices[d] = slice(\n self._top_left[d],\n self._top_left[d] + self._max_tile_shape,\n 1,\n )\n self.translate = self._top_left * self.scale\n else:\n self.translate = [0] * self.ndim\n\n image = np.asarray(self.data[level][tuple(indices)]).transpose(order)\n\n if self.multichannel and image.dtype.kind == 'f':\n self._data_view = np.clip(image, 0, 1)\n else:\n self._data_view = image\n\n self._update_thumbnail()\n self._update_coordinates()\n self.events.set_data()", "def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()", "def show_channels(chmaps, n_cols=8, normalize=None, ofpath=None):\n n_rows = (chmaps.shape[0] - 1) // n_cols + 1\n\n if n_rows == 1:\n n_cols = chmaps.shape[0]\n\n if normalize is None:\n vmin, vmax = None, None\n else:\n vmin, vmax = normalize\n\n fig = plt.figure()\n\n grid = AxesGrid(fig, 111,\n nrows_ncols=(n_rows, n_cols),\n axes_pad=0.0,\n share_all=True)\n\n for i, chmap in enumerate(chmaps):\n grid[i].imshow(chmap, vmin=vmin, vmax=vmax)\n\n grid.axes_llc.get_xaxis().set_ticks([])\n grid.axes_llc.get_yaxis().set_ticks([])\n\n if ofpath is None:\n plt.get_current_fig_manager().window.showMaximized()\n plt.show()\n else:\n fig.savefig(ofpath)\n plt.close(fig)", "def show_grid(self):\n for ax in (self.time_velocity, self.time_power, self.power_velocity):\n ax.grid(True)", "def IsVertical(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT, AUI_DOCK_CENTER]", "def show_top_status(self):\n self.header_win.clear()\n size = self.size()\n display = self.app.config[\"display\"]\n head_parts = []\n if display[\"show_app_name\"]:\n head_parts.append(\"Suplemon Editor v\"+self.app.version)\n if display[\"show_clock\"]:\n head_parts.append(curr_time())\n if display[\"show_file_list\"]:\n head_parts.append(self.file_list_str())\n\n # Add module statuses to the status bar\n for name in self.app.modules.modules.keys():\n module = self.app.modules.modules[name]\n if module.options[\"status\"] == \"top\":\n head_parts.append(module.get_status());\n\n head = \" - \".join(head_parts)\n head = head + ( \" \" * (self.screen.getmaxyx()[1]-len(head)-1) )\n if len(head) >= size[0]:\n head = 
head[:size[0]-1]\n self.header_win.addstr(0,0, head, curses.color_pair(0) | curses.A_REVERSE)\n self.header_win.refresh()", "def IsVertical(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT]", "def show(black, white):\n for x in X:\n for y in Y:\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif black & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif white & gobit[(x, y)]:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def switch_frequency_plot_channel_eight(self):\n if self.plot_channel_key_booleans[7]:\n self.plot_channel_key_booleans[7] = False\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n else:\n self.plot_channel_key_booleans[7] = True\n self.parent_widget.graph_channel_eight_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[7]))", "def set_zlim(self, bottom=None, top=None):\n if isinstance(self._frame, root.TH1F):\n warnings.warn(\"Attempting to set z-axis limits for 2D axes\")\n return\n\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_zlim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} z-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logz:\n warnings.warn(\n \"Attempting to set non-positive top zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_zlim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_zlim()[0]\n\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self", "def num_channels(self):\n return 3", "def display(self):\n for r in range(1, self.size+1):\n print(\"+\" + (\"-+\"*self.size))\n print(\"|\", end=\"\")\n for c in range(1, self.size+1):\n print(self.gameState[r,c], end=\"\")\n print(\"|\",end=\"\")\n print()\n print(\"+\" + (\"-+\"*self.size))", "def display(board):\n for i in range(height-1, -1, -1):\n print(' '.join(['O' if at(board, i * width + j) else '-' for j in range(1, width+1)]))\n print(\"\")", "def showDisplay(self, type=\"DEFAULT\"):\n gd = mamba.getDisplayer() # <- trick to ensure the root windows is created and hidden\n if type==\"DEFAULT\":\n # First if there is any display already opened it is showed\n no_display = True\n if self._displayUsr:\n self._displayUsr.show()\n no_display = False\n if self._displayVtk:\n self._displayVtk.show()\n no_display = False\n if self._displayPjt:\n self._displayPjt.show()\n no_display = False\n \n if no_display:\n # If no display is yet open we create one\n # preferentially using user defines display\n # or if not VTK\n if self._displayerUsr:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), 
self.name)\n self._displayUsr.updateim()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()\n \n elif type==\"USER\":\n if self._displayerUsr:\n if self._displayUsr:\n self._displayUsr.show()\n else:\n self._displayUsr = self._displayerUsr(self.name)\n if self._displayUsr:\n self._displayUsr.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayUsr.updateim()\n \n elif type==\"PROJECTION\":\n if self._displayerPjt:\n if self._displayPjt:\n self._displayPjt.show()\n else:\n self._displayPjt = self._displayerPjt(self.name)\n if self._displayPjt:\n self._displayPjt.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayPjt.updateim()\n \n elif type==\"VTK\":\n if self._displayerVtk:\n if self._displayVtk:\n self._displayVtk.show()\n else:\n self._displayVtk = self._displayerVtk(self.name)\n if self._displayVtk:\n self._displayVtk.connect(list(map(lambda im: im.mbIm, self.seq)), self.name)\n self._displayVtk.updateim()", "def showRightClickMenu(self,pos):\n\t\tprint('bStackWidget.showRightClickMenu()')\n\t\tmenu = QtWidgets.QMenu()\n\t\t#self.menu = QtWidgets.QMenu()\n\n\t\tnumChannels = self.mySimpleStack.numChannels # number of channels in stack\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\t#actions = ['Channel 1', 'Channel 2', 'Channel 3', 'RGB', 'Channel 1 Mask', 'Channel 2 Mask', 'Channel 3 Mask']\n\t\tprint(' showRightClickMenu() numChannels:', numChannels, 'maxNumChannels:', maxNumChannels)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\t# abb oct 2020, maybe put these back in\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber}')\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(chanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == chanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Mask')\n\t\t\tactualChanNumber = maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} Skel')\n\t\t\tactualChanNumber = 2 * maxNumChannels + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\t# abb oct 2020, maybe put this back in ???\n\t\t'''\n\t\tif numChannels>1:\n\t\t\tactionsList.append('RGB')\n\t\t\tisEnabledList.append(True)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == 'rgb' # lower case !!!\n\t\t\tisCheckedList.append(isChecked)\n\t\t'''\n\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked 
= isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\t#\n\t\t# do again for edt\n\t\tedtIdx = 3 # (raw==0, mask==1, skel==2, edt==3)\n\t\tactionsList = []\n\t\tisEnabledList = []\n\t\tisCheckedList = []\n\t\tfor i in range(numChannels):\n\t\t\tchanNumber = i + 1\n\t\t\tactionsList.append(f'Channel {chanNumber} EDT')\n\t\t\tactualChanNumber = (maxNumChannels * edtIdx) + i + 1\n\t\t\tisEnabled = self.mySimpleStack.hasChannelLoaded(actualChanNumber)\n\t\t\tprint(' edt actualChanNumber:', actualChanNumber, 'isEnabled:', isEnabled)\n\t\t\tisEnabledList.append(isEnabled)\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == actualChanNumber\n\t\t\tisCheckedList.append(isChecked)\n\t\tfor i, actionStr in enumerate(actionsList):\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisEnabled = isEnabledList[i]\n\t\t\tisChecked = self.getStackView().displayStateDict['displayThisStack'] == i+1\n\t\t\tisChecked = isCheckedList[i]\n\n\t\t\tcurrentAction.setEnabled(isEnabled)\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\t# add to menu\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\t#\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# view\n\t\t# abb oct 2020, maybe put these back in ???\n\t\t#actions = ['Image', 'Sliding Z', 'Nodes', 'Edges']\n\t\tactions = ['Image']\n\t\tfor actionStr in actions:\n\t\t\t# make an action\n\t\t\tcurrentAction = QtWidgets.QAction(actionStr, self, checkable=True)\n\t\t\t# decide if it is checked\n\t\t\tisChecked = False\n\t\t\tif actionStr == 'Image':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showImage']\n\t\t\telif actionStr == 'Sliding Z':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\telif actionStr == 'Nodes':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showNodes']\n\t\t\telif actionStr == 'Edges':\n\t\t\t\tisChecked = self.getStackView().displayStateDict['showEdges']\n\t\t\tcurrentAction.setChecked(isChecked)\n\t\t\tcurrentAction.triggered.connect(self.actionHandler)\n\t\t\t# add to menu\n\t\t\t#menuAction = self.menu.addAction(currentAction)\n\t\t\tmenuAction = menu.addAction(currentAction)\n\n\t\tmenu.addSeparator()\n\n\t\t#\n\t\t# panels\n\n\t\t'''\n\t\tannotationsAction = QtWidgets.QAction('Left Toolbar', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLeftToolbar'])\n\t\t#annotationsAction.setShortcuts('[')\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# nodes\n\t\tannotationsAction = QtWidgets.QAction('Node List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showNodeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# edges\n\t\tannotationsAction = QtWidgets.QAction('Edge List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showEdgeList'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# search\n\t\tannotationsAction = QtWidgets.QAction('Search List', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showSearch'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# annotations\n\t\tannotationsAction = QtWidgets.QAction('Annotation List', self, 
checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showAnnotations'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# contrast\n\t\tcontrastAction = QtWidgets.QAction('Contrast Panel', self, checkable=True)\n\t\tcontrastAction.setChecked(self.options['Panels']['showContrast'])\n\t\ttmpMenuAction = menu.addAction(contrastAction)\n\t\t'''\n\n\t\t'''\n\t\t# status toolbar\n\t\tannotationsAction = QtWidgets.QAction('Status Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showStatus'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t'''\n\t\t# line profile toolbar\n\t\tannotationsAction = QtWidgets.QAction('Line Profile Panel', self, checkable=True)\n\t\tannotationsAction.setChecked(self.options['Panels']['showLineProfile'])\n\t\ttmpMenuAction = menu.addAction(annotationsAction)\n\t\t'''\n\n\t\t# napari\n\t\tmenu.addSeparator()\n\t\tnapariAction = QtWidgets.QAction('Napari', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(napariAction)\n\n\t\tmenu.addSeparator()\n\t\t# make square\n\t\tmakeSquareAction = QtWidgets.QAction('Square', self, checkable=True)\n\t\tmakeSquareAction.setChecked(False)\n\t\ttmpMenuAction = menu.addAction(makeSquareAction)\n\n\t\tmenu.addSeparator()\n\n\t\t# save image\n\t\tsaveImageAction = QtWidgets.QAction('Save Image', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveImageAction)\n\n\t\t# save movie\n\t\tsaveMovieAction = QtWidgets.QAction('Save Movie', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(saveMovieAction)\n\n\t\t# options\n\t\t'''\n\t\tmenu.addSeparator()\n\t\toptionsAction = QtWidgets.QAction('Options', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(optionsAction)\n\t\t'''\n\n\t\t# refresh tracing\n\t\tmenu.addSeparator()\n\t\trefeshAction = QtWidgets.QAction('Refresh', self, checkable=False)\n\t\ttmpMenuAction = menu.addAction(refeshAction)\n\n\t\t#\n\t\t# edits\n\t\tself.addEditMenu(menu)\n\n\t\t#\n\t\t# get the action selection from user\n\n\t\tprint('=== bStackWidget.showRightClickMenu()')\n\t\t# was this\n\t\tuserAction = menu.exec_(self.mapToGlobal(pos))\n\t\t# now this\n\t\t'''\n\t\tself.menu.move(self.mapToGlobal(pos))\n\t\tself.menu.show()\n\t\t'''\n\n\t\t#userAction = None\n\t\tif userAction is None:\n\t\t\t# abort when no menu selected\n\t\t\treturn\n\t\tuserActionStr = userAction.text()\n\t\tprint(' showRightClickMenu() userActionStr:', userActionStr)\n\t\tsignalName = 'bSignal ' + userActionStr\n\t\tuserSelectedMenu = True\n\n\t\tdoStackRefresh = False\n\n\t\t# image\n\t\tmaxNumChannels = self.mySimpleStack.maxNumChannels\n\t\tif userActionStr == 'Channel 1':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 1\n\t\t\t#doStackRefresh = True\n\t\t\tself.optionsChange('Panels', 'displayThisStack', value=1, doEmit=True)\n\t\t\t#self.getStackView().displayStateChange('displayThisStack', value=1)\n\t\telif userActionStr == 'Channel 2':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=2)\n\t\telif userActionStr == 'Channel 3':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 3\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=3)\n\n\t\telif userActionStr == 'Channel 1 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4\n\t\t\t#doStackRefresh = 
True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4)\n\t\telif userActionStr == 'Channel 2 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+1)\n\t\telif userActionStr == 'Channel 3 Mask':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 4+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=4+2)\n\n\t\telif userActionStr == 'Channel 1 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7)\n\t\telif userActionStr == 'Channel 2 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+1\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+1)\n\t\telif userActionStr == 'Channel 3 Skel':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 7+2\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=7+2)\n\n\t\t# EDT\n\t\telif userActionStr == 'Channel 1 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10)\n\t\telif userActionStr == 'Channel 2 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+1)\n\t\telif userActionStr == 'Channel 3 EDT':\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value=10+2)\n\n\n\t\telif userActionStr == 'RGB':\n\t\t\t#self.getStackView().displayStateDict['displayThisStack'] = 'rgb'\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displayThisStack', value='rgb')\n\n\t\t#\n\t\t# view of tracing\n\t\telif userActionStr == 'Image':\n\t\t\tself.getStackView().displayStateChange('showImage', toggle=True)\n\t\t\tdoStackRefresh = True\n\t\t\t#self.displayStateDict['showImage'] = not self.displayStateDict['showImage']\n\t\telif userActionStr == 'Sliding Z':\n\t\t\t#self.getStackView().displayStateDict['displaySlidingZ'] = not self.getStackView().displayStateDict['displaySlidingZ']\n\t\t\t#doStackRefresh = True\n\t\t\tself.getStackView().displayStateChange('displaySlidingZ', toggle=True)\n\t\telif userActionStr == 'Nodes':\n\t\t\t#optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\tself.getStackView().displayStateDict['showNodes'] = not self.getStackView().displayStateDict['showNodes']\n\t\t\tdoStackRefresh = True\n\t\telif userActionStr == 'Edges':\n\t\t\tself.getStackView().displayStateDict['showEdges'] = not self.getStackView().displayStateDict['showEdges']\n\t\t\tdoStackRefresh = True\n\n\t\t#\n\t\t# toolbars\n\t\telif userActionStr == 'Left Toolbar':\n\t\t\tself.optionsChange('Panels', 'showLeftToolbar', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLeftToolbar'] = not self.options['Panels']['showLeftToolbar']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Contrast Panel':\n\t\t\tself.optionsChange('Panels', 'showContrast', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showContrast'] = not self.options['Panels']['showContrast']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Node List':\n\t\t\tself.optionsChange('Panels', 'showNodeList', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showNodeList'] = not 
self.options['Panels']['showNodeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Edge List':\n\t\t\tself.optionsChange('Panels', 'showEdgeList', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showEdgeList'] = not self.options['Panels']['showEdgeList']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Search List':\n\t\t\tself.optionsChange('Panels', 'showSearch', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Annotation List':\n\t\t\tself.optionsChange('Panels', 'showAnnotations', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showSearch'] = not self.options['Panels']['showSearch']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Status Panel':\n\t\t\tself.optionsChange('Panels', 'showStatus', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showStatus'] = not self.options['Panels']['showStatus']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Line Profile Panel':\n\t\t\tself.optionsChange('Panels', 'showLineProfile', toggle=True, doEmit=True)\n\t\t\t#self.options['Panels']['showLineProfile'] = not self.options['Panels']['showLineProfile']\n\t\t\t#self.mainWindow.updateDisplayedWidgets()\n\t\telif userActionStr == 'Caiman':\n\t\t\tself.optionsChange('Panels', 'showCaiman', toggle=True, doEmit=True)\n\n\t\t# other\n\t\telif userActionStr == 'Options':\n\t\t\toptionsDialog = bimpy.interface.bOptionsDialog(self, self)\n\t\telif userActionStr == 'Napari':\n\t\t\tself.openNapari()\n\t\telif userActionStr == 'Square':\n\t\t\tself.myStackView2.toggleMakeSquare()\n\t\t\t#self.resizeEvent(QtGui.QResizeEvent(self.size(), QtCore.QSize()))\n\t\t\t#self.repaint()\n\t\telif userActionStr == 'Save Image':\n\t\t\tself.saveImage()\n\t\telif userActionStr == 'Save Movie':\n\t\t\tself.saveMovie()\n\t\telif userActionStr == 'Refresh':\n\t\t\tself.getStackView()._preComputeAllMasks()\n\n\t\telse:\n\t\t\tprint(' showRightClickMenu() -->> no action taken for userActionStr:', userActionStr)\n\t\t\tuserSelectedMenu = False\n\n\t\t# emit a signal\n\t\t# todo: this is emitting when self.getStackView().displayStateDict is not changing, e.g. for user action 'Contrast' and 'Annotations'\n\t\t'''\n\t\tif userSelectedMenu:\n\t\t\tself.setSlice() # update\n\t\t\tself.displayStateChangeSignal.emit(signalName, self.getStackView().displayStateDict)\n\t\t'''\n\n\t\tif doStackRefresh:\n\t\t\tself.getStackView().setSlice()\n\n\t\t#return False\n\t\t#print('right click menu return')\n\t\treturn", "def IsBottomSnappable(self):\r\n \r\n return self.HasFlag(self.optionBottomSnapped)", "def __init__(self, bottom, top, current):\n self.bottom = bottom\n self.top = top\n self.current = current", "def plot_frame(ax=None, left=None, right=None, top=None, bottom=None):\n ax = to_axis(ax)\n if top is not None:\n ax.spines['top'].set_visible(bool(top))\n if right is not None:\n ax.spines['right'].set_visible(bool(right))\n if bottom is not None:\n ax.spines['bottom'].set_visible(bool(bottom))\n if left is not None:\n ax.spines['left'].set_visible(bool(left))\n return ax", "def pre_filter_channels(self, channels=None): # pragma: no cover\n pass" ]
[ "0.5528585", "0.5517574", "0.54150355", "0.53741115", "0.5310534", "0.5296304", "0.52636003", "0.52526826", "0.5098848", "0.5056651", "0.505137", "0.50510633", "0.5041843", "0.4975196", "0.4949792", "0.4928718", "0.48774537", "0.48504677", "0.48374486", "0.48222828", "0.48000655", "0.47949645", "0.47855955", "0.47814593", "0.4754666", "0.47200808", "0.47015485", "0.4700792", "0.46734872", "0.46658313", "0.4664154", "0.46632004", "0.46627802", "0.46620393", "0.46586886", "0.46510494", "0.46432257", "0.46392757", "0.4631241", "0.45865038", "0.4583585", "0.45790976", "0.4569837", "0.45696512", "0.4566794", "0.45614165", "0.45517477", "0.45458955", "0.45422882", "0.45411006", "0.4528256", "0.45267436", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.45139113", "0.4505052", "0.44999135", "0.4499777", "0.44915116", "0.44832206", "0.44791463", "0.44697174", "0.4461593", "0.44534415", "0.4449172", "0.4445417", "0.4442232", "0.4438162", "0.44370505", "0.44365513", "0.4432809", "0.44164398", "0.44050935", "0.4404586", "0.44017196", "0.439674", "0.4395919", "0.43956625", "0.43941012", "0.43891814", "0.4385636", "0.4385016", "0.43841016", "0.43841016", "0.43835834", "0.43824208", "0.43794784", "0.43762496", "0.43721768", "0.43720725", "0.4368912", "0.43686643", "0.43647403" ]
0.0
-1
Scaling of the colormap vrange.
def scaling(self): return self._scaling
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_cmap(self):\n vmax, vmin = np.max(self.values), np.min(self.values)\n self.midpoint = 1 - vmax/(vmax + abs(vmin))\n if self.midpoint > 0.5:\n self.start, self.stop = 0, 0.5 + (1-self.midpoint)\n else:\n self.start, self.stop = 0.5 - self.midpoint, 1", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def scale(self, vmin=0.0, vmax=1.0, max_labels=10):\n return StepColormap(\n self.colors,\n index=[\n vmin + (vmax - vmin) * (x - self.vmin) * 1.0 / (self.vmax - self.vmin)\n for x in self.index\n ], # noqa\n vmin=vmin,\n vmax=vmax,\n caption=self.caption,\n max_labels=max_labels,\n )", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def autoscale(self, A):\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)", "def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def scale(self):\n return self._gev_bijector.scale", "def scale(self, vmin=0.0, vmax=1.0, max_labels=10):\n return LinearColormap(\n self.colors,\n index=[\n vmin + (vmax - vmin) * (x - self.vmin) * 1.0 / (self.vmax - self.vmin)\n for x in self.index\n ], # noqa\n vmin=vmin,\n vmax=vmax,\n caption=self.caption,\n max_labels=max_labels,\n )", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):", "def select_range_and_scale(data, color_min, color_max, fac=1.0):\n scaled = ((data - color_min) * 1.0 / (color_max - color_min))\n scaled[scaled < 0.0] = 0.0\n scaled[scaled > 1.0] = 1.0\n if fac != 0.0:\n scaled = scaled * fac\n return scaled", "def scale_servos(self, value, minrange=500, maxrange=2500):\n min_servo_range = -1\n max_servo_range = 1\n return min_servo_range + (max_servo_range - min_servo_range) / (maxrange - minrange) * (value - minrange)", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def setSurfaceColorScale(low,high):\n dislin.zscale(low,high)", "def colorscale(self):\n return self['colorscale']", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def colorscale(self):\n return self[\"colorscale\"]", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def scale_it(val):\n 
return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def rescale(self, xmin, xmax):\n\n # Normalise\n self.normalise()\n\n \n # Rescale\n range = xmax-xmin\n for seg in self.segments:\n seg.lower_bound = seg.lower_bound*range + xmin\n seg.upper_bound = seg.upper_bound*range + xmin", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def set_lim(values, scale):\n\n v_min, v_max = min(values), max(values)\n margin = (v_max - v_min) * scale\n v_min, v_max = v_min - margin, v_max + margin\n\n return v_min, v_max", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def box_scale(k, m, s_min=0.1, s_max=0.9):\n\n # equation 4 from paper\n return s_min + (s_max - s_min) * (k - 1) / (m - 1)", "def minmax_scale(X, feature_range=..., *, axis=..., copy=...):\n ...", "def scale(self):\n return self.distribution.scale", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def __scale_constraint(c, v):\n if c.equality:\n c.set_value((c.lower * v, c.body * v))\n else:\n c.set_value(\n (__none_left_mult(c.lower, v), c.body * v, __none_left_mult(c.upper, v))\n )", "def _rescale(x, xlim, ylim):\n m = (ylim[1] - ylim[0]) / (xlim[1] - xlim[0])\n c = ylim[1] - m * xlim[1]\n y = m * x + c\n return y", "def scaling(heatmap):\n N = len(heatmap)\n inds = range(1, int(0.9 * N))\n values = [np.mean(np.diagonal(heatmap, i)) for i in inds]\n return [10 * i for i in inds], values", "def scaling(self):\n return self.__scaling", "def rolloff_scale(self):\n return self._rolloffscale", "def __init__(self, vmin, vmax, cmap=\"Blues\", kind=\"hex\"):\n assert vmin < vmax, \"vmin must be smaller than vmax\"\n assert kind.lower() in [\"hex\", \"rgba\"], 'kind must be one of [\"hex\", \"rgba\"]'\n self.vmin = vmin\n self.vmax = vmax\n self.cmap = cmap\n self.kind = kind\n\n # Normalize range of vals\n norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)\n self.mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)\n # def create_continuous_colormapper(vmin, vmax, cmap=\"Greys_r\"):", "def scale(self):\n return self._scale", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def scale(self, s):\n for n in range(len(self.mV)):\n self.mV[n] *= s\n return self", "def rescale(num, old_min, old_max, new_min, new_max):\n old_range = old_max - old_min\n new_range = new_max - new_min\n new_val = new_min + (((num - old_min) * new_range)/old_range)\n\n return new_val", "def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x", 
"def scale(self,s):\n return Vector(self.x * s, self.y * s, self.z * s)", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def scale_vector(vector, scale):\n return vector[0] * scale, vector[1] * scale, vector[2] * scale", "def autoAxisScaling(vlist, axes='XYZ'):\n vlist = list(vlist[:])\n l,h = min(vlist), max(vlist)\n low = l - (h-l)/50.0\n high = h + (h-l)/50.0\n vlist.extend([low,high])\n dislin.setscl(vlist, len(vlist), axes)", "def setScaling(factor=1.0):\n dislin.sclfac(factor)", "def scale_range(x, input_range, target_range):\n\n range = [np.amin(x), np.amax(x)]\n x_std = (x - input_range[0]) / (1.0*(input_range[1] - input_range[0]))\n x_scaled = x_std * (1.0*(target_range[1] - target_range[0])) + target_range[0]\n return x_scaled, range", "def scale_uniform(self, s: float):\n self.vertices = [v * s for v in self.vertices]\n return self", "def scale(self, from_min, from_max, to_min, to_max):\n for i in range(len(self.poses)):\n self.poses[i].position.scale(from_min[:3], from_max[:3], to_min[:3], to_max[:3])\n self.wrenches[i].scale(from_min[3:], from_max[3:], to_min[3:], to_max[3:])", "def scale(self):\n return self._a", "def scale_value(value, ip_range, domain=(0,1)):\n x1, x2 = domain\n y1, y2 = ip_range\n\n assert(y1 <= value <= y2)\n\n m = (x2 - x1)/(y2 - y1)\n b = y1 - m * x1\n return m * value - b", "def scale(self, up):\n s = 1.1 if up else 0.9\n self.scaling_matrix = np.dot(\n self.scaling_matrix,\n F.scaling([s, s, s])\n )\n\n self.aabb.scale(s)", "def scale(x_range=1, y_range=1):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def scale_invert(self):", "def scale(x, minimum, maximum):\n return (x - minimum) / (maximum - minimum)", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def plot_edisp_scale_map(self,vmin=0.5, vmax=1.5):\n\n scale_map = self.get_edisp_scale_map()\n \n pyplot.title(\"Energy dispersion scale plot\")\n pyplot.semilogx()\n pyplot.semilogy()\n\n pyplot.xlabel('MIGRA')\n pyplot.ylabel('MIGRA scaled')\n\n pyplot.step(self._edisp['M'],self._edisp['M'], color='C7', linestyle='--', linewidth=2, where='mid', label='Before')\n pyplot.step(self._edisp['M'],self._edisp['M_new'], color='midnightblue', linewidth=2, where='mid', label='After')\n\n pyplot.legend(loc=4 , frameon=False)", "def rescaled(M,newmin,newmax):\n mmin,mmax = M.min(),M.max()\n M2 = M.copy()\n M2 -= mmin\n M2 *= (newmax-newmin) / (mmax-mmin)\n M2 += newmin\n return M2", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n 
value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)", "def _getColormapRange(self):\n item = self.item()\n if item is not None and self._colormap is not None:\n return self._colormap.getColormapRange(item)\n else:\n return 1, 100 # Fallback", "def rescale(range1, range2):\n min1, max1, min2, max2 = min(range1), max(range1), min(range2), max(range2)\n def resize(value):\n return (((value - min1) * (max2 - min2)) / (max1 - min1)) + min2\n return resize", "def scale_img(img, vmin=None, vmax=None, scaling='arcsinh'):\n\n\tepsilon = 1.e-3\n\t# setting vmin and vmax\n\tif vmin is None:\n\t\tvmin = np.min(img)\n\tif vmax is None:\t\t\n\t\tvmax = np.max(img)\n\n\t# scaling\n\tif scaling == 'linear':\n\t\tpass\n\telif scaling == 'arcsinh':\n\t\timg = np.arcsinh(img)\n\t\tvmin = np.arcsinh(vmin)\n\t\tvmax = np.arcsinh(vmax)\n\telse:\n\t\traise ValueError(\"[visualtools] scaling not recognized\")\n\n\t# clipping\n\timage_scaled = np.clip((img-vmin) / (vmax-vmin), 0., 1.- epsilon).astype('f2')\n\timage_scaled_flip = image_scaled[::-1, :]\n\n\treturn image_scaled_flip", "def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)", "def scaleValues(values):\n\n values = values - values.min()\n return values/values.max()", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def create_scale(minfreq, maxfreq, f0, fs, NumVoices):\n a0 = 2**(1./NumVoices)\n minscale = 1.*f0/(maxfreq/(1.*fs))\n maxscale = 1.*f0/(minfreq/(1.*fs))\n minscale = numpy.floor(NumVoices*numpy.log2(minscale))\n maxscale = numpy.ceil(NumVoices*numpy.log2(maxscale))\n scales = a0**numpy.arange(minscale,maxscale+1)/(1.*fs)\n return scales", "def GetScale(self):\n ...", "def colourscale(plotdata):\n M = np.nanmax(plotdata)\n m = np.nanmin(plotdata)\n if M >= abs(m):\n ctrs1 = np.arange(-M, 0, .1*M)\n ctrs2 = np.arange(0.1*M, 1.09*M, .1*M)\n ctrs = np.concatenate((ctrs1, ctrs2))\n caxismin = -M\n caxismax = M\n else:\n m = -m\n ctrs1 = np.arange(-m, 0, .1*m)\n ctrs2 = np.arange(0.1*m, 1.09*m, .1*m)\n ctrs = np.concatenate((ctrs1, ctrs2))\n caxismin = -m\n caxismax = m\n # function will not work if there exist no positive max or negative min\n return caxismin, caxismax, ctrs", "def scaled_vcov(self):\n return (self.rss()[np.newaxis, np.newaxis, :]\n * 1. 
/ self._rdf * self._vcov[:, :, np.newaxis])", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3", "def scale_image(image, new_range):\n min_val = np.min(image).astype(np.float32)\n max_val = np.max(image).astype(np.float32)\n min_val_new = np.array(min(new_range), dtype=np.float32)\n max_val_new = np.array(max(new_range), dtype=np.float32)\n scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new\n return scaled_image.astype(np.uint8)", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def verticalScale(self):\n self.model.refreshScreen()", "def gauss_hermite_scale_limit(self):\n return self._gauss_hermite_scale_limit", "def __init__(self, colmaps, min_value, max_value):\n \n self.colmaps = colmaps\n self.anz_seg = len(self.colmaps)\n \n self.xmin = []\n self.xmax = []\n self.colmap = []\n \n # min_value being smaller than the smallest min value\n # of a segment is not allowed (same for max_value)\n if min_value < self.colmaps[0][0]:\n min_value = self.colmaps[0][0]\n \n if max_value > self.colmaps[self.anz_seg-1][1]:\n max_value = self.colmaps[self.anz_seg-1][1]\n \n # scale segment borders to interval [0,1]\n for i in xrange(self.anz_seg):\n x = colmaps[i][0]\n self.xmin.append((x-min_value)/(max_value-min_value))\n \n x = colmaps[i][1]\n self.xmax.append((x-min_value)/(max_value-min_value))\n \n self.colmap.append(colmaps[i][2])\n \n print self.xmin, self.xmax", "def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x", "def __mul__(self, scale):\n return Vec(self.x * scale, self.y * scale)", "def saturate_scalar_minmax(value, max_value, min_value):\n mean = (max_value + min_value)/2.0\n half_range = (max_value - min_value)/2.0\n return saturate_vector_dg(value-mean, half_range) + mean", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def pixel_scale(self):\n return np.abs(float(self.header[\"CDELT1\"]))", "def scaling(self):\n return self.stacked._box_scaling[1]", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def scaleClipl(x):\n x = 0 if x < 0 else x\n x = 1 if x > 1 else x\n return int(round(x*255.))", "def scale(self, smin=0.6, smax=2.0, N=20):\n scaled_geoms = []\n for s in np.linspace(smin, smax, N):\n scaled_atomlist = []\n for (Zi,[xi,yi,zi]) in self.atomlist0:\n scaled_atomlist.append( (Zi, (xi*s,yi*s,zi*s)) )\n scaled_geoms.append(scaled_atomlist)\n return scaled_geoms", "def scale(x, feature_range=(-1, 1)):\n \n # scale from 0-1 to feature_range\n min, max = feature_range\n #x = x * (max - min) + min\n #x = torch.add(torch.mul(x, (max-min)), min)\n x = x.mul(max-min).add_(min)\n return x", "def _filter_scaling(reduction_indices, start_cell_num):\n filter_scaling = 1.0\n for ind in reduction_indices:\n if ind < start_cell_num:\n filter_scaling *= 2.0\n return filter_scaling", "def getValues(self):\n return [self.scale_min, 
self.scale_max]", "def scale(c, scalar):\n return [c[0]*scalar, c[1]*scalar]", "def plot_psf_scale_map(self, vmin=-0.5, vmax=0.5):\n\n scale_map = self.get_psf_scale_map()\n\n pyplot.title(\"PSF $\\sigma_1$ scale map\")\n pyplot.semilogx()\n\n pyplot.xlabel('Energy, TeV')\n pyplot.ylabel('Off-center angle, deg')\n pyplot.pcolormesh(scale_map['E_edges'], scale_map['Theta_edges'], scale_map['sigma_1'].transpose(),\n cmap='seismic', vmin=vmin, vmax=vmax)\n pyplot.colorbar()", "def scale(c,v,p):\n scaleval = min([coeff.valuation(p) for coeff in c.coefficients()])\n if scaleval > 0:\n c = c/(p**scaleval)\n v = v - scaleval\n if v <= 0:\n flag = False\n else:\n flag = True\n return [flag,c,v]", "def rescale(tx):\n mins = np.amin(tx, axis=0)\n maxs = np.amax(tx, axis=0)\n txscale = (tx - mins) / (maxs - mins)\n return txscale", "def scale_between(minval, maxval, numStops):\n\n scale = []\n\n if numStops < 2:\n return [minval, maxval]\n elif maxval < minval:\n raise ValueError()\n else:\n domain = maxval - minval\n interval = float(domain) / float(numStops)\n for i in range(numStops):\n scale.append(round(minval + interval * i, 2))\n return scale", "def plate_scale(self):\n\n return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f)", "def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled", "def scale(s: (float, int), v: Vector) -> Vector:\n coords = list()\n res = Vector(coords)\n for i in range(len(v.coords)):\n res.coords[i] *= s\n return res" ]
[ "0.6941317", "0.6658408", "0.6627601", "0.6596878", "0.6527981", "0.6513227", "0.6493253", "0.6488291", "0.6472731", "0.6470762", "0.6453213", "0.6415893", "0.641051", "0.6392674", "0.6256625", "0.6209371", "0.6175291", "0.61744946", "0.6136316", "0.6118481", "0.61143214", "0.6085234", "0.6057745", "0.6045163", "0.60153556", "0.6012328", "0.6007984", "0.5982346", "0.59619623", "0.59578556", "0.5953181", "0.5934822", "0.5932159", "0.59253186", "0.58886194", "0.58870465", "0.5873029", "0.5831087", "0.58276176", "0.58267885", "0.5817727", "0.5810679", "0.58006614", "0.57996404", "0.57877254", "0.57816935", "0.5780197", "0.5778268", "0.5762371", "0.5752751", "0.5747385", "0.57434565", "0.57326126", "0.57275236", "0.5714739", "0.5707315", "0.5707123", "0.5706477", "0.5700354", "0.5695543", "0.5693857", "0.56914175", "0.5690192", "0.56900716", "0.56874824", "0.56761736", "0.56756926", "0.56736207", "0.56557584", "0.5633537", "0.5605718", "0.559838", "0.5590152", "0.55726516", "0.5569689", "0.5565299", "0.55593365", "0.5543845", "0.5521721", "0.552042", "0.55190223", "0.55129266", "0.5512648", "0.5512155", "0.55109394", "0.55057573", "0.549275", "0.548782", "0.5487634", "0.5475444", "0.5474621", "0.5465417", "0.5459275", "0.5458842", "0.54553646", "0.54409117", "0.54390264", "0.543836", "0.5436019" ]
0.5751985
51
Create reviews folder if it does not exist
def dirChecking(dir): if not os.path.exists(dir): os.mkdir(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. Creating dir.\".format(path))\n os.mkdir(path)", "def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "def create_folder(self):\n path = os.path.expanduser('~') + \"/.ip_enrich/\"\n # Does it exist already?\n if os.path.isdir(path):\n return True\n try:\n os.mkdir(path)\n return True\n except Exception as e:\n print (f\"Creation of the directory {path} failed\")\n print (f\"Error {e}\")\n return False", "def create_test_folder_if_does_not_exist(path):\n print('')\n if os.path.exists(path):\n print(' Skip creation of existing folder: {}'.format(path))\n else:\n print(' Create non-existing test folder: {}'.format(path))\n os.makedirs(path, mode=0o775)", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def verifrep( folder ):\n try:\n #print(\"verifrep check if directory: \" + folder + \" exists\")\n if not os.path.exists( folder ):\n print( \"verifrep Impossible to find the directory - trying to create the directory: \" + folder )\n os.makedirs( folder )\n except Exception, e:\n print( \"Exception while creating folder \" + folder )\n print( str( e ) )", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_directories(self, app_label):\n for folder_name in [\"views\", \"urls\", \"templates/%s\" % app_label]:\n directory_path = \"%s/%s\" % (app_label, folder_name)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def mkdir_if_notexists(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def 
create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_duplicates_directory(self) -> None:\n dups_path = os.path.join(self.get_directory(), \"duplicates\")\n if not self.directory_exists(dups_path): os.mkdir(dups_path)", "def mkdir(path):", "def mkdir(folder_name: str) -> None:\n if exist(folder_name):\n print(\"The folder is already exist\")\n return \n\n os.mkdir(folder_name)", "def _mkdir_p(path):\n if not osp.exists(path):\n os.makedirs(path)", "def mkdir_suppress_err(self, path):\r\n if not self.exists(path):\r\n return self.mkdir(path)", "def create_folders(self):\n\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def check_make(folder_check):\n if not os.path.isdir(folder_check):\n os.mkdir(folder_check)", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def check_folder(filepath):\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n return filepath", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def create_folder(path):\n if not exists(path):\n os.makedirs(path)", "def create_project_folder(self):\n\t\tif not os.path.exists(self.segment_path):\n\t\t\tfileutil.makedirs(self.segment_path)", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir_if_not_exist(path): #@save\n if not isinstance(path, str):\n path = os.path.join(*path)\n if not os.path.exists(path):\n os.makedirs(path)", "def create_cache_dir(self) -> None:\n try:\n os.makedirs(self.cache_folder)\n except FileExistsError:\n pass", "def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)", "def _check_path(path):\n os.system(\"if [ ! 
-d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')", "def mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def mkdir ():\n name = \"-\".join(parser_arguments().classes)\n if not os.path.exists(name):\n os.mkdir(name)\n print('The repository {} have been created'.format(parser_arguments().classes))\n else:\n print('The repository {} already exists.'.format(parser_arguments().classes))\n pass", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_app_folders(self):\n\t\tif not os.path.exists(self.TEMP_FOLDER):\n\t\t\tos.makedirs(self.TEMP_FOLDER)\n\t\tif not os.path.exists(self.SAVE_FOLDER):\n\t\t\tos.makedirs(self.SAVE_FOLDER)", "def createFolder(self):\n raise NotImplementedError", "def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def createBaseFolder(self):\n if not os.path.isdir(self.gdocs_folder):\n os.mkdir(self.gdocs_folder, 0755)", "def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)", "def folder_guard(folder_path):\n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def ensure_dirs(cls, folder_path):\n try:\n cls.mkdirs(folder_path)\n except exceptions.PlotlyRequestError as e:\n if \"already exists\" in e.message:\n pass\n else:\n raise e", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' 
% self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")", "def mkdir_if_missing(dirname):\n if not osp.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise", "def CreateFolderIfNotExisting(folder_path, communicator):\n if not os.path.isdir(folder_path) and communicator.MyPID() == 0:\n os.makedirs(folder_path)\n communicator.Barrier()", "def create_folder(location: str):\n try:\n os.mkdir(location)\n except FileExistsError:\n pass", "def mkdir(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def mkdir(self,pathname):\n if os.path.exists(pathname)==False:\t\t\n os.mkdir(pathname)", "def safeCreateDir(relPath):\n if not os.path.isdir(relPath):\n os.mkdir(relPath)", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def create_folder(folder_name):\n\n try:\n os.makedirs(folder_name)\n except FileExistsError:\n pass", "def maybe_makedirs(path_to_create):\n try: \n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def create_folder(self):\n cur_dir=os.getcwd()\n unique=False\n dirlist= [item for item in os.listdir(cur_dir) if os.path.isdir(os.path.join(cur_dir,item))]\n folder_name='taxonomy_{}_{}'.format(self.place,self.year)\n j=1\n while not unique:\n if folder_name in dirlist:\n folder_name='taxonomy_{}_{}({})'.format(self.place,self.year,str(j))\n j+=1\n else:\n unique=True\n new_folder=os.path.join(cur_dir,folder_name)\n os.mkdir(new_folder)\n os.chdir(new_folder)\n return folder_name", "def create_directory(resources_dir):\n for f in os.listdir(os.path.join(resources_dir, \"docs_for_ner\")):\n fpath = os.path.join(resources_dir, \"docs_for_ner\", f)\n if os.path.isfile(fpath):\n os.unlink(fpath)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_directory(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _create_target_directories(self):\n if os.path.exists(self.PREPROCESSED_DATA_OUT_DIR):\n if self._hparams.over_write:\n print_info(\"Deleting data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n shutil.rmtree(self.PREPROCESSED_DATA_OUT_DIR)\n print_info(\"Recreating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)\n else:\n print_info(\"Skipping preprocessing step, since the data might already be available\")\n else:\n print_info(\"Creating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)", "def createFolder(self):\n\n self.directory = \"D:\\\\CompositionHelper\"\n if not 
os.path.exists(self.directory):\n os.makedirs(self.directory)\n print ('Created new folder')", "def create_tree(file, rep):\n try:\n if file is not None:\n rep = rep + '/' + file[0:4] + '/' + file[4:6] + '/' + file[6:8]\n if not exists(rep):\n makedirs(rep)\n move(file, rep)\n else:\n if not exists(rep + '/' + file):\n move(file, rep)\n else:\n print('Already exists!')\n except OSError:\n print('Argh! I could not create the directory!')", "def mymkdir(*folders):\n for folder in folders:\n if not os.path.exists(folder):\n os.mkdir(folder)", "def create_folder(name_folder: str):\n try:\n # Create a new direcctory\n os.mkdir(name_folder)\n except FileExistsError:\n # If the direcctory already exits print.\n print(f\"The directory {name_folder} already exists.\")", "def _create_folder(file_path):\r\n file_base = os.path.dirname(file_path)\r\n if not os.path.exists(file_base):\r\n try:\r\n os.makedirs(file_base)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def create_files(save_dir, vid_name):\n file_name = vid_name.split('/')[-1].split('.')[0]\n if not os.path.isdir(os.path.join(save_dir, file_name)):\n os.makedirs(os.path.join(save_dir, file_name))\n return file_name", "def mkdir(dirname):\n try:\n os.mkdir(dirname)\n except Exception:\n pass", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def prerun(timestamp):\r\n if not os.path.isdir('log'):\r\n os.makedirs('log')\r\n if not os.path.isdir('collected'):\r\n os.makedirs('collected')\r\n if not os.path.isdir('done'):\r\n os.makedirs('done')\r\n time_stamped_folder = os.path.join('collected', timestamp)\r\n if not os.path.isdir(time_stamped_folder):\r\n os.makedirs(time_stamped_folder)\r\n return time_stamped_folder", "def create_folder(path: str):\n if not os.path.exists(path):\n os.makedirs(path)", "def folder_guard(folder_path):\n \n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def check_file(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def checkpoint_directory(checkpoint):\n if os.path.isdir(checkpoint):\n pass\n else:\n os.mkdir(checkpoint)", "def maybe_makedirs(path_to_create):\n try:\n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def mkdirquiet(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path", "def reCreateDir(self, name):\n path = self.savePathJoin(name)\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)" ]
[ "0.6261388", "0.6114282", "0.6103237", "0.6101051", "0.6050629", "0.6020232", "0.60049367", "0.5988745", "0.59877414", "0.5978589", "0.5952596", "0.5923892", "0.5916263", "0.58593273", "0.585862", "0.58409756", "0.5830644", "0.5822491", "0.5775935", "0.5775935", "0.5774649", "0.57693034", "0.5764537", "0.5739245", "0.57382894", "0.5699803", "0.568526", "0.56841344", "0.5681589", "0.56747985", "0.56626904", "0.56622815", "0.56610376", "0.5656134", "0.5656007", "0.56523234", "0.5646886", "0.56293935", "0.5628355", "0.56276155", "0.5623748", "0.5609713", "0.55940914", "0.5587232", "0.5586169", "0.55801445", "0.5570821", "0.5558584", "0.55550414", "0.5550155", "0.55402607", "0.5536361", "0.5532458", "0.5526422", "0.5524005", "0.55197793", "0.551421", "0.5504456", "0.5501479", "0.5499942", "0.5493892", "0.54929084", "0.5491745", "0.5491496", "0.54885733", "0.54712266", "0.5470946", "0.5468697", "0.5466689", "0.5462355", "0.5457878", "0.5457878", "0.5457878", "0.5457878", "0.54482925", "0.54482925", "0.54404706", "0.543993", "0.5439072", "0.5436427", "0.54340273", "0.54322976", "0.54280627", "0.54268765", "0.5417008", "0.5416244", "0.541556", "0.5415311", "0.54113245", "0.5410356", "0.54081666", "0.5402504", "0.5395991", "0.5395729", "0.53925943", "0.53801817", "0.53797084", "0.53783345", "0.53783035", "0.53776056" ]
0.5550531
49
Convert the illegal name for freebase
def convertName(name): name = re.sub(r'\$', 's', name) return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return name", "def __init__(self, base):\n if isinstance(base, str):\n self._name = base\n else:\n raise TypeError(NAME_CREATE_ERROR)", "def unbound(name):", "def validate_freezable_name(name):\n\n if re.search(\"[./:]+\", name) is None:\n return name\n else:\n raise FreezableNameInvalidError(f'Invalid Freezable Name: \"{name}\"')", "def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' + create_dns_name( base_name, name )", "def _adjust_gs_swift_bug(self, name):\n if name:\n return name.replace(\"/\", \"\")\n else:\n return name", "def _bcl_scrub_name(name):\n return re.sub('[^0-9a-zA-Z\\-\\_]+', '_', name)", "def mangle_name(name):\n import re\n try:\n return re.sub('_+','_',re.sub('[^\\w_]','_',name).lower()).rstrip('_')\n except TypeError:\n raise TypeError(\n 'Trying to mangle name with invalid type of: ' + str(type(name)))", "def create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )", "def fix_name(self):\n self._name_fixed = True", "def get_ig_name ( base_name ) :\n return base_name + '-GW'", "def _revert_encoded_reg_name(self, vdef):\n if vdef.find(\"%\") != -1:\n for (o_reg, re_reg) in self.arch.reg_rename_tbl.items():\n vdef = vdef.replace(re_reg, o_reg)\n return vdef", "def _NiceNameToPreventCompilerErrors(self, attrname):\n # only emit the rhs of a multi part name e.g. undo.UndoItem will appear only as UndoItem\n if attrname.find(\".\") != -1:\n attrname = attrname.split(\".\")[-1] # take the last\n # Prevent compiler errors on the java side by avoiding the generating of java keywords as attribute names\n if attrname in javakeywords:\n attrname = \"_\" + attrname\n return attrname", "def _check_name(self):\n\t\tpass", "def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),\n (\"$\", \"DOLLAR\"), (\".\", \"DOT\"), (\"@\", \"_\"), (\":\", \"_\"),\n ('-', '_')]:\n if k in name: # template\n name = name.replace(k, v)\n # FIXME: test case ? 
I want this func to be neutral on C valid\n # names.\n if name.startswith(\"__\"):\n return \"_X\" + name\n if len(name) == 0:\n pass\n elif name[0] in \"01234567879\":\n return \"_\" + name\n return name", "def _fix_up(self, cls, code_name):", "def sanitize_module_name(module_name):\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name", "def fail_new_brass(name):\n return 'Doublon, la brasserie : %s' %name + ' existe deja'", "def resolve_name(obj, _):\n return obj.name.decode()", "def adjust_name_for_printing(name):\n if name is not None:\n name2 = name\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_m_\")\n name = name.replace(\"+\", \"_p_\").replace(\"!\", \"_I_\")\n name = name.replace(\"**\", \"_xx_\").replace(\"*\", \"_x_\")\n name = name.replace(\"/\", \"_l_\").replace(\"@\", '_at_')\n name = name.replace(\"(\", \"_of_\").replace(\")\", \"\")\n if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:\n raise NameError(\"name {} converted to {} cannot be further converted to valid python variable name!\".format(name2, name))\n return name\n return ''", "def create_importable_name(charm_name):\n return charm_name.replace(\"-\", \"_\")", "def sanitize_luxcore_name(string):\r\n return re.sub(\"[^_0-9a-zA-Z]+\", \"__\", string)", "def exported_name(fullname: str) -> str:\n # TODO: Support unicode\n return fullname.replace('___', '___3_').replace('.', '___')", "def deptype(self) -> str:", "def mangle(raw_name: str) -> str:\n\n # Handle names with '.'.\n if '.' in raw_name:\n res = []\n for name in raw_name.split('.'):\n if invalid_identifier.search(name):\n res.append(mangle(name))\n else:\n res.append(name)\n return '.'.join(res)\n\n name = raw_name.lstrip('_')\n underscores = '_' * (len(raw_name) - len(name))\n return underscores + 'hyx_' + _mangle_re.sub(_match, name)", "def _transform_name(self) -> None:\n self.name = utils.maybe_rename_for_k8s(self.name)", "def convert_packname_for_depsolver(packname):\n return packname.replace('-', '_')", "def _normalize_name(self, name):\n try:\n return safe_join(self.location, name)\n except ValueError:\n raise SuspiciousOperation(\n \"Attempted access to '%s' denied.\" % name,\n )", "def sanitize_name(name):\n # For now just change dashes to underscores. 
Fix this more in the future\n return name.replace(\"-\", \"_\")", "def non_local_name(self, name):\n if \"!\" in name:\n return name[:name.find(\"!\")+1]\n else:\n return name", "def disableIncorrectNameWarning(*args, **kwargs)->None:\n pass", "def get_elb_name ( base_name, app_name ) :\n max_len = 32\n name = base_name + '-' + app_name.upper( ) + '-LB'\n if len( name ) > max_len :\n name = base_name + '-' + app_name.upper( )\n if len( name ) > max_len :\n raise NameError( 'ELB Name ' + name + ' exceeds limit of ' + str( max_len ) )\n\n return name", "def _MaybeNewName(self, name):\n if not name:\n return name\n if name == self._old[:-1]:\n return self._module_name\n before, match, after = name.partition(self._old)\n if match and not before and \".\" not in after:\n return self._new + after\n else:\n return name", "def _make_unknown_name(self, cursor, field_name):\n parent = cursor.lexical_parent\n pname = self.get_unique_name(parent)\n log.debug('_make_unknown_name: Got parent get_unique_name %s',pname)\n # we only look at types declarations\n _cursor_decl = cursor.type.get_declaration()\n # we had the field index from the parent record, as to differenciate\n # between unnamed siblings of a same struct\n _i = 0\n found = False\n # Look at the parent fields to find myself\n for m in parent.get_children():\n # FIXME: make the good indices for fields\n log.debug('_make_unknown_name child %d %s %s %s',_i,m.kind, m.type.kind,m.location)\n if m.kind not in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL,\n CursorKind.CLASS_DECL]:#,\n #CursorKind.FIELD_DECL]:\n continue\n if m == _cursor_decl:\n found = True\n break\n _i+=1\n if not found:\n raise NotImplementedError(\"_make_unknown_name BUG %s\" % cursor.location)\n # truncate parent name to remove the first part (union or struct)\n _premainer = '_'.join(pname.split('_')[1:])\n # name the anonymous record with the field name if it has one\n if field_name:\n name = '%s_%s' % (_premainer, field_name)\n else:\n name = '%s_%d' % (_premainer, _i)\n return name", "def regular_edge_name(name: str) -> str:\n regular = \"\"\n for char in name:\n if char.isalpha() or char.isdigit():\n regular = f\"{regular}{char}\"\n else:\n regular = f\"{regular}_\"\n if not regular[0].isalpha():\n regular = f\"auto_legalized__{regular}\"\n return regular", "def set_name(self, name):\n if name == 'PositiveInteger' :\n self.name = 'Integer'\n self.output = False\n elif name == 'NaturalNumber' :\n self.name = 'Integer'\n self.output = False\n elif name == 'TimeAndDate' :\n self.name = 'DateTime'\n self.output = False\n elif name == 'Real' :\n self.name = 'Float'\n self.output = False\n elif name == 'Percentage':\n self.name = 'Float'\n self.output = False\n elif name == 'Identifier45':\n self.name = 'String'\n self.length = 45\n self.output = False\n elif name == 'Identifier90':\n self.name = 'String'\n self.length = 90\n self.output = False\n else :\n # print \"Not converting %s to base type\" % (name)\n self.name = name", "def create_charm_name_from_importable(charm_name):\n # _ is invalid in charm names, so we know it's intended to be '-'\n return charm_name.replace(\"_\", \"-\")", "def clean_name(name, allowed_chars):\n ok = identifier_chars + allowed_chars\n newname = \"\".join(c if c in ok else \"-\" for c in name)\n newname = newname.lstrip(\"-\")\n if not newname:\n raise RuntimeError(f\"No valid chars in name '{name}'.\")\n return newname", "def MakeValidName(name):\n if name:\n goodName = []\n if not xml.is_name_start_char(name[0]):\n goodName.append(u'_')\n for c 
in name:\n if xml.is_name_char(c):\n goodName.append(c)\n else:\n goodName.append(u'_')\n return string.join(goodName, u'')\n else:\n return u'_'", "def __sanitize(name):\n if name[-1] == \"/\":\n return name[:-1]\n return name", "def sanitize_activation_name(activation_name: str) -> str:\n if activation_name in {ACT_MISH, ACT_SWISH, ACT_SWISH_NAIVE, ACT_MISH_NAIVE}:\n return ACT_LEAKY_RELU\n\n return activation_name", "def to_safe_name(name: str) -> str:\n return regex_replace(r'\\-|\\.|:', \"\", name.replace(' ', '_'))", "def safe_name(self, name):\n\n output = \"\"\n for char in name:\n if char not in '\\\\/<>:\"|?*':\n output += char\n\n return output", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def convert_packname_from_depsolver(depsolver_packname):\n return depsolver_packname.replace('_', '-')", "def _verify_name(name):\n if isinstance(name, str):\n name = name.encode(\"utf-8\")\n\n if not isinstance(name, bytes):\n raise TypeError(\n \"Name {!r} is not a string or byte string\".format(name)\n )\n\n if b\".\" in name:\n raise ValueError(\n \"Name {!r} cannot contain period characters\".format(name)\n )\n\n return name", "def unmangle_bucket_name(bucket):\n if bucket == u'monitoring':\n bucket = u'_monitoring' # to handle monitoring bucket. Bucket shouldn't start with special char\n bucket = bucket.replace('_dsh_', '-')\n return bucket", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def name_to_goodreads(name):\n name = to_ascii(name.title())\n for char in CHARS:\n name = name.replace(*char)\n return name", "def __init__(self, name, code):\n self.name_in_source = name\n if isinstance(name, text_type):\n strip_symbols_re = compile_re('-|_')\n self.canonical_name = strip_symbols_re.sub('', name.lower())\n else:\n self.canonical_name = name\n self.code = code", "def nameToPlug(name):\n\n pass", "def use_name(self):\n if self.is_strobe():\n return 'intsigr_%s' % self.name\n return 'intsig_%s' % self.name", "def fix_natural_language(name):\n\tfor ch in r\"\\`*{}[]()>#+-.!$\":\n\t\tif ch in name:\n\t\t\tname = name.replace(ch,\"_\")\n\treturn name", "def _get_otel_safe_name(name: str) -> str:\n otel_safe_name = name[:OTEL_NAME_MAX_LENGTH]\n if name != otel_safe_name:\n warnings.warn(\n f\"Metric name `{name}` exceeds OpenTelemetry's name length limit of \"\n f\"{OTEL_NAME_MAX_LENGTH} characters and will be truncated to `{otel_safe_name}`.\"\n )\n return otel_safe_name", "def provoke_and_handle_NameError():\n try:\n print(bliblablub)\n except NameError as ne:\n print(f\"Sorry! 
{ne}\")", "def removez_all(self,name):\n\t\tnew_name = string.replace(name,' ', '.')\n\t\tnew_name = self.remove_uploader(new_name)\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\n\t\t#new_name = string.replace(name,'\\&.', '.') BUG\n\t\t\n\t\tnew_name = string.replace(new_name,'-', '.')\n\t\tnew_name = string.replace(new_name,'_', '.')\t\t\n\t\tnew_name = string.replace(new_name,'(', '')\n\t\tnew_name = string.replace(new_name,')', '')\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'X264', 'x264')\n\t\tnew_name = string.replace(new_name,'XVID', 'XviD')\n\t\tnew_name = string.replace(new_name,'TRUEHD', 'TrueHD')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'Multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTI', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTiF', 'MULTi')\n\t\tnew_name = string.replace(new_name,'VO.VF','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VOSTFR','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VO+ST','MULTi')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'TRUE.HD', 'TRUEHD')\n\t\tnew_name = string.replace(new_name,'blueray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BluraY', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'(Bluray-rip)', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray Rip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRIP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BD', 'BluRay')\n\t\tnew_name = string.replace(new_name,'HD-DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HD.DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDVD', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDDVD', 'HDRiP')\t\t\t\t\n\t\tnew_name = string.replace(new_name,'DVDrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVDriP','DVDRiP')\n\t\tnew_name = string.replace(new_name,'dvdrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVD5','DVDRiP')\n\t\tnew_name = string.replace(new_name,'.DVD.','DVDRiP')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'.DD.5.1','DD5.1')\n\t\tnew_name = string.replace(new_name,'6.Canaux','5.1')\t\n\t\tnew_name = string.replace(new_name,'dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'Dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'DtS', 'DTS')\n\t\tnew_name = string.replace(new_name,'DTS.DTS','DTS')\n\t\tnew_name = string.replace(new_name,'DTSHD.','DTS.')\n\t\tnew_name = string.replace(new_name,'.HD.','.')\n\t\t\n\t\tnew_name = string.replace(new_name,'hdma', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD.MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'.MA.', '.HDMA.')\n\t\tnew_name = string.replace(new_name,'ac3','AC3')\n\t\tnew_name = string.replace(new_name,'Ac3','AC3')\n\t\tnew_name = 
string.replace(new_name,'AC.3.','AC3.')\n\t\t\n\t\tnew_name = string.replace(new_name,'HD.HRA','HRA') #High resolution audio\n\t\t#new_name = string.replace(new_name,'.HRA.', '.')\n\t\t\n\t\tnew_name = string.replace(new_name,'.fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.Fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.FR.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'french', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'French', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'VF.', 'FRENCH.')\n\t\tnew_name = string.replace(new_name,'VFF', 'TRUEFRENCH')\t\t\n\t\tnew_name = string.replace(new_name,'truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'Truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFRENCH', 'TRUEFRENCH')\n\t\t\n\t\tnew_name = string.replace(new_name,'VF', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'.PAL.', '.')\n\t\tnew_name = string.replace(new_name,'HD1080', '1080p')\n\t\tnew_name = string.replace(new_name,'1080P', '1080p')\n\t\tnew_name = string.replace(new_name,'720P', '720p')\n\t\t\n\t\tnew_name = string.replace(new_name,'VERSION.LONGUE','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Version.Longue','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Cut', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Edition', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Director\\'s.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'Directors.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'DC', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'D/C', 'DIRECTOR.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'Remastered','REMASTERED')\n\t\tnew_name = string.replace(new_name,'Theatrical.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Theatricul.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Sunshine.Edition','SUNSHINE.EDITION')\n\t\tnew_name = string.replace(new_name,'Revisited.The.Final.Cut','REVISITED.FiNAL.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'LIMITED','LiMiTED')\n\t\t\n\t\tnew_name = string.replace(new_name,'iNT','iNTERNAL')\n\t\tnew_name = string.replace(new_name,'JKF.3D', 'JFK3D')\n\t\tnew_name = string.replace(new_name,'GAIA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'Gaïa', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAÏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAϏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAiA', 'GAÏA')\n\t\t\n\t\tnew_name = string.replace(new_name,'dxva', 'DXVA') #<harwdare decode\n\t\tnew_name = string.replace(new_name,'rip','')\n\t\tnew_name = string.replace(new_name,'Rip','')\n\t\tnew_name = string.replace(new_name,'Ripp','')\n\t\tnew_name = string.replace(new_name,'.mkv.mkv', '.mkv')\n\t\t#new_name = string.replace(new_name,'..', '.')\t#USELESS\n\t\treturn self.refactor_line(new_name)", "def __normalize_name(self):\n self.normalized_name = normalizeSimplified(self.name)", "def _to_db_identifier(name):\n return name.replace('-', '_')", "def correctname(star):\n # Correction for the BPS stars.\n if star.startswith('BS') or star.startswith('CS'):\n star = 'BPS ' + star\n #\n return star", "def k8s_safe_name(name):\n return name.lower().replace('_', '-')", "def convert_back_arr_name(self, smt1_arr_name):\n return smt1_arr_name.split('_')[0]", "def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n 
name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name", "def get_name(self) -> str:\n return \"uncrustify\"", "def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")", "def TransformNames(self) -> _n_2_t_0[str]:", "def clean_name(name: str) -> str:\n if not re.match(\"[a-zA-Z_]\", name[0]):\n name = \"_\" + name\n name = re.sub(\"[^0-9a-zA-Z_]+\", \"_\", name)\n if all(c == \"_\" for c in name):\n name = \"v\"\n return name", "def check_name(name, is_name_ok):\n try:\n name = unicode(name, 'utf-8')\n except:\n pass\n name = name[max(string.rfind(name,'/'),\n string.rfind(name,'\\\\'),\n string.rfind(name,':')\n )+1:]\n name = string.replace(name, u\"'\", u'_')\n name = string.replace(name, u'ä', u'ae')\n name = string.replace(name, u'ö', u'oe')\n name = string.replace(name, u'ü', u'ue')\n name = string.replace(name, u'Ä', u'Ae')\n name = string.replace(name, u'Ö', u'Oe')\n name = string.replace(name, u'Ü', u'Ue')\n name = string.replace(name, u'ß', u'ss')\n bad_chars = ' ,;()[]{}*\"#%+~!'\n good_chars = '________________'\n TRANSMAP = string.maketrans(bad_chars, good_chars)\n name = name.encode('iso-8859-1')\n name = string.translate(name, TRANSMAP)\n if is_name_ok:\n return name\n html = '.html'\n if name[-5:] != html :\n name += html\n return name", "def test_drudge_has_names(free_alg):\n\n p = free_alg.names\n\n # Range and dummy related.\n assert p.R == Range('R')\n assert len(p.R_dumms) == 6\n assert p.R_dumms[0] == p.i\n assert p.R_dumms[-1] == p.n\n\n # Vector bases.\n assert p.v == Vec('v')\n\n # Scalar bases.\n assert p.m == IndexedBase('m')", "def test_normalize_name_bug_1762789(self):\n name = u'Fu\\xdfball'\n self.assertEqual(u'CUSTOM_FU_BALL', utils.normalize_rc_name(name))", "def clean(self):\n pass\n #TODO check whether short name is really clean and short!", "def lookup(name):", "def lookup(name):", "def cast_name(key):\n special_symbols = set('{}{}'.format(punctuation, ' '))\n special_symbols.remove('_')\n new_key = ['_' if x in special_symbols else x for x in key]\n casted_key = ''.join(new_key)\n return casted_key", "def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value", "def package_name(string):\n return 'USymbol' + convert_name(string, False)", "def _normalize_package_name(self, name):\n return Prepared.normalize(name)", "def typeToName(type: int) -> unicode:\n ...", "def symbolize_sensorname(name):\n return name.lower().replace(\" \", \"_\")", "def convert_symbol_vt2tiger(symbol, exchange):\n if exchange == Exchange.SSE and symbol.startswith(\"0\"):\n symbol = symbol + \".SH\"\n else:\n symbol = symbol\n return symbol", "def to_legacy(self) -> object:\n pass", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def __to_key(name: str) -> str:\n return name.replace(\" \", \"-\")", "def genImportWithoutAsName(self, name):\n objectName = name.split('.')[0]\n absName = self._moreImportObject.getAbsName(name, '.'.join(self._module.getNames()))\n #if absName is None:\n # return str2ast('raise ImportError(\"no module named %s\")' % name)\n return str2ast(\"name = import_module('%s', None)\" % absName, name = objectName)", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # 
Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"", "def post_process_pair(name, value):\n if name == \"AddressBookObjectGuid\" and \\\n type(value) == bytes and \\\n len(value) == 16:\n value = str(UUID(bytes=value))\n\n if type(value) == bytes:\n # Base64 encode binary content\n value = b64encode(value).decode(\"ASCII\")\n\n if name == \"OfflineAddressBookTruncatedProperties\":\n value = get_field_name(value >> 16)\n\n if name == \"ObjectType\":\n if value == 3:\n value = \"Folder\"\n elif value == 6:\n value = \"User\"\n elif value == 8:\n value = \"Distribution List\"\n\n return value", "def name_collision(x):\r\n return x", "def name_collision(x):\r\n return x", "def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")", "def fail_new_beer(name):\n return 'Doublon, la biere : %s' %name + ' existe deja'", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def _resolve_name(self, cls, name):\n attrs = name.split('.')\n part = cls\n while attrs:\n attr = attrs.pop(0)\n part = getattr(part, attr, UNSET)\n if part is UNSET:\n return name\n if not isinstance(part, basestring):\n raise TypeError(\"Invalid key: {!r}\".format(part))\n return part", "def friendly_name(self):\n return \"ED25519 SIGNATURE DEP B\"", "def _clean(cls, value, invalid):\r\n return re.sub('_+', '_', invalid.sub('_', value))", "def _unspecify_name(self, name):\n unspec = None\n path = name.split('.')[0]\n for module in messages.MESSAGES:\n if self._fuzzy_module_name_eq(module, path):\n prefix = module.__name__.split('.')[-1]\n return self._hash_name(prefix + name[len(path)+1:])", "def un_load(cls, name):\n gxapi_cy.WrapEMAPTEMPLATE._un_load(GXContext._get_tls_geo(), name.encode())", "def type(name):", "def __init__(self):\n super().__init__(self.__class__.__name__, 'BBEX3.D.XAU.USD.EA.AC.C04')", "def decompose_fullname(fullname):\r\n from r2.lib.db.thing import Thing,Relation\r\n if fullname[0] == 't':\r\n type_class = Thing\r\n elif fullname[0] == 'r':\r\n type_class = Relation\r\n\r\n type_id36, thing_id36 = fullname[1:].split('_')\r\n\r\n type_id = int(type_id36,36)\r\n id = int(thing_id36,36)\r\n\r\n return (type_class, type_id, id)", "def normalize_pipeline_name(name=''):\n normalized_name = name\n for bad in '\\\\/?%#':\n normalized_name = normalized_name.replace(bad, '_')\n return normalized_name" ]
[ "0.6321686", "0.594915", "0.58042735", "0.5773941", "0.56322277", "0.56275433", "0.5616114", "0.55334014", "0.55209816", "0.55155563", "0.5503195", "0.54807377", "0.5372777", "0.53538215", "0.5341635", "0.533382", "0.5289499", "0.52765995", "0.5246423", "0.5203556", "0.5201199", "0.51875657", "0.5181105", "0.5171965", "0.51576203", "0.51533115", "0.5149504", "0.5146544", "0.5144437", "0.51302046", "0.51247346", "0.5096715", "0.5086697", "0.5069507", "0.5066575", "0.5060991", "0.50532806", "0.5049477", "0.50469726", "0.5046792", "0.50420624", "0.5035603", "0.5027832", "0.50241405", "0.5017141", "0.5006497", "0.5006279", "0.5005842", "0.500132", "0.50003266", "0.49949354", "0.49800977", "0.49604222", "0.49578214", "0.4950379", "0.49475563", "0.4946462", "0.49456736", "0.49215245", "0.49190664", "0.4909745", "0.4901517", "0.4898113", "0.48980308", "0.489734", "0.48967233", "0.4885942", "0.48852634", "0.48850414", "0.4879636", "0.48788935", "0.48756263", "0.48756263", "0.487443", "0.4861047", "0.4859588", "0.48500392", "0.48499018", "0.4846804", "0.48419204", "0.4829553", "0.48279157", "0.48246846", "0.4821215", "0.48179543", "0.4817266", "0.4814883", "0.47963762", "0.47963762", "0.47951692", "0.478641", "0.4784313", "0.47790998", "0.47777006", "0.47745198", "0.47735548", "0.47663113", "0.47646832", "0.4764149", "0.4763311", "0.47571605" ]
0.0
-1
Returns a logger with the given name
def get_logger(name: str): logger = logging.getLogger(name) for handler in HANDLERS: logger.addHandler(handler) return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logger(name):\n # type: (str) -> Logger\n return logging.getLogger(name)", "def get_logger(name):\n return logging.getLogger(name)", "def get_logger(name: str) -> logging.Logger:\n \n return logging.getLogger(name)", "def get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def get_logger(name: str) -> logging.Logger:\n\n if name in LOGGER_TABLE:\n return LOGGER_TABLE[name]\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n logger.addHandler(STREAM_HANDLER)\n\n LOGGER_TABLE[name] = logger\n return logger", "def get_logger(name=\"LazySusan\"):\n level = get_level()\n _configure(level)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n return logger", "def getLogger(name):\n return logging.getLogger(name)", "def get_logger(self, name):\n if not isinstance(name, six.string_types):\n name = \"{}.{}\".format(name.__class__.__module__, name.__class__.__name__)\n\n return self.logger_store.setdefault(name, Logger(name=name,\n threshold=Logger.LOG_LEVELS[self.config.log[0].level]))", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger", "def get(name):\r\n log = logging.getLogger(\"%s.%s\" % (ROOT_NAME, name))\r\n return log", "def get_logger(name: str):\n # setup logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'GNN-{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def whLogger(name):\n return logging.getLogger('wh.'+name)", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] %(message)s\"))\n logger.addHandler(handler)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n 
ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def get_logger(logger_name='root'):\n return getLogger(logger_name)", "def get_named_logger(name, level='INFO', _cache={}):\n if name not in _cache:\n logger = logging.getLogger(name)\n handler = file_handler(name)\n logger.addHandler(handler)\n logger.setLevel(level)\n handler.setLevel(level)\n _cache[name] = logger\n return _cache[name]", "def logger(self, name):\n logger, _ = get_stdout_logger(name, verbosity=self.verbosity)\n return logger", "def get_logger(name):\n log = logging.getLogger(name)\n # we don't set the logger's level to inherit from the parent logger.\n if log.handlers:\n return log\n fmt = logging.Formatter(LOG_FMT)\n shdlr = logging.StreamHandler()\n shdlr.setFormatter(fmt)\n log.addHandler(shdlr)\n log.propagate = False\n return log", "def get_logger(name=\"unknown_logger\"):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(FORMATTER)\n logger.addHandler(handler)\n logger.propagate = False # to avoid printing the same logs multiple times\n return logger", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def get_logger(name=None):\n return logging.getLogger(\"bids-schema\" + (\".%s\" % name if name else \"\"))", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n level = get_module_log_level(name)\n logger.setLevel(level)\n handler = logging.FileHandler(get_log_file(name))\n handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n ))\n handler.setLevel(level)\n logger.addHandler(handler)\n logger.info(\"returning a logger set to level: {} for module: {}\".format(level, name))\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n # Console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n return logger", "def _get_logger(name=None, level=None):\n\n logger = logging.getLogger(name)\n if level is not None:\n logger.setLevel(level)\n\n return logger", "def get_logger(name):\n return StyleAdapter(logging.getLogger(name))", "def get_logger(name: str, level: str = LOG_LEVEL) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n coloredlogs.install(\n level=level, logger=logger, fmt='%(asctime)s %(name)s: %(lineno)s %(levelname)s: %(message)s', field_styles=FIELD_STYLES\n )\n return logger", "def get(name=None):\n return Adapter(logging.getLogger(name))", "def get_logger(log_name: str) -> logging.Logger:\n logger = logging.getLogger(log_name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s: %(message)s')\n handler.setFormatter(formatter)\n 
logger.addHandler(handler)\n return logger", "def get_logger(name):\n logger = _root.getChild(name)\n if name.startswith(\"task.\") and _file_logging_enabled:\n _setup_task_logger(logger)\n return logger", "def get_named_logger(name, debug=False):\n logger = logging.getLogger(name)\n if debug:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n if not len(logger.handlers):\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('[ %(asctime)s ] [%(name)s] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(name):\n loggers = {}\n if name in loggers:\n return loggers[name]\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n fmt = '%(asctime)s - %(threadName)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n loggers[name] = logger\n return loggers[name]", "def get_logger(name):\n logger = logging.getLogger(name)\n if not logger.handlers:\n logger.propagate = 1 # propagate to parent\n console = logging.StreamHandler()\n logger.addHandler(console)\n formatter = logging.Formatter(\n '%(name)s - [%(levelname)s] - %(message)s')\n console.setFormatter(formatter)\n return logger", "def get_logger(name):\n #### Configure Logger ####\n # Log to stdout\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(message)s',\n '%m/%d/%Y %H:%M:%S')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger", "def get_logger(name):\n\n logger = logging.getLogger(name)\n if not logger.handlers:\n out = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(name)s - %(levelname)s \\\n - %(module)s - %(message)s'\n )\n out.setFormatter(formatter)\n logger.addHandler(out)\n logger.setLevel(get_config('LOGGING_LEVEL'))\n logger.propagate = False\n return logger", "def get_logger(name, file_name_path='yang.log'):\n # check if file exists\n exists = False\n if os.path.isfile(file_name_path):\n exists = True\n FORMAT = '%(asctime)-15s %(levelname)-8s %(name)5s => %(message)s - %(lineno)d'\n DATEFMT = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(datefmt=DATEFMT, format=FORMAT, filename=file_name_path, level=logging.INFO)\n logger = logging.getLogger(name)\n # if file didn t exist we create it and now we can set chmod\n if not exists:\n os.chmod(file_name_path, 0o664 | stat.S_ISGID)\n return logger", "def get_logger(name):\n filename = \"file_sync.log\"\n _create_log_dir()\n filepath = os.path.join(FLASK_APP.config[\"LOG_DIR\"], filename)\n logger = logging.getLogger(name)\n handler = TimedRotatingFileHandler(filepath, when=\"midnight\")\n logger.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n handler.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n log_format = (\"%(asctime)s %(levelname)s %(pathname)s\"\n \":%(funcName)s: %(lineno)d - %(message)s\")\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(*, logger_name):\n\n logger = logging.getLogger(logger_name)\n\n logger.setLevel(logging.INFO)\n\n logger.addHandler(get_console_handler())\n logger.addHandler(get_file_handler())\n logger.propagate = False\n\n return logger", "def get_logger(name: str):\n logger = logging.getLogger(name)\n 
logger.handlers = gunicorn_error_logger.handlers\n logger.addFilter(NameInjectionFilter())\n return logger", "def getLogger(name):\n return logging.getLogger(\".\".join([\"mpm\"] + name.split(\".\")[1:]))", "def get_logger(name: str, level: typing.Optional[str] = None) -> logging.Logger:\n\n if level is None:\n level = 'INFO'\n\n level = os.environ.get('LOGLEVEL', level)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n # Initialze the log level of the logger. Other possible values are `INFO`, `DEBUG` and `ERROR`\n logging.basicConfig(format='%(levelname)s (%(name)s) %(message)s')\n\n loggers.append(logger)\n\n return logger", "def get_logger(name=None, level=\"warn\"):\n logger_name = str(uuid.uuid4())[:8] if name is None else name\n logger = logging.getLogger(logger_name)\n level = os.environ.get(\"LOG_LEVEL\", level)\n\n msg_formats = {\n \"debug\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"info\": \"%(asctime)s %(message)s [at %(filename)s:%(lineno)d]\",\n \"warn\": \"%(asctime)s %(message)s\",\n \"warning\": \"%(asctime)s %(message)s\",\n \"error\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"critical\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n }\n level_mapping = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }\n\n date_format = \"%Y-%m-%d %H:%M:%S\"\n formatter = logging.Formatter(fmt=msg_formats[level.lower()], datefmt=date_format)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n if len(logger.handlers) > 0:\n rm_idx = [idx for idx, handler in enumerate(logger.handlers) if isinstance(handler, logging.StreamHandler)]\n for idx in rm_idx:\n del logger.handlers[idx]\n logger.addHandler(handler)\n logger.setLevel(level_mapping[level.lower()])\n return logger", "def get_logger(name):\n\n logfile = os.environ.get(\"LOGFILE\", \"/tmp/{}.log\".format(name))\n result_logger = logging.getLogger(name)\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(logfile, encoding=\"utf8\")\n formatter = logging.Formatter(\n \"[%(asctime)s]\"\n \"[%(processName)s %(process)-6d]\"\n \"[%(filename)s %(lineno)d]\"\n \"[%(funcName)s]\"\n \"[%(levelname)s]\"\n \"%(message)s\"\n )\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n result_logger.addHandler(stream_handler)\n result_logger.addHandler(file_handler)\n result_logger.setLevel(logging.DEBUG)\n return result_logger", "def get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(LOGGING_LEVEL)\n logger.addHandler(_handler_file())\n logger.addHandler(_handler_stdout())\n logger.propagate = False\n return logger", "def getLogger(name):\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n hnd2 = logging.StreamHandler(sys.stdout)\n fmt2 = logging.Formatter(fmt='%(name)-20s %(levelname)-8s %(message)s')\n hnd2.setLevel(logging.NOTSET)\n hnd2.addFilter(FilterLevel(True, [logging.INFO]))\n hnd2.setFormatter(fmt2)\n log.addHandler(hnd2)\n hnd1 = logging.StreamHandler(sys.stdout)\n fmt1 = logging.Formatter(fmt=('%(name)-20s %(levelname)-8s' +\n '%(filename)s:%(lineno)s %(message)s'))\n hnd1.setLevel(logging.NOTSET)\n hnd1.addFilter(FilterLevel(False, [logging.INFO]))\n hnd1.setFormatter(fmt1)\n log.addHandler(hnd1)\n return log", "def getLogger(logger_name='root'):\n if not CONFIGURATION_SET:\n 
set_config()\n return structlog.get_logger(logger_name, name=logger_name)", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def get_logger(module_name):\n def _logger():\n \"\"\" Callable used to obtain current logger object. \"\"\"\n return logging.getLogger(module_name)\n return _logger", "def get_logger(name, fluentd_host='localhost', fluentd_port=24224):\n logger = logging.getLogger(name)\n fluent_handler = handler.FluentHandler(\n 'mole.logs',\n host=fluentd_host,\n port=fluentd_port,\n buffer_overflow_handler=overflow_handler\n )\n formatter = handler.FluentRecordFormatter(\n custom_format,\n format_json=False\n )\n fluent_handler.setFormatter(formatter)\n logger.addHandler(fluent_handler)\n return logger", "def getLogger(self, name):\n if not isinstance(name, str):\n raise TypeError('A logger name must be a string')\n\n if name in self.loggerDict:\n rv = self.loggerDict[name]\n if isinstance(rv, PlaceHolder):\n ph = rv\n rv = self.loggerClass(name)\n rv.manager = self\n self.loggerDict[name] = rv\n self._fixupChildren(ph, rv)\n self._fixupParents(rv)\n else:\n rv = self.loggerClass(name)\n rv.manager = self\n self.loggerDict[name] = rv\n self._fixupParents(rv)\n\n return rv", "def get_logger(name: str, log_file_path: str, log_debug_file_path: str) -> logging.Logger:\n logger = logging.getLogger(name)\n\n if os.path.isfile(log_debug_file_path):\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n\n format = logging.Formatter(\n fmt='%(asctime)s %(name)-12s %(levelname)s %(process)-8d %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n\n iris_service_file_handler = logging.FileHandler(log_file_path)\n iris_service_file_handler.setFormatter(format)\n logger.addHandler(iris_service_file_handler)\n\n return logger", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(module_name):\n return logging.getLogger(APP_LOGGER_NAME).getChild(module_name)", "def get_logger(name='default.log', level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n hdlr = logging.StreamHandler()\n hdlr.setLevel(level)\n fmt = PrettyFormatter()\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n return logger", "def _getLogger(name):\n logger = logging.getLogger(name)\n # if not logging.root.handlers:\n # logger.disabled = 1\n return logger", "def logger() -> 
logging.Logger:\n return logging.getLogger(__name__)", "def get_logger(name='some script'):\n\n #timestamp for filename \n timestamp = datetime.now().strftime('%Y-%m-%d')\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n #custom formatter\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(filename)s '\n '%(funcName)s line: %(lineno)s: %(msg)s'\n )\n handler = logging.FileHandler('/tmp/scripts_{0}.log'.format(timestamp))\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n #print to stdout if it's interactive, but file-only if not\n if sys.stdin.isatty():\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger", "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "def get_logger(name: str, log_level: str = None):\n if log_level is None:\n log_level = os.environ.get(\"ACCELERATE_LOG_LEVEL\", None)\n logger = logging.getLogger(name)\n if log_level is not None:\n logger.setLevel(log_level.upper())\n logger.root.setLevel(log_level.upper())\n return MultiProcessAdapter(logger, {})", "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "def get_logger(module_name):\n # Gets or creates a logger\n logging.basicConfig(format=\"%(levelname)s-[%(filename)s:%(lineno)d]:%(message)s\")\n logger = logging.getLogger(module_name)\n # set log level\n logger.setLevel(logging.DEBUG)\n\n return logger", "def get_logger_inst(profile: Profile, logger_name) -> logging.Logger:\n did_ident = get_did_ident(profile)\n if did_ident:\n logger_name = f\"{logger_name}_{did_ident}\"\n return get_logger_with_handlers(\n settings=profile.settings,\n logger=logging.getLogger(logger_name),\n did_ident=did_ident,\n interval=profile.settings.get(\"log.handler_interval\") or 7,\n backup_count=profile.settings.get(\"log.handler_bakcount\") or 1,\n at_when=profile.settings.get(\"log.handler_when\") or \"d\",\n )", "def plog_use_logger(name):\r\n global logger, loglevels\r\n logger = logging.getLogger(name)", "def get_logger(logger_name):\n logger = logging.getLogger(logger_name)\n\n logger.setLevel(app_config.LOG_LEVEL)\n logger.addHandler(get_console_handler())\n\n # with this pattern, it's rarely necessary to propagate\n # the error up to parent\n logger.propagate = False\n\n return logger", "def get_logger(logger_name='default'):\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n log_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n 
ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_format)\n if log.hasHandlers():\n log.handlers.clear()\n log.addHandler(ch)\n\n return log", "def get_logger(name, level=None):\n if not level:\n level = os.environ.get('LOGGER_LEVEL', 'INFO')\n\n logger = logging.getLogger(name)\n\n set_formatter(logger)\n\n try:\n logger.setLevel(level.upper())\n except (TypeError, ValueError) as err:\n logger.setLevel('INFO')\n logger.error('Defaulting to INFO logging: %s', str(err))\n\n return logger", "def logger():\n return logging.getLogger(__name__)", "def get_logger():\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(name)s] [%(asctime)s]: %(message)s')\n caller = whoami(offset=1)\n name = os.path.basename(caller)\n logger = logging.getLogger(name)\n return logger", "def get_logger(name, filename=None, stream_loglevel=\"INFO\", file_loglevel=\"DEBUG\"):\n if name in loggers:\n return loggers[name]\n logger = logging.getLogger(name)\n logger.propagate = False\n\n with_color = supports_color()\n\n pre1, suf1 = hash_coloured_escapes(name) if with_color else (\"\", \"\")\n pre2, suf2 = hash_coloured_escapes(name + \"salt\") if with_color else (\"\", \"\")\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s {}+{}+{} \"\n \"%(name)s: %(message)s\".format(pre1, pre2, suf1),\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n if filename is not None:\n ch_file = logging.handlers.RotatingFileHandler(\n filename, maxBytes=5 * 1024 * 1024, backupCount=10\n )\n ch_file.setLevel(file_loglevel)\n ch_file.setFormatter(formatter)\n logger.addHandler(ch_file)\n ch = logging.StreamHandler()\n ch.setLevel(stream_loglevel)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n loggers[name] = logger\n\n logger.once_dict = {}\n\n return logger", "def logger(name, debug=True):\n logging.basicConfig() # errors and everything else (2 separate log groups)\n log = logging.getLogger(name)\n log.setLevel(logging.INFO)\n if debug:\n log.setLevel(logging.DEBUG)\n return log", "def Logger(name, level=None):\n logger = logging.getLogger(name)\n if level:\n logger.setLevel(level)\n return logger", "def get_logger(plugin_name):\n return logging.getLogger('sopel.externals.%s' % plugin_name)", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def single_logger_factory(level_name: str): # pylint: disable=no-self-argument\n\n def single_logger(func):\n 
@functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n # pylint: disable=no-member, protected-access\n\n if (\n self.authorized\n and logging._nameToLevel[level_name.upper()] >= self.min_level\n ):\n try:\n logger = getattr(\n getattr(self, f\"{level_name.lower()}_logger\"),\n level_name.lower(),\n )\n\n if not logger:\n self.init_loggers()\n except AttributeError:\n self.init_loggers()\n\n logger = getattr(\n getattr(self, f\"{level_name.lower()}_logger\"),\n level_name.lower(),\n )\n\n return logger(*args, **kwargs, extra=self.get_origin())\n\n return func\n\n return wrapper\n\n return single_logger", "def get_logger(logger_name):\n logger_path = os.path.join(PATH, 'config', \"logging.conf\")\n if os.path.exists(logger_path):\n logging.config.fileConfig(logger_path)\n logging.info(\"%s started\" % logger_name)\n return logging.getLogger(logger_name)", "def debug_logger(name='test'):\n return LogAdapter(DebugLogger(), name)", "def get_logger(name, conf):\n\n try:\n # try absolute path\n lfile = conf['log_file']\n except KeyError:\n print('config warning: log file is not configured, logging to default.log')\n lfile = 'default.log'\n except:\n print('config error: log file directory does not exist')\n lfile = 'default.log'\n\n try:\n timezone = conf['time_zone']\n except KeyError:\n timezone = 'America/Chicago'\n\n tz = pytz.timezone(timezone)\n\n class Formatter(logging.Formatter):\n def converter(self, timestamp):\n return datetime.datetime.fromtimestamp(timestamp, tz)\n\n def formatTime(self, record, datefmt=None):\n dt = self.converter(record.created)\n if datefmt:\n s = dt.strftime(datefmt)\n else:\n t = dt.strftime(self.default_time_format)\n s = self.default_msec_format % (t, record.msecs)\n return s\n\n logger = logging.getLogger(name)\n handler = logging.FileHandler(lfile)\n handler.setFormatter(Formatter(\"%(asctime)s: %(levelname)s: %(name)s: %(message)s\", \"%Y-%m-%dT%H:%M:%S%z\"))\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n return logger", "def _get_logger():\n return logging.Logger(__name__)", "def __getattr__(self, name):\n return getattr(self.logger, name)", "def create_logger(name, log_file=None):\n l = logging.getLogger(name)\n formatter = logging.Formatter('[%(asctime)s] %(message)s')\n l.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n l.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n l.addHandler(fh)\n\n return l", "def get_logger():\n return logging.getLogger(__name__)", "def get_logger_with_handler(self, logger_name):\n try:\n self.get_handler(logger_name)\n except KeyError:\n logger_name = logger_name.split(\".\")[0]\n self.get_handler(logger_name)\n return logger_name", "def get_logger(request, name=\"ot_api\"):\n\n# package_dir = os.path.dirname(module_path)\n# config_filepath = os.path.join(package_dir, _LOGGING_CONFIG_FILE)\n# if os.path.exists(config_filepath):\n# try:\n# logging.config.fileConfig(config_filepath)\n# logger_set = True\n# except:\n# logger_set = False\n logger = logging.getLogger(name)\n if len(logger.handlers) == 0:\n if request is None:\n level = _get_logging_level(os.environ.get(_LOGGING_LEVEL_ENVAR))\n logging_formatter = _get_logging_formatter(os.environ.get(_LOGGING_FORMAT_ENVAR))\n logging_filepath = os.environ.get(_LOGGING_FILE_PATH_ENVAR)\n else:\n level_str, logging_format_name, logging_filepath = read_logging_config(request)\n logging_formatter = 
_get_logging_formatter(logging_format_name)\n level = _get_logging_level(level_str)\n\n logger.setLevel(level)\n if logging_filepath is not None:\n log_dir = os.path.split(logging_filepath)[0]\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n ch = logging.FileHandler(logging_filepath)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(logging_formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(log_dir, name):\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a file\n log_path = os.path.join(log_dir, f'{name}.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add the handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger", "def setup_custom_logger(name):\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATEFMT)\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(LEVEL)\n logger.addHandler(handler)\n\n return logger", "def _logger():\n return logging.getLogger(module_name)", "def logger(self):\n my_id = id(self)\n name = self.__class__.__name__\n logger_name = '{name}.{my_id}'.format(my_id=my_id, name=name)\n\n logger = self.sdk.loggers.get(logger_name)\n if logger is None:\n self.sdk.loggers[logger_name] = getLogger(\n '{name}'.format(name=logger_name))\n\n return self.sdk.loggers[logger_name]", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def init_logger(name, path=None):\n import logging.handlers\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = 0\n _nf = ['[%(asctime)s]',\n '[%(name)s]',\n '[%(filename)20s:%(funcName)15s:%(lineno)5d]',\n '[%(levelname)s]',\n ' %(message)s']\n _cf = ['$GREEN[%(asctime)s]$RESET',\n '[%(name)s]',\n '$BLUE[%(filename)20s:%(funcName)15s:%(lineno)5d]$RESET',\n '[%(levelname)s]',\n ' $CYAN%(message)s$RESET']\n nformatter = logging.Formatter('-'.join(_nf))\n cformatter = ColoredFormatter('-'.join(_cf))\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(cformatter)\n\n if path:\n path += '/' + name + '.log'\n else:\n path = get_path('log') + '/' + name + '.log'\n rf = logging.handlers.RotatingFileHandler(path, maxBytes=5 * 1024 * 1024, backupCount=5)\n rf.setLevel(logging.DEBUG)\n rf.setFormatter(nformatter)\n\n logger.addHandler(ch)\n logger.addHandler(rf)\n return logger", "def getLogger(self, *args, **kwargs):\r\n return loggers.getLogger(*args, **kwargs)", "def 
get_logger(logger_name, log_folder=None, timestamp=\"%Y%m%d\", level=logging.DEBUG):\n from config import LOG_FILE_ROOT\n # if doesn't specify a log folder, use the default one in config\n if not log_folder:\n log_folder = LOG_FILE_ROOT\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n if timestamp:\n logfile = os.path.join(log_folder, '%s_%s.log' % (logger_name, time.strftime(timestamp, datetime.datetime.now().timetuple())))\n else:\n logfile = os.path.join(log_folder, '%s.log' % logger_name)\n fmt = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(name)s - %(levelname)s -- %(message)s', datefmt=\"%H:%M:%S\")\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n fh = logging.FileHandler(logfile)\n fh.setFormatter(fmt)\n fh.name = \"logfile\"\n logger.addHandler(fh)\n return (logger, logfile)", "def getLogger():\n return logging.getLogger(__name__)", "def getLogger(output_dir: Optional[Path] = None, *, root: bool = False, name: str = \"\") -> Logger:\n if root:\n global _root\n _root = name if name else Path(previousframe(2).filename).stem\n logger = gL(__package__)\n for hndl in list(logger.handlers):\n logger.removeHandler(hndl)\n logger.setLevel(DEBUG)\n logger.addHandler(_MakeHandler(StreamHandler, min_level=INFO, max_level=INFO, stream=sys.stdout))\n logger.addHandler(_MakeHandler(StreamHandler, min_level=WARNING, stream=sys.stderr))\n if output_dir:\n log_dir = (output_dir / \"log\").mkdir_hidden()\n log_file = log_dir / f\"{output_dir.resolve().name}.log\"\n logger.addHandler(_MakeHandler(FileHandler, filename=log_file, min_level=DEBUG, encoding=\"utf-8\"))\n return logger", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def get_logger():\r\n global logger\r\n \r\n if logger:\r\n return logger\r\n else:\r\n return create_logger()", "def get_logger(self, logname, logfile, loglevel, propagate):\n # TODO: simplify\n logger = logging.getLogger(logname)\n logger_handler = WatchedFileHandler(logfile, mode='w')\n # removed \\t%(name)-6s\n log_fmt = '%(asctime)s\\t%(levelname)-8s\\t%(message)s'\n logger_handler.setFormatter(\n logging.Formatter(log_fmt, '%b %d %H:%M:%S'))\n logger.addHandler(logger_handler)\n logger.propagate = propagate\n logger.setLevel(loglevel)\n return logger", "def get_logger(args):\n logger_kind = 'tensorboard' if 'logger' not in args.__dict__ else args.logger\n if logger_kind == 'tensorboard':\n logger = pl.loggers.tensorboard.TensorBoardLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.dataset,\n )\n\n elif logger_kind == 'wandb':\n logger = pl.loggers.WandbLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.backbone,\n )\n\n else:\n raise Exception(f'Error. 
Logger \"{lokker_kind}\" is not supported.')\n return logger", "def get_logger(filename):\n daiquiri.setup(level=logging.INFO)\n return daiquiri.getLogger(filename)", "def _get_named_client_logger(\n name: str,\n host: str = \"localhost\",\n port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,\n) -> logging.Logger:\n # Setup the logger configuration\n # We add client not only to identify that this is the client\n # communication part of the logger, but to make sure we have\n # a new singleton with the desired socket handlers\n local_logger = _create_logger(\"Client-\" + name)\n local_logger.propagate = False\n local_logger.setLevel(logging.DEBUG)\n\n try:\n # Ignore mypy logging.handlers.SocketHandler has no attribute port\n # This is not the case clearly, yet MyPy assumes this is not the case\n # Even when using direct casting or getattr\n ports = [\n getattr(handler, \"port\", None) for handler in local_logger.handlers\n ] # type: ignore[attr-defined]\n except AttributeError:\n # We do not want to log twice but adding multiple times the same\n # handler. So we check to what ports we communicate to\n # We can prevent errors with streamers not having a port with this try\n # block -- but it is a scenario that is unlikely to happen\n ports = []\n\n if port not in ports:\n socketHandler = logging.handlers.SocketHandler(host, port)\n local_logger.addHandler(socketHandler)\n\n return local_logger" ]
[ "0.86943036", "0.86648834", "0.86127335", "0.83303255", "0.8310802", "0.8274421", "0.82201195", "0.82107776", "0.81484", "0.81371915", "0.81099725", "0.80877364", "0.80632704", "0.7964971", "0.79618955", "0.7960622", "0.79571754", "0.7941737", "0.79340976", "0.7931048", "0.79277116", "0.7899922", "0.78584087", "0.7826838", "0.7759638", "0.77386034", "0.7717188", "0.77067024", "0.76871526", "0.76837534", "0.7673804", "0.766603", "0.76327467", "0.7622185", "0.7596606", "0.75939816", "0.7584505", "0.7577833", "0.7574117", "0.7568039", "0.7556089", "0.7532467", "0.7510924", "0.7500487", "0.7390525", "0.73782974", "0.7352824", "0.7344567", "0.73298043", "0.7303782", "0.7292223", "0.7289781", "0.72812086", "0.72508556", "0.72272587", "0.7222415", "0.7192162", "0.71591336", "0.71210515", "0.7113405", "0.7107292", "0.7107159", "0.7093892", "0.70850384", "0.7049228", "0.70480675", "0.7030781", "0.7028487", "0.7026912", "0.7011338", "0.7007497", "0.70031345", "0.6994034", "0.6977918", "0.69661903", "0.6949754", "0.6947269", "0.69339615", "0.69201434", "0.6919937", "0.69134486", "0.6892223", "0.6879002", "0.6859954", "0.6837114", "0.68197334", "0.6788548", "0.67865473", "0.6770256", "0.67538255", "0.67272866", "0.6686455", "0.66691136", "0.6651417", "0.6648674", "0.66439503", "0.6641903", "0.66341144", "0.66325414", "0.6618094" ]
0.79965186
13
Loads requirements file and outputs an array of dependencies
def parse_requirements(filename):
    lineiter = (line.strip() for line in open(filename))
    return [line for line in lineiter if line and not line.startswith('#')]
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def read_requirements():\r\n reqs_path = os.path.join('.', 'requirements.txt')\r\n with open(reqs_path, 'r') as f:\r\n requirements = [line.rstrip() for line in f]\r\n return requirements", "def read_requirements():\n reqs_path = path.join('.', 'requirements.txt')\n with open(reqs_path, 'r') as f:\n requirements = [line.rstrip() for line in f]\n return requirements", "def install_deps():\n with open('requirements.txt', 'r') as f:\n packages = f.readlines()\n new_pkgs = []\n for resource in packages:\n new_pkgs.append(resource.strip())\n return new_pkgs", "def read_requirements():\n with open('requirements.txt') as f:\n requirements = f.readlines()\n return [element.strip() for element in requirements]", "def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]", "def read_deps():\n with open(\"./dependencies.txt\", 'r') as deps:\n return [d for d in re.split(r'\\s', ''.join(deps)) if d]", "def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read().splitlines()", "def learn_requirements():\n req_file = \"requirements.txt\"\n reqs = []\n\n import os\n\n path = os.path.dirname(__file__)\n req_file = os.path.join(path, \"..\", req_file)\n if not os.path.exists(req_file):\n # not needed with installed package\n return reqs\n\n excludes = \"versioneer coveralls coverage\".split()\n with open(req_file, \"r\") as fp:\n buf = fp.read().strip().splitlines()\n for req in buf:\n req = req.strip()\n if (\n req != \"\"\n and not req.startswith(\"#\")\n and req not in excludes\n ):\n reqs.append(req)\n return reqs", "def load_requirements(name: str) -> List[str]:\r\n dependencies: List[str] = []\r\n dep_matcher = re.compile('^[^=<>\\\\s]+\\\\s*[=<>]+\\\\s*[^=<>\\\\s]+$')\r\n with open(name, 'r') as fd:\r\n for line in fd.read().split(\"\\n\"):\r\n line.strip()\r\n if dep_matcher.match(line):\r\n dependencies.append(line)\r\n return dependencies", "def get_requirements():\n requirements_list = []\n\n if not os.path.isfile(REQUIREMENTS_FILE):\n # Check if requirements file did not exist.\n return requirements_list\n\n with open(REQUIREMENTS_FILE) as reqs:\n for install in reqs:\n requirements_list.append(install.strip())\n\n return requirements_list", "def read_requirements():\n reqs_path = os.path.join(__location__, 'requirements.txt')\n with open(reqs_path, encoding='utf8') as f:\n reqs = [line.strip() for line in f if not line.strip().startswith('#')]\n\n names = []\n links = []\n for req in reqs:\n if '://' in req:\n links.append(req)\n else:\n names.append(req)\n return {'install_requires': names, 'dependency_links': links}", "def get_requirements():\n\n with open('requirements.txt', 'r') as f:\n requirements = f.readlines()\n requires = []\n for require in requirements:\n if require.startswith(\"#\") or require.startswith(\"\\n\"):\n continue\n else:\n requires.append(require.replace(\"\\n\", \"\"))\n return requires", "def get_dependencies(path):\n dependencies_path = os.path.join(path, \"dependencies.txt\")\n dependencies_pattern = r\"([a-z]+(?:[_-][a-z]+)*)(.=)+(([1-9][0-9]*!)?(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?)\"\n \"\"\"\n Example:\n input: iobeam==0.7.15\n mrb-hw-info==0.0.25\n mrbeam-ledstrips==0.2.2-alpha.2\n output: [[iobeam][==][0.7.15]]\n [[mrb-hw-info][==][0.0.25]]\n [[mrbeam-ledstrips][==][0.2.2-alpha.2]]\n \"\"\"\n try:\n with open(dependencies_path, 
\"r\") as f:\n dependencies_content = f.read()\n dependencies = re.findall(dependencies_pattern, dependencies_content)\n dependencies = [{\"name\": dep[0], \"version\": dep[2]} for dep in dependencies]\n except IOError:\n raise RuntimeError(\"Could not load dependencies\")\n return dependencies", "def get_requirements():\n with open('requirements.txt') as fd:\n lines = fd.read().splitlines()\n requires, links = [], []\n for line in lines:\n if line.startswith('git+'):\n links.append(line)\n elif line:\n requires.append(line)\n return requires, links", "def parse_requirements_txt():\n root = os.path.dirname(os.path.abspath(__file__))\n\n requirements = []\n dependencies = []\n\n with open(os.path.join(root, 'requirements.txt'), 'r') as f:\n for line in f.readlines():\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n\n egg = re.match('git\\+.*#egg=(.*)$', line)\n if egg is not None:\n egg = egg.groups()[0]\n requirements.append(egg)\n dependencies.append(line)\n else:\n requirements.append(line)\n\n return requirements, dependencies", "def get_requirements():\n raw_requirements = read(\"requirements.txt\")\n requirements = []\n dependencies = []\n\n for req in raw_requirements.splitlines():\n req = req.strip()\n if not req:\n continue\n\n if req.startswith(\"#\"):\n continue\n\n if \"+\" in req:\n dependencies.append(req)\n else:\n requirements.append(req)\n\n return requirements, dependencies", "def load_requirements(fn):\n with open(fn, 'r') as f:\n return [x.rstrip() for x in list(f) if x and not x.startswith('#')]", "def load_requirements(*requirements_paths):\n requirements = set()\n for path in requirements_paths:\n requirements.update(\n line.strip() for line in open(path).readlines()\n if is_requirement(line)\n )\n return list(requirements)", "def load_requirements(*requirements_paths):\n requirements = set()\n for path in requirements_paths:\n requirements.update(\n line.strip() for line in open(path).readlines()\n if is_requirement(line)\n )\n return list(requirements)", "def findRequirements():\n return [\n line.strip()\n for line in open(\"requirements.txt\").readlines()\n if not line.startswith(\"#\")\n ]", "def read_dependencies(filename):\n\n dependencies = []\n with open(filename) as f:\n for line in f.readlines():\n if not line or line.startswith('#'):\n continue\n dependencies.append(line.strip())\n return dependencies", "def install_requires():\n return reqs(\"requirements.txt\")", "def install_requires():\n return reqs('requirements.txt')", "def get_install_requires():\n requirements = []\n for line in open('requirements.txt').readlines():\n # skip to next iteration if comment or empty line\n if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):\n continue\n # add line to requirements\n requirements.append(line)\n return requirements", "def read_requirements(file_name):\n reqs = read_file(file_name).splitlines()\n if not reqs:\n raise RuntimeError(\n \"Unable to read requirements from the %s file\"\n \"That indicates this copy of the source code is incomplete.\"\n % file_name\n )\n return reqs", "def get_requirements():\n name = 'pypeit/requirements.txt'\n\n requirements_file = os.path.join(os.path.dirname(__file__), name)\n install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)\n if not line.strip().startswith('#') and line.strip() != '']\n return install_requires", "def main() -> None:\n verify_pip_is_installed()\n print('Regenerating \"requirements.txt\" file...')\n 
install_python_dev_dependencies.compile_pip_requirements(\n 'requirements.in', 'requirements.txt')\n # Adds a note to the beginning of the 'requirements.txt' file to make sure\n # developers understand that they should not append or change this\n # autogenerated file.\n with utils.open_file(\n common.COMPILED_REQUIREMENTS_FILE_PATH, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(\n '# Developers: Please do not modify this auto-generated file. If\\n'\n '# you want to add, remove, upgrade, or downgrade libraries,\\n'\n '# please change the `requirements.in` file, and then follow\\n'\n '# the instructions there to regenerate this file.\\n' + content)\n\n mismatches = get_mismatches()\n if mismatches:\n _rectify_third_party_directory(mismatches)\n validate_metadata_directories()\n else:\n print(\n 'All third-party Python libraries are already installed correctly.')", "def _read_requirements():\n LOG.info(\"Reading rally requirements...\")\n for file_name in RALLY_REQUIREMENTS_FILES:\n LOG.debug(\"Try to read '%s'.\", file_name)\n with open(file_name) as f:\n data = f.read()\n LOG.info(\"Parsing requirements from %s.\" % file_name)\n yield file_name, parse_data(data)", "def get_required_packages(file_contents):\n # Make sure the only ``install_requires`` happens in the\n # call to setup()\n if file_contents.count(INST_REQS_KWARG) != 1:\n raise ValueError('Expected only one use of keyword',\n INST_REQS_KWARG, file_contents)\n # Make sure the only usage of ``install_requires`` is to set\n # install_requires=REQUIREMENTS.\n keyword_stmt = INST_REQS_KWARG + '=' + REQ_VAR\n if file_contents.count(keyword_stmt) != 1:\n raise ValueError('Expected keyword to be set with variable',\n INST_REQS_KWARG, REQ_VAR, file_contents)\n # Split file on ``REQUIREMENTS`` variable while asserting that\n # it only appear twice.\n _, reqs_section, _ = file_contents.split(REQ_VAR)\n # Find ``REQUIREMENTS`` list variable defined in ``reqs_section``.\n reqs_begin = reqs_section.index('[')\n reqs_end = reqs_section.index(']') + 1\n\n # Convert the text to an actual list, but make sure no\n # locals or globals can be used.\n reqs_list_text = reqs_section[reqs_begin:reqs_end]\n # We use literal_eval() because it limits to evaluating\n # strings that only consist of a few Python literals: strings,\n # numbers, tuples, lists, dicts, booleans, and None.\n requirements = ast.literal_eval(reqs_list_text)\n\n # Take the list of requirements and strip off the package name\n # from each requirement.\n result = []\n for required in requirements:\n parts = required.split()\n result.append(parts[0])\n return result", "def install_deps():\n default = open('requirements.txt', 'r').readlines()\n new_pkgs = []\n links = []\n for resource in default:\n if 'git+https' in resource:\n pkg = resource.split('#')[-1]\n links.append(resource.strip())\n new_pkgs.append(pkg.replace('egg=', '').rstrip())\n else:\n new_pkgs.append(resource.strip())\n return new_pkgs, links", "def get_requirements(*args):\n requirements = set()\n with open(get_absolute_path(*args)) as handle:\n for line in handle:\n # Strip comments.\n line = re.sub(r'^#.*|\\s#.*', '', line)\n # Ignore empty lines\n if line and not line.isspace():\n requirements.add(re.sub(r'\\s+', '', line))\n return sorted(requirements)", "def read_requirements(filepath):\n with open(filepath, 'r') as fd:\n return fd.read().split('\\n')", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n 
install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def read_requirements(path=\"requirements.txt\"):\n full_path = os.path.join(LOCAL_DIR, path)\n\n def yield_line(path):\n with open(path, \"r\") as fid:\n for line in fid.readlines():\n yield line\n\n return [\n requirement.strip()\n for requirement in yield_line(full_path)\n if not requirement.startswith(\"#\")\n ]", "def get_requirements(req):\n\n install_requires = []\n with open(req) as f:\n for line in f:\n if not line.startswith(\"#\"):\n install_requires.append(line.strip())\n return install_requires", "def rl_file_deps(file_deps, launch_file, verbose=False):\n parse_launch(launch_file, file_deps, verbose)", "def tidy_requirements(requirement_file):\n outdata = []\n with open(requirement_file) as dependencies:\n for line in dependencies:\n line = line.strip()\n if line and not line.startswith('#') and line not in outdata:\n outdata.append(line)\n return outdata", "def load_requirements(pkg_dir, pyver):\n pyver = pyver.replace('.', '')\n reqs_dir = os.path.join(pkg_dir, 'requirements')\n reqs_files = [\n 'main_py{0}.pip'.format(pyver),\n 'tests_py{0}.pip'.format(pyver),\n 'docs.pip',\n ]\n ret = []\n for rfile in [os.path.join(reqs_dir, item) for item in reqs_files]:\n with open(os.path.join(reqs_dir, rfile), 'r') as fobj:\n lines = [\n item.strip()\n for item in fobj.readlines()\n if item.strip()\n ]\n ret.extend(lines)\n return ret", "def parse_requirements(requirements_file='requirements.txt'):\n lines = []\n with open(requirements_file) as reqs:\n for _ in reqs:\n line = _.split('#')[0]\n if line.strip():\n lines.append(line)\n return lines", "def parse_reqs(req_path='./requirements.txt'):\n req = []\n with codecs.open(req_path, 'r') as handle:\n # remove comments and empty lines\n lines = (line.strip() for line in handle\n if line.strip() and not line.startswith('#'))\n\n for line in lines:\n # check for nested requirements files\n if line.startswith('-r'):\n # recursively call this function\n req += parse_reqs(req_path=line[3:])\n\n else:\n # add the line as a new requirement\n req.append(line)\n\n return req", "def get_dependencies_content():\n import trustedanalytics\n dependencies = []\n for filename in trustedanalytics.udf_dependencies:\n name, content = _get_file_content_as_str(filename)\n dependencies.append({'file_name': name, 'file_content': content})\n return dependencies", "def install_requires():\n skip_install_requires = environ.get('SKIP_INSTALL_REQUIRES')\n if not skip_install_requires:\n with open('requirements.pip') as r:\n return r.readlines()\n return []", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def read_requirements(path: str) -> List[str]:\n with open(path, 'r', encoding='utf-8') as f:\n all_reqs = f.read().split('\\n')\n\n # find embedded requirements inside (e.i., `-r <other requirements>`)\n # \"pip install -r <file>\" handles nested requirements, so do that too here\n root = os.path.dirname(path)\n sub_reqs = []\n\n filtered_reqs = []\n 
for x in all_reqs:\n m = re.findall(r'^-r\\s+(\\S+)', x)\n if len(m) == 1:\n sub_reqs += read_requirements(os.path.join(root, m[0]))\n elif len(x) > 0:\n filtered_reqs.append(x)\n return filtered_reqs + sub_reqs", "def read_requirements(*parts):\n requirements = []\n for line in read(*parts).splitlines():\n line_2 = re.sub(\n \"(\\s*)?#(?!egg=).*$\", # the space immediately before the hash mark, the hash mark, and anything that follows it, but not \"#egg=\" fragments\n \"\", # replace with a blank string\n line,\n )\n line_3 = re.sub(\n \"(\\s*)?-r.*$\", # we also can't reference other requirement files\n \"\", # replace with a blank string\n line_2,\n )\n if line_3: # i.e. we have a non-zero-length string\n requirements.append(line_3)\n return requirements", "def requires():\n install_reqs = parse_requirements(join(CWD, 'requirements', 'base.txt'),\n session=False)\n return [str(ir.req) for ir in install_reqs]", "def required_packages():\n with open(r\"required-packages.yml\") as file:\n inputs = yaml.load(file, Loader=yaml.FullLoader)\n return inputs[\"required_packages\"]", "def parse_requirements(filename, *args, **kwargs):\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]", "def parse_requirements(fn):\n with open(fn) as f:\n rv = []\n for line in f:\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n rv.append(line)\n return rv", "def parse_requirements(*filenames):\n requirements = []\n for f in filenames:\n for line in open(f, 'r').read().split('\\n'):\n # Comment lines. Skip.\n if re.match(r'(\\s*#)|(\\s*$)', line):\n continue\n # Editable matches. Put the egg name into our reqs list.\n if re.match(r'\\s*-e\\s+', line):\n pkg = re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line)\n requirements.append(\"%s\" % pkg)\n # File-based installs not supported/needed. 
Skip.\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n return requirements", "def parse_requirements_from_pipfile():\n lineiter = (line.strip() for line in open('Pipfile'))\n requirements_pipfile_style = [line for line in lineiter]\n start_index = requirements_pipfile_style.index('[packages]') + 1\n end_index = requirements_pipfile_style.index('[requires]') - 1\n requirements = list(map(lambda x: x.replace(' = \"', '').replace('\"', ''),\n requirements_pipfile_style[start_index:end_index]))\n return requirements", "def freeze():\n dependencies = sh('pip freeze', capture=True).split(os.linesep)\n\n with open('requirements.txt', 'w') as file:\n for dep in dependencies:\n if not dep.startswith('bones-testing'):\n file.write(dep+'\\n')", "def _get_requirements_file_contents() -> Dict[str, str]:\n requirements_contents: Dict[str, str] = collections.defaultdict()\n with utils.open_file(\n common.COMPILED_REQUIREMENTS_FILE_PATH, 'r') as f:\n trimmed_lines = (line.strip() for line in f.readlines())\n for line_num, line in enumerate(trimmed_lines, start=1):\n if not line or line.startswith('#') or line.startswith('--hash='):\n continue\n\n if line.startswith('git'):\n match = GIT_DIRECT_URL_REQUIREMENT_PATTERN.match(line)\n if not match:\n raise Exception(\n '%r on line %d of %s does not match '\n 'GIT_DIRECT_URL_REQUIREMENT_PATTERN=%r' % (\n line, line_num,\n common.COMPILED_REQUIREMENTS_FILE_PATH,\n GIT_DIRECT_URL_REQUIREMENT_PATTERN.pattern))\n library_name, version_string = match.group(2, 1)\n\n else:\n library_name, version_string = line.split(' ')[0].split('==')\n\n # Libraries with different case are considered equivalent libraries:\n # e.g 'Flask' is the same library as 'flask'. Therefore, we\n # normalize all library names in order to compare libraries without\n # ambiguities.\n normalized_library_name = (\n normalize_python_library_name(library_name))\n requirements_contents[normalized_library_name] = version_string\n return requirements_contents", "def check_requirements():\n if not os.path.exists(REQUIREMENTS):\n sys.exit(\n ansi.error() + ' %s is missing. Please check it in.' 
% ansi.underline(REQUIREMENTS)\n )\n\n with open(REQUIREMENTS, 'r', encoding='utf-8') as f:\n dependencies = f.readlines()\n\n vcs = [d for d in dependencies if re.match(r'^(-e )?(git|svn|hg|bzr).*', d)]\n\n dependencies = list(set(dependencies) - set(vcs))\n\n missing = []\n try:\n pkg_resources.require(dependencies)\n except (\n pkg_resources.ContextualVersionConflict,\n pkg_resources.DistributionNotFound,\n pkg_resources.VersionConflict\n ) as error:\n missing.append(str(error))\n except pkg_resources.RequirementParseError:\n pass\n\n if missing:\n missing = ' missing requirement:\\n ' + os.linesep.join(missing)\n if '--env-checked' in sys.argv:\n sys.exit(ansi.error() + missing + '\\nRequirement installation failure, please check for errors in:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install_requirements(None)\n reboot('--env-checked')", "def dependencies(project_name):\n deps = []\n logging.info('Locating {}'.format(project_name))\n located = distlib.locators.locate(project_name, prereleases=True)\n if located is None:\n logging.warn('{} not found'.format(project_name))\n return []\n for dep in located.run_requires:\n # Drop any version details from the dependency name.\n deps.append(just_name(dep))\n return deps", "def Load():\n global items, libraries, _line_number, _groups_to_be_defined\n deps_file = open(\"dependencies.txt\")\n try:\n line = None\n current_type = None\n while True:\n while not line: line = _RemoveComment(deps_file.next())\n\n if line.startswith(\"library: \"):\n current_type = \"library\"\n name = line[9:].lstrip()\n _CheckLibraryName(name)\n if name in items:\n sys.exit(\"Error:%d: library definition using duplicate name %s\" % (_line_number, name))\n libraries.add(name)\n item = items[name] = {\"type\": \"library\", \"name\": name}\n line = _ReadFiles(deps_file, item, name)\n elif line.startswith(\"group: \"):\n current_type = \"group\"\n name = line[7:].lstrip()\n _CheckGroupName(name)\n if name not in items:\n sys.exit(\"Error:%d: group %s defined before mentioned as a dependency\" %\n (_line_number, name))\n if name not in _groups_to_be_defined:\n sys.exit(\"Error:%d: group definition using duplicate name %s\" % (_line_number, name))\n _groups_to_be_defined.remove(name)\n item = items[name]\n item[\"name\"] = name\n library_name = item.get(\"library\")\n if library_name:\n line = _ReadFiles(deps_file, item, library_name)\n else:\n line = _ReadSystemSymbols(deps_file, item)\n elif line == \" deps\":\n if current_type == \"library\":\n line = _ReadDeps(deps_file, items[name], name)\n elif current_type == \"group\":\n item = items[name]\n line = _ReadDeps(deps_file, item, item.get(\"library\"))\n elif current_type == \"system_symbols\":\n item = items[current_type]\n line = _ReadDeps(deps_file, item, None)\n else:\n sys.exit(\"Error:%d: deps before any library or group\" % _line_number)\n elif line == \"system_symbols:\":\n current_type = \"system_symbols\"\n if current_type in items:\n sys.exit(\"Error:%d: duplicate entry for system_symbols\" % _line_number)\n item = items[current_type] = {\"type\": current_type, \"name\": current_type}\n line = _ReadSystemSymbols(deps_file, item)\n else:\n sys.exit(\"Syntax error:%d: %s\" % (_line_number, line))\n except StopIteration:\n pass\n if _groups_to_be_defined:\n sys.exit(\"Error: some groups mentioned in dependencies are undefined: %s\" % _groups_to_be_defined)", "def _get_requirements_and_latest(\n filename,\n force=False,\n minor=[],\n patch=[],\n 
pre=[],\n index_urls=[],\n verify=True):\n session = PipSession()\n if verify:\n session.verify = verify\n finder = PackageFinder(\n session=session,\n find_links=[],\n index_urls=index_urls or [PyPI.simple_url],\n )\n\n _, content = get_file_content(filename, session=session)\n for line_number, line, orig_line in yield_lines(content):\n line = req_file.COMMENT_RE.sub('', line)\n line = line.strip()\n req = parse_requirement_line(line, filename, line_number, session, finder)\n if req is None or req.name is None or req_file.SCHEME_RE.match(req.name):\n yield (orig_line, None, None, None)\n continue\n spec_ver = current_version(req)\n if spec_ver or force:\n latest_ver = latest_version(req, spec_ver, session, finder,\n minor=minor, patch=patch, pre=pre)\n yield (orig_line, req, spec_ver, latest_ver)", "def get_requires(path=REQUIRE_PATH):\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith('#'):\n yield line", "def tests_require():\n return reqs(\"test-requirements.txt\")", "def parse_requirements_file(filename):\n with open(filename) as input_file:\n return input_file.read().splitlines()", "def get_dependencies(self):\n return [\"make\", \"g++\", \"gcc\", \"cmake-2.8.12.1\", \"boost_1_56_0\"]", "def dependencies(self) -> List[Bundle]:\n return []", "def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_variables', \n 'post_recover_variables', 'cpinitial', 'checkpoint', \n 'preregrid', 'postregrid', 'prestep', 'evol', 'postrestrict', \n 'poststep', 'analysis', 'terminate', 'shutdown']\n\n implement_re = re.compile('implements:\\s*(\\w+)', re.I)\n inherit_re = re.compile('inherits:\\s*(.+)', re.I)\n provides_function_re = re.compile('PROVIDES\\s+FUNCTION\\s+(\\w+)', re.I)\n uses_function_re = re.compile('USES\\s+FUNCTION\\s+(\\w+)', re.I)\n requires_function_re = re.compile('REQUIRES\\s+FUNCTION\\s+(\\w+)', re.I)\n shares_re = re.compile('shares:\\s*(\\w+)', re.I)\n requires_thorn_re = re.compile('REQUIRES\\s+(?!FUNCTION\\s*)(\\w+)', re.I)\n schedules_function_re = re.compile('schedule\\s+(?:group\\s+)?(\\w+)\\s+(?:in|at)\\s+(\\w+)', re.I)\n\n # find all interface.ccl and param.ccl files in cwd\n Cactus_Path = os.path.expanduser('~/Cactus/')\n for dirpath, dirnames, filenames in os.walk(Cactus_Path + 'arrangements', followlinks=True):\n for file in filenames:\n if file == 'interface.ccl':\n Files.append(os.path.join(dirpath, file))\n\n for file in Files:\n # first parse interface.ccl\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines = fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then parse param.ccl\n file = re.sub('interface.ccl', 'param.ccl', file)\n\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines += fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then configuration.ccl\n file = re.sub('param.ccl', 'configuration.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # then schedule.ccl\n file = re.sub('configuration.ccl', 'schedule.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # get the thorn dir and 
its parent\n thornname = os.path.basename(os.path.dirname(file))\n parentdir = os.path.basename(os.path.dirname(os.path.dirname(file)))\n thornname = os.path.join(parentdir, thornname)\n file_dict = {'name' : thornname.lower()}\n for line in lines:\n line = line.strip()\n m = re.match(implement_re, line)\n if m:\n file_dict['implements'] = m.group(1).lower()\n\n m = re.match(inherit_re, line)\n if m:\n inheritance = re.split('\\W+', m.group(1).lower())\n file_dict['inherits'] = inheritance\n\n m = re.match(provides_function_re, line)\n if m:\n try:\n file_dict['provides_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['provides_function'] = [m.group(1).lower()]\n\n m = re.match(uses_function_re, line)\n if m:\n try:\n file_dict['uses_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['uses_function'] = [m.group(1).lower()]\n\n m = re.match(requires_function_re, line)\n if m:\n try:\n file_dict['requires_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['requires_function'] = [m.group(1).lower()]\n\n m = re.match(requires_thorn_re, line)\n if m:\n requires = re.split('\\W+', m.group(1).lower())\n # sometimes we have 'REQUIRES THORNS' instead of 'REQUIRES'\n if requires[0].lower() == 'thorns':\n del requires[0]\n file_dict['requires_thorn'] = requires\n\n m = re.match(shares_re, line)\n if m:\n try:\n file_dict['shares'].append(m.group(1).lower())\n except KeyError:\n file_dict['shares'] = [m.group(1).lower()]\n\n m = re.match(schedules_function_re, line)\n if m:\n bin, func = m.group(2).lower(), m.group(1).lower()\n if bin in TimeBins:\n bin = 'cctk_' + bin\n func_dict = {bin : func}\n try:\n file_dict['schedules_function'].append(func_dict)\n except KeyError:\n file_dict['schedules_function'] = [func_dict]\n\n\n Dependencies.append(file_dict)\n\n return Dependencies", "def from_file(file_name: str = \"requirements.txt\", comment_char: str = \"#\"):\n with open(file_name, \"r\") as file:\n lines = [ln.strip() for ln in file.readlines()]\n reqs = []\n for ln in lines:\n # filer all comments\n if comment_char in ln:\n ln = ln[: ln.index(comment_char)].strip()\n # skip directly installed dependencies\n if ln.startswith(\"http\"):\n continue\n if ln: # if requirement is not empty\n reqs.append(ln)\n return reqs", "def build_reqs():\n requirements_path = Path.cwd() / \"src\" / \"requirements.in\"\n if not requirements_path.is_file():\n secho(\"No requirements.in found. Copying contents from requirements.txt...\")\n contents = (Path.cwd() / \"src\" / \"requirements.txt\").read_text()\n requirements_path.write_text(contents)\n python_call(\"piptools\", [\"compile\", str(requirements_path)])\n secho(\n (\n \"Requirements built! 
Please update requirements.in \"\n \"if you'd like to make a change in your project's dependencies, \"\n \"and re-run build-reqs to generate the new requirements.txt.\"\n )\n )", "def parse_requirements(filename, *args): # pragma: no cover\n # type: (str, str) -> Tuple[InstallReqSet, pip.index.PackageFinder]\n pip_options, session = build_pip_session(*args)\n repository = PyPiRepository(pip_options, session)\n requirements = pip.req.parse_requirements(\n filename,\n finder=repository.finder,\n session=repository.session,\n options=pip_options)\n return set(requirements), repository.finder", "def get_valid_requirements(req_path):\n return [r for r in open(req_path, \"r\").readlines() if r[0] != \"#\"]", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")", "def format_requirements():\n for filename, requirements in _read_requirements():\n _write_requirements(filename, requirements)", "def get_requirements():\n command = ['pip', 'list']\n result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)\n assert not result.stderr, \"stderr not empty\"\n return result.stdout", "def set_dependency_files(context):\n path_to_direct_file = os.path.abspath('data/gemini_scan_data/direct-dependencies.txt')\n path_to_transitive_file = os.path.abspath('data/gemini_scan_data/transitive-dependencies.txt')\n context.dependency_files = list()\n with open(path_to_direct_file, 'rb') as f:\n context.dependency_files.append((\n \"dependencyFile[]\",\n (\n 'direct-dependencies.txt',\n f.read(),\n 'text/plain'\n )\n ))\n with open(path_to_transitive_file, 'rb') as f:\n context.dependency_files.append((\n \"dependencyFile[]\",\n (\n 'transitive-dependencies.txt',\n f.read(),\n 'text/plain'\n )\n ))", "def check_requirements():\n debug(\"check_requirements\")\n needed = Requirements(Project).find_missing_requirements()\n if needed:\n info(\"Please add the following to your %s file:\\n\" % 'requirements.txt')\n info(\"\\n\".join(str(needed)))\n else:\n info(\"Your %s includes all known herringlib task requirements\" % 'requirements.txt')", "def parse_requirements(requirements):\n for req in pyrequirements.parse(requirements):\n yield req", "def requires(self):\n return []", "def required_packages(cls) -> List[Text]:\n return []", "def freeze():\n proc = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)\n with open('requirements.txt', 'wb') as fout:\n fout.write(proc.stdout)", "def projects_from_requirements(requirements_path):\n reqs = pip.req.parse_requirements(requirements_path)\n return [req.name for req in reqs]", "def parse_req_file(req_file, verbatim=False):\n req_list = []\n requirements = req_file.readlines()\n for requirement in requirements:\n requirement_no_comments = requirement.split(\"#\")[0].strip()\n\n # if matching requirement line (Thing==1.2.3), update dict, continue\n req_match = re.match(\n r\"\\s*(?P<package>[^\\s\\[\\]]+)(?P<extras>\\[\\S+\\])?==(?P<version>\\S+)\",\n requirement_no_comments,\n )\n req_ignore = requirement.strip().endswith(\" # norot\")\n\n if req_match:\n req_list.append(\n (req_match.group(\"package\"), req_match.group(\"version\"), 
req_ignore)\n )\n elif requirement_no_comments.startswith(\"-r\"):\n try:\n base_dir = os.path.dirname(os.path.abspath(req_file.name))\n except AttributeError:\n print(\n \"Recursive requirements are not supported in URL based \" \"lookups\"\n )\n continue\n\n # replace the -r and ensure there are no leading spaces\n file_name = requirement_no_comments.replace(\"-r\", \"\").strip()\n new_path = os.path.join(base_dir, file_name)\n try:\n if verbatim:\n req_list.append((None, requirement, req_ignore))\n req_list.extend(parse_req_file(open(new_path), verbatim=verbatim))\n except IOError:\n print(\"Failed to import {}\".format(file_name))\n elif verbatim:\n req_list.append((None, requirement, req_ignore))\n return req_list", "def getDependenciesList(self) -> List[Mapping[Any, Any]]:\n if self._dependencyList is not None:\n return self._dependencyList\n\n chartfile = self.getChartFile()\n if chartfile['apiVersion'] == 'v2':\n if 'dependencies' in chartfile:\n self._dependencyList = chartfile['dependencies']\n else:\n self._dependencyList = []\n elif chartfile['apiVersion'] == 'v1':\n self.readArchiveFiles()\n if self._archiveFiles is not None and 'requirements.yaml' in self._archiveFiles:\n self._dependencyList = self._getFile('requirements.yaml')['dependencies']\n else:\n self._dependencyList = []\n else:\n raise ConfigurationError('Unknown chart file version: {}'.format(chartfile))\n return self._dependencyList", "def load_requirements(self, req_filename):\n with open(req_filename, 'r') as req_file:\n reqs = yaml.load(req_file)\n\n for req in reqs:\n # some are double counted\n if 'also_count' in req:\n fulfills = [req['key'], req['also_count']]\n else:\n fulfills = [req['key']]\n\n self.requirements.append(Requirement(\n key=req['key'],\n title=req['title'],\n fulfills=fulfills,\n req_hrs=req['required']\n ))", "def requirements(self):\n requirements = []\n return requirements", "def get_requirement_info():\n links, requirements = [], []\n info = {'dependency_links': links, 'install_requires': requirements}\n requirements_path = 'requirements.txt'\n\n if not os.path.isfile(requirements_path):\n print('requirements.txt not found. Did you forget it?')\n return info\n\n reqs = filter(None, map(str.strip, open(requirements_path)))\n for line in reqs:\n if is_http(line):\n i = line.find('#egg=')\n if i == -1:\n raise SetupError('Missing \\'#egg=\\' in requirement link.')\n links.append(line[:i])\n requirements.append(line[i+5:])\n else:\n requirements.append(line)\n return info", "def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! 
\" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version", "def DEPENDENCIES(self):\n pass", "def _get_dependencies(self, requirement_name, version):\n pkg_metadata = self._get_metadata(requirement_name)\n versions = pkg_metadata.get('versions', dict())\n version = versions.get(str(version), dict())\n return sorted(version.get('dependencies', dict()).items())", "def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:\n raise NotImplementedError()", "def get_deps(cat, pkg, ns, rpkgs):\n with (settings.RAINBOARD_RPKG / cat / pkg / \"Makefile\").open() as file_handle:\n cont = file_handle.read()\n deps = [\n d_pkg\n for d_cat, d_pkg, _ in rpkgs\n if f\"\\ninclude ../../{d_cat}/{d_pkg}/depend.mk\\n\" in cont\n ]\n if pkg.startswith(\"py-\") and (cat, pkg[3:], ns) in rpkgs:\n deps.append(pkg[3:])\n deps_cache[pkg] = sorted(set(deps))\n return deps_cache[pkg]", "def __compute_dependencies(self):\n prefix = \"github.com/DataDog/datadog-agent/\"\n base_path = os.getcwd()\n mod_parser_path = os.path.join(base_path, \"internal\", \"tools\", \"modparser\")\n\n if not os.path.isdir(mod_parser_path):\n raise Exception(f\"Cannot find go.mod parser in {mod_parser_path}\")\n\n try:\n output = subprocess.check_output(\n [\"go\", \"run\", \".\", \"-path\", os.path.join(base_path, self.path), \"-prefix\", prefix],\n cwd=mod_parser_path,\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError as e:\n print(f\"Error while calling go.mod parser: {e.output}\")\n raise e\n\n # Remove github.com/DataDog/datadog-agent/ from each line\n return [line[len(prefix) :] for line in output.strip().splitlines()]", "def get_dependencies():\n return config.check_driver_dependencies(\n __virtualname__, {\"profitbricks\": HAS_PROFITBRICKS}\n )", "def get_package_names_and_versions(requirements_file: str) -> list:\n with_ver_reqlist = {}\n\n for package in requirements_file:\n split_location = package.find(\"==\")\n if split_location > 0:\n package_name = package[:split_location].lower()\n pakcage_version = package[split_location+2:]\n\n with_ver_reqlist[package_name] = pakcage_version\n else:\n latest_version = get_latest_version_number(package)\n with_ver_reqlist[package] = latest_version\n\n return with_ver_reqlist", "def find_with_deps(self, package_names):", "def main():\n if len(sys.argv) == 1:\n print(\"No dependencies file to validate!\")\n return\n dependencies_file = sys.argv[1]\n try:\n dependencies = json.loads(open(dependencies_file, 'r').read())\n except json.decoder.JSONDecodeError:\n print(\"Invalid dependency file syntax! 
Make sure you don't have any commas at the end of your last dependency.\")\n return\n for dependency in dependencies:\n if 'target_path' in dependency and 'repository' in dependency:\n print(\"Validated {}\".format(dependency['target_path']))\n suggest_edits(dependency)\n elif 'target_path' not in dependency and 'repository' in dependency:\n print(\"Define target_path for dependency {}\".format(dependency['repository']))\n elif 'repository' not in dependency and 'target_path' in dependency:\n print(\"Define repository for dependency {}\".format(dependency['target_path']))\n else:\n print(\"Invalid format, missing repository and target_path for dependency {}\".format(dependencies.index(dependency)))", "def parse_requirements(filename):\n lines = (line.strip() for line in open(filename))\n return [line.strip() for line in lines if line and not line.strip().startswith(\"#\")]", "def deps(ctx):\n header(deps.__doc__)\n with ctx.cd(ROOT):\n ctx.run(\n \"pip install -r requirements/develop.pip -r requirements/doc.pip\", pty=True\n )", "def read_pipfile() -> List[str]:\n pfile = configparser.ConfigParser()\n pfile.read('Pipfile')\n req_specifiers = []\n for package, version in pfile['packages'].items():\n # normalize strings, since Pipenv likes to add quotes on some things\n package = package.strip('\\'\"')\n version = version.strip('\\'\"')\n spec = package + ('' if version == '*' else version)\n req_specifiers.append(spec)\n return req_specifiers", "def build():\n\timport subprocess\n\tfrom os import listdir, getcwd\n\tfrom os.path import isfile, join\n\tonlyfiles = [f for f in listdir(getcwd()) if isfile(join(getcwd(), f))]\n\n\tif not 'requirements.txt' in onlyfiles:\n\t\traise SystemExit('File including depencencies not found. You will have to install them manually.')\n\n\tsubprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])\n\n\tprint('All dependencies installed successfully.\\nYou can run Simplex now!')", "def check_requirements(config=None):\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of %r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. 
%r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )", "def get_code_dependencies(self):\n pip_commands = ['pip', 'pip3', '/usr/local/bin/pip3']\n for pip_cmd in pip_commands:\n try:\n raw_stdout = subprocess.check_output([pip_cmd, 'freeze'])\n except FileNotFoundError:\n continue\n\n dependencies = raw_stdout.decode('ascii').split('\\n')[0:-1]\n if dependencies:\n return dependencies\n else:\n msg = \"Couldn't find pip executable in: {}\"\n raise ValueError(msg.format(','.join(pip_commands)))", "def parse_requirements(filename):\n try:\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n except OSError:\n return []", "def initial_dependencies(self) -> List[str]:\n return self.options[\"general\"][\"dependencies\"]" ]
[ "0.77573985", "0.775108", "0.77329814", "0.76370883", "0.76339567", "0.76128036", "0.76061934", "0.7439385", "0.7396733", "0.7377151", "0.73661613", "0.72941184", "0.72542316", "0.7222787", "0.7135786", "0.705487", "0.70474964", "0.70436454", "0.70436454", "0.7020663", "0.69667834", "0.6957161", "0.69220793", "0.6918644", "0.6908816", "0.6891343", "0.68772334", "0.68752927", "0.6872581", "0.6807131", "0.68057054", "0.67922735", "0.6780538", "0.6777302", "0.6752283", "0.67254037", "0.6708461", "0.6680829", "0.65942985", "0.6531078", "0.6522646", "0.65104234", "0.6487313", "0.64839137", "0.64640826", "0.6454524", "0.6444746", "0.64262736", "0.6407835", "0.63862693", "0.6383621", "0.63437617", "0.63378567", "0.6327073", "0.6321762", "0.631688", "0.63092875", "0.6298374", "0.6288304", "0.6270308", "0.62637156", "0.62611556", "0.6252404", "0.6245789", "0.6240843", "0.6224864", "0.6216575", "0.62089", "0.62089", "0.6201641", "0.6180614", "0.61599356", "0.6148109", "0.6118987", "0.61176515", "0.6113229", "0.6095499", "0.60873693", "0.6081681", "0.60804564", "0.607819", "0.607443", "0.6069752", "0.60667336", "0.6048545", "0.60484767", "0.6042727", "0.6033652", "0.6022816", "0.6021936", "0.6009174", "0.5998154", "0.5978559", "0.59764767", "0.5937356", "0.59294355", "0.59216523", "0.59207904", "0.59108394", "0.5910318", "0.59056073" ]
0.0
-1
Computes negative value of input tensor.
def neg(data, target=utils.CCE): utils.check_supported_target(target) utils.check_shape(data.shape) if target == utils.CCE: data_type = data.dtype utils.ops_dtype_check(data_type, [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32]) pone = akg.tvm.const(-1.0, dtype=data_type) res = akg.lang.ascend.vmuls(data, pone) if data_type == "int32": res = akg.topi.cast(res, "int32") else: res = akg.topi.negative(data) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __neg__(self):\n return TensorWithIndices(-self._tensor, \n self._con + '_' + self._cov)", "def abs(tensor):\n raise NotImplementedError", "def __neg__(self):\n\t\tval = -self.val\n\t\tder = -self.der if len(self.der.shape) else None\n\t\treturn Var(val, der)", "def __neg__(self):\n return self.coeff_mul(-1)", "def negIP(self):\n np.negative(self.t, out=self.t)\n return self", "def abs(tensor):\n return _elementary_op(tensor, np.abs, np.sign)", "def __neg__(self):\n return Factor().__build( VarSet(self.v) , np.negative(self.t) )", "def positive_eval(self, input_tensor: torch.Tensor, theta: float):\n y = self(input_tensor)\n return y, torch.square(y).mean(dim=1) - theta", "def sign(tensor):\n raise NotImplementedError", "def __neg__(self):\n return self.__mul__(-1)", "def neg(input_x, output_y, kernel_name=\"neg\"):\n shape_input = input_x.get(\"shape\")\n dtype_input = input_x.get(\"dtype\")\n\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(shape_input)\n util.check_tensor_shape_size(shape_input)\n\n dtype_input = dtype_input.lower()\n check_list = (\"float16\", \"float32\", \"int32\", \"int8\")\n util.check_dtype_rule(dtype_input, check_list)\n\n shape_input = util.shape_refine(shape_input)\n shape_input = (functools_reduce(lambda x, y: x * y, shape_input[:]),)\n data_input = tvm.placeholder(shape_input, name=\"data_input\", dtype=dtype_input)\n\n res = neg_compute(data_input, output_y, kernel_name)\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.cce.cce_build_code(sch, config)", "def nonzero_sign(\n x: type_alias.TensorLike,\n name: str = 'nonzero_sign') -> tf.Tensor:\n with tf.name_scope(name):\n x = tf.convert_to_tensor(value=x)\n\n one = tf.ones_like(x)\n return tf.where(tf.greater_equal(x, 0.0), one, -one)", "def negative(data):\n return _make.negative(data)", "def __neg__(self):\n return (-1)*self", "def __rsub__(self, tensor):\n return -self + tensor", "def __neg__(self):\n a = -self._ar\n return Vector(a)", "def __neg__(self):\r\n\t\t\r\n\t\t# take negative\r\n\t\tn = self.scale(-1)\r\n\t\t\r\n\t\treturn n", "def is_neg(var):\r\n apply = var.owner\r\n if not apply:\r\n return None\r\n # First match against `tensor.neg`.\r\n if apply.op == tensor.neg:\r\n return apply.inputs[0]\r\n # Then match against a multiplication by -1.\r\n if apply.op == tensor.mul and len(apply.inputs) >= 2:\r\n for idx, mul_input in enumerate(apply.inputs):\r\n try:\r\n constant = opt.get_scalar_constant_value(mul_input)\r\n is_minus_1 = numpy.allclose(constant, -1)\r\n except NotScalarConstantError:\r\n is_minus_1 = False\r\n if is_minus_1:\r\n # Found a multiplication by -1.\r\n if len(apply.inputs) == 2:\r\n # Only return the other input.\r\n return apply.inputs[1 - idx]\r\n else:\r\n # Return the multiplication of all other inputs.\r\n return tensor.mul(*(apply.inputs[0:idx] +\r\n apply.inputs[idx + 1:]))\r\n # No match.\r\n return None", "def __neg__(self):\n return self.from_points(-v for v in self._vectors)", "def negative(self):\n return Vector(0-self.x, 0-self.y, 0-self.z)", "def __neg__(self):\n return Vector(-self.x, -self.y)", "def __neg__(self):\n return Vector(-self.x, -self.y)", "def __neg__(self):\n return 0 - self", "def negative_gradient(self, y, pred, **kargs):\n return y - expit(pred.ravel())", "def _randomly_negate_tensor(self, tensor):\r\n should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)\r\n final_tensor = tf.cond(should_flip, lambda: tensor, 
lambda: -tensor)\r\n return final_tensor", "def convert_negative(node, **kwargs):\n return create_basic_op_node('Neg', node, kwargs)", "def __relu(x):\n return x if x > 0 else 0", "def __neg__(self):\n v = zeros_como(self)\n\n for i in range(self.n):\n v[i] = -self[i]\n\n return v", "def negative_gradient(self, y, y_pred, **kargs):", "def negate(x):\n return x ^ 1", "def __neg__(self):\n return Translation(-self.x, -self.y, -self.z)", "def __neg__(self):\r\n return mat4(map(lambda x: -x, self.mlist))", "def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def __neg__(self):\n retval = self.copy()\n retval._val = -retval._val\n return retval", "def neg(a):\n return prod(a, -1)", "def nonneg_softmax(expr,\n replace_nonpositives = -10):\n if replace_nonpositives != 0.0:\n ones = tf.ones(tf.shape(input=expr), tf.float32)\n expr = tf.where(expr > 0.0, expr, ones * replace_nonpositives)\n return tf.nn.softmax(expr)", "def __neg__(self):\n return self.scale(-1)", "def __neg__(self):\n return Quantity(-(self._value), self.unit)", "def __neg__(self):\n return Vector([-c for c in self.components])", "def neg(a):\n return -a;", "def has_negative(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_negative = np.argwhere(tensor_numpy < 0)\n\n if verbose:\n for idx in where_negative:\n value = float(tensor_numpy[idx])\n print(f\"Encountered negative value: {value:.5f}\")\n\n negative_count = len(where_negative)\n negative = negative_count != 0\n\n if verbose and negative:\n print(f\"Encountered {negative_count} negative values\")\n\n return negative", "def neg(self, a):\n return -a", "def neg(self):\n return self._new_rep(-self.rep)", "def __call__(self, tensor): \n tensor = (tensor - 127.5) / 127.5\n assert (torch.min(tensor) >= -1) and (torch.max(tensor) <= 1)\n return tensor", "def make_negative(number):\n if number < 0:\n return number\n else:\n return number * -1", "def __neg__(self) -> PointType:\n return self * -1", "def __neg__(self):\n return UnaryMinus(self)", "def neg(A):\n return A.from_rep(A.rep.neg())", "def __neg__(self):\n return self[::-1].complement", "def __sub__(self, tensor):\n return self.sub(tensor)", "def __neg__(self):\n return Complex(-self._reNum, -self._imNum)", "def negate(f):\n return lambda *args, **kwargs: -f(*args, **kwargs)", "def __neg__(self):\n retval = FixedPoint(0,self.int_bits, self.frac_bits) - self\n return retval", "def __neg__(self) -> 'SInt':\r\n return self.complement()", "def negate(f):\r\n return lambda *args, **kwargs: -f(*args, **kwargs)", "def negate(f):\n return lambda *args, **kwargs: -f(*args, **kwargs)", "def zeroslike_op(node, ctx=None):\n return ZerosLikeOp(node, ctx=ctx)", "def opposite(x):\n return -1*x", "def __neg__(self):\n return self.neg()", "def negate(val: PipeNumeric):\n num_type = val.get_type()\n assert isinstance(num_type, num.SignedFixedNumberType)\n\n if isinstance(val, PipeConstant):\n return PipeConstant(num_type, -val.get_value())\n\n node = OneCycleNode()\n\n node.add_inputs(val=val)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('fixed-negate')\n node.set_logic(negate_seq)\n\n return node", "def neg(self, variable):\n try:\n val = self._variables[variable]\n self._variables[variable] = -1*val\n except:\n print(f\"Could not negate {variable}\")", "def neg(f):\n\n y = limits(f)[0] + limits(f)[1] - f\n y = y.astype(f.dtype)\n return y", "def __neg__(self):\r\n return vec4(-self.x, -self.y, -self.z, -self.w)", 
"def nonneg_crossentropy(expr, target):\n expr_replacing_0_with_1 = tf.where(expr > 0, expr,\n tf.ones(tf.shape(input=expr), tf.float32))\n cross_entropies = tf.reduce_sum(\n input_tensor=-target * tf.math.log(expr_replacing_0_with_1), axis=1)\n return tf.reduce_mean(input_tensor=cross_entropies, axis=0)", "def generate(self, z):\n x = super().generate(z)\n return tf.clip_by_value(x, -1, 1)", "def positive_constraint(x: Tensor, bias: float = 0.1) -> Tensor:\n\n red_dims = [d for d in range(1, x.dim())]\n return torch.relu(-x).mean(red_dims).mean() + bias", "def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def inverse_transform(self, data):\n return (tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data + self.mu", "def _fn(self, e_s, e_p, e_o):\n\n return tf.negative(\n tf.norm(e_s + e_p - e_o, ord=self.embedding_model_params.get('norm', DEFAULT_NORM_TRANSE), axis=1))", "def negM(M):\r\n M[np.where(M > 0)] = 0\r\n return M", "def relu(t: Tensor) -> Tensor:\n tensor = t if torch.is_tensor(t) else torch.tensor(t)\n return torch.max(tensor, torch.zeros_like(tensor))", "def true_y_abs(x):\n y = torch.abs(x)\n return y", "def loss(self,\r\n inputs,\r\n **kwargs):\r\n\r\n return tf.zeros([]), self.call(inputs, **kwargs)", "def neg(f):\n return f.per(dmp_neg(f.rep, f.lev, f.dom))", "def noop(x: torch.Tensor) -> torch.Tensor:\n return x", "def negate(func: Callable):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return -func(*args, **kwargs)\n return _wrapper", "def backward_tensor(self, x):\n pass", "def forward(self, input):\r\n return np.maximum(0,input)", "def relu(x):\r\n return np.maximum(0, x)", "def negDSC(y_true, y_pred):\n\n # Constant tensor for binarization\n const = tf.constant([0.5])\n\n # Prepare inputs\n y_true = tf.cast(tf.math.greater(y_true, const), tf.float32)\n y_pred = tf.cast(tf.math.greater(y_pred, const), tf.float32)\n\n # negDSC = 1 - DSC\n return 1 - (tf.reduce_sum(y_true * y_pred) * 2.0\n / tf.reduce_sum(y_true + y_pred))", "def __neg__(self):\n return type(self)(self.parent(), self._simplify(-self._express))", "def positive(x):\n return np.maximum(x, 0.0)", "def negate(a):\n res = 0\n d = 1 if a < 0 else -1\n while a != 0:\n res += d\n a += d\n return res", "def tent(x: torch.Tensor) -> torch.Tensor:\n return -(x.softmax(1) * x.log_softmax(1)).sum(1).mean(0)", "def relu(self):\n return self * self.ge(0)", "def _neg_loss(outputs: torch.Tensor, targets: torch.Tensor):\n pos_inds = targets.eq(1).float()\n neg_inds = targets.lt(1).float()\n\n neg_weights = torch.pow(1 - targets, 4)\n\n loss = 0\n\n pos_loss = torch.log(outputs) * torch.pow(1 - outputs, 2) * pos_inds\n neg_loss = torch.log(1 - outputs) * torch.pow(outputs, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss", "def relu(X):\n return np.maximum(0, X)", "def hard_negative_loss_mining(c_loss, negative_mask, k):\n # make sure at least one negative example\n k = tf.maximum(k, 1)\n # make sure at most all negative.\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)", "def inverse_transform_var(self, data):\n return tf.square(tf.maximum(tf.sqrt(self.var)*self.sc_factor, 1e-2)) * data", "def negate_image(p):\n img = read_img(p)\n img_negative = negativo(img.reshape((-1)))\n 
show_imgs([img, img_negative.reshape(img.shape)])", "def __neg__(self):\n unit = -self.__unit\n return Factorization(list(self), unit, self.__cr,\n sort=False, simplify=False)", "def __neg__(self):\n data = [[-self[i, j] for j in range(self.n)] for i in range(self.m)]\n return self.__class__(self.m, self.n, data)", "def get_negative(self):\r\n return Literal(self.label, not self.positive_state)", "def __neg__(self) -> ColumnOperators:\n return self.operate(neg)", "def s_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += -1 - ayxx\n return running_total", "def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)", "def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input", "def __drelu(x):\n return 0 if x <= 0 else 1", "def negative_predictive_value(y_true, y_pred):\n\n cm = confusion_matrix(y_true, y_pred)\n return cm[0,0] / cm[:,0].sum()", "def __neg__(self):\n return self.negated()" ]
[ "0.75131583", "0.7114549", "0.692163", "0.69077533", "0.6887083", "0.67914915", "0.6730227", "0.66932213", "0.6687829", "0.66610897", "0.6652976", "0.664498", "0.6637376", "0.6635296", "0.6629944", "0.65936184", "0.65753895", "0.6546625", "0.6520362", "0.6507943", "0.64984477", "0.64984477", "0.64870423", "0.64868855", "0.64342356", "0.6417354", "0.641017", "0.63974833", "0.63667506", "0.6347763", "0.6328873", "0.63263106", "0.6315605", "0.629674", "0.62793934", "0.6279231", "0.627114", "0.6266815", "0.62571704", "0.6211701", "0.6185179", "0.6182165", "0.6181715", "0.61814505", "0.6165991", "0.61528546", "0.6148029", "0.6135886", "0.6128252", "0.6106265", "0.60848945", "0.6083498", "0.6075896", "0.6073191", "0.60637987", "0.6061571", "0.60531485", "0.6048559", "0.6043822", "0.6037468", "0.60287946", "0.6013907", "0.6006404", "0.59961236", "0.59807444", "0.59405756", "0.5939579", "0.59255344", "0.5911734", "0.590783", "0.5900708", "0.58838403", "0.5861048", "0.5850086", "0.5848879", "0.5844569", "0.5843751", "0.5820084", "0.5819173", "0.58097833", "0.5793714", "0.5790298", "0.5788882", "0.5785434", "0.57747257", "0.5773215", "0.5767515", "0.5760374", "0.5756154", "0.5747312", "0.57440156", "0.5735892", "0.5732217", "0.57252634", "0.5718487", "0.5716146", "0.5715835", "0.57040054", "0.5703729", "0.5703646" ]
0.66008675
15
Overloading the addition operator for particles types
def __add__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a + b changes a as well! p = particles(self) p.pos[:] = self.pos + other.pos p.vel[:] = self.vel + other.vel p.m = self.m p.q = self.q return p else: raise DataError("Type error: cannot add %s to %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)", "def __iadd__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.add)", "def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def __add__(self, rhs: Union[float]):\n if isinstance(rhs, Pt):\n return Pt(self.x + rhs.x, self.y + rhs.y)\n else:\n return Pt(self.x + rhs, self.y + rhs)", "def ADD (self, n1, n2):", "def __add__(self, p):\n return Point(self.x + p.x, self.y + p.y)", "def add(self,particle):\n\n if not self.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n if not particle.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n \n neut=part_quadvec(self.E+particle.E,self.px+particle.px,self.py+particle.py,self.pz+particle.pz)\n neut.cal_mass()\n return neut", "def __add__(self,other):\n return Vector(self.x+other.x,self.y+other.y,self.z+other.z)", "def __add__(self,other):\n return Vector(self.x + other.x, self.y+other.y)\n pass", "def __iadd__(self,other):\n return Vector(self.x + other.x, self.y + other.y)\n pass", "def __add__(self,other):\n if isinstance(other, point):\n return self.add_points(other)\n else:\n return self.add_points_tuple(other)", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n return (self.x + other.x, self.y + other.y)", "def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y)", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)", "def __add__(self, other):\n return add_mps(self, other)", "def __add__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def __add__(self, p: np.ndarray):\n return Quaternion(self.to_array() + p)", "def __add__(self, other):\n pass", "def __add__(self, other):\n pass", "def add(self, a, b):\n return a + b", "def vars_add ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) + float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## shortcut \n if 0 == var1 : return var2 ## SHORTCUT\n #\n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_add ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_add ( var1 , var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Addition ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return 
self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __add__(self, other):\n return asarray(add(self, other))", "def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def __add__(self, other):\r\n return self.add(other)", "def __add__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) + float(argument))", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def __add__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] + ox, self[1] + oy))", "def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])", "def __iadd__(self, other):\n\n return self + other", "def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result", "def add(self, params):\n if len(params) < 2:\n return\n x = self.reg_dct[params[0]]\n y = self.reg_dct[params[1]]\n self.reg_dct[params[0]] = (x + y) % (2** 32)", "def add(self, x, y):\n pass", "def __add__(self, other):\n return Point(self.x+other.x, self.y+other.y)", "def __add__(self, other):\n\t\tif len(self) != len(other):\n\t\t\traise ValueError('dimensions must agree')\n\t\tresult = Vector(len(self))\n\t\tfor j in range(len(self)):\n\t\t\tresult[j] = self[j] + other[j]\n\t\treturn result", "def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])", "def __add__(self, other):\n return self.add(other)", "def __add__(self, other):\n s = Shape([])\n for i,p in enumerate(self.pts):\n s.add_point(p + other.pts[i])\n return s", "def plus(self, a, b):\n return a + b", "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def __add__(self, tensor):\n return self.add(tensor)", "def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf", "def __add__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__add__\")", "def __add__(self, rhs: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(rhs, float):\n for item in self.values:\n result.append(item + rhs)\n 
else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n result.append(self.values[i] + rhs.values[i])\n return Simpy(result)", "def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)", "def __iadd__(self, tensor):\n return self.add_(tensor)", "def __add__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value + other.value),\"\")", "def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ", "def __add__(self, other: Any) -> TypeValue:\n if isinstance(other, np.ndarray):\n return other + float(self)\n\n return self._like_self_from_float(\n float(self) + self._other_same_units(other)\n )", "def __add__(self, other):\n\t\ttry:\n\t\t\tval = self.val + other.val\n\n\t\t\t# Handle case when self.der or other.der contains None \n\t\t\t# i.e. 
self or other is a vector of scalars, not of Vars\n\t\t\tlen_self_der_shape = len(self.der.shape)\n\t\t\tlen_other_der_shape = len(other.der.shape)\n\n\t\t\tif not len_self_der_shape and len_other_der_shape:\n\t\t\t\tder = other.der\n\t\t\telif len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = self.der\n\t\t\telif not len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = None\n\t\t\telse:\n\t\t\t\tder = self.der + other.der\n\t\texcept AttributeError:\n\t\t\tval = self.val + other\n\t\t\tder = self.der\n\t\treturn Var(val, der)", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)", "def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError", "def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts + other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts + other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def add(x, y):\n\n return x + y", "def __add__(self, rhs):\n if isinstance(rhs, UTPS):\n return UTPS(self.tc + rhs.tc)\n elif numpy.isscalar(rhs):\n retval = UTPS(numpy.copy(self.tc))\n retval.tc[0] += rhs\n return retval\n else:\n raise NotImplementedError", "def add(self, particles):\n if isinstance(particles, ParticleSet):\n particles = particles.particles\n if not isinstance(particles, Iterable):\n particles = [particles]\n self.particles = np.append(self.particles, particles)\n if True:#self.ptype.uses_jit:\n particles_data = [p._cptr for p in particles]\n self._particle_data = np.append(self._particle_data, particles_data)\n # Update C-pointer on particles\n for p, pdata in zip(self.particles, self._particle_data):\n p._cptr = pdata", "def __add__(self, other):\n if isinstance(other, Trit):\n value = (other,)\n else:\n value = tuple(other)\n return Trits(self.trits + value)", "def __add__(self, other):\n \"*** YOUR CODE HERE ***\"", "def 
__add__(self, other):\n raise NotImplementedError(\"Implement this if needed\")", "def _add(self, other):\n raise NotImplementedError(\n \"{} does not support addition\".format(type(self)))", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def __add__(self, other) -> 'Tensor':\n return _add(self, ensure_tensor(other))", "def add(self, particles):\n if isinstance(particles, ParticleSet):\n particles = particles.particles\n if not isinstance(particles, collections.Iterable):\n particles = [particles]\n self.particles = np.append(self.particles, particles)\n if self.ptype.uses_jit:\n particles_data = [p._cptr for p in particles]\n self._particle_data = np.append(self._particle_data, particles_data)\n # Update C-pointer on particles\n for p, pdata in zip(self.particles, self._particle_data):\n p._cptr = pdata", "def __add__(self, other):\n if isinstance(other, Vector):\n a = self._ar + other._ar\n else:\n a = self._ar + numpy.array(other)\n return Vector(a)", "def __iadd__(self, other):\n raise NotImplementedError(\"Implement this if needed\")", "def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)", "def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out", "def __add__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x+y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def __add__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() + other.get_values())\n return tmp", "def __add__(self, other):\n if isinstance(other, Seq2):\n if len(self) == len(other):\n return other.from_points(\n a + b for a, b in zip(self, other))\n else:\n raise ValueError(\"cannot add arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a + b for a in self)", "def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))", "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator+other.numerator\n resultdenominator = 
self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def __iadd__( self, vector3 ):\n return self.add( vector3 )", "def add(\n self, y: Union[int, float, np.ndarray, torch.tensor, MPCTensor]\n ) -> MPCTensor:\n res = self.__apply_op(y, \"add\")\n return res" ]
[ "0.7108404", "0.70331186", "0.7007142", "0.69473356", "0.6846601", "0.6845393", "0.68245333", "0.6748368", "0.671954", "0.66836524", "0.6639666", "0.6628716", "0.6572368", "0.6572368", "0.65575284", "0.65301085", "0.65287226", "0.6522699", "0.64960563", "0.64930576", "0.649", "0.64833504", "0.64833504", "0.647256", "0.64678484", "0.6467019", "0.64559925", "0.6436312", "0.64347565", "0.6434374", "0.64337504", "0.64273304", "0.64231175", "0.6418845", "0.64151704", "0.6405545", "0.63973004", "0.63932765", "0.6388272", "0.63776505", "0.63759047", "0.63703585", "0.63571954", "0.6346776", "0.6342915", "0.6340674", "0.63352245", "0.63314307", "0.633019", "0.6327046", "0.63170165", "0.63138604", "0.6302371", "0.62989783", "0.6293297", "0.62871486", "0.6285818", "0.62833077", "0.62831366", "0.627776", "0.6271349", "0.6266806", "0.6264256", "0.62534", "0.62507874", "0.6229218", "0.6223647", "0.62197804", "0.62118864", "0.6196759", "0.6191261", "0.6190634", "0.6189616", "0.61864406", "0.6186416", "0.6182456", "0.6181391", "0.61759937", "0.61759937", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61740065", "0.61708117", "0.61678994", "0.61671865", "0.6161511", "0.6155918", "0.6154891", "0.6154072", "0.6150264" ]
0.7784703
0
Overloading the subtraction operator for particles types
def __sub__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a - b changes a as well! p = particles(self) p.pos[:] = self.pos - other.pos p.vel[:] = self.vel - other.vel p.m = self.m p.q = self.q return p else: raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)", "def __isub__(self, other):\r\n if isinstance(other, vec4):\r\n self.x-=other.x\r\n self.y-=other.y\r\n self.z-=other.z\r\n self.w-=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for -=\"", "def __sub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.sub)", "def __sub__(self,other):\n return Vector(self.x - other.x, self.y-other.y)\n pass", "def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))", "def __sub__(self, other):\n return self.subtract(other)", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def __sub__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) - float(argument))", "def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def __sub__(self, 
other):\n return self + other.__neg__()", "def __sub__(self, other):\n return self + other.__neg__()", "def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)", "def __isub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.sub)", "def minus(self, a, b):\n return a - b", "def __sub__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() - other.get_values())\n return tmp", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")", "def __sub__(self, other):\n return Point([c1 - c2 for (c1, c2) in zip(self, other)])", "def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def __sub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n return self.from_points(\n a - b for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a - b for a in self)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n if isinstance(other, Vector):\n a = self._ar - other._ar\n else:\n a = self._ar - numpy.array(other)\n return Vector(a)", "def __sub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] - ox, self[1] - oy))", "def __neg__(self):\n return UnaryMinus(self)", "def __sub__(self, p: np.ndarray):\n return Quaternion(self.to_array() - p)", "def subtract(self, other):\n return self.add(other.neg())", "def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})", "def __sub__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] - other[i]\n\n return v", "def sub(self, a, b):\n return a - b", "def __sub__(self, other):\n return Vec2d(self.v[0] - other[0], self.v[1] - other[1])", "def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))", "def __rsub__(self, other):\n\t\treturn (-self).__add__(float(other))", "def __sub__(self, other: Compound[Scalar]) -> Compound[Scalar]:\n return (self._pack_points(self._points_set - other._points_set\n if isinstance(other, Multipoint)\n else [point\n for point in self._points\n if point not in other])\n if isinstance(other, Compound)\n else NotImplemented)", "def __neg__(self) -> PointType:\n return self * -1", "def __sub__(self, tc):\n tc = TwosComplement(tc)._negative()\n return self.__add__(tc)", "def __sub__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] -= intensity\n else:\n output[wavelength] = -intensity\n return output", "def subtract(x, y):\n\n return x - y", "def __sub__(self, other):\n return self._operation_sub(self, other)", "def __sub__(self, other):\n 
if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def __sub__(self,other):", "def __rmul__(self, other):\n\n if isinstance(other, float):\n # always create new particles\n p = particles(self)\n p.pos[:] = other * self.pos\n p.vel[:] = other * self.vel\n p.m = self.m\n p.q = self.q\n return p\n else:\n raise DataError(\"Type error: cannot multiply %s to %s\" % (type(other), type(self)))", "def __sub__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x-y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other", "def __sub__(self, other: TranslationType):\n return Translation(\n self.x - other.x,\n self.y - other.y,\n self.z - other.z)", "def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def __sub__(self, v):\n return self + (-1) * v", "def __sub__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during substraction to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Minus(self, other)", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def subtract(x, y):\n return x - y", "def __sub__ (self,other):\n if (self.debug): print(f'enter fraction.__sub__ with {other}')\n f2 = fraction(-1*other.value[0],other.value[1])\n f3 = self.__add__(f2)\n return f3", "def __sub__(self, other):\n return self.getArea() - other.getArea()", "def subtraction(x, y):\n return x - y", "def subtractVector(self, subtrahend):\n result = self.addVector(subtrahend.scalarMultiplication(-1.0))\n return result", "def __sub__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Subtract, value)\n return out", "def __sub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(self, other)", "def __sub__(self, other):\n return Difference(self, other)", "def subtract(first, second):\n return first - second", "def subtract(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)", "def __rsub__(self, other):\n if isinstance(other, Seq2) or isinstance(other, tuple):\n if len(self) == len(other):\n return other.from_points(\n b - a for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n return NotImplemented", "def __sub__(self, tensor):\n return self.sub(tensor)", "def __neg__(self):\n return (-1)*self", "def __rsub__(self, tensor):\n return -self + tensor", "def subtract(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n difference = str(ft.reduce(oper.sub,values))\n\n 
return difference", "def __neg__(self):\n return Vector([-c for c in self.components])", "def subtract(self, m): \n f = m.negate()\n return self.add(f)", "def __sub__(self, other):\n return Point(self.x - other[0], self.y - other[1])", "def subtract(x, y):\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)", "def sub(x, y):\n return x - y", "def subtraction(a, b):\n return a - b", "def __sub__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator-other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def __sub__(self,other):\n return np.linalg.norm(self.ngdv-other.ngdv)", "def SUB(self, n1, n2):", "def sub(self, first, second):\n try:\n if isinstance(second, str):\n second = self._variables[second]\n self._variables[first] -= second\n except:\n print(f\"Could not subtract {first} - {second}\")", "def __neg__(self):\n return self.__mul__(-1)", "def __sub__(self, other):\n raise NotImplementedError", "def __sub__(self, other):\n raise NotImplementedError", "def __neg__(self):\n return 0 - self", "def __isub__(self,that):\n #return self.__opExpand1(that,np.subtract, out=self)\n return self.__opExpand2(that,np.subtract, out=self)", "def __rsub__(self, other, **kwargs):\n kwargs.update({'sub': False, 'operator': 'add'})\n return Curve.__add__(self.__neg__(), other, **kwargs)", "def sub(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.x-other.x, first.y-other.y, first.z-other.z)", "def __sub__(self,l):\r\n\t\t\r\n\t\t# add negative\r\n\t\ts = self.subtract(l)\r\n\t\t\r\n\t\treturn s", "def test_subtract_different_sizes():\n Vector(1.0) - Vector(2.0, 3.0)", "def __isub__( self, vector3 ):\n return self.subtract( vector3 )", "def substract(x, y):\n return y - x", "def __rsub__(self, other):\n if isinstance(other, int):\n return self.__neg__().__add__(- other)\n return NotImplemented", "def __sub__(self, other: 'SInt') -> 'SInt':\r\n return self + other.complement()", "def subtract(x: int, y: int):\n return x - y", "def __rsub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (ox - self[0], oy - self[1]))", "def __neg__(self):\n return tuple.__new__(Vec2, (-self[0], -self[1]))", "def __rsub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(other, self)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)" ]
[ "0.7190107", "0.69218826", "0.6909827", "0.6842003", "0.6795763", "0.6745326", "0.6738838", "0.666937", "0.6658452", "0.6623842", "0.66205114", "0.6617514", "0.66050494", "0.65911543", "0.65911543", "0.658858", "0.65842706", "0.6576843", "0.65648913", "0.654615", "0.654368", "0.6481214", "0.64724463", "0.64706707", "0.64655066", "0.64655066", "0.64350855", "0.6430663", "0.6426268", "0.641007", "0.64062536", "0.6373992", "0.6369165", "0.6368171", "0.6366605", "0.635875", "0.6354117", "0.6352066", "0.6351983", "0.63484395", "0.6330952", "0.6329892", "0.6325227", "0.6321719", "0.63089263", "0.63045645", "0.63025093", "0.6300539", "0.62987447", "0.6287157", "0.6278087", "0.6267467", "0.6261343", "0.6261343", "0.6261343", "0.6260328", "0.6241424", "0.62334186", "0.6230386", "0.62237686", "0.62081945", "0.62076527", "0.62017787", "0.61938196", "0.61938196", "0.6191002", "0.61850643", "0.6175124", "0.6171405", "0.61541593", "0.6146065", "0.6143974", "0.6141407", "0.6131903", "0.6123848", "0.61228037", "0.61167884", "0.61155134", "0.61105156", "0.6103977", "0.61031526", "0.609498", "0.6093374", "0.6093374", "0.60907537", "0.6070059", "0.60678256", "0.60668445", "0.60664076", "0.6065792", "0.606185", "0.60559046", "0.60507923", "0.6043549", "0.6023523", "0.6023414", "0.6019304", "0.6009278", "0.60076416", "0.60076416" ]
0.7674791
0
Overloading the right multiply by factor operator for particles types
def __rmul__(self, other):
    if isinstance(other, float):
        # always create new particles
        p = particles(self)
        p.pos[:] = other * self.pos
        p.vel[:] = other * self.vel
        p.m = self.m
        p.q = self.q
        return p
    else:
        raise DataError("Type error: cannot multiply %s to %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multiplier(self) -> global___Expression:", "def __mul__(self, factor):\n def mul(output, target, params):\n return self(output, target, params) * factor\n return type(self)(type(self).__reserved_init, mul, factor * (1. if self._fact is None else self._fact), self._name)", "def __mul__(self, other):\n # print other\n if type(other) == int or type(other) == float:\n return self.scale(other)\n elif type(other) == Vector:\n return self.dot(other)\n else:\n return NotImplemented", "def __mul__(self, other):\r\n return self.prod(other)", "def __mul__(self, factor):\n if type(factor) == int or type(factor) == float:\n return Vector([c * factor for c in self.components])\n else:\n raise NotImplementedError\n raise Exception(\"Type \" + str(type(factor)) + \" is not valid. Expected float or int types.\")", "def __mul__(self: _TT, other: float) -> _TT:\n return type(self)(str(self.value * other),\"\")", "def multiply(self: T, other: T) -> T:", "def __mul__(self, A):\n pass", "def multiply(self, a, b):\n return a * b", "def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))", "def multiply(t):\n return mul(*t)", "def __mul__(self, other):\n if isinstance(other, numbers.Number):\n # scalar multiplication for numbers\n new_point = [x * other for x in self.coords]\n return self.__class__(new_point)", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def __mul__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.mul)", "def __mul__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Mul.apply(self, other)", "def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def mul(Z,X,Y):", "def __mul__(self, other):\n return Trits(self.trits * other)", "def __imul__(self, other):\r\n T = type(other)\r\n # vec4*=scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n self.x*=other\r\n self.y*=other\r\n self.z*=other\r\n self.w*=other\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for *=\"", "def __mul__(self, factor: float) -> Point:\n print(\"__mul__ was called\")\n return Point(self.x * factor, self.y * factor)", "def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def __mul__(self,rhs): \n\n\t\tif isinstance(rhs,self.__class__):\n\n\t\t\tassert self.side_angle == rhs.side_angle\n\t\t\tassert self.data.shape == rhs.data.shape\n\n\t\t\tnew_data = self.data * rhs.data\n\n\t\telif isinstance(rhs,numbers.Number):\n\n\t\t\tnew_data = self.data * rhs\n\n\t\telif type(rhs) == np.ndarray:\n\n\t\t\tassert rhs.shape == 
self.data.shape\n\t\t\tnew_data = self.data * rhs\n\n\t\telse:\n\n\t\t\traise TypeError(\"Cannot multiply by the right hand side!!\")\n\n\t\t#Copy the extra attributes as well\n\t\tkwargs = dict()\n\t\tfor attribute in self._extra_attributes:\n\t\t\tkwargs[attribute] = getattr(self,attribute)\n\n\t\treturn self.__class__(new_data,self.side_angle,masked=self._masked,**kwargs)", "def multiply(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n product = str(ft.reduce(oper.mul,values))\n\n return product", "def __mul__(self, other):\n if type(other) == int or type(other) == float:\n return Ccy(self.value * other, self.unit)\n else:\n raise TypeError(\"unsupported operand type(s) for *: 'Ccy' and \" + type(other).__name__)", "def __mul__(self, tensor):\n return self.mul(tensor)", "def __mul__(self, other: '__class__') -> '__class__':", "def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)", "def __mul__(self,other):\n if type(other) is Vector:\n return(self.x*other.x + self.y*other.y + self.z*other.z)\n else:\n return(Vector(self.x*other,self.y*other,self.z*other))", "def multiply(value, multiplier):\n return value*multiplier", "def mul(x, y):\n return multiply(x, y)", "def __mul__ (self, other): \n if isinstance(other, Number):\n return self._scale(other)\n elif isinstance(other, Matrix):\n return self._mul(other)\n elif isinstance(other, Vector):\n return self._vecmul(other)\n else:\n return NotImplemented", "def __imul__(self, tensor):\n return self.mul_(tensor)", "def __mul__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"*\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"* scalar)\"\n x *= value\n return x", "def mul(self, a, b):\n return a * b", "def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)", "def __mul__(self,a):\n return Vector(self.x*a,self.y*a)\n pass", "def __mul__(self,l):\r\n\t\t\r\n\t\t# multiply\r\n\t\tm = self.multiply(l)\r\n\t\t\r\n\t\treturn m", "def multiply(value, arg):\n return value * arg", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def __mul__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Multiply, value)\n return out", "def scalarMultiplication(self, factor):\n components = self.components() * factor\n return Vector.initializeFromComponents(components)", "def mul(a,b):\r\n return a*b", "def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)", "def mul(x, y):\n return x * y", "def mul(x, y):\n return x * y", "def multiply(self, layer):\n pass", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)", "def my_mul(x, y):\n ##\n cmd = getattr(th, \"mul\")\n x1, x2 = 
my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n return int48module((x2y1 + x1y2) % int24field * int24field + x2y2)", "def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)", "def product(self):\n raise NotImplementedError", "def __mul__(self, value):\n cls = self.__class__\n return cls(value*self.x, value*self.y, value*self.z)", "def __pow__(self, other, tensor=False):\r\n return self.prod(other, tensor=True)", "def mul(self, multiplier):\n result = {}\n for k, v in self.variables.items():\n a, b = self._broadcast(multiplier, v)\n result[k] = a * b\n return MultivariateDerivative(result)", "def __mul__(self, other):\n if is_unit(other):\n # print \"quantity * unit\"\n # Many other mul/div operations delegate to here because I was debugging\n # a dimensionless unit conversion problem, which I ended up fixing within\n # the reduce_unit() method.\n unit = self.unit * other\n return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # print \"quantity * quantity\"\n # Situations where the units cancel can result in scale factors from the unit cancellation.\n # To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit\n return (self * other._value) * other.unit\n else:\n # print \"quantity * scalar\"\n return self._change_units_with_factor(self.unit, other, post_multiply=False)", "def multiply(x, y):\n\n return x * y", "def _mul(a, b):\n return a * b", "def multiply(first, second):\n return first * second", "def scalar_mult(diagram, scalar):\n raise NotImplementedError", "def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})", "def vars_multiply ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) * float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n # shortcut \n if 1 == var1 : return var2 ## SHORTCUT\n elif 0 == var1 : return ROOT.RooRealConstant.value ( 0 ) ## SHORTCUT\n # \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_multiply ( var1 , var2 , name , title )\n elif f2 : \n # shortcut \n if 1 == var2 : return var1 ## SHORTCUT\n elif 0 == var2 : return ROOT.RooRealConstant.value ( 0 ) ## SHORTCUT\n # \n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_multiply ( var1 , var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit. 
Product ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)", "def __rmul__(self, *args, **kwargs):\n return self.__mul__(*args, **kwargs)", "def __mul__(self,y): \n\n # BZO mulitplication\n if type(y)==type(self):\n Out = self._CreateSameType()\n \n for Ind1 in self.IndList():\n Obj1=self[Ind1]\n for Ind2 in y.IndList():\n Obj2=y[Ind2]\n \n Ind3 = tuple(add(Ind1,Ind2))\n \n Out[Ind3] += Obj1*Obj2\n \n # Scalar multiplicatin\n else:\n\n Out = self._CreateSameType()\n\n Out.SetLists(self.IndList(),[y*x for x in self.__ObjList])\n\n # Multiplication with item of its own type\n \n \n \n \n \n return Out", "def mul_(self, scalar):\n for idx in range(len(self)):\n self.parameters[idx] *= scalar", "def multiply(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n multiplier = str(args[0] * args[1])\n return multiplier", "def __imul__(self, other):\n\n return self * other", "def multiply(x, y):\n return x * y", "def multiply(x, y):\n return x * y", "def multiply(x, y):\n return x * y", "def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts * other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)", "def test_multiply_scalar(self):\n a = Vector(1, 2)\n c = a * 3\n assert c.x == 3\n assert c.y == 6", "def __mul__(self, number):\n if not isinstance(number, (int, float)):\n return NotImplemented\n new_gene = self.copy()\n new_gene.weight *= number\n return new_gene", "def __mul__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during multiplication '\n f'to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Times(self, other)", "def mul(self, b):\n self.a *= float(b)", "def __mul__( self, value ) :\n\n value_ = float( value )\n c_ls = self.copy( )\n for l, c_l in enumerate( self ) : c_ls[l] *= value\n return( c_ls )", "def __pow__(self, ???):", "def __imul__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.mul)", "def test_mul():\n assert_equal(Vector(3, 1) * 2, Vector(6, 2))\n assert_equal(2 * Vector(3, 1), Vector(6, 2))", "def kkMul(*args):\n if (None in args):\n return None\n product = 1\n for arg in args:\n product *= arg\n return product", "def _mul(self, other):\n return None", "def __mul__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] *= _scalar\n\t\treturn ans", "def multiplicacion(x, y):\n return x * y", "def mult(a, b):\n return a * b", "def Mult(self, *args):\n return _hypre.HyprePCG_Mult(self, *args)", "def __mul__(self, other):\n x = self.x * other\n y = self.y * other\n return vec(x, y)", "def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if 
self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)", "def mul(num1, num2):\n return num1 * num2", "def mul(num1, num2):\n return num1 * num2", "def mul(num1, num2):\n return num1 * num2", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def multiply(a, b):\n return a * b", "def mult(value, arg):\n return int(value)*int(arg)" ]
[ "0.7431928", "0.7313898", "0.72634524", "0.71655995", "0.7143251", "0.69607747", "0.6940514", "0.68881065", "0.6880617", "0.6876777", "0.68679297", "0.68585974", "0.68542147", "0.6810146", "0.6792147", "0.6772574", "0.6749506", "0.67289525", "0.6717555", "0.67149484", "0.6702451", "0.6699927", "0.66994035", "0.6699063", "0.6698599", "0.66966194", "0.6686948", "0.6656218", "0.66522896", "0.664275", "0.6631896", "0.66286415", "0.662649", "0.66241217", "0.66105795", "0.6605313", "0.6589995", "0.65845394", "0.65772057", "0.65744936", "0.6569617", "0.6569617", "0.6569617", "0.6555128", "0.6551879", "0.65364194", "0.6531385", "0.6529958", "0.6529958", "0.6523987", "0.6516306", "0.6515065", "0.64966935", "0.64924335", "0.6447645", "0.6445093", "0.6431133", "0.6430173", "0.6429546", "0.6414701", "0.6410838", "0.6410356", "0.6408702", "0.6406", "0.64009565", "0.6397747", "0.6392054", "0.6383971", "0.6369759", "0.63506263", "0.6349173", "0.6345455", "0.6345455", "0.6345455", "0.6342379", "0.63371956", "0.6335633", "0.63356125", "0.6334022", "0.63320076", "0.6326591", "0.63202995", "0.63175315", "0.63112915", "0.63101053", "0.6306628", "0.6301459", "0.62982434", "0.6289155", "0.62847865", "0.6258572", "0.6257345", "0.6257345", "0.6257345", "0.62456995", "0.62456995", "0.62456995", "0.62456995", "0.62456995", "0.62401927" ]
0.67623055
16
Overloading the abs operator for particles types
def __abs__(self):
    abspos = abs(self.pos)
    absvel = abs(self.vel)
    return np.amax((abspos, absvel))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __abs__(self):\r\n raise TypeError(f\"bad operand type for abs(): '{type(self).__name__}'\")", "def abs_(a):", "def abs(self):\n return self * self.sign()", "def abs(self, a):\n return abs(a)", "def abs(x):\n pass", "def abs(self):\n\n return self._get(\"abs\", rtype=self.__class__)", "def __abs__(self):\n return self.magnitude()", "def __abs__(self):\r\n return math.sqrt(self*self)", "def scalar_abs(self, dst, src):\n return self._scalar_single_func('abs', dst, src)", "def abs_(arg):\n ...", "def __abs__( self ):\r\n\t\tif ( self < 0 ): return -self\r\n\t\telse: return self", "def __abs__(self) -> PointType:\n return Point(abs(self.x), abs(self.y))", "def abs(tensor):\n raise NotImplementedError", "def __abs__(self) -> numbers.Number:\n\n return np.sqrt(abs(self.mag2()))", "def abs(self):\n\n return Number.abs(self)", "def __abs__(self):\n out = self.copy()\n out.addFunction(Query.Function.Abs)\n return out", "def abs(number):\n if isinstance(number,(int,float,complex)): return builtins.abs(number)\n elif isinstance(number,(numpy.float64,numpy.complex128)): return numpy.abs(number)\n else: raise error(\"field_traits.abs executed on unavailable type\")", "def __abs__(self):\n return type(self)(abs(self.number))", "def convert_abs(node, **kwargs):\n return create_basic_op_node('Abs', node, kwargs)", "def abs(self):\n return math.sqrt(self['real'] * self['real'] + self['imaginary'] * self['imaginary'])", "def __abs__(self):\n return Vector.createFromPoint(self).norm", "def abs(data):\n return _make.abs(data)", "def __abs__(self):\n v = zeros_como(self)\n\n for i in range(self.n):\n v[i] = abs(self[i])\n\n return v", "def abs(tensor):\n return _elementary_op(tensor, np.abs, np.sign)", "def __abs__(self):\n\t\tval = abs(self.val)\n\t\tif 0 in self.val:\n\t\t\traise ValueError(\"Absolute value is not differentiable at 0.\")\n\n\t\tder_copy = np.copy(self.der)\n\t\tif len(der_copy.shape):\n\t\t\tfor i, val_i in enumerate(self.val):\n\t\t\t\tif val_i < 0:\n\t\t\t\t\tder_copy[i] = -1 * der_copy[i]\n\t\treturn Var(val, der_copy)", "def abs(value):\n return _abs(value)", "def abs(self) -> LinearOperator:\n return self.__class__(self._diag.abs())", "def __abs__(self):\n retval = self.copy()\n retval._val = abs(retval._val)\n return retval", "def _call_abs(vecObj):\n res = vecObj.abs()\n return res", "def abs(self: FrameLike) -> FrameLike:\n\n def abs(psser: \"Series\") -> Union[\"Series\", Column]:\n if isinstance(psser.spark.data_type, BooleanType):\n return psser\n elif isinstance(psser.spark.data_type, NumericType):\n return psser._with_new_scol(\n F.abs(psser.spark.column), field=psser._internal.data_fields[0]\n )\n else:\n raise TypeError(\n \"bad operand type for abs(): {} ({})\".format(\n spark_type_to_pandas_dtype(psser.spark.data_type),\n psser.spark.data_type.simpleString(),\n )\n )\n\n return self._apply_series_op(abs)", "def __neg__(self) -> PointType:\n return self * -1", "def test_abs():\n assert_equal(abs(Vector(3.0, 4.0)), 5.0)", "def abs(f):\n return f.per(dmp_abs(f.rep, f.lev, f.dom))", "def __abs__ (self) :\n return self.__class__ (abs (self.radians))", "def __abs__(self):\n if self.value == NEG:\n return TRIT_POS\n else:\n return self", "def toabs(self, value, isworld=-1):\n return _coordsys.coordsys_toabs(self, value, isworld)", "def __abs__(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()", "def __abs__ (self) :\n return self.__class__ (abs (self.degrees))", "def abs(array):\n return np.abs(array)", "def test_abs():\n v1 = _Vector3(1,2,99999)\n 
v2 = Vector3(1,2,99999)\n assert abs(v1) == abs(v2)\n v1 = _Vector3(8,2,9)\n v2 = Vector3(8,2,9)\n assert abs(v1) == abs(v2)\n v1 = _Vector3(8,2,9)\n v2 = Vector3(1,2,3)\n assert abs(v1) != abs(v2)", "def _abs (x):\n\n return x if le(nil,x) else -x", "def __init__(self):\n GinacFunction.__init__(self, \"abs\", latex_name=r\"\\mathrm{abs}\",\n conversions=dict(sympy='Abs'))", "def copy_abs(self):\r\n return (self._real.fma(self._real, self._imag*self._imag)).sqrt()", "def test_abs():\n value = -42\n num_a = param.Integer(value=value)\n assert abs(num_a.value) == abs(value)", "def __abs__(self):\n return self.square() ** 0.5", "def __abs__(self):\n return Quantity(abs(self._value), self.unit)", "def abs__inplace(a):", "def testabs ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownAbsValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) )\r\n\t\t\tself.assertEqual ( abs (frac1).toString (), str ( expRes ))", "def calculate_abs(self):\n ref_spectra_raw = np.array(self.raw_data['spectrum_0'].attrs['reference'])\n self.ref_spectra_arr = np.subtract(ref_spectra_raw,self.back_spectra_arr)\n abs=-np.log10(self.pre_proc_data.div(self.ref_spectra_arr))\n self.abs_data=abs\n return self.abs_data", "def _cmplx_abs_ ( s ) :\n import math \n return math.sqrt ( s.norm () )", "def test_abs():\n \n assert (abs(Quantity(-1, unit('m'))) ==\n abs(Quantity(1, unit('m'))) ==\n Quantity(1, unit('m')))", "def __neg__(self):\n return (-1)*self", "def true_y_abs(x):\n y = torch.abs(x)\n return y", "def local_abs_merge(node):\r\n if node.op == T.mul and sum([i.owner.op == T.abs_ for i in node.inputs\r\n if i.owner]) > 1:\r\n inputs = []\r\n for i in node.inputs:\r\n if i.owner and i.owner.op == T.abs_:\r\n inputs.append(i.owner.inputs[0])\r\n elif isinstance(i, Constant):\r\n try:\r\n const = get_scalar_constant_value(i)\r\n except NotScalarConstantError:\r\n return False\r\n if not (const >= 0).all():\r\n return False\r\n inputs.append(i)\r\n else:\r\n return False\r\n return [T.abs_(T.mul(*inputs))]\r\n if node.op == T.true_div and sum([i.owner.op == T.abs_ for i in\r\n node.inputs if i.owner]) == 2:\r\n return [T.abs_(T.true_div(node.inputs[0].owner.inputs[0],\r\n node.inputs[1].owner.inputs[0]))]", "def __abs__(self):\n return hypot(self.x, self.y)", "def abs(self) -> LinearOperator:\n return ConstantDiagLinearOperator(self.diag_values.abs(), diag_shape=self.diag_shape)", "def absolute_value(x):\n x_star = x.clone()\n x_star[1] *= -1\n return elementwise_mult(x, x_star)[0].sqrt_()", "def handle_abs(self):\n # pylint: disable=no-member\n x_raw = self.microbit.accelerometer.get_x()\n y_raw = self.microbit.accelerometer.get_y()\n x_abs = ('Absolute', 0x00, x_raw)\n y_abs = ('Absolute', 0x01, y_raw)\n return x_abs, y_abs", "def absmax(self):\n raise NotImplementedError", "def local_abs_lift(node):\r\n if node.op == T.abs_ and node.inputs[0].owner:\r\n assert node.nin == 1\r\n if node.inputs[0].owner.op == T.mul:\r\n return [T.mul(*[T.abs_(i) for i in node.inputs[0].owner.inputs])]\r\n if node.inputs[0].owner.op == T.true_div:\r\n i = node.inputs[0].owner.inputs\r\n return [T.true_div(T.abs_(i[0]), T.abs_(i[1]))]", "def neg(self, a):\n return -a", "def abs(self):\n return DataFrameDefault.register(pandas.DataFrame.abs)(self)", "def abs(a):\n if not type(a) is Blob:\n raise ValueError('`a` should be neoml.Blob.')\n\n if a.size == 0:\n raise ValueError(\"The blob shouldn't be empty.\")\n \n return Blob(PythonWrapper.blob_abs(a._internal))", "def abs(n):\n if n > 0:\n return 
n\n else:\n return -n", "def __neg__(self):\n return Vector(-self.x, -self.y)", "def __neg__(self):\n return Vector(-self.x, -self.y)", "def __neg__(self):\n return 0 - self", "def absolute(x):\n return -x if x < 0 else x", "def __abs__(self):\n return Factor().__build( VarSet(self.v) , np.fabs(self.t) )", "def absIP(self):\n np.fabs(self.t, out=self.t)\n return self", "def __neg__(self):\n retval = FixedPoint(0,self.int_bits, self.frac_bits) - self\n return retval", "def neg(a):\n return -a;", "def __neg__(self):\n return self.__mul__(-1)", "def absolute_magnitude(self):\n return self._absolute_magnitude", "def magnitude(self):\n return sqrt(self & self)", "def test_abs(doctest):", "def magabs(self):\n if not self.has_target():\n raise AttributeError(\"No target defined, I can't get the distance\")\n return self.mag - 5*( np.log10(self.target.distmpc*1.e6) - 1)", "def __neg__(self):\n a = -self._ar\n return Vector(a)", "def absolute_value(val):\n if val < 0:\n return val * -1\n else:\n return val", "def __neg__(self):\n return tuple.__new__(Vec2, (-self[0], -self[1]))", "def is_absolute(self) -> bool:\n return isinstance(self, AbsoluteFormula)", "def __neg__(self):\n return self.neg()", "def __mul__(self, other: 'SInt') -> 'SInt':\r\n if self.signe == other.signe == '0':\r\n return super().__mul__(other)\r\n if self.signe == other.signe:\r\n return abs(self) * abs(other)\r\n return -(abs(self) * abs(other))", "def __neg__(self):\n return self.scale(-1)", "def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()", "def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()", "def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()", "def negative(a: Decimal) -> Decimal:\n return -a", "def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new particles, since otherwise c = a - b changes a as well!\n p = particles(self)\n p.pos[:] = self.pos - other.pos\n p.vel[:] = self.vel - other.vel\n p.m = self.m\n p.q = self.q\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))", "def __neg__(self) -> 'SInt':\r\n return self.complement()", "def __neg__(self):\n return Translation(-self.x, -self.y, -self.z)", "def solve_absolute_mde(self):\n e = FTestPower().solve_power(\n effect_size=None\n ,df_num=self.df_denom\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=(1 - self.beta)\n ,ncc=1\n )\n\n Y = (self.test_splits * self.absolute_effects).sum()\n num1 = np.square(self.absolute_effects - Y)\n num = (self.test_splits * num1).sum()\n\n a = f * self.sigma / np.sqrt(num)\n return a * self.absolute_effects", "def int_abs(arr):\n arr = np.array(arr, copy=False)\n dt = arr.dtype\n if dt.kind == 'u':\n return arr\n if dt.kind != 'i':\n return np.absolute(arr)\n out = arr.astype(np.dtype(dt.str.replace('i', 'u')))\n return np.choose(arr < 0, (arr, arr * -1), out=out)", "def __neg__(self):\n return Complex(-self._reNum, -self._imNum)", "def zzX_abs(f):\n if poly_univariate_p(f):\n return zzx_abs(f)\n else:\n return [ zzX_abs(coeff) for coeff in f ]", "def get_abs_dist(self, pos1, pos2):\n\t\treturn min(abs(pos1 - pos2), abs(pos1 - pos2 + 360))", "def __neg__(self):\n return self.coeff_mul(-1)", "def get_trace_abs(self, norm=True):\n if self.Et is not None:\n # Center peak in time\n ind = np.argmax(abs(self.Et))\n shift = (self.Et.shape[0] / 2 - ind).astype(np.int)\n Et = np.abs(np.roll(self.Et, shift))\n if norm is True:\n Et 
/= Et.max()\n else:\n Et = None\n return Et", "def find_absolute_value(x):\n return math.fabs(x)", "def __neg__(self):\r\n\t\t\r\n\t\t# take negative\r\n\t\tn = self.scale(-1)\r\n\t\t\r\n\t\treturn n" ]
[ "0.7323492", "0.73157257", "0.71471566", "0.69704336", "0.6960074", "0.69372237", "0.6878583", "0.6811065", "0.67880434", "0.67874306", "0.6773758", "0.671703", "0.6713032", "0.6710696", "0.6704489", "0.6671311", "0.665533", "0.66214633", "0.6609428", "0.6576943", "0.654796", "0.6546441", "0.6518418", "0.64883596", "0.64457965", "0.6414845", "0.6352704", "0.63172686", "0.63030493", "0.62779415", "0.62580943", "0.6238638", "0.62278885", "0.62080157", "0.6202242", "0.61956364", "0.6180238", "0.61677194", "0.615738", "0.61375153", "0.6107162", "0.6105506", "0.6095597", "0.6073936", "0.5984774", "0.5945342", "0.58768654", "0.5875955", "0.58724976", "0.58447814", "0.5828078", "0.5800201", "0.5790954", "0.5779772", "0.5778525", "0.57238466", "0.57204634", "0.5711902", "0.57077056", "0.5705591", "0.5649386", "0.5636446", "0.56325847", "0.5628293", "0.5618077", "0.5618077", "0.5614805", "0.5601671", "0.55995905", "0.5597211", "0.55832523", "0.5578944", "0.5573823", "0.55730414", "0.5516303", "0.5495177", "0.54662246", "0.54514337", "0.5450304", "0.54438305", "0.5391616", "0.5386926", "0.538583", "0.5355935", "0.5328871", "0.5328871", "0.5328871", "0.5319942", "0.53121805", "0.5270995", "0.52472824", "0.52453655", "0.5244226", "0.52439713", "0.5240585", "0.5238353", "0.52375823", "0.52318966", "0.5218094", "0.52115315" ]
0.67304426
11
Routine for sending data forward in time (blocking)
def send(self, dest=None, tag=None, comm=None):
    comm.send(self, dest=dest, tag=tag)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_data(self, data, time):\n pass", "def sendall(self, data):\n while data and self.running:\n ret = self.sendFn(data[:MAX_SEND_SIZE])\n assert ret > 0\n data = data[ret:]", "def nonblocking_send(self, data):\n try:\n if len(data) == 0:\n return None\n self.amount_so_far += self.socket.send(data[self.amount_so_far:])\n except Exception as exc:\n active_sockets_dict.pop(self.socket, None)\n self.socket.close()\n print(\"An error occurred: %s\\n\" % exc)\n return -1\n ret = self.is_send_done()\n return ret", "def send(self, data):\n starttime = time.time()\n while 1:\n if self._waiting_response==1:\n if time.time() - starttime > self._maxrespdelay:\n break\n _LOGGER.debug(\"Send going to sleep\\n\")\n time.sleep(self._sleeptime)\n else:\n break\n\n currtime = time.time()\n if currtime - self._lastcall > self._maxtime:\n self.reset()\n self._lastcall = currtime\n _LOGGER.debug(\"Sending: %s\", data)\n if not testing:\n self.serial.reset_input_buffer()\n bytessent = self.serial.write(data.encode())\n return bytessent\n else:\n self._waiting_response = 1\n return len(data)", "def send(self):\n send_pos = 0 \n\n # send list not empty\n if(len(self.send_list) == 0): \n if(len(self.send_wait) == 0):\n return False\n self.send_list.append(self.send_wait[0]) # add send list\n self.send_pos_frame.append(0)\n self.send_wait.pop(0) # refresh send_wait\n\n send_now = self.send_list[send_pos]\n host = send_now[0] # port send frame\n frame = send_now[1] \n\n bit = frame.get_bit(self.send_pos_frame[send_pos])\n\n q = Queue()\n\n q.put((host,'s'))\n \n # 's': send, 'r': receive\n while not q.empty():\n front = q.get()\n name_port = front[0]\n tp = front[1] # type operation: 'r' o 's'\n\n name = Tools.get_device_name(name_port)\n port = Tools.get_device_port_index(name_port)\n\n dev = self.get_device(name)\n \n if(tp == 's'):\n new_dev = dev.send(bit, port)\n else: # tp == 'r'\n new_dev = dev.receive(bit, port)\n \n for i in new_dev:\n q.put(i)\n\n # signal time\n self.signal_count += 1\n if(self.signal_count == self.signal_time):\n self.send_pos_frame[send_pos] += 1\n if(self.send_pos_frame[send_pos] == frame.length()): # if frame send complete\n #reset send list\n self.send_list.pop(send_pos)\n self.send_pos_frame.pop(send_pos)\n\n self.signal_count = 0\n\n return True", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def done_sending(self):\r\n self._flush(True)", "def _flow_out(self):\n print(\"MESSENGER: flow_out online!\")\n while self.running:\n if self.sendbuffer:\n msg = self.sendbuffer.pop(0)\n for slc in (msg[i:i+1024] for i in range(0, len(msg), 1024)):\n self.sock.send(slc)\n time.sleep(self.sendtick)\n print(\"MESSENGER: flow_out exiting...\")", "def _send_data(self):\n pass", "def _send(self):\n while self.socket is not None:\n try:\n data = self._get_data_from_send_queue()\n if self.socket is not None:\n header = self._create_data_header(data)\n with self.socket_lock:\n self.socket.sendall(header + data)\n except Exception as err:\n getLogger(__name__).debug((\"Unexpected exception occurred,\"\n \" send thread may be in a\"\n \" corrupted state\\n\"\n \"Error: {}\".format(err)))", "def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield 
self.env.timeout(self.adist)\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist, self.packets_sent, src=self.id, flow_id=self.flow_id)\n self.out.put(p)", "def send(self,data):\r\n # Get the data length\r\n fullDataLength = len(data)\r\n \r\n # Input sanity\r\n if fullDataLength == 0:\r\n raise ValueError, \"Cannot send a null data-set!\"\r\n \r\n # Send chunks of data until it is all sent\r\n while True:\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Make sure we have available outgoing bandwidth\r\n self.socketLocks[\"outgoing\"].acquire()\r\n try:\r\n self.socketLocks[\"outgoing\"].release()\r\n except:\r\n # Some weird timing issues can cause an exception, but it is harmless\r\n pass\r\n \r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Get our own lock\r\n self.socketLocks[\"send\"].acquire()\r\n \r\n # How much outgoing traffic is available?\r\n outgoingAvailable = self.bufferInfo[\"outgoing\"]\r\n \r\n # If we can, just send it all at once\r\n if len(data) < outgoingAvailable:\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, data)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] -= len(data)\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # We need to explicitly leave the loop\r\n break\r\n \r\n # We need to send chunks, while waiting for more outgoing B/W\r\n else:\r\n # Get a chunk of data, and send it\r\n chunk = data[:outgoingAvailable]\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, chunk)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] = 0\r\n\r\n # Lock the outgoing lock, so that we block until we get a MULTIPLEXER_CONN_BUF_SIZE message\r\n self.socketLocks[\"outgoing\"].acquire()\r\n \r\n # Trim data to only what isn't sent syet\r\n data = data[outgoingAvailable:]\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # If there is no data left to send, then break\r\n if len(data) == 0:\r\n break\r\n \r\n # Return bytes sent, which is always the full message\r\n # since we will block indefinately until everything is sent.\r\n return fullDataLength", "def send_blocking_signal(self, compression=True):\n while not self._stop_receive.is_set():\n if len(self._send_queue) > 0:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(self._send_queue.pop()), compression)\n else:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(DummyEvent()), compression)\n time.sleep(1)", "def __send(self):\r\n self.msgLock.acquire()\r\n if self.numMsg > 0:\r\n self.socket.send(self.msg.pop(0))\r\n self.numMsg -= 1\r\n self.msgLock.release()", "def send(self, data):", "def sendBuffer():\n dislin.sendbf()", "def __frame_tx(self,data):\n\n if self._spy_frame_tx is not None:\n self._spy_frame_tx(data)\n\n data=self.__pad(data)\n\n if len(data) < self.other_bufferlen:\n self.com.tx(data)\n else:\n chunks = (len(data)-1) // self.other_bufferlen\n #print(\"__frame_tx: %d full chunks + last\"%chunks,flush=True)\n for i in range(0,chunks):\n self.com.tx(data[i*self.other_bufferlen:(i+1)*self.other_bufferlen])\n self.com.rx_ack()\n self.com.tx(data[chunks*self.other_bufferlen:])\n 
#print(\"__frame_tx done\",flush=True)", "def send (self, data):\n return self.sending.send(data)", "def send(self,data,timeout=None):\r\n # Set the timeout if None\r\n if timeout is None:\r\n timeout = self.timeout\r\n\r\n # Get the start time\r\n starttime = getruntime()\r\n\r\n # Block until we can write\r\n rblock, wblock = self.socket.willblock()\r\n while wblock:\r\n # Check if we should break\r\n if timeout > 0:\r\n # Get the elapsed time\r\n diff = getruntime() - starttime\r\n\r\n # Raise an exception\r\n if diff > timeout:\r\n raise SocketTimeoutError,\"send() timed out!\"\r\n\r\n # Sleep\r\n # Since switching to the fibonacci backoff, the nature of \r\n # this field has changed. Rather than implement the backoff \r\n # for checking block status (seems wasteful) we'll just use \r\n # a constant value. Ten ms seems appropriate.\r\n sleep(0.010)\r\n\r\n # Update rblock\r\n rblock, wblock = self.socket.willblock()\r\n\r\n # Do the recv\r\n return self.socket.send(data)", "def IRC_send_called_every_three_seconds(self):\n\n if (self.ircMessageBuffer):\n try:\n # print(\"Buffered\")\n stringToSend = str(self.ircMessageBuffer.popleft())\n print(\"string to send : \" + stringToSend)\n if self.ircSocket:\n self.ircSocket.send((stringToSend).encode('utf8'))\n except Exception as e:\n logging.error(\"IRC send error:\")\n logging.error(\"In IRCSendCalledEveryThreeSeconds\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def send_message(data):\n if data is not None:\n logging.debug(data)\n queue.on_next(data)", "def send(self, data):\n print(\"sending: {}\".format(data))\n self.forward_in_sock.send_string(\"{}\\n\".format(data))", "def send(self):\n self.spi.send(self.startframe + self.buffer)", "async def send(self):", "def write_handler(socket, buf):\n while True:\n try:\n message = buf.pop()\n logging.debug(\"sending data : %s\", message)\n socket.send(message)\n except IndexError:\n time.sleep(WAIT_INTERVAL)", "async def send(self, data: dict):\n async with self.gateway_send_lock:\n current_time = time()\n if current_time >= self.gateway_send_reset:\n self.gateway_send_reset = current_time + self.gateway_send_per\n self.gateway_send_left = self.gateway_send_limit\n if self.gateway_send_left == 0:\n sleep_for = self.gateway_send_reset - current_time\n self.logger.debug(\n f\"Gateway ratelimited! 
Sleeping for {sleep_for}s\")\n await sleep(self.gateway_send_reset - current_time)\n self.logger.debug(\"Data sent: \" + str(data))\n await self.ws.send_json(data, dumps=dumps)", "def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n #self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r", "def send(self, data: Union[ActionEvent, TurnEvent], compression=None):\n # pause_receive is irrelevant now\n # self._pause_receive.set()\n self._send_queue.append(data)\n # super(MastermindClientUDP, self).send(JSONSerializer.serialize(data), compression)\n # self._pause_receive.clear()\n return", "def await_data(self):\n self.data.append(self.socket.recv(1))", "def sending():\n # don't use socket.connect because it fixes a\n # remote address and causes problems when receiving\n # from other sockets and sending to them\n\n # we also need to cater for sending normal data\n\n global DATA\n start = current_time()\n while True:\n if(current_time() - start) < 10:\n time.sleep(2)\n continue\n else:\n data_to_send = pickle.dumps(DATA[\"distance_vec\"])\n for every_neighbor in DATA[\"neighbor\"]:\n send_address = (\"127.0.0.1\", every_neighbor[2])\n SOCKET1.sendto(data_to_send, send_address)\n start = current_time()", "def _send(self, data: bytes):\n if self._pre_send is not None:\n data = self._pre_send(data)\n if data is None:\n return\n\n self._transport.sendto(data, self._peer)", "def initiate_send(self):\n while self.producer_fifo and self.connected:\n first = self.producer_fifo[0]\n # handle empty string/buffer or None entry\n if not first:\n del self.producer_fifo[0]\n if first is None:\n self.transfer_finished = True\n self.handle_close()\n return\n\n # handle classic producer behavior\n obs = self.ac_out_buffer_size\n try:\n data = buffer(first, 0, obs)\n except TypeError:\n self.producer_fifo.appendleft(first.more())\n continue\n\n # send the data\n try:\n num_sent = self.send(data)\n except socket.error:\n self.handle_error()\n return\n\n if num_sent:\n self.tot_bytes_sent += num_sent\n if num_sent < len(data) or obs < len(first):\n self.producer_fifo[0] = first[num_sent:]\n else:\n del self.producer_fifo[0]\n # we tried to send some actual data\n return", "def send(self, data):\n pass", "def run(self):\n while True:\n get_request = self.transmitter_port.out_queue.get()\n message = yield get_request\n log.debug(\"{} transmission of {} started\".format(self, message))\n bytes_to_transmit = (ethernet.PREAMBLE_SIZE_BYTES +\n ethernet.SFD_SIZE_BYTES +\n message.size_bytes)\n # wait for the transmission + propagation time to elapse\n yield self.env.timeout(\n self.link.transmission_time_us(bytes_to_transmit) +\n self.link.propagation_delay_us)\n log.debug(\"{} transmission of {} finished\".format(self, message))\n self.receiver_port.in_queue.put(message)\n # wait for the duration of the ethernet interframe gap to elapse\n yield self.env.timeout(\n self.link.transmission_time_us(ethernet.IFG_SIZE_BYTES))\n log.debug(\"{} inter frame gap finished\".format(self))", "def send_recv(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('list(spi.send_recv(data, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)", "def write(self, data):\n if self.closed:\n 
raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def wait(t):\n message = \"WAIT:\" + str(t) + '\\n'\n sock.sendall(message)\n time.sleep(t)\n return", "def _attempt_enabling_looping_send(self):\n if (\n not self._looping_send.running and\n self._state == State.CONNECTED and\n len(self._sending_window) < constants.WINDOW_SIZE and\n len(self._segment_queue)\n ):\n self._looping_send.start(0, now=True)", "def send(self, message):\n if not hasattr(message, '__iter__'):\n self.socket.send(message, constants.NOBLOCK)\n else:\n for m in message[:-1]:\n self.socket.send(m, constants.NOBLOCK | constants.SNDMORE)\n self.socket.send(message[-1], constants.NOBLOCK)\n\n if self.read_scheduled is None:\n self.read_scheduled = reactor.callLater(0, self.doRead)", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass", "def _start_send_to_queue(self):\n while True:\n message_to_send = str(self.send_message_queue.get())\n if self.verbose: print \"Sending\", message_to_send\n send_msg(self.TCPSock, message_to_send)\n # self.TCPSock.send(message_to_send)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")", "async def write(self, data: bytes):\n while data:\n await self.wait_for_write()\n try:\n sent = self.socket.send(data)\n except OSError as e:\n self.logger.debug(\"Failed to write: %s\", e)\n raise asyncio.TimeoutError()\n data = data[sent:]", "def send_req(self):\n self.n_send_req += 1", "def send_p():\n while 1:\n if PACKET_QUEUE:\n mpkt = PACKET_QUEUE.pop()\n sendp(mpkt, iface=IFACE, loop=0) # forward spoofed packet to the victim", "def flushInput(self):\n self.sock.setblocking(0)\n try:\n while len(self.sock.recv(1)) > 0:\n pass\n except BaseException:\n pass\n self.sock.setblocking(1)\n self.sock.settimeout(self.__timeout)", "async def 
_resume_sending(self) -> None:\n random_wait_extension = (\n random.random() * self._received_busy_frames * BUSY_RANDOM_TIME_FACTOR\n )\n slowduration = self._received_busy_frames * BUSY_SLOWDURATION_TIME_FACTOR\n await asyncio.sleep(self._wait_time_ms / 1000 + random_wait_extension)\n\n self._ready.set()\n self._wait_start_time = None\n await asyncio.sleep(slowduration)\n while self._received_busy_frames > 0:\n await asyncio.sleep(BUSY_DECREMENT_TIME)\n self._received_busy_frames -= 1", "def stopAndWaitSendData(self, data:bytes):\n \n packetSent = self.send(data)\n log.info(\"Packet: \" + str(packetSent.seq) + \" sent. Length: \" + str(packetSent.length))\n ack = None\n while not (ack == packetSent.seq):\n try:\n ack = self.acknowledge()\n except socket.timeout:\n pass\n finally:\n if(ack is None) or (ack != packetSent.seq):\n self.sendPacket(packetSent)\n log.info(\"Packet resent\")\n else:\n log.info(\"Packet: \" + str(packetSent.seq) + \" ACKed\")\n break", "def runStep(self):\n if self.done:\n pass\n elif self.frame_num < self.num_iters:\n start, end = self.t, self.t + 1\n frame = self.data[start:end, :]\n t = time.time()\n id = self.client.put([self.t, frame], \"acq_bubble\" + str(self.frame_num))\n self.timestamp.append([time.time(), self.frame_num])\n try:\n self.q_out.put([str(self.frame_num), id])\n self.frame_num += 1\n self.t += self.l\n # also log to disk #TODO: spawn separate process here?\n except Exception as e:\n logger.error(\"Acquirer general exception: {}\".format(e))\n logger.error(traceback.format_exc())\n\n\n time.sleep(1/self.framerate) # pretend framerate\n self.total_times.append(time.time() - t)\n\n else: # simulating a done signal from the source\n logger.error(\"Done with all available frames: {0}\".format(self.frame_num))\n self.data = None\n self.q_comm.put(None)\n self.done = True # stay awake in case we get e.g. 
a shutdown signal", "def send(self, data: typing.Any):\n try:\n self._event_queue.put(data, block=False)\n except queue.Full as e:\n raise RuntimeError(\"Gateway queue is full - this should never happen!\") from e", "def sending_loop(self):\n try:\n while not self.done:\n msg = self.sending_queue.get()\n\n if not msg.startswith('dmd_frame'):\n self.log.debug('Sending \"%s\"', msg)\n\n try:\n self.connection.sendall(msg + '\\n')\n except (AttributeError, socket.error):\n pass\n # Do we just keep on trying, waiting until a new client\n # connects?\n\n self.socket.close()\n self.socket = None\n\n self.mc.socket_thread_stopped()\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def sendAndReceive(self, request):\n count = 0\n while count < 100: # 5 seconds\n try:\n count += 1\n self.sock.sendto(request, self.server_addr)\n reply, _ = self.sock.recvfrom(1024)\n return reply\n except:\n pass", "def send(self, data):\n while not self.stopped():\n try:\n self.ws.send(data)\n return\n except websocket.WebSocketConnectionClosedException:\n # config.LOGGER.debug('WebSocket closed, retrying send.') # TODO(investigate infinite loop)\n time.sleep(0.1)", "def sendData(self):\n\n while self.keep_running:\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='fanout')\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key='',\n body=self.message)\n\n self.connection.close()\n\n time.sleep(self.loopTime)", "def start_sync(self):\r\n self.send_queue.put(('sync', time.time()))\r\n self.awaiting_sync = True", "def _got_remote(self, data):\n self._recv_buffer += data", "def inject_send(data):\n tsent = 0\n bytes = len(data)\n chunksize = filesize / 100\n if chunksize < 4096:\n chunksize = 4096\n while bytes > 0:\n sent = imap.sslobj.write(data[:chunksize])\n if sent == bytes:\n common.progress(filesize, bytes)\n break # avoid copy\n tsent += sent\n common.progress(filesize, tsent)\n data = data[sent:]\n bytes = bytes - sent", "def send(self, data:bytes):\n packet = Rudp.Packet(self.seq, 0, data)\n packet.timesamp = time()\n self.sendPacket(packet)\n self.seqPlusOne()\n return(packet)", "def send_data_control_experiment(ecg, emg, gsr):\n\ti = 0\n\tj = 0\n\tk = 0\n\twhile True:\n\t\tif i == len(ecg): break\n\t\tskt.send(bytes(ecg[i], 'utf-8'))\n\t\ti += 1\n\t\t# blocking - always wait for ACK before sending the next packet\n\t\t# - can change this and handle out of order packets\n\t\t# ACK = soc.recv(1024)\n\n\t\t# wait for 1 sec before sending next packet\n\t\t# simulate a real time situation\n\t\t# time.sleep(1)\n\n\twhile True:\n\t\tif j == len(emg): break\n\t\tskt.send(bytes(emg[j], 'utf-8'))\n\t\tj += 1\n\n\twhile True:\n\t\tif k == len(gsr): break\n\t\tskt.send(bytes(gsr[k], 'utf-8'))\n\t\tk += 1\n\n\tstart = time.time()\n\tskt.sendall(b'A'*1024)\n\tend = time.time()\n\tprint(end - start)", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n 
else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())", "def handle_function(self):\n while True:\n # pack the data into a dictionary\n data = {\n 'steer': global_steer\n }\n\n # use struct to make sure we have a consistent endianness on the length\n length = pack('>Q', len(pickle.dumps(data)))\n\n # sendall to make sure it blocks if there's back-pressure on the socket\n self.socket.sendall(length)\n self.socket.sendall(pickle.dumps(data))\n\n # receive the success token\n ack = self.socket.recv(1)", "def _send_frame(self, dest, data):\n self._log.debug(\"write {} to {}\".format(len(data), dest)) \n # send to endpoint\n self._conn.sendto(data, (dest,0))", "def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)", "async def sender(self):\n out = await self.output_queue.get()\n if not out.ready():\n logger.info(\">>> Requeuing {}\".format(out))\n await self.output_queue.put(out)\n await asyncio.sleep(0.05)\n return\n if out.expired():\n logger.info(\">>> Discarding {}\".format(out))\n out.discarded = True\n return\n content = [out.content] if type(out.content) is str else out.content\n logger.info(\">>> Sending:\\n{}\".format(content))\n await self.websocket.send(json.dumps(content))\n out.sent = True\n await asyncio.sleep(len(content) * 0.5)", "def sendpkt(self, data, retries=10): \n wire_data = self.pack(data).encode()\n self.logger.debug('sending> %s', data) \n self.s.send(wire_data)\n res = self.rxqueue.get()\n while res != '+':\n self.s.send(wire_data)\n res = self.rxqueue.get()\n retries -= 1\n if retries == 0:\n raise ValueError(\"retry fail\")", "def send_next_packet():\n #\"global\" required here to be able to read and write to SEQUENCE \n global SEQUENCE\n data = sys.stdin.buffer.read(DATA_SIZE)\n if (len(data) > 0):\n rtt_start = time.time()\n msg_obj = {\"sequence\": SEQUENCE, \"data\": b64encode(data).decode(), \"ack\": True, \"eof\": False}\n if handle_packet_send(msg_obj):\n log(f\"Sequence number: \" + str(SEQUENCE))\n SEQUENCE += len(data)\n log(f'updating sender seq: {SEQUENCE}')\n return PacketInfo(msg_obj, rtt_start)\n return False", "def simple_send():\n i = None\n while True:\n i = yield i", "def handle_write(self):\n self.initiate_send()", "def send_data(self, msg):\n totalsent = 0\n # tt= struct.unpack('c'*len(msg), msg)\n # print(tt)\n while totalsent < len(msg):\n try:\n sent = self.sockfd.send(msg)\n except:\n print(f'{self.ip} socket failed')\n break\n if sent == 0:\n raise RuntimeError(\"Socket connection broken\")\n totalsent = totalsent + sent", "def sync():\n while read():\n pause()\n while not read():\n pass", "def flush(self, data):", "def run(self):\n errors = 0\n while ALIVE:\n try:\n self.maintainConn()\n try:\n line = self.reader.readerq.get(True, 5)\n except Empty:\n continue\n self.sendq.append(line)\n time.sleep(SENDER_SLEEP_TIME)\n while True:\n try:\n line = self.reader.readerq.get(False)\n 
except Empty:\n break\n self.sendq.append(line)\n\n self.sendData()\n errors = 0\n except (ArithmeticError, EOFError, EnvironmentError, LookupError,\n ValueError), e:\n errors += 1\n if errors > MAX_UNCAUGHT_EXCEPTIONS:\n shutdown()\n raise\n LOG.exception('Uncaught exception in SenderThread, ignoring')\n time.sleep(1)\n continue\n except:\n LOG.exception('Uncaught exception in SenderThread, ignoring')\n time.sleep(1)\n continue", "async def send_data(self, data, stream_id):\n while data:\n while self.conn.local_flow_control_window(stream_id) < 1:\n try:\n await self.wait_for_flow_control(stream_id)\n except asyncio.CancelledError as e:\n print(e)\n return\n\n chunk_size = min(\n self.conn.local_flow_control_window(stream_id),\n len(data),\n self.conn.max_outbound_frame_size,\n )\n\n try:\n self.conn.send_data(\n stream_id,\n data[:chunk_size],\n end_stream=(chunk_size == len(data))\n )\n except (StreamClosedError, ProtocolError) as e:\n print(e)\n # The stream got closed and we didn't get told. We're done\n # here.\n break\n\n self.transport.write(self.conn.data_to_send())\n data = data[chunk_size:]", "def send_out_tuples(self):\n self._flush_remaining()", "def sender(outgoing: mp.Queue, pipe: mp.Pipe) -> NoReturn: # thread that manages sending out data\n while True:\n if pipe.poll(\n 0): # check for any new settings updates or other instructions from the main thread\n\n settings = pipe.recv() # if there are any then receive them\n # use these as inputs to the settings update function\n setting_update(settings[0], settings[1])\n frame = outgoing.get() # wait for data in stack to be sent (is blocking)\n sendData(frame.bytes, frame.repeats)", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def run(self):\n macId, sensorId = getRequest(self.socket)\n try:\n while True:\n self.condition.acquire()\n while True:\n data = self.lastmessage.readData(macId, sensorId)\n self.socket.send(str(data) + '\\n')\n #self.socket.send(\"done\\n\")\n self.condition.wait(5)\n except socket.error, e:\n print \"Catching broken pipe\"\n self.condition.release()\n self.socket.close()\n\n\n \"\"\"\n macId, sensorId = getRequest(self.socket)\n print macId + \"|\" +sensorId\n self.condition.acquire()\n self.condition.wait()\n data = self.lastmessage.readData(macId, sensorId)\n self.condition.release()\n print str(data)\n self.socket.send(str(data) + '\\n')\n self.socket.send(\"done\\n\")\n self.socket.close()\n\"\"\"", "def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()", "def _fill_send_buffer(self):\n first_machine_time_step = FecDataView.get_first_machine_time_step()\n run_until_timesteps = FecDataView.get_current_run_timesteps()\n if (self._first_machine_time_step == first_machine_time_step and\n self._run_until_timesteps == run_until_timesteps):\n return\n self._first_machine_time_step = first_machine_time_step\n self._run_until_timesteps = run_until_timesteps\n key_to_send = self._virtual_key\n if self._virtual_key is None:\n key_to_send = 0\n\n if self._send_buffer is not None:\n self._send_buffer.clear()\n if 
(self._send_buffer_times is not None and\n len(self._send_buffer_times)):\n if hasattr(self._send_buffer_times[0], \"__len__\"):\n # Works with a list-of-lists\n self._fill_send_buffer_2d(key_to_send)\n else:\n # Work with a single list\n self._fill_send_buffer_1d(key_to_send)", "def send_data(self, data, retry=True):\n self.seq_number = RDTSegment.increment(self.seq_number)\n self.send_pkt(data)\n while True:\n try:\n if self.tries == RDTSocket.N_TRIES:\n raise Exception(\"Connection lost\")\n pkt = self.receive_pkt(0)\n except socket.timeout:\n if not retry:\n self.logger.debug(\"got timeout.\")\n raise socket.timeout\n self.logger.debug(f\"got timeout. resending seq_num {self.seq_number}\")\n self.send_pkt(data)\n self.tries += 1\n continue\n\n if pkt.seq_num == self.remote_number and not pkt.ack:\n self.logger.debug(f\"got repeated package. resending ACK. pkt=[{pkt}]\")\n self.send_pkt(ack=1, seq_number=pkt.seq_num)\n\n if pkt.seq_num == self.seq_number and pkt.ack:\n self.logger.debug(f\"got ACK. ending. pkt=[{pkt}]\")\n break\n\n self.tries = 0", "def _send(self):\n data = self.output_buffer.view()\n if not data:\n return\n if self.closed():\n raise self.Error(\"Failed to write to closed connection {!r}\".format(self.server.address))\n if self.defunct():\n raise self.Error(\"Failed to write to defunct connection {!r}\".format(self.server.address))\n self.socket.sendall(data)\n self.output_buffer.clear()", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def pre_send(self, sock):\n\n # default to doing nothing.\n pass", "def send(self, data):\n return False", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))", "def send(self):\n while True:\n for neighbor_name in self.neighbors:\n if not self.neighbors[neighbor_name].is_killed:\n if self.neighbors[neighbor_name].update_ready:\n self.send_update(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkup_ready:\n self.send_linkup(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkdown_ready:\n self.send_linkdown(self.neighbors[neighbor_name])", "def _done_sending():\n sys.stdout.write('\\n')\n sys.stdout.flush()", "def send_packet(self):\n amountfreed = 0\n bitstransmitted = 0\n # If we are at or have passed the time at which we should send the next\n # packet, we should try to send the next packet.\n if (self.next_packet_send_time <= globals.systime):\n # If there is nothing currently in the buffer, we have nothing to\n # send at this time.\n if (len(self.buffer) == 0):\n self.next_packet_send_time = \\\n self.next_packet_send_time + globals.dt\n\n # Otherwise, It's time to send the packet at the front of the buffer\n else:\n packet_to_send = self.buffer.pop(0)\n amountfreed = packet_to_send.get_size()\n # Updates buffersize to reflect that we removed the packet\n # at the front of the buffer from the buffer.\n self.buffersize = self.buffersize - amountfreed\n\n # Time represents the amount of time in the previous dt 
that we\n # were transmitting. (i.e. between the previous systime and the\n # current)\n time = self.next_packet_send_time - (globals.systime - globals.dt)\n # bitstransmitted represents the number of bits that were\n # transmitted in the previous dt\n bitstransmitted = time * self.rate\n\n # Now we need to add the packet that we removed from the\n # buffer to the lists that keep track of the propegation of the\n # packets.\n self.packets_in_transmission.append(packet_to_send)\n self.packet_arrival_times.append(self.next_packet_send_time + self.delay)\n\n # If there are still packets in the buffer, update the time\n # to send the next packet to be when it would finish transmitting\n if (len(self.buffer) > 0):\n next_packet_size = self.buffer[0].get_size()\n self.next_packet_send_time = self.next_packet_send_time + \\\n next_packet_size * (1/self.rate)\n # If we finished transmitting a packet and immediately\n # started sending another, we transmitted the entire time\n # step.\n bitstransmitted = globals.dt * self.rate\n\n # the buffer is empty so we will just set the time to try to\n # send the next packet to be the next time step.\n else:\n self.next_packet_send_time = self.next_packet_send_time + \\\n globals.dt\n\n # in one of two cases: either buffer is empty or we used link to capacity\n # in last dt.\n else:\n # if the buffer is nonempty, we must have been transmitting for\n # the entire duration of the last timestep.\n if (len(self.buffer) != 0):\n bitstransmitted = globals.dt * self.rate\n else:\n pass\n\n # Now, we compute and update the effective rate of the link.\n rate = 0\n self.lrsteps.append(bitstransmitted)\n if(globals.systime <= self.lrwindow):\n if (globals.systime != 0):\n rate = sum(self.lrsteps)/(globals.systime + globals.dt)\n # when the time is 0, we will just set the rate to be 0.\n else:\n pass\n else:\n self.lrsteps.pop(0)\n rate = sum(self.lrsteps)/self.lrwindow\n self.effectiverate = rate\n\n # If we are tracking this HalfLink, we will also record its current\n # rate.\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.LINKRATE\n dict = globals.statistics[key][globals.systime] = rate\n\n # Now we will check if any packets should be arriving at their\n # destination.\n if (len(self.packet_arrival_times) > 0):\n # If the time has passed the arrival time at the front of the list\n # of packet_arrival_times, we should remove the first item of the\n # list of packet_arrival_times, as well as the corresponding first\n # element of the list of packets_in_transmission and we should send\n # that packet to its destination.\n if (self.packet_arrival_times[0] <= globals.systime):\n packet_to_send = self.packets_in_transmission.pop(0)\n self.packet_arrival_times.pop(0)\n dest_type = ''\n if self.destination[0] == 'H':\n dest_type = 'hosts'\n else:\n dest_type = 'routers'\n receiver = globals.idmapping[dest_type][self.destination]\n receiver.receive_packet(packet_to_send, self.id)\n return amountfreed", "def send_pulses(index):\n path = str(DAL.__file__[:-7])\n conn = DAL.connect(path + r'\\DBProject.db')\n patients = [item[0] for item in DAL.get_patient(conn)]\n\n while get_running():\n time.sleep(1)\n thread_input = get_thread_input(index)\n with PRINT_LOCK:\n print(\"Thread num:\", index, \", input: \", thread_input)\n data = {}\n #TO DO-complete thread_input =1,2,3\n data[\"input\"] = thread_input\n data[\"client_num\"] = patients[index]\n data[\"position\"] = \"123\"\n data[\"event_time\"] = 
datetime.datetime.now()\n data[\"value\"] = \"123\"\n\n\n\n #{\"input\": thread_input, \"client num\": index, \"start_time\": \"123\"}\n requests.post(\n f\"http://{SERVER_IP}:{SERVER_PORT}/add_data\", data)\n if (thread_input==b\"2\"):\n change_thread_input(index)", "def send_bytes(self, data):\n raw_data = bytes(data)\n\n attempts = 0\n while True:\n try:\n self._sock.sendall(raw_data)\n return\n except (socket.timeout, BrokenPipeError):\n print('in socket exeption....')\n if (attempts < self._retries):\n attempts += 1\n self._sock.close()\n self._sock.connect((self._ip, self._port))\n else:\n raise", "def send(self, data):\n self._send(data)", "def send(self, data: bytes):" ]
[ "0.74057233", "0.7014785", "0.6927097", "0.6825883", "0.67526937", "0.6658176", "0.6651976", "0.65532917", "0.6422756", "0.6384636", "0.638339", "0.63811976", "0.6377756", "0.63681", "0.63496214", "0.633937", "0.63225573", "0.63097596", "0.63071275", "0.6304865", "0.6235367", "0.6195512", "0.6193564", "0.617823", "0.6174053", "0.6143688", "0.61424094", "0.6126987", "0.6104846", "0.6102847", "0.6097916", "0.609223", "0.6088618", "0.6085964", "0.60857266", "0.6084908", "0.6081397", "0.6079856", "0.6074609", "0.60740864", "0.60677457", "0.6067602", "0.60631436", "0.6061891", "0.6060207", "0.60582656", "0.6050086", "0.60443515", "0.6039446", "0.60361344", "0.60313153", "0.60289216", "0.6026548", "0.6022407", "0.6019063", "0.60116816", "0.6006971", "0.6003382", "0.59982914", "0.5997592", "0.5996421", "0.5981792", "0.59784806", "0.5973383", "0.59722424", "0.5970844", "0.5970232", "0.59656805", "0.59520084", "0.59477353", "0.59474796", "0.5947032", "0.5943904", "0.5940472", "0.5940076", "0.59396833", "0.59358096", "0.5931249", "0.5917356", "0.5914757", "0.5905732", "0.5902988", "0.5900409", "0.58981276", "0.5897028", "0.58938473", "0.5889525", "0.5889525", "0.5889525", "0.5886401", "0.5883944", "0.58834165", "0.5881678", "0.587301", "0.58720577", "0.58696437", "0.5865424", "0.58649474", "0.58634126", "0.5852929", "0.5840589" ]
0.0
-1
Routine for sending data forward in time (nonblocking)
def isend(self, dest=None, tag=None, comm=None):
    return comm.isend(self, dest=dest, tag=tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _send_data(self, data, time):\n pass", "def sendall(self, data):\n while data and self.running:\n ret = self.sendFn(data[:MAX_SEND_SIZE])\n assert ret > 0\n data = data[ret:]", "def nonblocking_send(self, data):\n try:\n if len(data) == 0:\n return None\n self.amount_so_far += self.socket.send(data[self.amount_so_far:])\n except Exception as exc:\n active_sockets_dict.pop(self.socket, None)\n self.socket.close()\n print(\"An error occurred: %s\\n\" % exc)\n return -1\n ret = self.is_send_done()\n return ret", "def send(self):\n send_pos = 0 \n\n # send list not empty\n if(len(self.send_list) == 0): \n if(len(self.send_wait) == 0):\n return False\n self.send_list.append(self.send_wait[0]) # add send list\n self.send_pos_frame.append(0)\n self.send_wait.pop(0) # refresh send_wait\n\n send_now = self.send_list[send_pos]\n host = send_now[0] # port send frame\n frame = send_now[1] \n\n bit = frame.get_bit(self.send_pos_frame[send_pos])\n\n q = Queue()\n\n q.put((host,'s'))\n \n # 's': send, 'r': receive\n while not q.empty():\n front = q.get()\n name_port = front[0]\n tp = front[1] # type operation: 'r' o 's'\n\n name = Tools.get_device_name(name_port)\n port = Tools.get_device_port_index(name_port)\n\n dev = self.get_device(name)\n \n if(tp == 's'):\n new_dev = dev.send(bit, port)\n else: # tp == 'r'\n new_dev = dev.receive(bit, port)\n \n for i in new_dev:\n q.put(i)\n\n # signal time\n self.signal_count += 1\n if(self.signal_count == self.signal_time):\n self.send_pos_frame[send_pos] += 1\n if(self.send_pos_frame[send_pos] == frame.length()): # if frame send complete\n #reset send list\n self.send_list.pop(send_pos)\n self.send_pos_frame.pop(send_pos)\n\n self.signal_count = 0\n\n return True", "def send(self, data):\n starttime = time.time()\n while 1:\n if self._waiting_response==1:\n if time.time() - starttime > self._maxrespdelay:\n break\n _LOGGER.debug(\"Send going to sleep\\n\")\n time.sleep(self._sleeptime)\n else:\n break\n\n currtime = time.time()\n if currtime - self._lastcall > self._maxtime:\n self.reset()\n self._lastcall = currtime\n _LOGGER.debug(\"Sending: %s\", data)\n if not testing:\n self.serial.reset_input_buffer()\n bytessent = self.serial.write(data.encode())\n return bytessent\n else:\n self._waiting_response = 1\n return len(data)", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def send(self,data):\r\n # Get the data length\r\n fullDataLength = len(data)\r\n \r\n # Input sanity\r\n if fullDataLength == 0:\r\n raise ValueError, \"Cannot send a null data-set!\"\r\n \r\n # Send chunks of data until it is all sent\r\n while True:\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Make sure we have available outgoing bandwidth\r\n self.socketLocks[\"outgoing\"].acquire()\r\n try:\r\n self.socketLocks[\"outgoing\"].release()\r\n except:\r\n # Some weird timing issues can cause an exception, but it is harmless\r\n pass\r\n \r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Get our own lock\r\n self.socketLocks[\"send\"].acquire()\r\n \r\n # How much outgoing traffic is available?\r\n outgoingAvailable = self.bufferInfo[\"outgoing\"]\r\n \r\n # If we can, just send it all at once\r\n if len(data) < outgoingAvailable:\r\n try:\r\n # Instruct the 
multiplexer object to send our data\r\n self.mux._send(self.id, data)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] -= len(data)\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # We need to explicitly leave the loop\r\n break\r\n \r\n # We need to send chunks, while waiting for more outgoing B/W\r\n else:\r\n # Get a chunk of data, and send it\r\n chunk = data[:outgoingAvailable]\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, chunk)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] = 0\r\n\r\n # Lock the outgoing lock, so that we block until we get a MULTIPLEXER_CONN_BUF_SIZE message\r\n self.socketLocks[\"outgoing\"].acquire()\r\n \r\n # Trim data to only what isn't sent syet\r\n data = data[outgoingAvailable:]\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # If there is no data left to send, then break\r\n if len(data) == 0:\r\n break\r\n \r\n # Return bytes sent, which is always the full message\r\n # since we will block indefinately until everything is sent.\r\n return fullDataLength", "def run(self):\n yield self.env.timeout(self.initial_delay)\n while self.env.now < self.finish:\n # wait for next transmission\n yield self.env.timeout(self.adist)\n self.packets_sent += 1\n p = Packet(self.env.now, self.sdist, self.packets_sent, src=self.id, flow_id=self.flow_id)\n self.out.put(p)", "def done_sending(self):\r\n self._flush(True)", "def _flow_out(self):\n print(\"MESSENGER: flow_out online!\")\n while self.running:\n if self.sendbuffer:\n msg = self.sendbuffer.pop(0)\n for slc in (msg[i:i+1024] for i in range(0, len(msg), 1024)):\n self.sock.send(slc)\n time.sleep(self.sendtick)\n print(\"MESSENGER: flow_out exiting...\")", "def send(self,data,timeout=None):\r\n # Set the timeout if None\r\n if timeout is None:\r\n timeout = self.timeout\r\n\r\n # Get the start time\r\n starttime = getruntime()\r\n\r\n # Block until we can write\r\n rblock, wblock = self.socket.willblock()\r\n while wblock:\r\n # Check if we should break\r\n if timeout > 0:\r\n # Get the elapsed time\r\n diff = getruntime() - starttime\r\n\r\n # Raise an exception\r\n if diff > timeout:\r\n raise SocketTimeoutError,\"send() timed out!\"\r\n\r\n # Sleep\r\n # Since switching to the fibonacci backoff, the nature of \r\n # this field has changed. Rather than implement the backoff \r\n # for checking block status (seems wasteful) we'll just use \r\n # a constant value. 
Ten ms seems appropriate.\r\n sleep(0.010)\r\n\r\n # Update rblock\r\n rblock, wblock = self.socket.willblock()\r\n\r\n # Do the recv\r\n return self.socket.send(data)", "def __frame_tx(self,data):\n\n if self._spy_frame_tx is not None:\n self._spy_frame_tx(data)\n\n data=self.__pad(data)\n\n if len(data) < self.other_bufferlen:\n self.com.tx(data)\n else:\n chunks = (len(data)-1) // self.other_bufferlen\n #print(\"__frame_tx: %d full chunks + last\"%chunks,flush=True)\n for i in range(0,chunks):\n self.com.tx(data[i*self.other_bufferlen:(i+1)*self.other_bufferlen])\n self.com.rx_ack()\n self.com.tx(data[chunks*self.other_bufferlen:])\n #print(\"__frame_tx done\",flush=True)", "def _send(self):\n while self.socket is not None:\n try:\n data = self._get_data_from_send_queue()\n if self.socket is not None:\n header = self._create_data_header(data)\n with self.socket_lock:\n self.socket.sendall(header + data)\n except Exception as err:\n getLogger(__name__).debug((\"Unexpected exception occurred,\"\n \" send thread may be in a\"\n \" corrupted state\\n\"\n \"Error: {}\".format(err)))", "def __send(self):\r\n self.msgLock.acquire()\r\n if self.numMsg > 0:\r\n self.socket.send(self.msg.pop(0))\r\n self.numMsg -= 1\r\n self.msgLock.release()", "def IRC_send_called_every_three_seconds(self):\n\n if (self.ircMessageBuffer):\n try:\n # print(\"Buffered\")\n stringToSend = str(self.ircMessageBuffer.popleft())\n print(\"string to send : \" + stringToSend)\n if self.ircSocket:\n self.ircSocket.send((stringToSend).encode('utf8'))\n except Exception as e:\n logging.error(\"IRC send error:\")\n logging.error(\"In IRCSendCalledEveryThreeSeconds\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def send (self, data):\n return self.sending.send(data)", "def send(self, data):", "def send(self):\n self.spi.send(self.startframe + self.buffer)", "def send(self, data):\n print(\"sending: {}\".format(data))\n self.forward_in_sock.send_string(\"{}\\n\".format(data))", "def sendBuffer():\n dislin.sendbf()", "def sending():\n # don't use socket.connect because it fixes a\n # remote address and causes problems when receiving\n # from other sockets and sending to them\n\n # we also need to cater for sending normal data\n\n global DATA\n start = current_time()\n while True:\n if(current_time() - start) < 10:\n time.sleep(2)\n continue\n else:\n data_to_send = pickle.dumps(DATA[\"distance_vec\"])\n for every_neighbor in DATA[\"neighbor\"]:\n send_address = (\"127.0.0.1\", every_neighbor[2])\n SOCKET1.sendto(data_to_send, send_address)\n start = current_time()", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def send(self, data:bytes):\n packet = Rudp.Packet(self.seq, 0, data)\n packet.timesamp = time()\n self.sendPacket(packet)\n self.seqPlusOne()\n return(packet)", "def send_blocking_signal(self, compression=True):\n while not self._stop_receive.is_set():\n if len(self._send_queue) > 0:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(self._send_queue.pop()), compression)\n else:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(DummyEvent()), compression)\n time.sleep(1)", "def write_handler(socket, buf):\n while True:\n try:\n message = buf.pop()\n logging.debug(\"sending data : %s\", message)\n socket.send(message)\n except IndexError:\n time.sleep(WAIT_INTERVAL)", "def send_message(data):\n if data is not None:\n logging.debug(data)\n 
queue.on_next(data)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def _send_data(self):\n pass", "def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def send_next_packet():\n #\"global\" required here to be able to read and write to SEQUENCE \n global SEQUENCE\n data = sys.stdin.buffer.read(DATA_SIZE)\n if (len(data) > 0):\n rtt_start = time.time()\n msg_obj = {\"sequence\": SEQUENCE, \"data\": b64encode(data).decode(), \"ack\": True, \"eof\": False}\n if handle_packet_send(msg_obj):\n log(f\"Sequence number: \" + str(SEQUENCE))\n SEQUENCE += len(data)\n log(f'updating sender seq: {SEQUENCE}')\n return PacketInfo(msg_obj, rtt_start)\n return False", "def run(self):\n while True:\n get_request = self.transmitter_port.out_queue.get()\n message = yield get_request\n log.debug(\"{} transmission of {} started\".format(self, message))\n bytes_to_transmit = (ethernet.PREAMBLE_SIZE_BYTES +\n ethernet.SFD_SIZE_BYTES +\n message.size_bytes)\n # wait for the transmission + propagation time to elapse\n yield self.env.timeout(\n self.link.transmission_time_us(bytes_to_transmit) +\n self.link.propagation_delay_us)\n log.debug(\"{} transmission of {} finished\".format(self, message))\n self.receiver_port.in_queue.put(message)\n # wait for the duration of the ethernet interframe gap to elapse\n yield self.env.timeout(\n self.link.transmission_time_us(ethernet.IFG_SIZE_BYTES))\n log.debug(\"{} inter frame gap finished\".format(self))", "def start_sync(self):\r\n self.send_queue.put(('sync', time.time()))\r\n self.awaiting_sync = True", "def sendAndReceive(self, request):\n count = 0\n while count < 100: # 5 seconds\n try:\n count += 1\n self.sock.sendto(request, self.server_addr)\n reply, _ = self.sock.recvfrom(1024)\n return reply\n except:\n pass", "def initiate_send(self):\n while self.producer_fifo and self.connected:\n first = self.producer_fifo[0]\n # handle empty string/buffer or None entry\n if not first:\n del self.producer_fifo[0]\n if first is None:\n self.transfer_finished = True\n self.handle_close()\n return\n\n # handle classic producer behavior\n obs = self.ac_out_buffer_size\n try:\n data = buffer(first, 0, obs)\n except TypeError:\n self.producer_fifo.appendleft(first.more())\n continue\n\n # send the data\n try:\n num_sent = self.send(data)\n except socket.error:\n self.handle_error()\n return\n\n if num_sent:\n self.tot_bytes_sent += num_sent\n if num_sent < len(data) or obs < len(first):\n self.producer_fifo[0] = first[num_sent:]\n else:\n del self.producer_fifo[0]\n # we tried to send some actual data\n return", "def _send(self, data: bytes):\n if self._pre_send is not None:\n data = self._pre_send(data)\n if data is None:\n return\n\n self._transport.sendto(data, self._peer)", "def send_data_control_experiment(ecg, emg, gsr):\n\ti = 0\n\tj = 0\n\tk = 0\n\twhile True:\n\t\tif i == len(ecg): break\n\t\tskt.send(bytes(ecg[i], 'utf-8'))\n\t\ti += 1\n\t\t# blocking - always wait for ACK before sending the next packet\n\t\t# - can change this and handle out of order packets\n\t\t# ACK = soc.recv(1024)\n\n\t\t# wait for 1 sec before sending next packet\n\t\t# simulate a real time situation\n\t\t# 
time.sleep(1)\n\n\twhile True:\n\t\tif j == len(emg): break\n\t\tskt.send(bytes(emg[j], 'utf-8'))\n\t\tj += 1\n\n\twhile True:\n\t\tif k == len(gsr): break\n\t\tskt.send(bytes(gsr[k], 'utf-8'))\n\t\tk += 1\n\n\tstart = time.time()\n\tskt.sendall(b'A'*1024)\n\tend = time.time()\n\tprint(end - start)", "def send(self, data: Union[ActionEvent, TurnEvent], compression=None):\n # pause_receive is irrelevant now\n # self._pause_receive.set()\n self._send_queue.append(data)\n # super(MastermindClientUDP, self).send(JSONSerializer.serialize(data), compression)\n # self._pause_receive.clear()\n return", "async def send(self, data: dict):\n async with self.gateway_send_lock:\n current_time = time()\n if current_time >= self.gateway_send_reset:\n self.gateway_send_reset = current_time + self.gateway_send_per\n self.gateway_send_left = self.gateway_send_limit\n if self.gateway_send_left == 0:\n sleep_for = self.gateway_send_reset - current_time\n self.logger.debug(\n f\"Gateway ratelimited! Sleeping for {sleep_for}s\")\n await sleep(self.gateway_send_reset - current_time)\n self.logger.debug(\"Data sent: \" + str(data))\n await self.ws.send_json(data, dumps=dumps)", "def send_p():\n while 1:\n if PACKET_QUEUE:\n mpkt = PACKET_QUEUE.pop()\n sendp(mpkt, iface=IFACE, loop=0) # forward spoofed packet to the victim", "async def write(self, data: bytes):\n while data:\n await self.wait_for_write()\n try:\n sent = self.socket.send(data)\n except OSError as e:\n self.logger.debug(\"Failed to write: %s\", e)\n raise asyncio.TimeoutError()\n data = data[sent:]", "def send(self, message):\n if not hasattr(message, '__iter__'):\n self.socket.send(message, constants.NOBLOCK)\n else:\n for m in message[:-1]:\n self.socket.send(m, constants.NOBLOCK | constants.SNDMORE)\n self.socket.send(message[-1], constants.NOBLOCK)\n\n if self.read_scheduled is None:\n self.read_scheduled = reactor.callLater(0, self.doRead)", "def _send_frame(self, dest, data):\n self._log.debug(\"write {} to {}\".format(len(data), dest)) \n # send to endpoint\n self._conn.sendto(data, (dest,0))", "def inject_send(data):\n tsent = 0\n bytes = len(data)\n chunksize = filesize / 100\n if chunksize < 4096:\n chunksize = 4096\n while bytes > 0:\n sent = imap.sslobj.write(data[:chunksize])\n if sent == bytes:\n common.progress(filesize, bytes)\n break # avoid copy\n tsent += sent\n common.progress(filesize, tsent)\n data = data[sent:]\n bytes = bytes - sent", "async def send(self):", "def send_recv(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('list(spi.send_recv(data, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)", "def _got_remote(self, data):\n self._recv_buffer += data", "def send_data(self, msg):\n totalsent = 0\n # tt= struct.unpack('c'*len(msg), msg)\n # print(tt)\n while totalsent < len(msg):\n try:\n sent = self.sockfd.send(msg)\n except:\n print(f'{self.ip} socket failed')\n break\n if sent == 0:\n raise RuntimeError(\"Socket connection broken\")\n totalsent = totalsent + sent", "def runStep(self):\n if self.done:\n pass\n elif self.frame_num < self.num_iters:\n start, end = self.t, self.t + 1\n frame = self.data[start:end, :]\n t = time.time()\n id = self.client.put([self.t, frame], \"acq_bubble\" + str(self.frame_num))\n self.timestamp.append([time.time(), self.frame_num])\n try:\n self.q_out.put([str(self.frame_num), id])\n self.frame_num += 1\n self.t += 
self.l\n # also log to disk #TODO: spawn separate process here?\n except Exception as e:\n logger.error(\"Acquirer general exception: {}\".format(e))\n logger.error(traceback.format_exc())\n\n\n time.sleep(1/self.framerate) # pretend framerate\n self.total_times.append(time.time() - t)\n\n else: # simulating a done signal from the source\n logger.error(\"Done with all available frames: {0}\".format(self.frame_num))\n self.data = None\n self.q_comm.put(None)\n self.done = True # stay awake in case we get e.g. a shutdown signal", "def sync():\n while read():\n pause()\n while not read():\n pass", "async def send_data(self, data, stream_id):\n while data:\n while self.conn.local_flow_control_window(stream_id) < 1:\n try:\n await self.wait_for_flow_control(stream_id)\n except asyncio.CancelledError as e:\n print(e)\n return\n\n chunk_size = min(\n self.conn.local_flow_control_window(stream_id),\n len(data),\n self.conn.max_outbound_frame_size,\n )\n\n try:\n self.conn.send_data(\n stream_id,\n data[:chunk_size],\n end_stream=(chunk_size == len(data))\n )\n except (StreamClosedError, ProtocolError) as e:\n print(e)\n # The stream got closed and we didn't get told. We're done\n # here.\n break\n\n self.transport.write(self.conn.data_to_send())\n data = data[chunk_size:]", "def simple_send():\n i = None\n while True:\n i = yield i", "def sender(outgoing: mp.Queue, pipe: mp.Pipe) -> NoReturn: # thread that manages sending out data\n while True:\n if pipe.poll(\n 0): # check for any new settings updates or other instructions from the main thread\n\n settings = pipe.recv() # if there are any then receive them\n # use these as inputs to the settings update function\n setting_update(settings[0], settings[1])\n frame = outgoing.get() # wait for data in stack to be sent (is blocking)\n sendData(frame.bytes, frame.repeats)", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def sendpkt(self, data, retries=10): \n wire_data = self.pack(data).encode()\n self.logger.debug('sending> %s', data) \n self.s.send(wire_data)\n res = self.rxqueue.get()\n while res != '+':\n self.s.send(wire_data)\n res = self.rxqueue.get()\n retries -= 1\n if retries == 0:\n raise ValueError(\"retry fail\")", "def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. 
Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())", "def send(self, data):\n pass", "def send_packet(self):\n amountfreed = 0\n bitstransmitted = 0\n # If we are at or have passed the time at which we should send the next\n # packet, we should try to send the next packet.\n if (self.next_packet_send_time <= globals.systime):\n # If there is nothing currently in the buffer, we have nothing to\n # send at this time.\n if (len(self.buffer) == 0):\n self.next_packet_send_time = \\\n self.next_packet_send_time + globals.dt\n\n # Otherwise, It's time to send the packet at the front of the buffer\n else:\n packet_to_send = self.buffer.pop(0)\n amountfreed = packet_to_send.get_size()\n # Updates buffersize to reflect that we removed the packet\n # at the front of the buffer from the buffer.\n self.buffersize = self.buffersize - amountfreed\n\n # Time represents the amount of time in the previous dt that we\n # were transmitting. (i.e. between the previous systime and the\n # current)\n time = self.next_packet_send_time - (globals.systime - globals.dt)\n # bitstransmitted represents the number of bits that were\n # transmitted in the previous dt\n bitstransmitted = time * self.rate\n\n # Now we need to add the packet that we removed from the\n # buffer to the lists that keep track of the propegation of the\n # packets.\n self.packets_in_transmission.append(packet_to_send)\n self.packet_arrival_times.append(self.next_packet_send_time + self.delay)\n\n # If there are still packets in the buffer, update the time\n # to send the next packet to be when it would finish transmitting\n if (len(self.buffer) > 0):\n next_packet_size = self.buffer[0].get_size()\n self.next_packet_send_time = self.next_packet_send_time + \\\n next_packet_size * (1/self.rate)\n # If we finished transmitting a packet and immediately\n # started sending another, we transmitted the entire time\n # step.\n bitstransmitted = globals.dt * self.rate\n\n # the buffer is empty so we will just set the time to try to\n # send the next packet to be the next time step.\n else:\n self.next_packet_send_time = self.next_packet_send_time + \\\n globals.dt\n\n # in one of two cases: either buffer is empty or we used link to capacity\n # in last dt.\n else:\n # if the buffer is nonempty, we must have been transmitting for\n # the entire duration of the last timestep.\n if (len(self.buffer) != 0):\n bitstransmitted = globals.dt * self.rate\n else:\n pass\n\n # Now, we compute and update the effective rate of the link.\n rate = 0\n self.lrsteps.append(bitstransmitted)\n if(globals.systime <= self.lrwindow):\n if (globals.systime != 0):\n rate = sum(self.lrsteps)/(globals.systime + globals.dt)\n # when the time is 0, we will just set the rate to be 0.\n else:\n pass\n else:\n self.lrsteps.pop(0)\n rate = sum(self.lrsteps)/self.lrwindow\n self.effectiverate = rate\n\n # If we are tracking this HalfLink, we will also record its current\n # rate.\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.LINKRATE\n dict = globals.statistics[key][globals.systime] = rate\n\n # Now we will check if any packets should be arriving at their\n # destination.\n if (len(self.packet_arrival_times) > 0):\n # If the time has passed the arrival time at the front of the list\n # of packet_arrival_times, we should remove the first item of the\n # list of packet_arrival_times, as well as the 
corresponding first\n # element of the list of packets_in_transmission and we should send\n # that packet to its destination.\n if (self.packet_arrival_times[0] <= globals.systime):\n packet_to_send = self.packets_in_transmission.pop(0)\n self.packet_arrival_times.pop(0)\n dest_type = ''\n if self.destination[0] == 'H':\n dest_type = 'hosts'\n else:\n dest_type = 'routers'\n receiver = globals.idmapping[dest_type][self.destination]\n receiver.receive_packet(packet_to_send, self.id)\n return amountfreed", "def _start_send_to_queue(self):\n while True:\n message_to_send = str(self.send_message_queue.get())\n if self.verbose: print \"Sending\", message_to_send\n send_msg(self.TCPSock, message_to_send)\n # self.TCPSock.send(message_to_send)", "def write(self, data):\n if self.closed:\n raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)", "def send(self, data):\n while not self.stopped():\n try:\n self.ws.send(data)\n return\n except websocket.WebSocketConnectionClosedException:\n # config.LOGGER.debug('WebSocket closed, retrying send.') # TODO(investigate infinite loop)\n time.sleep(0.1)", "def next(p):\n threading.Thread(target=forward, args=(p,)).start()\n threading.Thread(target=insertData, args=(p,)).start()\n threading.Thread(target=clientListen, args=(p,)).start()\n sys.exit()", "def send_req(self):\n self.n_send_req += 1", "def _attempt_enabling_looping_send(self):\n if (\n not self._looping_send.running and\n self._state == State.CONNECTED and\n len(self._sending_window) < constants.WINDOW_SIZE and\n len(self._segment_queue)\n ):\n self._looping_send.start(0, now=True)", "def await_data(self):\n self.data.append(self.socket.recv(1))", "def send(self, data: typing.Any):\n try:\n self._event_queue.put(data, block=False)\n except queue.Full as e:\n raise RuntimeError(\"Gateway queue is full - this should never happen!\") from e", "def send_pulses(index):\n path = str(DAL.__file__[:-7])\n conn = DAL.connect(path + r'\\DBProject.db')\n patients = [item[0] for item in DAL.get_patient(conn)]\n\n while get_running():\n time.sleep(1)\n thread_input = get_thread_input(index)\n with PRINT_LOCK:\n print(\"Thread num:\", index, \", input: \", thread_input)\n data = {}\n #TO DO-complete thread_input =1,2,3\n data[\"input\"] = thread_input\n data[\"client_num\"] = patients[index]\n data[\"position\"] = \"123\"\n data[\"event_time\"] = datetime.datetime.now()\n data[\"value\"] = \"123\"\n\n\n\n #{\"input\": thread_input, \"client num\": index, \"start_time\": \"123\"}\n requests.post(\n f\"http://{SERVER_IP}:{SERVER_PORT}/add_data\", data)\n if (thread_input==b\"2\"):\n change_thread_input(index)", "def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n 
#self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r", "def send_data(self, data, retry=True):\n self.seq_number = RDTSegment.increment(self.seq_number)\n self.send_pkt(data)\n while True:\n try:\n if self.tries == RDTSocket.N_TRIES:\n raise Exception(\"Connection lost\")\n pkt = self.receive_pkt(0)\n except socket.timeout:\n if not retry:\n self.logger.debug(\"got timeout.\")\n raise socket.timeout\n self.logger.debug(f\"got timeout. resending seq_num {self.seq_number}\")\n self.send_pkt(data)\n self.tries += 1\n continue\n\n if pkt.seq_num == self.remote_number and not pkt.ack:\n self.logger.debug(f\"got repeated package. resending ACK. pkt=[{pkt}]\")\n self.send_pkt(ack=1, seq_number=pkt.seq_num)\n\n if pkt.seq_num == self.seq_number and pkt.ack:\n self.logger.debug(f\"got ACK. ending. pkt=[{pkt}]\")\n break\n\n self.tries = 0", "def sendData(self):\n\n while self.keep_running:\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchange broadcasts all the messages it receives to all the queues it knows.\n # That is what we need for our logger.\n self.channel.exchange_declare(exchange=self.logName,\n exchange_type='fanout')\n\n # Publish the data to the exchange\n self.channel.basic_publish(exchange=self.logName,\n routing_key='',\n body=self.message)\n\n self.connection.close()\n\n time.sleep(self.loopTime)", "def sending_loop(self):\n try:\n while not self.done:\n msg = self.sending_queue.get()\n\n if not msg.startswith('dmd_frame'):\n self.log.debug('Sending \"%s\"', msg)\n\n try:\n self.connection.sendall(msg + '\\n')\n except (AttributeError, socket.error):\n pass\n # Do we just keep on trying, waiting until a new client\n # connects?\n\n self.socket.close()\n self.socket = None\n\n self.mc.socket_thread_stopped()\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def wait(t):\n message = \"WAIT:\" + str(t) + '\\n'\n sock.sendall(message)\n time.sleep(t)\n return", "def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))", "def _fill_send_buffer(self):\n first_machine_time_step = FecDataView.get_first_machine_time_step()\n run_until_timesteps = FecDataView.get_current_run_timesteps()\n if (self._first_machine_time_step == first_machine_time_step and\n self._run_until_timesteps == run_until_timesteps):\n return\n self._first_machine_time_step = first_machine_time_step\n self._run_until_timesteps = run_until_timesteps\n key_to_send = self._virtual_key\n if self._virtual_key is None:\n key_to_send = 0\n\n if self._send_buffer is not None:\n self._send_buffer.clear()\n if (self._send_buffer_times is not None and\n len(self._send_buffer_times)):\n if hasattr(self._send_buffer_times[0], \"__len__\"):\n # Works with a list-of-lists\n self._fill_send_buffer_2d(key_to_send)\n else:\n # Work with a single list\n self._fill_send_buffer_1d(key_to_send)", "def handle_function(self):\n while True:\n # pack the data into a dictionary\n data = {\n 'steer': global_steer\n }\n\n # use struct to make sure we have a consistent endianness on the length\n length = pack('>Q', len(pickle.dumps(data)))\n\n # sendall to make sure it blocks if there's back-pressure on the socket\n self.socket.sendall(length)\n self.socket.sendall(pickle.dumps(data))\n\n # receive the success token\n 
ack = self.socket.recv(1)", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "async def sender(self):\n out = await self.output_queue.get()\n if not out.ready():\n logger.info(\">>> Requeuing {}\".format(out))\n await self.output_queue.put(out)\n await asyncio.sleep(0.05)\n return\n if out.expired():\n logger.info(\">>> Discarding {}\".format(out))\n out.discarded = True\n return\n content = [out.content] if type(out.content) is str else out.content\n logger.info(\">>> Sending:\\n{}\".format(content))\n await self.websocket.send(json.dumps(content))\n out.sent = True\n await asyncio.sleep(len(content) * 0.5)", "def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")", "def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def flushInput(self):\n self.sock.setblocking(0)\n try:\n while len(self.sock.recv(1)) > 0:\n pass\n except BaseException:\n pass\n self.sock.setblocking(1)\n self.sock.settimeout(self.__timeout)", "def stopAndWaitSendData(self, data:bytes):\n \n packetSent = self.send(data)\n log.info(\"Packet: \" + str(packetSent.seq) + \" sent. 
Length: \" + str(packetSent.length))\n ack = None\n while not (ack == packetSent.seq):\n try:\n ack = self.acknowledge()\n except socket.timeout:\n pass\n finally:\n if(ack is None) or (ack != packetSent.seq):\n self.sendPacket(packetSent)\n log.info(\"Packet resent\")\n else:\n log.info(\"Packet: \" + str(packetSent.seq) + \" ACKed\")\n break", "def callback(indata, frames, time, status):\n if status:\n print(status, flush=True)\n queue.put(indata.copy())", "def send_out_tuples(self):\n self._flush_remaining()", "def live_sequence(socket):\n pass", "def send_bytes(self, data):\n raw_data = bytes(data)\n\n attempts = 0\n while True:\n try:\n self._sock.sendall(raw_data)\n return\n except (socket.timeout, BrokenPipeError):\n print('in socket exeption....')\n if (attempts < self._retries):\n attempts += 1\n self._sock.close()\n self._sock.connect((self._ip, self._port))\n else:\n raise", "def run(self):\n errors = 0\n while ALIVE:\n try:\n self.maintainConn()\n try:\n line = self.reader.readerq.get(True, 5)\n except Empty:\n continue\n self.sendq.append(line)\n time.sleep(SENDER_SLEEP_TIME)\n while True:\n try:\n line = self.reader.readerq.get(False)\n except Empty:\n break\n self.sendq.append(line)\n\n self.sendData()\n errors = 0\n except (ArithmeticError, EOFError, EnvironmentError, LookupError,\n ValueError), e:\n errors += 1\n if errors > MAX_UNCAUGHT_EXCEPTIONS:\n shutdown()\n raise\n LOG.exception('Uncaught exception in SenderThread, ignoring')\n time.sleep(1)\n continue\n except:\n LOG.exception('Uncaught exception in SenderThread, ignoring')\n time.sleep(1)\n continue", "def send(self, data: bytes):", "def sendData(data):\n\n\tslen = struct.pack('<I', len(data))\n\t#connSock.sendall(slen + data)\n\tconnSock.sendall(slen)\n\tconnSock.sendall(data)\n\n\treturn 0", "def _push_from_buffer(self):\r\n if len(self.buffer) > 0:\r\n if time.time() - self.last_sent_time > 5:\r\n try:\r\n message = self.buffer.pop(0)\r\n self._send_now(message)\r\n finally:\r\n self.last_sent_time = time.time()", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def send(self):\n while True:\n for neighbor_name in self.neighbors:\n if not self.neighbors[neighbor_name].is_killed:\n if self.neighbors[neighbor_name].update_ready:\n self.send_update(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkup_ready:\n self.send_linkup(self.neighbors[neighbor_name])\n if self.neighbors[neighbor_name].linkdown_ready:\n self.send_linkdown(self.neighbors[neighbor_name])", "def background_thread():\n count = 0\n while True:\n socketio.sleep(1)\n count += 1\n t = time.strftime('%M:%S', time.localtime())\n cpus = [1,2,3,4] #\n print('sending')\n socketio.emit('server_response',\n {'data': [t, cpus[0],cpus[1],cpus[2],cpus[3]], 'count': count})", "def __send(self, cmd, delay=.1):\n\n self.__write(cmd)\n\n if delay is not None:\n print(\"wait: %d seconds\" % delay)\n time.sleep(delay)\n\n return self.__read()", "def flush(self, data):", "def send(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('spi.send(data, timeout=50000)\\r\\n'.encode('utf-8'))\n sleep(1)", "def _send_data(self, data, time):\n \n # Prepare data string with the values in data buffer\n data_string = ''\n # Timestamp\n data_string += '&time=' + str(time)\n # Node ID\n data_string += '&node=' + str(data[0])\n # Data\n 
data_string += '&json={'\n for i, val in enumerate(data[1:]):\n data_string += str(i+1) + ':' + str(val)\n data_string += ','\n # Remove trailing comma and close braces\n data_string = data_string[0:-1]+'}'\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/post.json?apikey=12345\n # &node=10&json={1:1806, 2:1664}'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + '/input/post.json?apikey=' + \\\n self._settings['apikey'] + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n return True\n else:\n self._log.warning(\"Send failure\")", "def send_line(self, data, nowait=False):\n data = data.replace('\\n', ' ').replace('\\r', ' ')\n f = self.loop.create_future()\n if self.queue is not None and nowait is False:\n self.queue.put_nowait((f, data))\n else:\n self.send(data.replace('\\n', ' ').replace('\\r', ' '))\n f.set_result(True)\n return f", "def send(self, data):\n self.sock.send(data)" ]
[ "0.731677", "0.69155127", "0.683366", "0.67629844", "0.66871953", "0.6625277", "0.64126647", "0.63910866", "0.63096064", "0.6301206", "0.6290626", "0.6284213", "0.6281181", "0.62581146", "0.62326896", "0.62172097", "0.6213326", "0.62094146", "0.61989105", "0.61908275", "0.6174689", "0.6152816", "0.6139198", "0.61272234", "0.6121274", "0.6090591", "0.6075602", "0.60568345", "0.6051468", "0.6041564", "0.6025421", "0.6022943", "0.6021873", "0.60202026", "0.60160065", "0.60134363", "0.6010051", "0.6000595", "0.59977734", "0.59955084", "0.5991736", "0.59840304", "0.5982107", "0.59681493", "0.5958614", "0.5958298", "0.5956413", "0.59475076", "0.5944281", "0.5929586", "0.59288245", "0.5920114", "0.591965", "0.591544", "0.591384", "0.59135", "0.59098154", "0.5907756", "0.59061605", "0.59043676", "0.58946246", "0.58910066", "0.58898216", "0.58815134", "0.58771783", "0.5876592", "0.58763367", "0.58663565", "0.58466566", "0.58444834", "0.5843081", "0.5841226", "0.5836702", "0.5836515", "0.5833704", "0.5807917", "0.5804683", "0.5796639", "0.57905185", "0.578009", "0.57799137", "0.57734746", "0.57683015", "0.5761105", "0.5759083", "0.5757302", "0.57532996", "0.5753122", "0.574495", "0.57428175", "0.5741366", "0.5737172", "0.57329154", "0.57311654", "0.5727547", "0.5726793", "0.5723666", "0.57223564", "0.57200307", "0.5716785", "0.5714768" ]
0.0
-1
Routine for receiving in time
def recv(self, source=None, tag=None, comm=None):
    part = comm.recv(source=source, tag=tag)
    self.pos[:] = part.pos.copy()
    self.vel[:] = part.vel.copy()
    self.m = part.m.copy()
    self.q = part.q.copy()
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recv_ts(self) -> int:\n pass", "def receive(self, packet, time):\n raise NotImplementedError", "def handleReturnTime(rtt):\n pass", "def sendTime(self):\n timestamp = datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")\n self.send(timestamp)", "def receive_event(self):\n msg = self.msg_queue.get()\n\n # get the logical clock time of the machine that sent the message\n other_system_clock = msg[msg.index(\":\") + 1:] \n \n # set the clock time to the maximum of self's clock time and other \n # system's clock time\n self.clock_time = max(self.clock_time, int(other_system_clock))\n\n # increment the logical clock time and log that a message was received\n self.clock_time += 1\n self.log(\" Received message from \" + str(msg[:msg.index(\":\")]) + \n \" with LC time \" + str(msg[msg.index(\":\") + 2:]) + \n \"; messages left to process: \" + str(self.msg_queue.qsize()))", "def _send_data(self, data, time):\n pass", "def receive(self, data: Packet, addr: Tuple[str, int], time: datetime) -> None:\n # region Docstring\n # endregion\n\n n = UDP_P2P.latency(\n datetime.strptime(time.strftime(\"%H%M%S%f\"), \"%H%M%S%f\"),\n datetime.strptime(data.time + \"000\", \"%H%M%S%f\"),\n )\n\n self.record += f\"<b>({data.nick.strip()} - {addr[0]} - {n}ms): </b><br>{data.msg.strip()}<br>\"", "def _send_time(self):\n if 'time' not in self.loopback_guard:\n content = {'time': self.time.isoformat()}\n self.send_action('set_time', content)", "def update(self, rcv_time, msg):\n raise NotImplementedError", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def slot_receive(self):\n\n self.start_time = str(time.strftime(\"%Y%m%d%H%M%S\", time.localtime()))\n\n self.setter.start()\n self.getter.start()\n self.timer.start(1)\n # self.plotter.start()", "def on_tick(self, time):\n pass", "def ReceiveTimeout(self) -> int:", "def ReceiveTimeout(self) -> int:", "def remaining_ms():", "def receive():\n pass", "def slot_timer(self, _sender, _data):\r\n if self.connected:\r\n if time.time() - self._time_last_received > 60:\r\n self.debug(\"### did not receive anything for a long time, disconnecting.\")\r\n self.force_reconnect()\r\n self.connected = False\r\n if time.time() - self._time_last_subscribed > 1800:\r\n # sometimes after running for a few hours it\r\n # will lose some of the subscriptons for no\r\n # obvious reason. I've seen it losing the trades\r\n # and the lag channel channel already, and maybe\r\n # even others. Simply subscribing again completely\r\n # fixes this condition. 
For this reason we renew\r\n # all channel subscriptions once every hour.\r\n self.debug(\"### refreshing channel subscriptions\")\r\n self.channel_subscribe(False)", "def realtime(self):", "def timed_msg(self, t_sec, msg):\n self._msg_hdr(msg)\n self.main_loop.set_alarm_in(t_sec, self._msg_cb, '')", "def incomingMsg(self, ip):\n #print(\"[ACTM] Receiving data for IP\", ip)\n if ip in self.ip:\n idx = self.ip.index(ip)\n sID = idx + 1 #karena index mulai dari 0\n self.actChanged.emit(sID, True) #lalu notifikasi dashboard\n self.timer[idx].start() #lalu jalankan timernya", "def time_automation_listener(now):\n action()", "async def log_time(self, event):\n sender = await event.get_sender()\n user = utils.get_display_name(sender)\n\n message = event.message\n\n time = message.date.astimezone(self.__to_zone).time().hour\n\n logging.debug(\"Got the following message: \\\"\" + event.raw_text + \"\\\" at time \" + str(time))\n\n self.__contact_times.labels(user).observe(time)", "def tcp_time_updatetime(localport):\r\n\r\n # Get the ips and ports of servers hosting time_server.repy, retrying nine\r\n # times if there is an exception.\r\n gotval = False\r\n attemptretrieval = 0\r\n while attemptretrieval < 2:\r\n try:\r\n serveraddresses = advertise_lookup(\"time_server\")\r\n except Exception:\r\n attemptretrieval = attemptretrieval + 1\r\n sleep(2) # Look up the value again in 10 seconds\r\n else:\r\n if serveraddresses != [] and serveraddresses[0] != '':\r\n gotval = True\t # Successfully obtained the value\r\n break\r\n else:\r\n attemptretrieval = attemptretrieval + 1\r\n\r\n\r\n if not gotval:\r\n raise Exception(\"Unable to locate any servers running time_server.repy\")\r\n\r\n\r\n timelength = 25 # Max length of string, representing the time, to be received\r\n shuffledserveraddresses = random_sample(serveraddresses,min(len(serveraddresses),5))\r\n\r\n # Open a connection with a random server hosting time_server.repy\r\n timeobtained = False\r\n serverindex = 0\r\n while serverindex < len(shuffledserveraddresses):\r\n remoteaddress = shuffledserveraddresses[serverindex].split(':')\r\n remoteip = remoteaddress[0]\r\n remoteport = int(remoteaddress[1])\r\n\r\n try:\r\n sockobject = timeout_openconn(remoteip,remoteport)\r\n except Exception:\r\n serverindex +=1\r\n else:\r\n timeobtained = True\r\n break\r\n\r\n\r\n if not timeobtained:\r\n raise Exception(\"Unable to open connection with any of the \",len(shuffledserveraddresses),\"servers running time_server.repy.\")\r\n\r\n\r\n currenttime =''\r\n while '$' not in currenttime:\r\n currenttime += sockobject.recv(20)\r\n sockobject.close()\r\n currenttime = float(currenttime[:-1])\r\n\r\n # finally, set the time\r\n time_settime(currenttime)\r\n\r\n return shuffledserveraddresses[serverindex]", "def time(self):\n raise \"use method time of class ReactorNet\"\n #return _cantera.reactor_time(self.__reactor_id)", "def tcp_server_thread():\r\n while True:\r\n temp_socket, _ = server.accept()\r\n time_now = datetime.datetime.utcnow().timestamp()\r\n print(time_now)\r\n time_now = struct.pack(\"!f\", time_now)\r\n temp_socket.send(time_now)\r\n temp_socket.close()", "def receiver():\r\n global data\r\n DW1000.newReceive()\r\n DW1000.receivePermanently()\r\n DW1000.startReceive()", "def receive(self, timeout=None) -> bytes:", "def receiver(): \n global data\n DW1000.newReceive()\n DW1000.receivePermanently()\n DW1000.startReceive()", "def receive(self):\n pass", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # 
FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def rcvDataTimeOut(self, num=1, tou=0.1):\r\n\t\t# pilisco il buffer prima della ricezione\r\n\t\tself.buf = \"\"\r\n\t\t# contatore caratteri da ricevere\r\n\t\tself.cou = 0\r\n\r\n\t\t# referenzio lo start per il tOut\r\n\t\ttim = clock()\r\n\r\n\t\t# flag\r\n\t\tflg = True\r\n\t\twhile flg:\r\n\t\t\t# calcolo il tempo trascorso\r\n\t\t\tnow = (clock() - tim)\r\n\t\t\tif now > tou:\r\n\t\t\t\t# forzo l'uscita (tempo scaduto)\r\n\t\t\t\tflg = False\r\n\t\t\t# verifico dati in ricezione\r\n\t\t\tcou, dat = self.rcvString(num)\r\n\t\t\tif cou:\r\n\t\t\t\t# salvo i dati ricevuti\r\n\t\t\t\tself.buf += dat\r\n\t\t\t\tself.cou += cou\r\n\t\t\t\tif self.cou >= num:\r\n\t\t\t\t\tflg = False\r\n\t\t# ritorno False se è scaduto il tempo\r\n\t\tif now > tou:\r\n\t\t\tret = False\r\n\t\t# ritorno True se ho ricevuto tutti i dati\r\n\t\telse:\r\n\t\t\tret = True\r\n\t\treturn (ret, self.buf)", "def datagramReceived(self, datagram_, address):\n #if DEBUG: print \"Datagram received from \"+ repr(address) \n datagram = simplejson.loads(datagram_)\n if not hasattr(datagram,'keys'):\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n pdb.set_trace()\n return\n if 'loop_started' in datagram.keys():\n return\n if 'shotnumber_started' in datagram.keys():\n #dc.get('_exp_sync').shotnumber = datagram['shotnumber_started']\n #return\n self.server.pxi_time = float(datagram['time'])\n self.server.pxi_time_server_time = float(datagram['time']) - float(time.time())#Make this so that it synchronizes the clocks CP\n\n msg = {\"data_context\": 'PXI',\n \"shotnumber\":datagram['shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n if DEBUG: print datagram\n \n self.server.active_parser_ip = datagram['server_ip_in_charge']#Make this so that it synchronizes the clocks CP\n self.server.active_parser_port = datagram['server_port_in_charge']#Make this so that it synchronizes the clocks CP\n dc = self.server.command_library.__determineContext__({'data_context':'PXI'}) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['shotnumber_started'])\n print \"Shot started:\", datagram['shotnumber_started'], \"pxi_time:\", self.server.pxi_time, \"time.time():\", float(time.time())\n return\n \n \n if 'fake_shotnumber_started' in datagram.keys():\n if self.server.ip == '10.1.1.124':\n return\n print datagram\n msg = {\"data_context\": datagram['data_context'],\n \"shotnumber\":datagram['fake_shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n dc = self.server.command_library.__determineContext__(datagram) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['fake_shotnumber_started'])\n if DEBUG: print \"Fake Shot started:\", datagram['fake_shotnumber_started'], \"pxi_time:\", datagram['time'], \"time.time():\", float(time.time())\n dc.update({'Test_instrument':glab_instrument.Glab_Instrument(params={'server':self.server,'create_example_pollcallback':True})})\n return\n \n try:\n datagram[\"server_ping\"] \n except KeyError:\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n return\n ping_command = commands.ServerCommand(self.server, self.server.catch_ping, datagram)\n 
self.server.command_queue.add(ping_command)", "def entrytime_message_received(msg):\n data = IndexIntData.from_msg(msg)\n if data is None:\n _LOGGER.error(\"Undable to parse MQTT message\")\n\n if data.value == 1:\n self._entrytime.add(data.index)\n else:\n self._entrytime.discard(data.index)\n\n _LOGGER.debug(\"Entry Time: %s\", self._entrytime)\n self.async_write_ha_state()", "def LingerTime(self) -> int:", "def lastMessageReceived():", "def receive_packet(self, time=0):\n if time == 0:\n try:\n return self.in_queue.get(False)\n except queue.Empty:\n return None\n elif time < 0:\n try:\n return self.in_queue.get(True)\n except queue.Empty:\n return None\n else:\n try:\n return self.in_queue.get(True, time)\n except queue.Empty:\n return None", "def receive_and_probing_time(self):\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n\t\t \t if probe.complete():\r\n\t\t\t \t\t latest_completion = max(latest_completion, probe.completion_time)\r\n return latest_completion - self.__arrival_time", "def receive(self, message):", "async def time(self, ctx):\r\n time = market_time()\r\n await ctx.send(f'It is currently {time.time().strftime(\"%H:%M:%S\")} EDT for the market.')", "async def time(self, ctx):\n global time_msg\n if timer > 0:\n if time_msg:\n await time_msg.delete()\n time_msg = None\n minutes = timer // 60\n seconds = timer % 60 if timer % 60 > 9 else '0' + str(timer % 60)\n time_msg = await ctx.send(embed=make_time_embed('work'))\n else:\n # await ctx.send(\"No timer active.\")\n await send_msg(ctx, \"❌\", \"No Timer Active\", color='error')\n await ctx.message.delete()", "def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")", "def _slot_timer_info_later(self, _sender, _data):\r\n self.request_info()\r\n self._info_timer = None", "def total_timer(msg):\n start = timer()\n yield\n t = timer() - start\n _TOTAL_TIMER_DATA[msg].feed(t)", "def request_realtime_info(self):\n self.socket_datastream.sendto(b\"!r\", self.ip_port_arduino_datastream)\n self.socket_datastream.sendto(b\"!s\", self.ip_port_arduino_datastream)", "def listenRtp(self):\r\n\t\twhile True:\r\n\t\t\tstartTime = time()\r\n\t\t\tdata, address = self.rtpSocket_client.recvfrom(16384)\r\n\t\t\tendTime = time()\r\n\r\n\t\t\tif (data):\r\n\t\t\t\tself.recvRtpPacket.decode(data)\r\n\t\t\t\tself.cacheFile = self.writeFrame(self.recvRtpPacket.getPayload())\r\n\t\t\t\tself.updateMovie(self.cacheFile)\r\n\r\n\t\t\t\tcurrentFrameNbr = self.recvRtpPacket.seqNum()\r\n\t\t\t\tcurrent = self.totalTime - 0.05 * currentFrameNbr\r\n\t\t\t\tcurrMin = current / 60\r\n\t\t\t\tcurrSec = current % 60\r\n\t\t\t\t\r\n\t\t\t\tself.progress['value'] = 0.05 * currentFrameNbr\r\n\r\n\t\t\t\tif currMin < 10:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: 0%d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.time.configure(text=\"Time Left: %d:%d\" % (currMin, currSec), width=12, heigh=2)\r\n\t\t\t\t\tif currSec < 10:\r\n\t\t\t\t\t\tself.time.configure(text=\"Time Left: %d:0%d\" % (currMin, currSec), width=12, heigh=2)\r\n\r\n\t\t\t\tself.networkStat.receivedPacketCount += 1\r\n\t\t\t\tself.networkStat.totalADR += (sys.getsizeof(data) / (endTime - startTime))\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tcontinue", "def send_time_length_info(self):\n min_rounds = self.min_num_turns\n wiz_time = 
sec_to_min_pretty(self.wizard_time_out)\n app_time = sec_to_min_pretty(self.apprentice_time_out)\n for agent in self.agents:\n message = f'This conversation continues for at least {min_rounds} rounds.\\n'\n t = wiz_time if _is_wiz(agent) else app_time\n message += (\n f'In your turn, please send your message within {t} minutes. '\n 'Otherwise you may be disqualified. '\n )\n if not _is_wiz(agent):\n message += (\n f'Note that you might have to wait up to {wiz_time} '\n 'mintes to receive a response from the other person.'\n )\n agent.observe(\n {\n 'id': constants.COORDINATOR_AGENT,\n 'text': message,\n 'episode_done': False,\n }\n )", "def main(connection, info, conf) :\r\n connection.rawsend(\"NOTICE %s :\u0001TIME %s\u0001\\n\" % (info[\"sender\"], time.strftime(\"%b %d %Y, %H:%M:%S %Z\")))", "def time( self, mess, args):\n return str(datetime.datetime.now())", "def time( self, mess, args):\n return str(datetime.datetime.now())", "def wait (self, seconds=0.0):\r\n\t\tstart_time = time.time()\r\n\t\twhile time.time() < start_time + seconds:\r\n\t\t\tself.receive()", "def on_timer(context, data_type, data):\n pass", "def exittime_message_received(msg):\n data = IndexIntData.from_msg(msg)\n if data is None:\n _LOGGER.error(\"Undable to parse MQTT message\")\n\n if data.value == 1:\n self._exittime.add(data.index)\n else:\n self._exittime.discard(data.index)\n\n _LOGGER.debug(\"Arming <10s: %s\", self._exittime)\n self.async_write_ha_state()", "def sync_time(self, event=None):\n if self.collect: return\n time_obj= localtime()\n serial_time = strftime(\"t%Y,%m,%d,%H,%M,%S\", time_obj)\n print(serial_time)\n self.system_timestamp = f\"\\nSystem start time is: {serial_time}\"\n print(serial_time.encode(encoding=\"ascii\"))\n self.ser.write(serial_time.encode(encoding=\"ascii\"))", "def ServerSyncReceived(self,message):", "def time(self):\r\n raise NotImplementedError", "def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1", "async def by_interval(self, ctx, *, time):\n try:\n t = str(time)\n t = ''.join(c for c in t if c.isdigit())\n a = dt.strptime(t, \"%Y%m%d%H%M\")\n b = dt.utcnow()\n pass\n except ValueError:\n return await self.bot.send_cmd_help(ctx)\n if a >= b:\n return await self.bot.send_cmd_help(ctx)\n if ctx.message.author.id in self.users:\n return await self.bot.say(\"You already have a drawing in progress\")\n if ctx.message.channel.id in self.queues:\n return await self.bot.say(\"That channel has a drawing in progress\")\n\n self.initialize(ctx.message.channel.id)\n await self.mkqueue(a, b, ctx.message.channel)\n self.users.append(ctx.message.author.id)\n await self.validate(ctx.message.channel, ctx.message.author)", "def example2():\n arrive_time=rand_arr_time.rand_arr_time(6,100000,1000) # Get packet arrive time, with option 2, 100000 packets, expected in 1000 seconds.\n return arrive_time", "def negotiate_time(self, update, context):\n chat_id = update.effective_chat.id\n response_code = update.callback_query[\"data\"] # eta_later, eta_never, eta_20:45, etc.\n log.info(\"Offer @%s raw: @%s\", update.effective_chat.id, response_code)\n\n if response_code == \"eta_never\":\n # the user pressed the button to say they're cancelling their offer\n self.send_message(chat_id, c.MSG_THANKS_NOTHANKS)\n context.user_data[\"reviewed_request\"] = None\n context.user_data[\"state\"] = c.State.AVAILABLE\n\n elif response_code == \"eta_later\":\n # Show them more options in the interactive menu\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=\"Alege timpul\",\n 
reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard()),\n )\n else:\n # This is an actual offer, ot looks like `eta_20:40`, extract the actual timestamp in UTC\n offer = response_code.split(\"_\")[-1]\n log.info(\n \"Relaying offer @%s UTC (%s %s)\", offer, utc_short_to_user_short(offer), c.TIMEZONE\n )\n\n # tell the backend about it\n request_id = context.user_data[\"reviewed_request\"]\n self.backend.relay_offer(request_id, chat_id, offer)\n\n # tell the user that this is now processed by the server\n self.send_message(\n chat_id, (c.MSG_ACK_TIME % utc_short_to_user_short(offer)) + c.MSG_COORDINATING\n )", "def recv_time(self) -> float:\n return ntp_to_system_time(self.recv_timestamp)", "def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()", "def packet_arrival():\r\n return 1.0", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def rcvStrTimeOut(self, num=1, tou=0.1):\r\n\t\treturn self.rcvDataTimeOut(num, tou)", "def handle_incoming_message(self, new_message, queue_len):\n sender, other_machine_logical_time = new_message\n # fast-forward clock if other machine's time is ahead\n if other_machine_logical_time > self.logical_clock:\n self.update_clock(newtime=other_machine_logical_time)\n\n # increment time by 1, regardless of whether other machine's clock is ahead\n self.update_clock()\n\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"receive\",\n fromname=sender,\n queue_len=queue_len\n ))", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n self.speak(\"the current time is\")\r\n self.speak(time)", "def keepAliveReceived(self):", "def update_time(self):\n pass # Do nothing", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)", "def collect_data(self):\r\n self.vcp.read(self.vcp.inWaiting())\r\n while True:\r\n data = self.vcp.readline()\r\n data = data.decode(\"ASCII\")\r\n timestamp = \",\" + datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n data_timestamp = data + timestamp\r\n if not self.data_pause:\r\n self.update_anemometer_log.emit(data_timestamp)\r\n if self.stop_timer:\r\n break", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def clock_callback(data):\n global current_second\n current_second = data.clock.secs", "def track_tx(self) -> None:\n\n start = time()\n self.waiting = True\n while self.waiting:\n sleep(self.timer_tick)\n if time() - start >= self.time_threshold:\n if self.tx_type:\n msg = (\n f\"🕔 * {self.tx_type} * - tx sent from {str(self.sender.address)}\"\n f\" has exceeded threshold of {str(self.time_threshold)} seconds\"\n )\n else:\n msg = (\n f\"🕔 tx sent from {str(self.sender.address)} has exceeded threshold of \"\n f\"{str(self.time_threshold)} seconds\"\n )\n self.alert(msg)\n self.waiting = False\n self.sender = None\n 
self.tx_type = \"\"", "async def on_receive(self, room_id, inp_type, params):\n pass", "def test_time_request_message(self):\n expected_topic = self.factory.common_topic + WAPMF.TIME\n expected_payload = None\n expected_message = Message(expected_topic, expected_payload)\n\n serialized_message = self.factory.make_time_request()\n\n self.assertEqual(expected_message, serialized_message)", "def REP_watcher(self):\n while True:\n sleep(self.patience) # how often to check\n try:\n recent_REQ_sent_time = self.REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n sleep(1.0) # allow time for receipt of a REP\n try:\n recent_REP_recd_time = self.REP_recd_time.popleft()\n except IndexError: # there was a REQ, but no REP was received\n self.fix_comm_link()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n self.fix_comm_link()\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... so continue to loop until there is a time in REQ_sent_time\n pass", "def dataReceived(self, data):", "def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack", "def SendTimeout(self) -> int:", "def SendTimeout(self) -> int:", "async def by_times(self, ctx, *, times):\n try:\n t = str(times)\n start, end = t.split(' ')\n start = ''.join(c for c in start if c.isdigit())\n end = ''.join(c for c in end if c.isdigit())\n a = dt.strptime(start, \"%Y%m%d%H%M\")\n b = dt.strptime(end, \"%Y%m%d%H%M\")\n except ValueError:\n return await self.bot.send_cmd_help(ctx)\n if a >= b:\n a, b = b, a\n if b > ctx.message.timestamp:\n b = ctx.message.timestamp\n if a >= ctx.message.timestamp:\n return await self.bot.say(\"I can't read the future.\")\n if ctx.channel.id in self.queues:\n return await self.bot.say(\"That channel has a drawing in progress\")\n if ctx.message.author.id in self.users:\n return await self.bot.say(\"You already have a drawing in progress\")\n\n self.initialize(a.channel.id)\n await self.mkqueue(a, b, ctx.message.channel)\n self.users.append(ctx.message.author.id)\n await self.validate(ctx.message.channel, ctx.message.author)", "def simplecallback(self,u,t):\n print \"time {0:.2}\".format(t)", "async def mutetime(self, ctx):\n self.data_check(ctx)\n server = ctx.message.server\n \n await self.bot.say(\"Please make sure to set the time with the correct time prefix at the end. (*For minutes 'm', for hours 'h'*)\\n\\nPlease type your timeframe now.\")\n muteroletime = await self.bot.wait_for_message(channel = ctx.message.channel, author = ctx.message.author)\n\n if \"m\" in muteroletime.content or \"s\" in muteroletime.content or \"h\" in muteroletime.content:\n self.riceCog2[server.id][\"mutetime\"] = muteroletime.content\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Default mute time is now: **{}**\".format(muteroletime.content))\n else:\n await self.bot.say(\"You've done something wrong! 
Please make sure that the format is correct!\")\n return", "def _calibrate(self, t_send, t_recv, server_timestamp):\n pass", "def run_timer():\n \n start_time = time.time()\n print(start_time)\n stopper = input(\"Press enter to stop\")\n end_time = time.time()\n print(\"You have finished collecting the blocks!\")\n duration = int(end_time - start_time)\n if duration > 25:\n print(\"You were too slow collecting the blocks, better luck next time\")\n else: \n print(\"Good job speedy, you collected all the blocks before time ran out!\")", "def text(message):\n room = session.get('room')\n emit('timerupdate', {'msg': message}, room=room)", "def takeoff_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the armed message\n\tprint a2, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init2[2]\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1", "def timing(self, stat, time, sample_rate=1):\n stats = {stat: \"%f|ms\" % time}\n self.send(stats, sample_rate)", "def GAME_TIME_ADVANCE(dt):", "def time(self) -> int:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def set_time(self, time_fn):\n self.time_fn = time_fn\n self.socket.send_string(f'T {time_fn()}')\n return self.socket.recv_string()", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")", "def REP_watcher():\n global REQ_sent_time, REP_recd_time, pid, patience_seconds\n while True:\n time.sleep(patience_seconds) # how often to check\n try:\n recent_REQ_sent_time = REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n time.sleep(patience_seconds) # allow time for receipt of the REP\n try:\n recent_REP_recd_time = REP_recd_time.popleft()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n continue # Got REP after REQ so continue to next REQ\n except IndexError: # there was a REQ, but no timely REP\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... continue to loop until there is a time in REQ_sent_time\n pass", "async def _time(self, ctx):\n try:\n await self.bot.say('@{0}:'.format(ctx.message.author.name) + '\\nDate is: **' + time.strftime(\"%A, %B %d, %Y\") + '**' + '\\nTime is: **' + time.strftime(\"%I:%M:%S %p\") + '**')\n except Exception as e:\n await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))", "async def current_time_handler():\n\n return time_millis()", "async def _monitor_recv(self):\n\n while True:\n await RisingEdge(self.clock)\n await ReadOnly()\n if self.bus.valid.value:\n self._recv(int(self.bus.data.value))", "def round_trip_time(self):\n ..." ]
[ "0.70089287", "0.6884526", "0.6857238", "0.6805326", "0.6590571", "0.65366685", "0.6528338", "0.64993405", "0.6413793", "0.6379032", "0.63445044", "0.6328772", "0.62992907", "0.62992907", "0.6283762", "0.6253673", "0.6244733", "0.6216846", "0.62057245", "0.6201179", "0.62001115", "0.6127045", "0.61028826", "0.6061117", "0.60587126", "0.6058679", "0.60187", "0.5997536", "0.59686345", "0.5929494", "0.59264094", "0.5914918", "0.5903547", "0.58958834", "0.5890203", "0.5887984", "0.58806294", "0.58453554", "0.5843179", "0.5829134", "0.5820824", "0.5816194", "0.5803766", "0.5802108", "0.5800809", "0.5799338", "0.57974726", "0.5781871", "0.5781871", "0.5778525", "0.57635814", "0.5763389", "0.5758354", "0.5757629", "0.5755631", "0.57468385", "0.57408893", "0.573905", "0.57378113", "0.5733024", "0.57247764", "0.5724705", "0.5718902", "0.5709118", "0.5701206", "0.5690583", "0.56735027", "0.56612086", "0.56551373", "0.5653146", "0.5637873", "0.56318885", "0.5630881", "0.562153", "0.5614142", "0.5613706", "0.5612205", "0.5601361", "0.55973107", "0.55956256", "0.55956256", "0.5594872", "0.558349", "0.557674", "0.5571685", "0.5567834", "0.55607235", "0.5559556", "0.5559258", "0.555915", "0.5555585", "0.55541825", "0.55541825", "0.55541825", "0.5552345", "0.55521417", "0.55478275", "0.553972", "0.5537687", "0.55263877", "0.5524506" ]
0.0
-1
Overloading the addition operator for fields types
def __add__(self, other):
    if isinstance(other, type(self)):
        # always create new fields, since otherwise c = a - b changes a as well!
        p = fields(self)
        p.elec[:] = self.elec + other.elec
        p.magn[:] = self.magn + other.magn
        return p
    else:
        raise DataError("Type error: cannot add %s to %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def __add__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value + other.value),\"\")", "def __radd__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(value + self._real, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(value._real + self._real, value._imag + self._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n value.__class__.__name__, self.__class__.__name__\r\n )\r\n )", "def __iadd__(self, other):\n\n return self + other", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def __add__(self, other):\r\n return self.add(other)", "def __add__(self, other):\n return self.__class__(\n {\n name:\n self.__getattribute__(name) + other.__getattribute__(name)\n for name in self._fields\n }\n )", "def __radd__(self, other):\n return self + other", "def __radd__(self, other):\n return self + other", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def __add__(self, other):\n pass", "def __add__(self, other):\n pass", "def __add__(self, other):\n return self.add(other)", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__(self, other: Any) -> TypeValue:\n if isinstance(other, np.ndarray):\n return other + float(self)\n\n return self._like_self_from_float(\n float(self) + self._other_same_units(other)\n )", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def __add__(self, other):\n if isinstance(other, EncryptedNumber):\n return self._add_encrypted(other)\n else:\n return self._add_scalar(other)", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)", "def plus(self, a, b):\n return a + b", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, 
other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def add(self, a, b):\n return a + b", "def __iadd__(self, other):\n return (hasattr(other, '__iter__') and self.applyMaterFunc or self.applyScalarFunc)(other, '__add__')", "def _add(self, other):\n raise NotImplementedError(\n \"{} does not support addition\".format(type(self)))", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj", "def __add__(self, other):\n base = deepcopy(self)\n base += other # (+=) == __iadd__\n return base", "def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)", "def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out", "def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts + other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts + other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def __add__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) + float(argument))", "def __add__(self, other):\n \"*** YOUR CODE HERE ***\"", "def __add__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() + other", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, other):\n return self.__add__(other)", "def __radd__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) + float(argument))", "def __iadd__(self, other):\n raise NotImplementedError(\"Implement this if needed\")", "def __add__(self, other):\n return add_mps(self, other)", "def 
ADD (self, n1, n2):", "def __radd__(self, other):\n\n return self.__add__(other)", "def __iadd__(self,l):\r\n\t\t\r\n\t\treturn self.add(l)", "def __add__(self, other: PgvOrInt) -> ZqValue:\n\n if isinstance(other, int):\n return ZqValue(self.group, (self._value + other) % self.group.q)\n\n if isinstance(other, ZqValue):\n if self.group is not other.group:\n raise TypeError(\"Group mismatch\")\n\n return ZqValue(self.group, (self._value + other._value) % self.group.q)\n\n return NotImplemented", "def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)", "def __add__(self, other):\n raise NotImplementedError(\"Implement this if needed\")", "def add(line, *fieldnames):\n return sum(map(safe, map(lambda fieldname: line[fieldname], fieldnames)))", "def __add__(self, other):\n return asarray(add(self, other))", "def add(a: Decimal, b: Decimal) -> Decimal:\n return a + b", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def __add__(self, other):\n\t\tif isinstance(other, Value):\n\t\t\treturn Value(self.val + other.val, sqrt(self.error**2 + other.error**2))\n\t\telse:\n\t\t\treturn Value(self.val + other, self.error)", "def sum(cls, field):\n return lambda x,y: ((type(x)==int) and [x+1] or [2])[0]", "def __add__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x+y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __radd__(self, left):\n return self.value() + left", "def __add__(self, other):\n # YOUR CODE HERE\n raise NotImplementedError()", "def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)", "def __add__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator+other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def __radd__(self, other):\n return self.runtime.add(self, other)", "def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)", "def __radd__(self, other):\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented", "def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)", "def __iadd__(self, increment):\n 
self.update(self.val + increment)\n return self", "def add(self, value):", "def __iadd__(self, other):\n if not isinstance(other, type(self)):\n raise TypeError(\"Only DFs of the same type can be combined.\")\n self.dfs.extend(other.dfs)\n self.counts.extend(other.counts)\n self._unique = False\n self._original += other._original\n if self.label is None:\n if other.label is not None:\n self.label = other.label\n else:\n if other.label is not None:\n self.label += \"+\" + other.label\n self.tags.update(other.tags)\n self._average = None\n return self", "def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num", "def __add__(self, other):\n if isinstance(other, Trit):\n value = (other,)\n else:\n value = tuple(other)\n return Trits(self.trits + value)", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __radd__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r + self.r, other.i + self.i)", "def __add__(self,l):\r\n\t\t\r\n\t\t# add\r\n\t\ta = self.add(l)\r\n\t\t\r\n\t\treturn a", "def __iadd__(self, other):\n\n other_data = self._setup_numeric(other)\n self.data[:] = self.data + other_data\n\n return self", "def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self", "def add(self, x):\n if type(x) is int:\n self.real += x\n else:\n self.real = self.real + x.real\n self.imag = self.imag + x.imag", "def __add__(self, other):\n\t\ttry:\n\t\t\tval = self.val + other.val\n\n\t\t\t# Handle case when self.der or other.der contains None \n\t\t\t# i.e. 
self or other is a vector of scalars, not of Vars\n\t\t\tlen_self_der_shape = len(self.der.shape)\n\t\t\tlen_other_der_shape = len(other.der.shape)\n\n\t\t\tif not len_self_der_shape and len_other_der_shape:\n\t\t\t\tder = other.der\n\t\t\telif len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = self.der\n\t\t\telif not len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = None\n\t\t\telse:\n\t\t\t\tder = self.der + other.der\n\t\texcept AttributeError:\n\t\t\tval = self.val + other\n\t\t\tder = self.der\n\t\treturn Var(val, der)", "def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf", "def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def __add__(self,rhs):\n\n\t\tif isinstance(rhs,self.__class__):\n\n\t\t\tassert self.side_angle == rhs.side_angle\n\t\t\tassert self.data.shape == rhs.data.shape\n\n\t\t\tnew_data = self.data + rhs.data\n\n\t\telif isinstance(rhs,numbers.Number):\n\n\t\t\tnew_data = self.data + rhs\n\n\t\telif type(rhs) == np.ndarray:\n\n\t\t\tassert rhs.shape == self.data.shape\n\t\t\tnew_data = self.data + rhs\n\n\t\telse:\n\n\t\t\traise TypeError(\"The right hand side cannot be added!!\")\n\n\n\t\t#Copy the extra attributes as well\n\t\tkwargs = dict()\n\t\tfor attribute in self._extra_attributes:\n\t\t\tkwargs[attribute] = getattr(self,attribute)\n\n\t\treturn self.__class__(new_data,self.side_angle,masked=self._masked,**kwargs)", "def addition(self, first_value, second_value):\n return first_value + second_value", "def addition(a, b):\n pass", "def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)", "def __add__(self, other):\n if not isinstance(other, (timedelta, relativedelta, number_types)):\n return NotImplemented\n\n if isinstance(other, number_types):\n other = timedelta(seconds=other)\n\n if isinstance(other, timedelta):\n result = super(self.__class__, self).__add__(other)\n else:\n result = other.__add__(self)\n\n return self.fromdatetime(result)", "def __iadd__(self, other: t.Any) -> te.Self:\n return self._op_inplace('__iadd__', other)", "def vars_add ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) + float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## shortcut \n if 0 == var1 : return var2 ## SHORTCUT\n #\n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_add ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_add ( var1 , var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = 
Ostap.MoreRooFit.Addition ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __add__(self, other):\n return union(self, other, check_convex=True)", "def __iadd__(self, m):\n if self.__mm_type(m):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)+m.val(i,j),i,j)\n return self", "def jsonrpc_add(self, a, b):\n return a + b", "def add(self, *args):\n sum = 0\n for arg in args:\n sum += float(arg)\n return sum", "def __add__(self, other):\n return (self.x + other.x, self.y + other.y)", "def addition(x, y):\n\n if isinstance(x, int) and isinstance(y, int):\n return x + y\n else:\n return (\"Invalid type.\")", "def __add__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if any(\n isinstance(other, term.__class__) and other.name == term.name\n for term in self._terms\n ):\n msg = (\n f\"There is already a term of type {other.__class__} with name \"\n f\"'{other.name}' in {self.__class__}. Please provide a different \"\n f\"name for {other}.\"\n )\n raise ValueError(msg)\n else:\n result._terms.append(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result += term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result", "def add( a, b ):\n return a + b", "def __add__(self, rhs):\n if isinstance(rhs, UTPS):\n return UTPS(self.tc + rhs.tc)\n elif numpy.isscalar(rhs):\n retval = UTPS(numpy.copy(self.tc))\n retval.tc[0] += rhs\n return retval\n else:\n raise NotImplementedError" ]
[ "0.70417506", "0.69787914", "0.688292", "0.6869126", "0.6797251", "0.67966515", "0.67823577", "0.6694425", "0.66731805", "0.6661316", "0.6661316", "0.66475755", "0.6635631", "0.6635631", "0.6613988", "0.66052085", "0.65744835", "0.6570402", "0.6558253", "0.65581644", "0.65564525", "0.65500927", "0.6512995", "0.6512995", "0.6503215", "0.6497609", "0.64697087", "0.64539814", "0.6446988", "0.6446401", "0.64417356", "0.6424081", "0.64236265", "0.6422569", "0.6418945", "0.6396743", "0.63637114", "0.6350255", "0.63307804", "0.63307804", "0.63307804", "0.63307804", "0.63307804", "0.63307804", "0.63307804", "0.6323252", "0.6315378", "0.630451", "0.62967885", "0.62756896", "0.62439024", "0.62328285", "0.62327033", "0.62103623", "0.6200261", "0.61945605", "0.61907774", "0.61894095", "0.6172528", "0.6125412", "0.6123893", "0.6114955", "0.61149454", "0.6113527", "0.6112825", "0.6110127", "0.6107273", "0.61059415", "0.61010593", "0.6098292", "0.60981673", "0.6093959", "0.6087527", "0.60870445", "0.608083", "0.60773903", "0.6077183", "0.6058992", "0.6058923", "0.6058682", "0.60578424", "0.6031394", "0.60297453", "0.6025654", "0.60164344", "0.6014812", "0.6009027", "0.5992644", "0.59861124", "0.59827244", "0.5980922", "0.59752315", "0.5970713", "0.5969477", "0.5963351", "0.5959166", "0.5951704", "0.5950573", "0.59454125", "0.59435475" ]
0.7395196
0
Overloading the subtraction operator for fields types
def __sub__(self, other):
    if isinstance(other, type(self)):
        # always create new fields, since otherwise c = a - b changes a as well!
        p = fields(self)
        p.elec[:] = self.elec - other.elec
        p.magn[:] = self.magn - other.magn
        return p
    else:
        raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})", "def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})", "def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")", "def minus(self, a, b):\n return a - b", "def __rsub__(self, other):\n\t\treturn (-self).__add__(float(other))", "def __rmul__(self, other):\n\n if isinstance(other, float):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = other * self.elec\n p.magn[:] = other * self.magn\n return p\n else:\n raise DataError(\"Type error: cannot multiply %s with %s\" % (type(other), type(self)))", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def __sub__(self, other):\n return self.subtract(other)", "def subtract(self, other, label=None, atol=1.0E-12):\n # check the two solutions share the same grid\n assert numpy.allclose(self.x, other.x, atol=atol)\n assert numpy.allclose(self.y, other.y, atol=atol)\n assert self.values.shape == other.values.shape\n if not label:\n label = self.label + '-subtracted'\n return Field(label=label,\n time_step=self.time_step,\n x=self.x, y=self.y,\n values=self.values - other.values)", "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __isub__(self, other):\r\n if isinstance(other, vec4):\r\n self.x-=other.x\r\n self.y-=other.y\r\n self.z-=other.z\r\n self.w-=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for -=\"", "def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)", "def __sub__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Subtract, value)\n return out", "def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))", "def __sub__(self, other):\n return self + other.__neg__()", "def __sub__(self, other):\n return self + other.__neg__()", "def subtract(self, other):\n return self.add(other.neg())", "def __sub__(self,other):\n self._obj['u'] -= 
other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __neg__(self):\n return UnaryMinus(self)", "def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other", "def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)", "def __sub__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) - float(argument))", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def subtract(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n difference = str(ft.reduce(oper.sub,values))\n\n return difference", "def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)", "def __neg__(self) -> ColumnOperators:\n return self.operate(neg)", "def __sub__ (self,other):\n if (self.debug): print(f'enter fraction.__sub__ with {other}')\n f2 = fraction(-1*other.value[0],other.value[1])\n f3 = self.__add__(f2)\n return f3", "def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)", "def __rsub__(self, other):\n if isinstance(other, int):\n return self.__neg__().__add__(- other)\n return NotImplemented", "def __neg__(self):\n return 0 - self", "def __sub__(self, other):\n return self._operation_sub(self, other)", "def decrease(obj: Any, field: AnyStr):\n if hasattr(obj, field):\n value = getattr(obj, field)\n if isinstance(value, int):\n setattr(obj, 
field, value - 1)", "def __sub__(self, other):\n return Difference(self, other)", "def __sub__(self, tc):\n tc = TwosComplement(tc)._negative()\n return self.__add__(tc)", "def __rsub__(self, left):\n return left - self.value()", "def __sub__(self, other):\n if isinstance(other, real_timedelta):\n return self + timedelta(-other.days)\n if isinstance(other, real_date):\n days1 = self.toordinal()\n days2 = other.toordinal()\n return timedelta(days1 - days2)\n return NotImplemented", "def __sub__(self, other):\r\n if isinstance(other, mat4):\r\n return mat4(map(lambda x,y: x-y, self.mlist, other.mlist))\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)", "def __sub__(self,other):", "def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)", "def __neg__(self):\n return self.neg()", "def __neg__(self):\n return (-1)*self", "def __neg__(self):\n return self.__mul__(-1)", "def sub(self, a, b):\n return a - b", "def __rsub__(self, other):\r\n return other + (-self)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def subtract(self):\n return self._do_calc(self.subtracter)", "def __sub__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator-other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def subtract(self, m): \n f = m.negate()\n return self.add(f)", "def __rsub__(self, other):\n return self._operation_sub(other, self)", "def __sub__(self, other):\n if not hasattr(other, \"dtype\") or self.dtype != other.dtype:\n raise TypeError(\"Can only calculate distance between two DFs. 
of \"\n \"the same type.\")\n return abs(np.dot(self.df, other.df)/(self.norm*other.norm)-1.)", "def __rsub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(other, self)", "def subtract(self, other):\n if is_matrix(other):\n return self._sum_matrix(other, -1)\n elif mathutil.is_scalar(other):\n return self._sum_scalar(other, -1)\n else:\n self._logger.error(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))\n raise TypeError(\n \"'Matrix' instance, int, float or complex expected, not '{}'\".format(type(other)))", "def __sub__(self, v):\n return self + (-1) * v", "def subtraction(a, b):\n return a - b", "def __sub__(self, other):\n if (not isinstance(other, self.__class__) and\n isinstance(other, datetime)):\n other = self.fromdatetime(other)\n\n result = super(self.__class__, self).__sub__(other)\n\n if isinstance(result, datetime):\n return self.fromdatetime(result)\n elif isinstance(result, timedelta):\n return Delta.fromtimedelta(result)\n else:\n return result", "def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def subtract(a, b):\n return a - b", "def subtract(a, b):\n return a - b", "def __truediv__(self, other: FieldElement) -> FieldElement:\n if self.prime != other.prime:\n raise TypeError(\"Cannot divide two numbers in different Fields\")\n num = (self.num * pow(other.num, self.prime - 2, self.prime)) % self.prime\n return self.__class__(num, self.prime)", "def decrement(self, field_name, value, **kwargs):\n self.properties.update(kwargs)\n model = self.model.get_subclass_model(**self.properties)\n\n self.validate(field_name, value, model, operation_type='decrement')\n\n return self.process(field_name, value, operation_type='decrement', **kwargs)", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))", "def __sub__(self, other):\n return self.getArea() - other.getArea()", "def __sub__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Sub.apply(self, other)", "def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)", "def subtraction(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a - b", "def __sub__(self, other):\n if not self.unit.is_compatible(other.unit):\n raise TypeError('Cannot subtract two quantities with incompatible units \"%s\" and \"%s\".' 
% (self.unit, other.unit))\n value = self._value - other.value_in_unit(self.unit)\n unit = self.unit\n return Quantity(value, unit)", "def __sub__(self, other):\n if isinstance(other, complex):\n return Power(self.power - other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power - other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)", "def subtract(self, other=None, **units):\n if isinstance(other, (datetime, timedelta, relativedelta)):\n return self - other\n\n units = {unit: -units.get(unit, 0) for unit in SHIFT_UNITS}\n\n return self.shift(**units)", "def subtraction():\r\n error_handler()\r\n f1.delete(0, END)\r\n s1 = float(operand.get())\r\n s2 = float(operator.get())\r\n result = s1 - s2\r\n f1.insert(10, str(result))", "def __sub__(self, obj):\n if isinstance(obj, Matrix):\n if self.m != obj.m or self.n != obj.n:\n raise exc.ComformabilityError(\n \"matrices must have the same dimensions\")\n if type(self) is not type(obj):\n raise TypeError(\n \"matrices must be the same type\")\n data = [[self[i, j] - obj[i, j]\n for j in range(self.n)]\n for i in range(self.m)]\n elif Matrix.is_numeric(obj):\n self._validate_scalar(obj)\n data = [[self[i, j] - obj\n for j in range(self.n)]\n for i in range(self.m)]\n else:\n raise TypeError(\n \"cannot subtract object of type \" + type(obj).__name__ +\n \" to matrix\")\n return self.__class__(self.m, self.n, data)", "def __rsub__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return other - obj\n newValue = other.value - self.value\n\n return self._newMV(newValue)", "def __sub__(self, other):\n # type: (object) -> Fraction\n dx = other\n if type(other) is float:\n dx = dectofr(other)\n return Fraction(self.numerator * dx.denominator - self.denominator * dx.numerator,\n self.denominator * dx.denominator)", "def __neg__(self) -> 'SInt':\r\n return self.complement()", "def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)", "def __sub__(self, other):\n return mldivide(self, other)", "def subtractAllNumericHas (self, other):\n \n if self.hasEpoch():\n if other.hasEpoch():\n self.epoch -= other.epoch\n \n if self.hasUtcOffsetMinutes():\n if other.hasUtcOffsetMinutes():\n self.utcOffsetMinutes -= other.utcOffsetMinutes\n \n \n pass", "def sub(self, b):\n self.a -= float(b)", "def __sub__(self, other: 'SInt') -> 'SInt':\r\n return self + other.complement()", "def __sub__(self,l):\r\n\t\t\r\n\t\t# add negative\r\n\t\ts = self.subtract(l)\r\n\t\t\r\n\t\treturn s", "def __truediv__(self, other: Any) -> ColumnOperators:\n return self.operate(truediv, other)", "def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def __sub__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor 
dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] - other[i]\n\n return v", "def __sub__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] - other[idx] for idx in range(len(self))])", "def __neg__(self):\n return Quantity(-(self._value), self.unit)", "def subtract(self, value):\n return self.number - value", "def __sub__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if other not in result:\n msg = f\"Term {other} not in {self.__class__}.\"\n raise ValueError(msg)\n else:\n result._terms.remove(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result -= term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result", "def subtraction(x, y):\n return x - y", "def subtract(first, second):\n return first - second", "def __sub__(self, other):\n if isinstance(other, Vector):\n a = self._ar - other._ar\n else:\n a = self._ar - numpy.array(other)\n return Vector(a)", "def __isub__(self, other):\n other_data = self._setup_numeric(other)\n self.data[:] = self.data - other_data\n\n return self", "def subtract(x, y):\n\n return x - y" ]
[ "0.68123215", "0.65827966", "0.658205", "0.65601283", "0.6536032", "0.6514197", "0.6397643", "0.6376854", "0.6373891", "0.63729715", "0.6359867", "0.6323568", "0.63116324", "0.6266902", "0.62532926", "0.624705", "0.624705", "0.6241416", "0.62308973", "0.6229982", "0.6229982", "0.62059516", "0.62044346", "0.6200343", "0.61568284", "0.6128242", "0.6122729", "0.61196035", "0.6110786", "0.60993844", "0.60900253", "0.6085398", "0.60837835", "0.60688645", "0.60635555", "0.60377324", "0.6022836", "0.59841627", "0.5971921", "0.5953815", "0.59489775", "0.5928725", "0.59222317", "0.58899015", "0.5887835", "0.5872523", "0.58635205", "0.5857382", "0.58570963", "0.58275956", "0.5821262", "0.5821262", "0.5821262", "0.58071196", "0.58018184", "0.58017755", "0.5783013", "0.57781327", "0.5768169", "0.5767194", "0.5764656", "0.575409", "0.5745777", "0.5727783", "0.5727783", "0.57182145", "0.5710504", "0.5706783", "0.5705312", "0.57045037", "0.56952125", "0.5689747", "0.5688099", "0.56729484", "0.5669398", "0.5664304", "0.56639665", "0.56619346", "0.5659585", "0.565863", "0.5655496", "0.5654137", "0.56483704", "0.5646744", "0.5643842", "0.5638404", "0.5637064", "0.5628897", "0.5628277", "0.562617", "0.56253934", "0.5617032", "0.56119424", "0.56103677", "0.5609827", "0.55895", "0.5587331", "0.5569237", "0.5563495", "0.55631185" ]
0.7506139
0
Overloading the multiply with factor from right operator for fields types
def __rmul__(self, other): if isinstance(other, float): # always create new fields, since otherwise c = a - b changes a as well! p = fields(self) p.elec[:] = other * self.elec p.magn[:] = other * self.magn return p else: raise DataError("Type error: cannot multiply %s with %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __mul__(self, other: Any) -> ColumnOperators:\n return self.operate(mul, other)", "def __mul__(self: _TT, other: float) -> _TT:\n return type(self)(str(self.value * other),\"\")", "def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)", "def multiplier(self) -> global___Expression:", "def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})", "def __mul__(self, other):\r\n return self.prod(other)", "def __mul__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Mul.apply(self, other)", "def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)", "def multiply(self: T, other: T) -> T:", "def __mul__(self, A):\n pass", "def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def __mul__(self, other):\n # print other\n if type(other) == int or type(other) == float:\n return self.scale(other)\n elif type(other) == Vector:\n return self.dot(other)\n else:\n return NotImplemented", "def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))", "def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)", "def __mul__(self, other: '__class__') -> '__class__':", "def __mul__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during multiplication '\n f'to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Times(self, other)", "def __mul__(self,rhs): \n\n\t\tif isinstance(rhs,self.__class__):\n\n\t\t\tassert self.side_angle == rhs.side_angle\n\t\t\tassert self.data.shape == rhs.data.shape\n\n\t\t\tnew_data = self.data * rhs.data\n\n\t\telif isinstance(rhs,numbers.Number):\n\n\t\t\tnew_data = self.data * rhs\n\n\t\telif type(rhs) == np.ndarray:\n\n\t\t\tassert rhs.shape == self.data.shape\n\t\t\tnew_data = self.data * rhs\n\n\t\telse:\n\n\t\t\traise TypeError(\"Cannot multiply by the right hand side!!\")\n\n\t\t#Copy the extra attributes as well\n\t\tkwargs = dict()\n\t\tfor attribute in self._extra_attributes:\n\t\t\tkwargs[attribute] = getattr(self,attribute)\n\n\t\treturn self.__class__(new_data,self.side_angle,masked=self._masked,**kwargs)", "def __mul__(self, other):\n if type(other) == int or type(other) 
== float:\n return Ccy(self.value * other, self.unit)\n else:\n raise TypeError(\"unsupported operand type(s) for *: 'Ccy' and \" + type(other).__name__)", "def multiply(self, a, b):\n return a * b", "def __mul__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Multiply, value)\n return out", "def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)", "def __mul__(self, factor):\n def mul(output, target, params):\n return self(output, target, params) * factor\n return type(self)(type(self).__reserved_init, mul, factor * (1. if self._fact is None else self._fact), self._name)", "def _mul(self, other):\n return None", "def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)", "def __mul__(self, other):\n if is_unit(other):\n # print \"quantity * unit\"\n # Many other mul/div operations delegate to here because I was debugging\n # a dimensionless unit conversion problem, which I ended up fixing within\n # the reduce_unit() method.\n unit = self.unit * other\n return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # print \"quantity * quantity\"\n # Situations where the units cancel can result in scale factors from the unit cancellation.\n # To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit\n return (self * other._value) * other.unit\n else:\n # print \"quantity * scalar\"\n return self._change_units_with_factor(self.unit, other, post_multiply=False)", "def __mul__(self, other) -> Union[float, TypeValue]:\n if isinstance(other, np.ndarray):\n return other * float(self)\n\n if isinstance(other, Value):\n logger.warning(\n \"Multiplying autode.Value returns a float with no units\"\n )\n return float(self) * self._other_same_units(other)\n\n return self._like_self_from_float(\n float(self) * self._other_same_units(other)\n )", "def __rmul__(self, *args, **kwargs):\n return self.__mul__(*args, **kwargs)", "def __rmul__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(mul, other)", "def _mul(a, b):\n return a * b", "def mul(self, a, b):\n return a * b", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def mul(a: Decimal, b: Decimal) -> Decimal:\n return a * b", "def multiply(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n product = str(ft.reduce(oper.mul,values))\n\n return product", "def __mul__ (self, other): \n if isinstance(other, Number):\n return self._scale(other)\n elif isinstance(other, Matrix):\n return self._mul(other)\n elif isinstance(other, Vector):\n return self._vecmul(other)\n else:\n return NotImplemented", "def __imul__(self, other):\r\n T = type(other)\r\n # vec4*=scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n self.x*=other\r\n 
self.y*=other\r\n self.z*=other\r\n self.w*=other\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for *=\"", "def mul(x, y):\n # dispatch to sparse methods\n if issparse(x):\n return x.multiply(y)\n elif issparse(y):\n return y.multiply(x)\n\n return mul_dense(x, y)", "def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts * other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)", "def __mul__(self, other):\n\n return self._mul_div(other, div=False)", "def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps * other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)", "def __mul__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.mul)", "def testMulDouble(self):\n\n i = IntObject(4)\n result = i.call(u\"multiply\", [DoubleObject(2.1)])\n self.assertTrue(isinstance(result, DoubleObject))\n self.assertEqual(result.getDouble(), 8.4)", "def multiply(t):\n return mul(*t)", "def __mul__(self,l):\r\n\t\t\r\n\t\t# multiply\r\n\t\tm = self.multiply(l)\r\n\t\t\r\n\t\treturn m", "def __mul__(self,y): \n\n # BZO mulitplication\n if type(y)==type(self):\n Out = self._CreateSameType()\n \n for Ind1 in self.IndList():\n Obj1=self[Ind1]\n for Ind2 in y.IndList():\n Obj2=y[Ind2]\n \n Ind3 = tuple(add(Ind1,Ind2))\n \n Out[Ind3] += Obj1*Obj2\n \n # Scalar multiplicatin\n else:\n\n Out = self._CreateSameType()\n\n Out.SetLists(self.IndList(),[y*x for x in self.__ObjList])\n\n # Multiplication with item of its own type\n \n \n \n \n \n return Out", "def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)", "def my_mul(x, y):\n ##\n cmd = getattr(th, \"mul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n return int48module((x2y1 + x1y2) % int24field * int24field + x2y2)", "def __mul__(self, other):\n return Trits(self.trits * other)", "def convert_mul_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Mul', **kwargs)", "def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)", "def __mul__(self, other):\n if isinstance(other, numbers.Number):\n # scalar multiplication for numbers\n new_point = [x * other for x in self.coords]\n return self.__class__(new_point)", "def __mul__ (self,other):\n if (self.debug): 
print(f'enter fraction.__mul__ with {other}')\n f3 = fraction(self.value[0]*other.value[0],self.value[1]*other.value[1])\n if (self.debug): print(f3, self, other)\n f3.simplify()\n return f3", "def convert_elemwise_mul(node, **kwargs):\n return create_basic_op_node('Mul', node, kwargs)", "def mul(self, b):\n self.a *= float(b)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def mul(a, b):\n c = Calculator()\n result = c.mul(a, b)\n click.echo('{} * {} = {}'.format(a, b, result))", "def __rmul__(self, other):\n print(\"Right multiplication with other\")\n \n if isinstance(other, int): \n self.coefficients *= other\n \n return self", "def mul(a,b):\r\n return a*b", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def multiply(self):\n return self._do_calc(self.multiplier)", "def __mul__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum * other, self._imNum * other)\n\n if isinstance(other, complex):\n a = self._reNum * other.real\n b = self._reNum * other.imag\n c = self._imNum * other.real\n d = self._imNum * other.imag\n return Complex(a - d, c + b)\n\n a = self._reNum * other._reNum\n b = self._reNum * other._imNum\n c = self._imNum * other._reNum\n d = self._imNum * other._imNum\n return Complex(a - d, c + b)", "def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix([[self.values[row][index] * other\n for index in range(len(self.values[0]))]\n for row in range(len(self.values))])\n\n elif isinstance(other, Vector):\n return Vector([other.dot(Vector(row)) for row in self.values])\n\n elif isinstance(other, Matrix):\n return Matrix([(other.transpose() * Vector(row)).values\n for row in self.values])", "def kkMul(*args):\n if (None in args):\n return None\n product = 1\n for arg in args:\n product *= arg\n return product", "def __imul__(self, other):\n\n return self * other", "def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)", "def __rmul__(self, other):\n return self.runtime.mul(self, other)", "def mul(x, y):\n return multiply(x, y)", "def test_mul():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value * 2\n num_a.value *= 2\n assert num_a.value == new_value", "def __mul__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n newValue = self.layout.gmt_func(self.value, other.value)\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj*other\n\n newValue = other * self.value\n\n return self._newMV(newValue)", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)", "def mul(a: PipeNumeric, b: PipeNumeric):\n assert a.get_type() == b.get_type()\n num_type = a.get_type()\n assert isinstance(num_type, num.SignedFixedNumberType)\n\n if isinstance(a, PipeConstant) and isinstance(b, PipeConstant):\n reg_max = 2 ** 
(num_type.nonfraction_bits + 2 * num_type.fraction_bits)\n return PipeConstant(num_type, int(intbv(\n num_type.create_from_constant(a.get_value()) * num_type.create_from_constant(b.get_value()),\n min=-reg_max,\n max=reg_max\n )[1 + num_type.nonfraction_bits + 2 * num_type.fraction_bits:num_type.fraction_bits].signed()))\n elif isinstance(a, PipeConstant) or isinstance(b, PipeConstant):\n if isinstance(a, PipeConstant):\n static_value = a.get_value()\n dynamic_value = b\n else:\n static_value = b.get_value()\n dynamic_value = a\n\n if static_value == 0:\n return PipeConstant.from_float(0)\n elif bin(static_value).count('1') == 1:\n # This multiplication can be implemented ny shifts.\n bin_repr = bin(static_value)\n shift_by = len(bin_repr) - 1 - bin_repr.index('1') - num_type.fraction_bits\n print('Implemented multiplication as shift by: ', shift_by)\n if shift_by == 0:\n # Just return the dynamic_value\n return dynamic_value\n\n node = ZeroCycleNode()\n node.add_inputs(value=dynamic_value)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('fixed-mul_by_shift')\n\n if shift_by > 0:\n node.add_inputs(shift_by=shift_by)\n\n node.set_logic(mul_by_shift_left)\n elif shift_by < 0:\n shift_by = -shift_by\n node.add_inputs(shift_by=shift_by)\n\n node.set_logic(mul_by_shift_right)\n return node\n else:\n node = OneCycleNode()\n\n node.add_inputs(dynamic_value=dynamic_value, static_value=static_value)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('fixed-mul')\n\n node.set_logic(mul_dsp_c)\n return node\n else:\n node = OneCycleNode()\n\n node.add_inputs(a=a, b=b)\n res = PipeSignal(num_type, Signal(num_type.create()))\n node.add_output(res)\n node.set_name('fixed-mul')\n\n node.set_logic(mul_dsp)\n return node", "def multiplication(a, b):\n pass", "def __mul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.left_composition(other)\r\n else:\r\n print(other, type(other))\r\n raise NotImplementedError", "def __mul__(self, tensor):\n return self.mul(tensor)", "def mul(x, y):\n return x * y", "def mul(x, y):\n return x * y", "def __mul__(self,other):\n if type(other) is Vector:\n return(self.x*other.x + self.y*other.y + self.z*other.z)\n else:\n return(Vector(self.x*other,self.y*other,self.z*other))", "def __rmul__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during multiplication '\n f'to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Times(other, self)", "def __mul__(self,value):\n x = self.clone()\n if isinstance(value,LiveStat):\n x.name = \"(\" + self.name + \"*\" + value.name + \")\"\n else:\n x.name = \"(\" + self.name + \"* scalar)\"\n x *= value\n return x", "def multiply(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n multiplier = str(args[0] * args[1])\n return multiplier", "def multiply(value, multiplier):\n return value*multiplier", "def __mul__(self, other):\n new_num = self.num * other.num\n new_denom = self.denom * other.denom\n return Fraction(new_num, new_denom)", "def __mul__(self, other):\r\n T = type(other)\r\n # mat4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return mat4(map(lambda x,other=other: x*other, self.mlist))\r\n # mat4*vec3\r\n if isinstance(other, _vec3):\r\n 
m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n w = float(m41*other.x + m42*other.y + m43*other.z + m44)\r\n return _vec3(m11*other.x + m12*other.y + m13*other.z + m14, \r\n m21*other.x + m22*other.y + m23*other.z + m24, \r\n m31*other.x + m32*other.y + m33*other.z + m34)/w\r\n # mat4*vec4\r\n if isinstance(other, _vec4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n return _vec4(m11*other.x + m12*other.y + m13*other.z + m14*other.w, \r\n m21*other.x + m22*other.y + m23*other.z + m24*other.w, \r\n m31*other.x + m32*other.y + m33*other.z + m34*other.w,\r\n m41*other.x + m42*other.y + m43*other.z + m44*other.w)\r\n # mat4*mat4\r\n if isinstance(other, mat4):\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n n11,n12,n13,n14,n21,n22,n23,n24,n31,n32,n33,n34,n41,n42,n43,n44 = other.mlist\r\n return mat4( m11*n11+m12*n21+m13*n31+m14*n41,\r\n m11*n12+m12*n22+m13*n32+m14*n42,\r\n m11*n13+m12*n23+m13*n33+m14*n43,\r\n m11*n14+m12*n24+m13*n34+m14*n44,\r\n\r\n m21*n11+m22*n21+m23*n31+m24*n41,\r\n m21*n12+m22*n22+m23*n32+m24*n42,\r\n m21*n13+m22*n23+m23*n33+m24*n43,\r\n m21*n14+m22*n24+m23*n34+m24*n44,\r\n\r\n m31*n11+m32*n21+m33*n31+m34*n41,\r\n m31*n12+m32*n22+m33*n32+m34*n42,\r\n m31*n13+m32*n23+m33*n33+m34*n43,\r\n m31*n14+m32*n24+m33*n34+m34*n44,\r\n\r\n m41*n11+m42*n21+m43*n31+m44*n41,\r\n m41*n12+m42*n22+m43*n32+m44*n42,\r\n m41*n13+m42*n23+m43*n33+m44*n43,\r\n m41*n14+m42*n24+m43*n34+m44*n44)\r\n # unsupported\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"", "def __rmul__(self, _scalar):\n\t\treturn self * _scalar", "def __pow__(self, ???):", "def EvaluateFields(self, *float, **kwargs):\n ...", "def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))", "def __mul__(self, other):\n def leftmost_bit(x):\n assert x > 0\n result = 1\n while result <= x:\n result = 2 * result\n return result // 2\n e = other\n if self.__p:\n e = e % self.__p\n if e == 0:\n return INFINITY\n if self == INFINITY:\n return INFINITY\n assert e > 0\n e3 = 3 * e\n negative_self = ECPoint(self.__p, self.__a, self.__b, self.__x, -self.__y, self.__n)\n i = leftmost_bit(e3) // 2\n result = self\n # print_(\"Multiplying %s by %d (e3 = %d):\" % (self, other, e3))\n while i > 1:\n result = result.double()\n if (e3 & i) != 0 and (e & i) == 0:\n result = result + self\n if (e3 & i) == 0 and (e & i) != 0:\n result = result + negative_self\n # print_(\". . . 
i = %d, result = %s\" % ( i, result ))\n i = i // 2\n return result", "def mul(self, multiplier):\n result = {}\n for k, v in self.variables.items():\n a, b = self._broadcast(multiplier, v)\n result[k] = a * b\n return MultivariateDerivative(result)", "def multiply(value, arg):\n return value * arg", "def __mul__(self, other):\n return sum(self._ar * other._ar)", "def mul(Z,X,Y):", "def __mul__(self, other):\n if hasattr(other, 'as_homogenous_transformation'):\n return basis(homogenous_transformation = self.as_homogenous_transformation() * other.as_homogenous_transformation())\n elif hasattr(other, 'n'):\n if other.n == (3,1):\n b = matrix.col((other[0], other[1], other[2], 1))\n elif other.n == (4,1):\n b = other\n else:\n raise TypeError(b, \"Incompatible matrices\")\n p = self.as_homogenous_transformation() * b\n if other.n == (3,1):\n return matrix.col(p[0:3])\n else:\n return p\n else:\n raise TypeError(b)", "def __mul__(self, other):\n if isinstance(other, (int, float, str)):\n other = RationalFrac(other)\n if isinstance(other, RationalFrac):\n prod = RationalFrac(0, empty=True)\n prod.numer = self.numer + other.numer\n prod.denom = self.denom + other.denom\n prod.neg = not (self.neg == other.neg)\n prod.simplify()\n return prod\n else:\n return NotImplemented", "def mul(self, other):\n return self._new_rep(self.rep * other)", "def __imul__(self, tensor):\n return self.mul_(tensor)", "def test_mul():\n l = [1, 2, 3, 4]\n assert s7.mul(*l) == 1 * 2 * 3 * 4\n assert s7.mul(10, 20) == 200\n assert s7.mul(1.0, 2.0, 100.0) == 200.0", "def __mul__(self,other):\n if(self.denominator*other.denominator<0):\n resultnumerator = -1*self.numerator*other.numerator\n resultdenominator = abs(self.denominator*other.denominator) \n else:\n resultnumerator = self.numerator*other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues" ]
[ "0.7637235", "0.75161517", "0.7407263", "0.7326037", "0.7278116", "0.72107786", "0.71714556", "0.7157849", "0.7142031", "0.7124826", "0.7087941", "0.707973", "0.7029224", "0.70278025", "0.7017292", "0.7013551", "0.6969518", "0.69458455", "0.6943746", "0.6943483", "0.6938702", "0.6927752", "0.6915659", "0.69010264", "0.68876916", "0.686513", "0.6844689", "0.6843671", "0.6806144", "0.67876846", "0.677678", "0.67701536", "0.67563325", "0.67531294", "0.67442083", "0.67267776", "0.6682954", "0.66662", "0.6661234", "0.664948", "0.6635358", "0.66179216", "0.6599478", "0.659853", "0.65977305", "0.6596632", "0.65908086", "0.65638655", "0.65477115", "0.65249646", "0.6519985", "0.6516587", "0.6510977", "0.650561", "0.6503771", "0.64902097", "0.6482013", "0.644877", "0.6448092", "0.6448092", "0.6448092", "0.64371365", "0.64345735", "0.6430227", "0.6429493", "0.6420246", "0.6417903", "0.63982624", "0.6387118", "0.63844866", "0.6363816", "0.63630265", "0.63243866", "0.6310197", "0.6310122", "0.63077897", "0.62916154", "0.62916154", "0.62793714", "0.62740767", "0.62688655", "0.6257135", "0.6256262", "0.62527066", "0.62468624", "0.62467605", "0.62465215", "0.6233135", "0.6226945", "0.62131095", "0.62095374", "0.620848", "0.6192649", "0.6189082", "0.61887527", "0.6188095", "0.61872125", "0.6179473", "0.6172326", "0.61656994" ]
0.69146836
23
PSNR between two images
def _psnr(img1, img2): mse = np.mean((img1 - img2) ** 2) if mse == 0: return 100 PIXEL_MAX = 1 return (20 * math.log10(PIXEL_MAX)) - (10 * math.log10(mse))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computePSNR(img1, img2, pad_y=0, pad_x=0):\n if pad_y != 0 and pad_x != 0:\n img1_u = (np.clip(img1, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n else:\n img1_u = (np.clip(img1, 0, 255.0)).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)).astype(dtype=np.uint8)\n imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32)\n rmse = np.sqrt(np.mean(np.power(imdiff[:], 2)))\n return 20.0 * np.log10(255.0 / rmse)", "def cal_psnr(im1, im2):\n # assert pixel value range is 0-255 and type is uint8\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr", "def PSNR(self, imageA, imageB):\n mse = self.MSE(imageA, imageB)\n if mse == 0:\n return 100\n return 20 * log10(255.0 / sqrt(mse))", "def calculate_psnr(img0, img1, data_range=None):\n psnr = skm.peak_signal_noise_ratio(img0, img1, data_range=data_range) \n return psnr", "def tf_psnr(im1, im2):\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n return 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))", "def psnr(img_a, img_b, max_img_value=255):\n mse = tf.reduce_mean((img_a - img_b) ** 2)\n return 20 * log_n(max_img_value, 10) - 10 * log_n(mse, 10)", "def _comput_PSNR(self, input, target):\n shave = 4\n ch, h, w = input.size()\n input_Y = rgb2ycbcrT(input.cpu())\n target_Y = rgb2ycbcrT(target.cpu())\n diff = (input_Y - target_Y).view(1, h, w)\n\n diff = diff[:, shave:(h - shave), shave:(w - shave)]\n mse = diff.pow(2).mean()\n psnr = -10 * np.log10(mse)\n return psnr", "def psnr(img1, img2, crop_border=0, input_order='HWC'):\n\n assert img1.shape == img2.shape, (\n f'Image shapes are differnet: {img1.shape}, {img2.shape}.')\n if input_order not in ['HWC', 'CHW']:\n raise ValueError(\n f'Wrong input_order {input_order}. Supported input_orders are '\n '\"HWC\" and \"CHW\"')\n img1 = reorder_image(img1, input_order=input_order)\n img2 = reorder_image(img2, input_order=input_order)\n\n if crop_border != 0:\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, None]\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, None]\n\n mse = np.mean((img1 - img2)**2)\n if mse == 0:\n return float('inf')\n return 20. * np.log10(255. 
/ np.sqrt(mse))", "def psnr(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n psnr_ = np.zeros(n)\n for ii in range(n):\n psnr_[ii] = peak_signal_noise_ratio(image1[ii], image2[ii], **kwargs)\n return psnr_", "def psnr(im1, im2):\n\n def log10(real_number):\n \"\"\" Calculate the base-ten log of a given real number.\n\n Args:\n real_number: a real number.\n Returns:\n the base-ten log of the given real number.\n \"\"\"\n numerator = tf.math.log(real_number)\n denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))\n return numerator / denominator\n\n mse = tf.reduce_mean(tf.math.squared_difference(im1, im2))\n result = tf.constant(1, dtype=tf.float32) / mse\n result = tf.math.multiply(tf.constant(10, dtype=tf.float32), log10(result))\n return result", "def PSNR(y_true, y_pred):\n return tf.image.psnr(y_true,y_pred,1)", "def test_psnr_with_two_completely_different_sets(self):\n low = np.zeros((10, 500, 500, 1), dtype=np.uint8)\n high = np.ones((10, 500, 500, 1), dtype=np.uint8) * 255\n\n avg_psnr = np.array(psnr(high, low)).mean()\n self.assertEqual(avg_psnr, 0.0)", "def PSNR(orimg, estimg, pattern):\n PSNR = [0]*3\n _, mask = keep_measures(orimg[:, :, 0], pattern)\n for i in range(3):\n diff = orimg[:,:,i] - estimg[:,:,i]\n PSNR[i] = 10*np.log10(255**2/(np.linalg.norm((1-mask[:,:,i])*diff)**2/(1-mask[:,:,i]).sum()))\n \n return tuple(PSNR)", "def compute_psnr_and_ssim(image1, image2, border_size=0):\r\n if len(image1.shape) == 2:\r\n image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)\r\n if len(image2.shape) == 2:\r\n image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)\r\n\r\n if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:\r\n return None\r\n\r\n image1 = trim_image_as_file(image1)\r\n image2 = trim_image_as_file(image2)\r\n\r\n if border_size > 0:\r\n image1 = image1[border_size:-border_size, border_size:-border_size, :]\r\n image2 = image2[border_size:-border_size, border_size:-border_size, :]\r\n\r\n psnr = peak_signal_noise_ratio(image1, image2, data_range=255)\r\n ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,\r\n sigma=1.5, data_range=255)\r\n return psnr, ssim", "def psnr(label, outputs, max_val=1.):\n label = label.cpu().detach().numpy()\n outputs = outputs.cpu().detach().numpy()\n # PSNR = -10. 
* np.log10(np.mean(np.square(outputs - label)))\n img_diff = outputs - label\n rmse = math.sqrt(np.mean((img_diff) ** 2))\n if rmse == 0:\n return 100\n else:\n PSNR = 20 * math.log10(max_val / rmse)\n return PSNR", "def batch_psnr(test_image, target_image, max=1.):\n psnr = 0\n num_images = test_image.shape[0]\n for i in range(num_images):\n psnr += calc_psnr(test_image[i], target_image[i], max=max)\n psnr /= num_images\n return psnr", "def PSNR(ground_truth_images: np.ndarray, noisy_images: np.ndarray) -> List[float]:\n validate_inputs(ground_truth_images, noisy_images)\n\n psnr_acumulated = []\n\n quantity_of_images = ground_truth_images.shape[0]\n\n if need_to_normalize(ground_truth_images):\n ground_truth_images = normalize(ground_truth_images, \\\n interval=(0,255), data_type='int')\n \n if need_to_normalize(noisy_images):\n noisy_images = normalize(noisy_images, \\\n interval=(0,255), data_type='int')\n \n for i in range(quantity_of_images):\n psnr_image = psnr(\n ground_truth_images[i,:,:,0], \n noisy_images[i,:,:,0],\n data_range=256\n )\n psnr_acumulated.append(psnr_image)\n\n # psnr_acumulated = np.array(psnr_acumulated)\n\n # return psnr_acumulated.mean()\n return psnr_acumulated", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def compute_psnr(array_0_uint8, array_1_uint8):\n if array_0_uint8.dtype != numpy.uint8:\n raise TypeError('`array_0_uint8.dtype` is not equal to `numpy.uint8`.')\n if array_1_uint8.dtype != numpy.uint8:\n raise TypeError('`array_1_uint8.dtype` is not equal to `numpy.uint8`.')\n array_0_float64 = array_0_uint8.astype(numpy.float64)\n array_1_float64 = array_1_uint8.astype(numpy.float64)\n mse_float64 = numpy.mean((array_0_float64 - array_1_float64)**2)\n \n # `array_0_float64` and `array_1_float64` might be identical.\n # 1.e-6 is added to `mse_float64` to avoid dividing by 0.\n # The precedence of ...**... (exponentiation) is higher\n # than the precedence of .../... 
(division).\n return 10.*numpy.log10(255.**2/(mse_float64 + 1.e-6))", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def ps(image):\n\timage = image.astype(float)\n\tps_img = abs(pow(fft2(image), 2))\n\treturn ps_img", "def calculateSNR(self):\n pass", "def photon_fraction(r, r1, r2):\n return rotate_phasor(r, r1, r2).real", "def _rawprng(self):\n self.p += 1 \n if self.p >= self.o:\n\t\t\tself.p = 0\n t = 1768863 * self.s[self.p] + self.c * 2.3283064365386963e-10\n self.c = int(t) | 0\n self.s[self.p] = t - self.c\n return self.s[self.p]", "def compute_rate_psnr(reference_uint8, mean_training, std_training, entropy_ae,\n bin_width, nb_vertically, path_to_reconstruction):\n # The function `svhn.svhn.preprocess_svhn` checks\n # that `reference_uint8.dtype` is equal to `numpy.uint8`\n # and `reference_uint8.ndim` is equal to 2.\n reference_float64 = svhn.svhn.preprocess_svhn(reference_uint8,\n mean_training,\n std_training)\n (nb_images, nb_pixels) = reference_uint8.shape\n \n # At training time, the decoder was fed with\n # latent variables perturbed by uniform noise.\n # However, at test time, the decoder is fed with\n # quantized latent variables.\n y = 
entropy_ae.encoder(reference_float64)[1]\n quantized_y = tls.quantization(y, bin_width)\n \n # In the function `tls.discrete_entropy`, `quantized_y`\n # is flattened to compute the entropy.\n disc_entropy = tls.discrete_entropy(quantized_y, bin_width)\n rate = entropy_ae.nb_y*disc_entropy/nb_pixels\n reconstruction_float64 = entropy_ae.decoder(quantized_y)[1]\n rec_rescaled_float64 = reconstruction_float64*std_training + \\\n numpy.tile(mean_training, (nb_images, 1))\n reconstruction_uint8 = tls.cast_float_to_uint8(rec_rescaled_float64)\n psnr = tls.mean_psnr(reference_uint8, reconstruction_uint8)\n tls.visualize_rows(reconstruction_uint8,\n 32,\n 32,\n nb_vertically,\n path_to_reconstruction)\n return (rate, psnr)", "def snr(p1, l1x, l1y, p2, l2x, l2y, var):\n ip12 = inner_product(p1, l1x, l1y, p2, l2x, l2y, var)\n ip11 = inner_product(p1, l1x, l1y, p1, l1x, l1y, var)\n ip22 = inner_product(p2, l2x, l2y, p2, l2x, l2y, var)\n\n return ip11 / (ip11 + ip22 - 2 * ip12)", "def psnr(y_true, y_pred):\n return 1/(10.0 * np.log(1.0 / (np.mean(np.square(y_pred - y_true)))) / np.log(10.0))", "def rsrp_snr_est(self,iqcpx1,iqcpx2):\n LLTFpos = np.int_(np.concatenate((np.arange((self._FFT-self._LLTF)/2,self._FFT/2),\n np.arange(self._FFT/2+1,self._FFT/2+self._LLTF/2+1))))\n # use an offset of -1/2 cyclic prefix\n idx = 4*self._GI+2*self._FFT-self._GI/2\n # extract the two LTF symbols of both channels\n yLLtf1 = iqcpx1[idx:idx+2*self._FFT]\n yLLtf2 = iqcpx2[idx:idx+2*self._FFT]\n # do the FFT on both LLTF pairs\n ysLLtf1a = np.fft.fftshift(np.fft.fft(yLLtf1[0:self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf1b = np.fft.fftshift(np.fft.fft(yLLtf1[self._FFT:2*self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf2a = np.fft.fftshift(np.fft.fft(yLLtf2[0:self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf2b = np.fft.fftshift(np.fft.fft(yLLtf2[self._FFT:2*self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n # Channel 1\n Ps1 = np.real(np.dot(ysLLtf1a[LLTFpos].conj().transpose(),ysLLtf1a[LLTFpos]))\n Ps2 = np.real(np.dot(ysLLtf1b[LLTFpos].conj().transpose(),ysLLtf1b[LLTFpos]))\n # divide average symbol power by number of symbols and pilot subcarriers\n rsrp1 = 10*np.log10((Ps1+Ps2)/(2*np.size(LLTFpos)))\n Pn = np.sqrt(2)*np.sum(np.power(np.abs(ysLLtf1b[LLTFpos])-np.abs(ysLLtf1a[LLTFpos]),2))\n snr1 = 10*np.log10((Ps1-Pn)/Pn)\n # Channel 2\n Ps1 = np.real(np.dot(ysLLtf2a[LLTFpos].conj().transpose(),ysLLtf2a[LLTFpos]))\n Ps2 = np.real(np.dot(ysLLtf2b[LLTFpos].conj().transpose(),ysLLtf2b[LLTFpos]))\n # divide average symbol power by number of symbols and pilot subcarriers\n rsrp2 = 10*np.log10((Ps1+Ps2)/(2*np.size(LLTFpos)))\n Pn = np.sqrt(2)*np.sum(np.power(np.abs(ysLLtf2b[LLTFpos])-np.abs(ysLLtf2a[LLTFpos]),2))\n snr2 = 10*np.log10((Ps1-Pn)/Pn)\n return rsrp1, rsrp2, snr1, snr2", "def psnr_folders(test_dir, out_dir):\r\n logging.info('Evaluating the results...')\r\n psnr_errs = []\r\n psnr_ycbcr_errs = []\r\n for gt_im_path in test_dir.glob('*' + args.image_extension):\r\n interpolated_im_path = Path(out_dir / gt_im_path.name)\r\n # read the files into tensor\r\n gt_im = imread(gt_im_path, un_squeeze=False, un_normalize=True)\r\n interpolated_im = imread(interpolated_im_path, un_squeeze=False, un_normalize=True)\r\n # find psnr in RGB\r\n psnr_err = psnr(gt_im, interpolated_im)\r\n psnr_errs.append(psnr_err)\r\n # find psnr in ycbcr\r\n psnr_ycbcr_err = psnr_ycbcr(gt_im, interpolated_im)\r\n psnr_ycbcr_errs.append(psnr_ycbcr_err)\r\n\r\n psnr_errors = [f for f in psnr_errs if f 
!= float(\"inf\")]\r\n min_psnr = min(psnr_errors)\r\n psnr_ycbcr_errs = [i for i in psnr_ycbcr_errs if i != None] # remove None from the list\r\n mean_psnr_ycbcr = statistics.mean(psnr_ycbcr_errs)\r\n return min_psnr, mean_psnr_ycbcr", "def get_psnr(self, predictions, ground_truth):\n batch_size, _, _, _ = predictions.shape\n pred = predictions.detach().cpu().numpy()\n gt = ground_truth.detach().cpu().numpy()\n\n return skimage.measure.compare_psnr(gt, pred, data_range=2)", "def SNR(op0, op1):\n result = len(op0)*np.abs(np.mean(op1) - np.mean(op0))**2/((np.var(op1)+np.var(op0))/2)\n \n return result", "def get_ssim(img1, img2):\r\n img1 = cv2.cvtColor(numpy.array(img1), cv2.COLOR_GRAY2BGR)\r\n img2 = cv2.cvtColor(numpy.array(img2), cv2.COLOR_GRAY2BGR)\r\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\r\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\r\n s_value = ssim(img1, img2)\r\n return s_value", "def PSNR (originalArray, reconstructedArray):\n # Convert both to float.\n if originalArray.dtype == np.int16:\n originalArray = originalArray.astype(np.float32) / 32768\n if reconstructedArray.dtype == np.int16:\n reconstructedArray = reconstructedArray.astype(np.float32) / 32768\n\n # Correct for any differences in dynamic range, which might be caused\n # by attempts of compression libraries like mp3 to avoid overflow.\n reconstructedArray *= np.sqrt((originalArray ** 2).sum() /\n (reconstructedArray ** 2).sum())\n\n\n max_value = float(np.max(np.abs(originalArray)))\n mean_square_error = ((originalArray - reconstructedArray) ** 2).sum() / originalArray.size\n if mean_square_error != 0:\n psnr = 20 * math.log10(max_value) - 10 * math.log10(mean_square_error)\n else:\n psnr = math.inf\n\n return psnr", "def NR(ip, fp):\n x = fp[0] - ip[0]\n y = fp[1] - ip[1]\n r = sqrt(x ** 2 + y ** 2)\n return array([-y / r, x / r])", "def preprocess(image):\n return (image / 255) * 2 - 1", "def getRMSE(image1, image2):\n im1 = readImage(image1, grayscale=False)\n im2 = readImage(image2, grayscale=False)\n return np.sqrt( ((im1 - im2)**2).mean() )", "def observed_snr(psrsnr, psrra, psrdec):\n global beam_profile\n return lambda obsra,obsdec: psrsnr*beam_profile.gain_at_angular_offset(angsep_arcmin(psrra, psrdec, obsra, obsdec))", "def Power2SNR(self, plt_PS=False, plt_SNR=False):\n\n self.bkg = self.estimate_background(log_width=0.1)\n self.snr = self.ds.power/self.bkg\n if np.isnan(self.snr[0]) == True: self.snr[0] = 1e-5\n\n if plt_PS: self.plot_ps()\n if plt_SNR: self.plot_snr()", "def psnr(y_true, y_pred, y_mask):\n y_shape = tf.shape(y_true)\n border = 3\n max_pixels_shifts = 2*border\n size_image = HR_SIZE\n size_croped_image = size_image - max_pixels_shifts\n clear_pixels = size_croped_image*size_croped_image\n cropped_predictions = y_pred[:, border:size_image -\n border, border:size_image-border]\n\n X = []\n for i in range(max_pixels_shifts+1): # range(7)\n for j in range(max_pixels_shifts+1): # range(7)\n cropped_labels = y_true[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n cropped_y_mask = y_mask[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n\n cropped_y_mask = tf.cast(cropped_y_mask, tf.float32)\n\n cropped_predictions_masked = tf.cast(\n cropped_predictions, tf.float32)*cropped_y_mask\n cropped_labels_masked = tf.cast(\n cropped_labels, tf.float32)*cropped_y_mask\n\n total_pixels_masked = tf.reduce_sum(cropped_y_mask, axis=[1, 2])\n\n # bias brightness\n b = (1.0/total_pixels_masked)*tf.reduce_sum(\n 
tf.subtract(cropped_labels_masked, cropped_predictions_masked),\n axis=[1, 2])\n\n b = tf.reshape(b, [y_shape[0], 1, 1, 1])\n\n corrected_cropped_predictions = cropped_predictions_masked+b\n corrected_cropped_predictions = corrected_cropped_predictions*cropped_y_mask\n\n corrected_mse = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.square(\n tf.subtract(cropped_labels_masked,\n corrected_cropped_predictions)\n ), axis=[1, 2])\n\n cPSNR = 10.0*log10((65535.0**2)/corrected_mse)\n X.append(cPSNR)\n\n X = tf.stack(X)\n max_cPSNR = tf.reduce_max(X, axis=0) \n return tf.reduce_mean(max_cPSNR)", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def Pp(nccd):\n return (128.1-56.9) * (nccd - 1) / (6-1) + 56.9", "def prescaler(self) -> int:", "def psnr_error(gen_frames, gt_frames):\n shape = tf.shape(gen_frames)\n num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])\n square_diff = tf.square(gt_frames - gen_frames)\n\n batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))\n return tf.reduce_mean(batch_errors)", "def same_landmark_images(path_1: str, path_2: str) -> float:\n img_1_greyscale = read_image_greyscale(path_1)\n img_2_greyscale = read_image_greyscale(path_2)\n img_1_rgb_separated = np.array([read_image_color(path_1, component) for component in RGB_COMPONENTS])\n img_2_rgb_separated = np.array([read_image_color(path_2, component) for component in RGB_COMPONENTS])\n\n similarity_hog = similarity_two_images_hog(img_1_greyscale, img_2_greyscale)\n similiarities_rgb = np.array([similarity_two_images_color(img_1_rgb_separated[i], img_2_rgb_separated[i])\n for i in range(0, len(RGB_COMPONENTS))])\n similarity_color = np.mean(similiarities_rgb)\n\n similarity_percentage = np.average([similarity_hog, similarity_color], weights=[1.2, 1])\n return float(similarity_percentage)", "def ComputeNrb(self):\r\n pass", "def pairing(left, right):\n # same class: 0\n if left[label] == right[label]:\n flag = 0\n # not same: 1\n else:\n flag = 1\n return tf.cast(left[\"image\"], tf.float32) / 255., tf.cast(right[\"image\"], tf.float32) / 255., tf.cast(flag, tf.float32)", "def color_correct_panstarrs(self):\n PS1_r = self.pan['rmag']\n PS1_g = self.pan['gmag']\n self.pan_gr_color = self.pan['gmag'] - self.pan['rmag'] \n if self.filter == 'R' and ((self.instrument == 'h') | (self.instrument == 'm')): # this should be the only observations through an R filter\n print(\"correcting color for R filter at KPNO\") \n ###################################\n # Calculate Johnson R\n # from http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php\n ###################################\n #self.R = self.pan['rmag'] + (-0.153)*(self.pan['rmag']-self.pan['imag']) - 0.117\n ###################################\n # Other transformations from \n # 
https://arxiv.org/pdf/1706.06147.pdf\n # R - r = C0 + C1 x (r-i) (-0.166, -0.275)\n # R - r = C0 + C1 x (g-r) (-0.142, -0.166)\n ###################################\n #\n #if self.useri:\n # self.R = self.pan['rmag'] + (-0.166)*(self.pan['rmag']-self.pan['imag']) - 0.275\n #else:\n # self.R = self.pan['rmag'] + (-0.142)*(self.pan['gmag']-self.pan['rmag']) - 0.142\n\n # from Matteo Fossati\n #Best fit quadratic KPHr - PS1_r = 0.0170*(PS1_g-PS1_r)^2 + -0.1864*(PS1_g-PS1_r) + 0.0213\n self.R = PS1_r + 0.0170*(PS1_g-PS1_r)**2 + -0.1864*(PS1_g-PS1_r) + 0.0213\n\n elif self.filter == 'r' and self.instrument == 'i':\n print(\"correcting color for r filter at INT\") \n #self.R = self.pan['rmag']\n #Best fit quadratic INTSr - PS1_r = 0.0023*(PS1_g-PS1_r)^2 + -0.0122*(PS1_g-PS1_r) + 0.0003\n self.R = PS1_r + 0.0023*(PS1_g-PS1_r)**2 + -0.0122*(PS1_g-PS1_r) + 0.0003 \n # which filter is the bok telescope using?\n elif self.filter == 'r' and self.instrument == 'b':\n print(\"correcting color for r filter at BOK\") \n #self.R = self.pan['rmag']\n #Best fit quadratic KPSr - PS1_r = 0.0084*(PS1_g-PS1_r)^2 + -0.0420*(PS1_g-PS1_r) + 0.0036\n self.R = PS1_r + 0.0084*(PS1_g-PS1_r)**2 + -0.0420*(PS1_g-PS1_r) + 0.0036 \n # this is the kpno r filter\n elif self.filter == 'r' and self.instrument == 'h':\n print(\"correcting color for r filter at KPNO\") \n #Best fit quadratic KPSr - PS1_r = 0.0084*(PS1_g-PS1_r)^2 + -0.0420*(PS1_g-PS1_r) + 0.0036\n self.R = self.pan['rmag']\n self.R = PS1_r + 0.0084*(PS1_g-PS1_r)**2 + -0.0420*(PS1_g-PS1_r) + 0.0036 \n\n # halpha filters\n elif self.filter == 'ha' and self.instrument == 'i':\n print(\"correcting color for halpha filter at INT\")\n #Best fit quadratic Intha - PS1_r = 0.0182*(PS1_g-PS1_r)^2 + -0.2662*(PS1_g-PS1_r) + 0.0774\n self.R = PS1_r + 0.0182*(PS1_g-PS1_r)**2 + -0.2662*(PS1_g-PS1_r) + 0.0774\n\n\n #self.R = self.pan['rmag']\n # bok is using the kpno halpha+4nm filter, so use the same correction for these\n elif self.filter == 'ha' and ((self.instrument == 'b') | (self.instrument == 'h') | (self.instrument == 'm')) :\n print(\"correcting color for ha filter at KPNO\") \n #Best fit quadratic Ha4 - PS1_r = 0.0016*(PS1_g-PS1_r)^2 + -0.2134*(PS1_g-PS1_r) + 0.0168\n #self.R = self.pan['rmag']\n self.R = PS1_r + 0.0016*(PS1_g-PS1_r)**2 + -0.2134*(PS1_g-PS1_r) + 0.0168\n else:\n print(\"ruh - roh! 
did not find the panstarrs color transformation!!!\")\n print(\"setting instrumental r mag to panstarrs r mag\")\n print()\n self.R = self.pan['rmag']", "def computeSNR(self,doppMatchLow,doppMatchHigh,windowWidth):\n # print(f'SNR params: low {doppMatchLow} high {doppMatchHigh} width {windowWidth}')\n doppMatchLow_FFT_idx = self.doppCyperSymNorm[doppMatchLow]\n doppMatchHigh_FFT_idx = self.doppCyperSymNorm[doppMatchHigh]\n # print(f'SNR {doppMatchLow_FFT_idx} {doppMatchHigh_FFT_idx}')\n noiseIdxLow_FFT_idx = (doppMatchLow_FFT_idx + int(self.Nfft//2)) % self.Nfft\n noiseIdxHigh_FFT_idx = (doppMatchHigh_FFT_idx + int(self.Nfft//2)) % self.Nfft\n \n t = time.time()\n cuda.Context.synchronize()\n\n if doppMatchLow_FFT_idx > doppMatchHigh_FFT_idx: # the signal is around zero Hz IF\n sigPwr = np.mean(np.concatenate((np.abs(self.GPU_bufSignalFreq_cpu_handle[doppMatchLow_FFT_idx-windowWidth:]),np.abs(self.GPU_bufSignalFreq_cpu_handle[:doppMatchHigh_FFT_idx+windowWidth]))))\n else:\n sigPwr = np.mean(np.abs(self.GPU_bufSignalFreq_cpu_handle[doppMatchLow_FFT_idx-windowWidth:doppMatchHigh_FFT_idx+windowWidth]))\n\n if noiseIdxLow_FFT_idx > noiseIdxHigh_FFT_idx: # the signal is around zero Hz IF\n noisePwr = np.mean(np.concatenate((np.abs(self.GPU_bufSignalFreq_cpu_handle[noiseIdxLow_FFT_idx-windowWidth:]),np.abs(self.GPU_bufSignalFreq_cpu_handle[:noiseIdxHigh_FFT_idx+windowWidth]))))\n else:\n noisePwr = np.mean(np.abs(self.GPU_bufSignalFreq_cpu_handle[noiseIdxLow_FFT_idx-windowWidth:noiseIdxHigh_FFT_idx+windowWidth]))\n \n SNR = 20*np.log10(sigPwr/noisePwr - 1)\n # print(f'SNR {SNR:.1f} sigPwr {sigPwr:.6f} noisePwr {noisePwr:.6f} dopp idx: {doppMatchLow_FFT_idx} {doppMatchHigh_FFT_idx} noise idx : {noiseIdxLow_FFT_idx} {noiseIdxHigh_FFT_idx}')\n\n # log.error(f'time SNR {(time.time()-t)*1000:.3f} ms')\n return SNR", "def dotted_P(self, R, i, j, offset):\n xpix , ypix = self.radiiPoints(R)\n N = len(xpix)\n k = int(N/offset)\n N2 = int(len(xpix)/2)\n if k < 0 : k = N2\n\n xpix2, ypix2 = xpix[np.arange(-k, len(xpix)-k)], ypix[np.arange(-k, len(ypix)-k)]\n\n xpair, ypair, xpair2, ypair2 = self.get_pairs(xpix+i,ypix+j,k)\n P1 = self.image[xpair , ypair ]\n P2 = self.image[xpair2, ypair2]\n N2 = int(len(xpair)/2)\n temp = 1-np.abs(P1[:N2]-P2[:N2])\n #temp = np.cos(np.arccos(P1[:N2])-np.arccos(P2[:N2]))\n #temp = P1[:N2]*P2[:N2]\n\n if len(temp) > 0 :\n return np.sum(temp)/len(temp)\n else :\n return 0.", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def part_1b():\n shift_0 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'Shift0.png'), 0) / 255.\n shift_r10 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR10.png'), 0) / 255.\n shift_r20 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR20.png'), 0) / 255.\n shift_r40 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR40.png'), 0) / 255.\n\n raise NotImplementedError", "def mse(img1, img2):\n # TODO: implement this function.", "def pxrd(self):\n rank = range(len(self.theta2)) #np.argsort(self.theta2)\n PL = []\n last = []\n for i in rank:\n if self.xrd_intensity[i] > 0.01:\n angle = np.degrees(self.theta2[i])\n if PL is None:\n PL.append([angle, self.d_hkl[i], \\\n self.hkl_list[i,0], self.hkl_list[i,1], self.hkl_list[i,2], \\\n self.xrd_intensity[i]])\n elif abs(angle-last) < 1e-2:\n PL[-1][-1] += self.xrd_intensity[i]\n else:\n PL.append([angle, self.d_hkl[i], \\\n self.hkl_list[i,0], self.hkl_list[i,1], 
self.hkl_list[i,2], \\\n self.xrd_intensity[i]])\n last = angle\n PL = (np.array(PL))\n PL[:,-1] = PL[:,-1]/max(PL[:,-1])\n self.pxrd = PL", "def match_percentage(image1_pixels, image2_pixels):\n\n match, total = 0, 0\n for i in range(len(image1_pixels)):\n if image1_pixels[i] == image2_pixels[i]:\n match += 1\n total += 1\n else:\n total += 1\n return float(match) / float(total)", "def grocepre(img):\n img_prepro = pre.get_grayscale(img)\n img_prepro = pre.remove_noise(img_prepro)\n img_prepro = pre.dilate(img_prepro)\n img_prepro = pre.erode(img_prepro)\n img_prepro = pre.opening(img_prepro)\n img_prepro = pre.thresholding(img_prepro)\n\n return img_prepro", "def tirageRnp1CondRn(self, rn):\n\n proba = np.zeros(shape=(3))\n if rn == 0.:\n proba[0] = self.__alpha0/self.__D0\n proba[1] = self.__beta /self.__D0\n \n elif rn == 1.:\n proba[0] = self.__beta /self.__D1\n proba[1] = self.__alpha1/self.__D1\n else:\n Dr1 = 1.5 + rn - rn*rn\n proba[0] = (1.-rn) / Dr1\n proba[1] = rn / Dr1\n proba[2] = 1. - (proba[0]+proba[1])\n \n typeSample = random.choices(population=['0.', '1.', 'F'], weights=proba)[0]\n \n if typeSample != 'F':\n rnp1 = float(typeSample)\n else:\n if rn == 0.:\n rnp1 = self.__rv_pente0.rvs()\n elif rn == 1.:\n rnp1 = self.__rv_pente1.rvs()\n else:\n rnp1 = self.__rv_triangle.rvs(self.__alpha0, self.__alpha1, self.__beta, rn)\n\n return rnp1", "def prob_3_4(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return mirrorImg ######", "def compare_images(originalImg, modifiedImg):\n fig, axes = plt.subplots(nrows=1, ncols=2, sharex='all', sharey='all',dpi=144)\n # ax = axes.ravel()\n\n psnr_orig = msr.compare_psnr(originalImg, originalImg)\n ssim_orig = msr.compare_ssim(\n originalImg, originalImg, multichannel=True)\n\n psnr_mod = msr.compare_psnr(originalImg, modifiedImg)\n ssim_mod = msr.compare_ssim(\n originalImg, modifiedImg, multichannel=True)\n\n label = 'PSNR: {:.2f}, SSIM: {:.2f}'\n\n axes[0].imshow(originalImg, cmap=plt.cm.gray)\n axes[0].set_xlabel(label.format(psnr_orig, ssim_orig))\n axes[0].set_title('Original image')\n\n axes[1].imshow(modifiedImg, cmap=plt.cm.gray)\n axes[1].set_xlabel(label.format(psnr_mod, ssim_mod))\n axes[1].set_title('Modified image')\n\n plt.show()", "def salt_and_pepper_noise(image, prob):\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob \n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def ndsi(self,\n img):\n return img.normalizedDifference(['GREEN', 'SWIR1']).select([0], ['NDSI']).multiply(self.scale_factor)", "def func_Ip_318(pp, pd):\n return pp/(np.pi*(pd/2)**2)", "def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y", "def r0(self):\n return self.p[0] / self.p[1]", "def snr_f(self, image):\n image_ps = self.pow_spec(image)\n noise_level = numpy.sum(self.rim*image_ps)/numpy.sum(self.rim)\n 
return numpy.sqrt(image_ps[int(self.size/2), int(self.size/2)]/noise_level)", "def PETImageProcess(PET_Scan):\n PET_Scan = normalise(PET_Scan)\n return PET_Scan", "def lpointbiserialr(x,y):\r\n TINY = 1e-30\r\n if len(x) <> len(y):\r\n raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.'\r\n data = pstats.abut(x,y)\r\n categories = pstats.unique(x)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.abut(categories,range(2))\r\n recoded = pstats.recode(data,codemap,0)\r\n x = pstats.linexand(data,0,categories[0])\r\n y = pstats.linexand(data,0,categories[1])\r\n xmean = mean(pstats.colex(x,1))\r\n ymean = mean(pstats.colex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/samplestdev(pstats.colex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float\r\n return rpb, prob", "def prepro(I):\n# I = env.reset() # Use this to verify, whats happening\n# plt.imshow(I)\n I = I[35:195] # crop and keep only the play area\n I = I[::2,::2,0] # downsample by factor of 2, take every second row and column, and take only \"R\" component out of RGB image\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (but paddles, ball) just set to 1\n return I.astype(np.float).ravel() # convert to 1D array and return", "def msk_dwnsp(img, r):\n\n lx, ly = img.shape\n mask = np.ones([int(r), int(r)])\n img_sub = cv2d(img, mask, boundary='fill', mode='valid') / r**2\n\n return img_sub", "def noisy_sensor_resolution(p1,p2,error_rate=30):\n # set lenslet array parameters\n nu = 27\n nv = 27\n ns = 21\n nt = 21\n ulens_pitch = 125\n ulens_focal_length = 2426\n objective_magnification = 20\n objective_na = 0.5\n medium_index = 1.33\n\n # Construct lenslet array object\n lenslet_array = LensletArray(nu, nv, ns, nt,\n ulens_pitch, ulens_focal_length, ulens_focal_length,\n objective_magnification, objective_na, medium_index,\n ulens_fill_factor = 1.0, pixel_fill_factor = 1.0,\n circular_ulens_profile = False, \n center_wavelength = 509) # Units: nanometers\n\n # Input list with (intensity,x,y,z,num_lenslets_in_psf,lenslet_array,wavelength_nm)\n # to compute_light_field_psf; wavelength currently fixed at 510nm with intensity = 1.0.\n psf0 = compute_light_field_psf( None, 1.0, p1[0], p1[1], p1[2], ns, lenslet_array, 510 )\n psf1 = compute_light_field_psf( None, 1.0, p2[0], p2[1], p2[2], ns, lenslet_array, 510 )\n\n # Add gaussian noise (making poisson intensity >30 assumption)\n # The shot noise variance for each nonzero pixel should be linearly \n # related to the mean intensity of the corresponding pixel in p1.\n noise = psf0 * np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)\n signal = psf1 - psf0 + np.random.normal(loc=0.0, scale=1.0, size=psf0.shape)\n\n # log likelihood ratio on continuous data (based on poisson shot noise)\n l0 = 2*psf0\n la = psf1 + psf0\n logL = np.sum( la*(np.log(la) - np.log(l0) - 1.0) + l0 )\n\n # log likelihood ratio on discrete (16-bit) data (based on poisson shot noise)\n psf_max = 2*np.max(psf0)\n psf0_discrete = (65535*(psf0/psf_max)).astype(np.uint16)\n psf1_discrete = (65535*(psf1/psf_max)).astype(np.uint16)\n l0 = 2.0*psf0_discrete\n la = psf1_discrete + psf0_discrete\n log_la = np.log(la); 
log_la[np.where(log_la==-np.inf)[0]]=0.0\n log_l0 = np.log(l0); log_l0[np.where(log_l0==-np.inf)[0]]=0.0\n logL_discrete = np.sum( la*(log_la - log_l0 - 1.0) + l0 )\n\n # save 16-bit pngs\n save_image('/home/logan/Documents/Results/Resolution/sensor/psf0.png',psf0_discrete)\n save_image('/home/logan/Documents/Results/Resolution/sensor/psf1.png',psf1_discrete)\n \n # KS test\n ks, pval = ks_2samp( signal.flatten(), noise.flatten() )\n print \"KS statistic:\",ks\n print \"KS p-value:\",pval\n print \"log Likelihood ratio:\",logL\n print \"Discrete log Likelihood ratio:\",logL_discrete\n return ks,pval,logL,logL_discrete", "def llr2_to_prob(llr):\n return 1 / (1 + math.pow(2, -llr))", "def paf_o_r(rr_o_r, alpha):\n return ((rr_o_r - 1) * (1 - alpha)) / ((rr_o_r - 1) * (1 - alpha) + 1)", "def psnr(y, y_pred, verbose=True):\n psnr_sum = 0\n\n for i in range(len(y)):\n psnr_sum += _psnr(y[i], y_pred[i])\n\n if verbose:\n print(f\"Mean PSNR {psnr_sum / len(y)}\")\n\n return psnr_sum / len(y)", "def PImageAdd (in1Image, in2Image, outImage, err, \\\n chkPos=False, factor1=1.0, factor2=1.0):\n ################################################################\n # Checks\n if not Image.PIsA(in1Image):\n raise TypeError,\"in1Image MUST be a Python Obit Image\"\n if not Image.PIsA(in2Image):\n raise TypeError,\"in2Image MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n #\n # Clone output from input 1\n in1Image.Clone (outImage, err)\n # Open images\n Image.POpen (in1Image, Image.READONLY, err)\n Image.POpen (in2Image, Image.READONLY, err)\n Image.POpen (outImage, Image.WRITEONLY, err)\n # Get input descriptor to see how many planes\n in1Desc = in1Image.Desc\n in2Desc = in2Image.Desc\n # Check compatibility\n ImageDesc.PCheckCompat (in1Desc, in2Desc, chkPos=chkPos)\n inDescDict = in1Desc.Dict\n ndim = inDescDict[\"naxis\"]\n inNaxis = inDescDict[\"inaxes\"]\n # Work buffer\n inImageArray = Image.PGetFArray(in1Image)\n ImageBuffer1 = FArray.PCopy(inImageArray, err)\n ImageBuffer2 = FArray.PCopy(inImageArray, err)\n\n # list of planes to loop over (0-rel)\n if (ndim>0) and (inNaxis[2]>0): \n planes = range(inNaxis[2])\n else:\n planes = [0]\n \n # Loop over planes\n for iPlane in planes:\n doPlane = [iPlane+1,1,1,1,1]\n # Get image planes\n Image.PGetPlane (in1Image, ImageBuffer1, doPlane, err)\n Image.PGetPlane (in2Image, ImageBuffer2, doPlane, err)\n\n # Scale\n FArray.PSMul(ImageBuffer1, factor1)\n FArray.PSMul(ImageBuffer2, factor2)\n\n # Add\n FArray.PAdd(ImageBuffer1, ImageBuffer2, ImageBuffer2)\n\n # Write output\n Image.PPutPlane (outImage, ImageBuffer2, doPlane, err)\n\n # end loop over planes\n # Close\n in2Image.Close(err)\n in2Image.Close(err)\n outImage.Close(err)\n # Error?\n if err.isErr:\n OErr.printErrMsg(err, \"Error subtracting Images\")\n # Write history\n in1History = History.History(\"history\", in1Image.List, err)\n in2History = History.History(\"history\", in2Image.List, err)\n outHistory = History.History(\"history\", outImage.List, err)\n # Copy Histories\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1, \"/ PImageAdd Input 1 History\",err)\n outHistory.Close(err)\n info = in1Image.List.Dict\n # FITS? 
- copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in1History, outHistory, err)\n #Not needed History.PCopy(in1History, outHistory, err)\n outHistory.Open(History.READWRITE, err)\n outHistory.WriteRec(-1, \"/ \",err)\n outHistory.WriteRec(-1, \"/ ****** PImageAdd Input 2 History\",err)\n outHistory.Close(err)\n info = in2Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in2History, outHistory, err)\n History.PCopy(in2History, outHistory, err)\n # Add this programs history\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor1 = \"+str(factor1),err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor2 = \"+str(factor2),err)\n outHistory.Close(err)", "def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', points1, '%d', delimiter=' ') # X is an array\n np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None", "def ComputeSNR(self):\n for epi in self.entry_map['epi']:\n epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix']\n prefix = self.info[epi]['imgfile_final'] + '_snr'\n if not os.path.exists('%s_snr.png' % prefix):\n if self.verbose:\n print 'TemporalSnr(epifile=%s, prefix=%s)' % \\\n (epifile, prefix)\n try:\n TemporalSnr(epifile=epifile, prefix=prefix)()\n except:\n print(\"Error computing temporal SNR\")", "def dice(img1: np.array, img2: np.array) -> float:\n img1 = np.asarray(img1).astype(np.bool)\n img2 = np.asarray(img2).astype(np.bool)\n\n intersection = np.logical_and(img1, img2)\n\n return 2.0 * intersection.sum() / (img1.sum() + img2.sum())", "def wR(r, rc):\n nr = norm_numba(r)\n return (1 - nr / rc) if nr / rc < 1.0 else 
0.0", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec", "def s2profile(r,r0,A,B):\n x = r/r0\n res = A*4./(np.exp(x)+np.exp(-x))**2 + B\n return res", "def srrc_imp(sps, alpha=.35, M=6):\n\n n = np.arange(-M*sps,M*sps+1)\n b = np.zeros(len(n))\n sps *= 1.0\n a = alpha\n for i in range(len(n)):\n if abs(1 - 16*a**2*(n[i]/sps)**2) <= np.finfo(np.float).eps/2:\n b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))\n else:\n b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/sps)**2))\n b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/sps) + np.sinc((1-a)*n[i]/sps)*(1-a)*np.pi/(4.*a))\n\n return b / np.sqrt(sum(b**2))", "def pxrdf(self):\n \n rank = range(len(self.theta2)) #np.argsort(self.theta2)\n PL = []\n last = 0\n for i in rank:\n if self.xrd_intensity[i] > 0.01:\n angle = self.theta2[i]\n if abs(angle-last) < 1e-4:\n PL[-1][-1] += self.xrd_intensity[i]\n else:\n PL.append([angle, self.d_hkls[i], \\\n self.hkl_labels[i][0][\"hkl\"][0], \\\n self.hkl_labels[i][0][\"hkl\"][1], \\\n self.hkl_labels[i][0][\"hkl\"][2], \\\n self.xrd_intensity[i]])\n last = angle\n\n PL = (np.array(PL))\n PL[:,-1] = PL[:,-1]/max(PL[:,-1])\n self.pxrd = PL\n # print(PL[0],PL[-1])", "def npcr(mat1, mat2):\n\tnpcr = 0\n\tw, h = mat1.shape\n\tif mat1.shape != mat2.shape:\n\t\treturn -1\n\tfor i in range(w):\n\t\tfor j in range(h):\n\t\t\tif mat1[i,j] != mat2[i,j]:\n\t\t\t\tnpcr += 1\n\tnpcr /= (w*h)\n\treturn npcr*100", "def gridratio( grid1, grid2):\n\n nx1 = grid1.img_width\n ny1 = grid1.img_height\n nx2 = grid2.img_width\n ny2 = grid2.img_height\n\n ratio = 0.\n rms = 0.\n\n if nx1 != nx2:\n print(\"GridRatio: Nx1 != Nx2 (%d, %d)\" % (nx1, nx2))\n return ratio, rms\n\n if ny1 != ny2:\n print(\"GridRatio: Ny1 != Ny2 (%d, %d)\" % (ny1, ny2))\n return ratio, rms\n\n count = 0\n nonzero = np.zeros(nx1*ny1)\n\n # copy to ratio array\n gridratio = copy.deepcopy( grid1)\n\n for iii in range(nx1):\n for jjj in range(ny1):\n # put in zero as default\n gridratio.image[jjj,iii] = 0.\n if grid1.image[jjj,iii] > EPSILON:\n if grid2.image[jjj,iii] > EPSILON:\n nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]\n count = count + 1\n if count < 2:\n print (\"No overlap in non-zero samples\")\n return ratio, rms, gridratio\n\n nonzero = nonzero[0:count]\n asum = np.sum( nonzero)\n ratio = asum/float(count)\n rms = np.std( nonzero)\n print (\"Grid Ratio: %.4f 
+/- %.4f for %d samples\" % (ratio, rms/np.sqrt(count), count))\n # return the ratio grid \n return ratio, rms, gridratio", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def ppix(self):\n return self._ppix", "def lsnr_mapping(\n self, lsnr: Tensor, lsnr_thresh: float, lsnr_min: Optional[float] = None\n ) -> Tensor:\n # s = a * lsnr + b\n lsnr_min = float(self.lsnr_min) if lsnr_min is None else lsnr_min\n a_ = 1 / (lsnr_thresh - lsnr_min)\n b_ = -a_ * lsnr_min\n return 1 - torch.clamp(a_ * lsnr + b_, 0.0, 1.0)", "def rotate_phasor(r, r1, r2):\n return (r - r2) / (r1 - r2)", "def prob_3_1(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return swapImg ######", "def read_next_image(m, lcr, X_center, X_left, X_right, Y_train):\n offset = 1.0\n dist = 20.0\n steering = Y_train[m]\n\n if lcr == 0:\n image = plt.imread(normalize_path(X_left[m]))\n dsteering = offset / dist * 360 / (2 * np.pi) / 25.0\n steering += dsteering\n elif lcr == 1:\n image = plt.imread(normalize_path(X_center[m]))\n elif lcr == 2:\n image = plt.imread(normalize_path(X_right[m]))\n dsteering = -offset / dist * 360 / (2 * np.pi) / 25.0\n steering += dsteering\n else:\n print('Invalid lcr value :', lcr)\n\n return image, steering", "def _snr_preprocessing(self):\n if self.flux is None or self.fluxerr is None:\n return np.ones(len(self.stamps), dtype=bool)\n\n snrs = self.flux.astype(float) / self.fluxerr.astype(float)\n return snrs > self.snr_threshold", "def prob_3_6(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return addNoiseImg ######", "def preprocess(image):\n return image - MEAN_PIXEL", "def MixR2VaporPress(qv,p):\n\n return qv*p/(Epsilon+qv)", "def frc(im1, im2, annulus_width=1, edgetaper=5, edgeloc=8 / 20.0, loc=(0, 0), smooth=None, working_mask=None, x=None,\n y=None, rmax=None, taper=None):\n\n # 2012-02-25 20:40 IJMC: Empty bins now have numel=0, not nan.\n # 2012-02-04 17:41 IJMC: Added \"SUM\" flag\n # 2010-11-19 16:36 IJC: Updated documentation for Sphinx\n # 2010-03-10 19:22 IJC: Ported to python from Matlab\n # 2005/12/19 Added 'working_region' option (IJC)\n # 2005/12/15 Switched order of outputs (IJC)\n # 2005/12/12 IJC: Removed decifact, changed name, wrote comments.\n # 2005/11/04 by Ian Crossfield at the Jet Propulsion Laboratory\n\n import numpy as ny\n\n class radialDat:\n \"\"\"Empty object container.\n \"\"\"\n\n def __init__(self):\n self.num = None\n self.denom = None\n self.T1bit = None\n self.Thalfbit = None\n\n # ---------------------\n # Set up input parameters\n # ---------------------\n\n if working_mask == None:\n working_mask = ny.ones(im1.shape, bool)\n\n npix, npiy = im1.shape\n if taper is not None:\n taper0 = taper\n else:\n taper0 = tapercircle(im1.shape, edgetaper, edgeloc, loc)\n f, a = plt.subplots()\n a.imshow(imsave(im1 * taper0))\n plt.title('tapered')\n plt.show()\n # taper0 = 1\n F1 = fftshift(fft2(ifftshift(im1 * taper0)))\n F2 = fftshift(fft2(ifftshift(im2 * taper0)))\n F1F2_star = F1 * F2.conj()\n\n if x == None or y == None:\n x1 = ny.arange(-npix / 2., npix / 2.)\n y1 = ny.arange(-npiy / 2., npiy / 2.)\n x, y = ny.meshgrid(y1, x1)\n\n r = abs(x + 1j * y)\n\n if rmax == None:\n rmax = r[working_mask].max()\n\n # ---------------------\n # Prepare the data container\n # ---------------------\n dr = ny.abs([x[0, 0] - x[0, 
1]]) * annulus_width\n radial = ny.arange(rmax / dr) * dr + dr / 2.\n nrad = len(radial)\n radialdata = radialDat()\n radialdata.num = ny.zeros(nrad)\n radialdata.denom = ny.zeros(nrad)\n radialdata.T1bit = ny.zeros(nrad)\n radialdata.Thalfbit = ny.zeros(nrad)\n radialdata.r = radial / (npix / 2)\n\n # ---------------------\n # Loop through the bins\n # ---------------------\n for irad in range(nrad): # = 1:numel(radial)\n minrad = irad * dr\n maxrad = minrad + dr\n thisindex = (r >= minrad) * (r < maxrad) * working_mask\n # import pylab as py\n # pdb.set_trace()\n if not thisindex.ravel().any():\n radialdata.num[irad] = ny.nan\n radialdata.denom[irad] = ny.nan\n radialdata.T1bit[irad] = ny.nan\n radialdata.Thalfbit[irad] = ny.nan\n else:\n sqrt_n = np.sqrt(thisindex.astype(np.int).sum())\n radialdata.num[irad] = np.real(F1F2_star[thisindex].sum())\n radialdata.denom[irad] = ny.sqrt((ny.abs(F1[thisindex]) ** 2).sum() * (ny.abs(F2[thisindex]) ** 2).sum())\n radialdata.T1bit[irad] = (0.5 + 2.4142 / sqrt_n) / (1.5 + 1.4142 / sqrt_n)\n radialdata.Thalfbit[irad] = (0.2071 + 1.9102 / sqrt_n) / (1.2071 + 0.9102 / sqrt_n)\n\n # ---------------------\n # Return with data\n # ---------------------\n radialdata.frc = ny.nan_to_num(radialdata.num / radialdata.denom)\n radialdata.frc[radialdata.frc < 0] = 0\n if smooth is not None:\n radialdata.frc = gaussian_filter1d(radialdata.frc, smooth)\n take = radialdata.r <= 1.1\n radialdata.r = radialdata.r[take]\n radialdata.frc = radialdata.frc[take]\n radialdata.T1bit = radialdata.T1bit[take]\n radialdata.Thalfbit = radialdata.Thalfbit[take]\n return radialdata", "def prob_3_2(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return grayImg ######", "def PreProcess(cls, imgStream):\n threshold = 175\n img = Image.open(imgStream)\n return img.convert('L').point(lambda p: 0 if p < threshold else 255, '1')" ]
[ "0.8011122", "0.791417", "0.7270669", "0.7199645", "0.7080047", "0.6986541", "0.69765127", "0.6821519", "0.67978144", "0.67174155", "0.6548654", "0.6511339", "0.6409773", "0.637189", "0.6161479", "0.60819566", "0.5994104", "0.58915156", "0.58915156", "0.58762085", "0.5791571", "0.5766734", "0.571575", "0.5691651", "0.5683146", "0.5639049", "0.56214285", "0.55981904", "0.5558087", "0.55506784", "0.55264246", "0.54949546", "0.54770154", "0.5474343", "0.546734", "0.54577315", "0.54015887", "0.5389403", "0.5384784", "0.53839284", "0.5366567", "0.53517836", "0.53517836", "0.5334648", "0.5332135", "0.53109705", "0.53091925", "0.5306277", "0.5293141", "0.5264996", "0.52641416", "0.5252785", "0.52524835", "0.52474844", "0.52348495", "0.5231009", "0.5228623", "0.5215063", "0.5211925", "0.51771116", "0.5161095", "0.51520354", "0.5151345", "0.5141669", "0.5131928", "0.5131145", "0.51262766", "0.5123434", "0.51131034", "0.51009315", "0.5093758", "0.5089732", "0.5081236", "0.50781536", "0.50753826", "0.50725454", "0.5070993", "0.5061197", "0.50562936", "0.5034982", "0.50290334", "0.5025087", "0.5023899", "0.5023867", "0.502341", "0.5020078", "0.50134945", "0.5003297", "0.5002675", "0.50020236", "0.5001099", "0.49872196", "0.49824613", "0.49807164", "0.49647513", "0.49640656", "0.49554926", "0.49547583", "0.49542433", "0.49530435" ]
0.79002017
2
PSNR between two arrays
def psnr(y, y_pred, verbose=True):
    psnr_sum = 0

    for i in range(len(y)):
        psnr_sum += _psnr(y[i], y_pred[i])

    if verbose:
        print(f"Mean PSNR {psnr_sum / len(y)}")

    return psnr_sum / len(y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_psnr_with_two_completely_different_sets(self):\n low = np.zeros((10, 500, 500, 1), dtype=np.uint8)\n high = np.ones((10, 500, 500, 1), dtype=np.uint8) * 255\n\n avg_psnr = np.array(psnr(high, low)).mean()\n self.assertEqual(avg_psnr, 0.0)", "def _comput_PSNR(self, input, target):\n shave = 4\n ch, h, w = input.size()\n input_Y = rgb2ycbcrT(input.cpu())\n target_Y = rgb2ycbcrT(target.cpu())\n diff = (input_Y - target_Y).view(1, h, w)\n\n diff = diff[:, shave:(h - shave), shave:(w - shave)]\n mse = diff.pow(2).mean()\n psnr = -10 * np.log10(mse)\n return psnr", "def _psnr(img1, img2):\n mse = np.mean((img1 - img2) ** 2)\n if mse == 0:\n return 100\n PIXEL_MAX = 1\n return (20 * math.log10(PIXEL_MAX)) - (10 * math.log10(mse))", "def computePSNR(img1, img2, pad_y=0, pad_x=0):\n if pad_y != 0 and pad_x != 0:\n img1_u = (np.clip(img1, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n else:\n img1_u = (np.clip(img1, 0, 255.0)).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)).astype(dtype=np.uint8)\n imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32)\n rmse = np.sqrt(np.mean(np.power(imdiff[:], 2)))\n return 20.0 * np.log10(255.0 / rmse)", "def cal_psnr(im1, im2):\n # assert pixel value range is 0-255 and type is uint8\n mse = ((im1.astype(np.float) - im2.astype(np.float)) ** 2).mean()\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr", "def compute_psnr(array_0_uint8, array_1_uint8):\n if array_0_uint8.dtype != numpy.uint8:\n raise TypeError('`array_0_uint8.dtype` is not equal to `numpy.uint8`.')\n if array_1_uint8.dtype != numpy.uint8:\n raise TypeError('`array_1_uint8.dtype` is not equal to `numpy.uint8`.')\n array_0_float64 = array_0_uint8.astype(numpy.float64)\n array_1_float64 = array_1_uint8.astype(numpy.float64)\n mse_float64 = numpy.mean((array_0_float64 - array_1_float64)**2)\n \n # `array_0_float64` and `array_1_float64` might be identical.\n # 1.e-6 is added to `mse_float64` to avoid dividing by 0.\n # The precedence of ...**... (exponentiation) is higher\n # than the precedence of .../... 
(division).\n return 10.*numpy.log10(255.**2/(mse_float64 + 1.e-6))", "def calculate_psnr(img0, img1, data_range=None):\n psnr = skm.peak_signal_noise_ratio(img0, img1, data_range=data_range) \n return psnr", "def psnr(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n psnr_ = np.zeros(n)\n for ii in range(n):\n psnr_[ii] = peak_signal_noise_ratio(image1[ii], image2[ii], **kwargs)\n return psnr_", "def PSNR (originalArray, reconstructedArray):\n # Convert both to float.\n if originalArray.dtype == np.int16:\n originalArray = originalArray.astype(np.float32) / 32768\n if reconstructedArray.dtype == np.int16:\n reconstructedArray = reconstructedArray.astype(np.float32) / 32768\n\n # Correct for any differences in dynamic range, which might be caused\n # by attempts of compression libraries like mp3 to avoid overflow.\n reconstructedArray *= np.sqrt((originalArray ** 2).sum() /\n (reconstructedArray ** 2).sum())\n\n\n max_value = float(np.max(np.abs(originalArray)))\n mean_square_error = ((originalArray - reconstructedArray) ** 2).sum() / originalArray.size\n if mean_square_error != 0:\n psnr = 20 * math.log10(max_value) - 10 * math.log10(mean_square_error)\n else:\n psnr = math.inf\n\n return psnr", "def npairs(data1, data2, rbins, period=None):\n \n #work with arrays!\n data1 = np.asarray(data1)\n if data1.ndim ==1: data1 = np.array([data1])\n data2 = np.asarray(data2)\n if data2.ndim ==1: data2 = np.array([data2])\n rbins = np.asarray(rbins)\n if rbins.size ==1: rbins = np.array([rbins])\n \n #Check to make sure both data sets have the same dimension. Otherwise, throw an error!\n if np.shape(data1)[-1]!=np.shape(data2)[-1]:\n raise ValueError(\"data1 and data2 inputs do not have the same dimension.\")\n return None\n \n #Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(data1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(data1)[-1])\n elif np.shape(period)[0] != np.shape(data1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n \n N1 = len(data1)\n N2 = len(data2)\n dd = np.zeros((N1*N2,)) #store radial pair seperations \n for i in range(0,N1): #calculate distance between every point and every other point\n x1 = data1[i,:]\n x2 = data2\n dd[i*N2:i*N2+N2] = distance(x1, x2, period)\n \n #sort results\n dd.sort()\n #count number less than r\n n = np.zeros((rbins.size,), dtype=np.int)\n for i in range(rbins.size): #this is ugly... 
is there a sexier way?\n if rbins[i]>np.min(period)/2.0:\n print(\"Warning: counting pairs with seperations larger than period/2 is awkward.\")\n print(\"r=\", rbins[i], \" min(period)/2=\",np.min(period)/2.0)\n n[i] = len(np.where(dd<=rbins[i])[0])\n \n return n", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def psnr(gt, pred):\n return compare_psnr(gt, pred, data_range=gt.max())", "def SNR(op0, op1):\n result = len(op0)*np.abs(np.mean(op1) - np.mean(op0))**2/((np.var(op1)+np.var(op0))/2)\n \n return result", "def _rawprng(self):\n self.p += 1 \n if self.p >= self.o:\n\t\t\tself.p = 0\n t = 1768863 * self.s[self.p] + self.c * 2.3283064365386963e-10\n self.c = int(t) | 0\n self.s[self.p] = t - self.c\n return self.s[self.p]", "def get_n_p(a_mr: float, a_or: float, a_nr: float, calender: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):\n\n n_p_mr_wd = np.array([0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 0, 0, 1, 1, 0, 0, 1, 2, 2, 3, 3, 2, 1, 1]) * a_mr / 29.81\n n_p_or_wd = np.array([4, 4, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 2, 3]) * a_or / 51.34\n n_p_nr_wd = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) * a_nr / 38.93\n n_p_mr_hd = np.array([0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 2, 2, 1, 0, 0, 2, 3, 3, 4, 2, 2, 1, 0]) * a_mr / 29.81\n n_p_or_hd = np.array([4, 4, 4, 4, 4, 4, 4, 3, 1, 2, 2, 2, 1, 0, 0, 0, 1, 1, 1, 0, 2, 2, 2, 3]) * a_or / 51.34\n n_p_nr_hd = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) * a_nr / 38.93\n\n n_p_mr = np.tile(n_p_mr_wd, 365) * (calender == '平日') + np.tile(n_p_mr_hd, 365) * (calender == '休日')\n n_p_or = np.tile(n_p_or_wd, 365) * (calender == '平日') + np.tile(n_p_or_hd, 365) * (calender == '休日')\n n_p_nr = np.tile(n_p_nr_wd, 365) * (calender == '平日') + np.tile(n_p_nr_hd, 365) * (calender == '休日')\n\n n_p = n_p_mr + n_p_or + n_p_nr\n\n return n_p, n_p_mr, n_p_or, n_p_nr", "def rmsd(array1, array2):\n total = 0\n for n1, n2 in zip(array1, array2):\n total += (n1 - n2) ** 2\n total /= len(array1)\n\n return math.sqrt(total)", "def psnr(label, outputs, max_val=1.):\n label = label.cpu().detach().numpy()\n outputs = outputs.cpu().detach().numpy()\n # PSNR = -10. 
* np.log10(np.mean(np.square(outputs - label)))\n img_diff = outputs - label\n rmse = math.sqrt(np.mean((img_diff) ** 2))\n if rmse == 0:\n return 100\n else:\n PSNR = 20 * math.log10(max_val / rmse)\n return PSNR", "def calculateSNR(self):\n pass", "def PSNR(self, imageA, imageB):\n mse = self.MSE(imageA, imageB)\n if mse == 0:\n return 100\n return 20 * log10(255.0 / sqrt(mse))", "def NR(ip, fp):\n x = fp[0] - ip[0]\n y = fp[1] - ip[1]\n r = sqrt(x ** 2 + y ** 2)\n return array([-y / r, x / r])", "def snr(p1, l1x, l1y, p2, l2x, l2y, var):\n ip12 = inner_product(p1, l1x, l1y, p2, l2x, l2y, var)\n ip11 = inner_product(p1, l1x, l1y, p1, l1x, l1y, var)\n ip22 = inner_product(p2, l2x, l2y, p2, l2x, l2y, var)\n\n return ip11 / (ip11 + ip22 - 2 * ip12)", "def psnr(img_a, img_b, max_img_value=255):\n mse = tf.reduce_mean((img_a - img_b) ** 2)\n return 20 * log_n(max_img_value, 10) - 10 * log_n(mse, 10)", "def observed_snr(psrsnr, psrra, psrdec):\n global beam_profile\n return lambda obsra,obsdec: psrsnr*beam_profile.gain_at_angular_offset(angsep_arcmin(psrra, psrdec, obsra, obsdec))", "def rsrp_snr_est(self,iqcpx1,iqcpx2):\n LLTFpos = np.int_(np.concatenate((np.arange((self._FFT-self._LLTF)/2,self._FFT/2),\n np.arange(self._FFT/2+1,self._FFT/2+self._LLTF/2+1))))\n # use an offset of -1/2 cyclic prefix\n idx = 4*self._GI+2*self._FFT-self._GI/2\n # extract the two LTF symbols of both channels\n yLLtf1 = iqcpx1[idx:idx+2*self._FFT]\n yLLtf2 = iqcpx2[idx:idx+2*self._FFT]\n # do the FFT on both LLTF pairs\n ysLLtf1a = np.fft.fftshift(np.fft.fft(yLLtf1[0:self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf1b = np.fft.fftshift(np.fft.fft(yLLtf1[self._FFT:2*self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf2a = np.fft.fftshift(np.fft.fft(yLLtf2[0:self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n ysLLtf2b = np.fft.fftshift(np.fft.fft(yLLtf2[self._FFT:2*self._FFT],self._FFT))/np.sqrt(self._STS*self._LLTF)\n # Channel 1\n Ps1 = np.real(np.dot(ysLLtf1a[LLTFpos].conj().transpose(),ysLLtf1a[LLTFpos]))\n Ps2 = np.real(np.dot(ysLLtf1b[LLTFpos].conj().transpose(),ysLLtf1b[LLTFpos]))\n # divide average symbol power by number of symbols and pilot subcarriers\n rsrp1 = 10*np.log10((Ps1+Ps2)/(2*np.size(LLTFpos)))\n Pn = np.sqrt(2)*np.sum(np.power(np.abs(ysLLtf1b[LLTFpos])-np.abs(ysLLtf1a[LLTFpos]),2))\n snr1 = 10*np.log10((Ps1-Pn)/Pn)\n # Channel 2\n Ps1 = np.real(np.dot(ysLLtf2a[LLTFpos].conj().transpose(),ysLLtf2a[LLTFpos]))\n Ps2 = np.real(np.dot(ysLLtf2b[LLTFpos].conj().transpose(),ysLLtf2b[LLTFpos]))\n # divide average symbol power by number of symbols and pilot subcarriers\n rsrp2 = 10*np.log10((Ps1+Ps2)/(2*np.size(LLTFpos)))\n Pn = np.sqrt(2)*np.sum(np.power(np.abs(ysLLtf2b[LLTFpos])-np.abs(ysLLtf2a[LLTFpos]),2))\n snr2 = 10*np.log10((Ps1-Pn)/Pn)\n return rsrp1, rsrp2, snr1, snr2", "def _pdist(a, b):\n a, b = np.asarray(a), np.asarray(b)\n if len(a) == 0 or len(b) == 0:\n return np.zeros((len(a), len(b)))\n a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)\n r2 = -2. * np.dot(a, b.T) + a2[:, None] + b2[None, :]\n r2 = np.clip(r2, 0., float(np.inf))\n return r2", "def _pdist(a, b):\n a, b = np.asarray(a), np.asarray(b)\n if len(a) == 0 or len(b) == 0:\n return np.zeros((len(a), len(b)))\n a2, b2 = np.square(a).sum(axis=1), np.square(b).sum(axis=1)\n r2 = -2. 
* np.dot(a, b.T) + a2[:, None] + b2[None, :]\n r2 = np.clip(r2, 0., float(np.inf))\n return r2", "def PSNR(y_true, y_pred):\n return tf.image.psnr(y_true,y_pred,1)", "def prbs(Tmax, Tmin, initstate=\"random\"):\n if not isinstance(Tmax, int):\n raise TypeError(\"`Tmax` must be an integer\")\n\n if Tmax < 2:\n raise ValueError(\"`Tmax` must be > 2\")\n\n if not isinstance(Tmin, int):\n raise TypeError(\"`Tmax` must be an integer\")\n\n if Tmin < 1:\n raise ValueError(\"`Tmin` must be > 1\")\n\n if Tmin >= Tmax:\n raise ValueError(\"`Tmax` must be strictly superior to `Tmin`\")\n\n __init_availabble__ = [\"random\", \"ones\"]\n if initstate not in __init_availabble__:\n raise ValueError(f\"`initstate` must be either {__init_availabble__}\")\n\n # get the register length\n n = np.ceil(Tmax / Tmin)\n if n < 2 or n > 31:\n raise ValueError(\n \"The PRBS cannot be generated, \" \"decompose the signal in two sequences\"\n )\n\n # Linear feedback register up to 32 bits\n fpoly = {\n 2: [2, 1],\n 3: [3, 1],\n 4: [4, 1],\n 5: [5, 2],\n 6: [6, 1],\n 7: [7, 1],\n 8: [8, 4, 3, 2],\n 9: [9, 4],\n 10: [10, 3],\n 11: [11, 2],\n 12: [12, 6, 4, 1],\n 13: [13, 4, 3, 1],\n 14: [14, 8, 6, 1],\n 15: [15, 1],\n 16: [16, 12, 3, 1],\n 17: [17, 3],\n 18: [18, 7],\n 19: [19, 5, 2, 1],\n 20: [20, 3],\n 21: [21, 2],\n 22: [22, 1],\n 23: [23, 5],\n 24: [24, 7, 2, 1],\n 25: [25, 3],\n 26: [26, 6, 2, 1],\n 27: [27, 5, 2, 1],\n 28: [28, 3],\n 29: [29, 2],\n 30: [30, 23, 2, 1],\n 31: [31, 3],\n }\n\n L = LFSR(fpoly=fpoly[n], initstate=initstate, verbose=False)\n\n seq = []\n for n in range(L.expectedPeriod):\n L.next()\n seq.append(L.state[0])\n\n seq_padded = np.repeat(seq, Tmin)\n\n # check generated PRBS\n assert seq_padded.shape[0] == L.expectedPeriod * Tmin\n assert max(len(list(v)) for g, v in itertools.groupby(seq_padded)) == Tmax\n assert min(len(list(v)) for g, v in itertools.groupby(seq_padded)) == Tmin\n\n return seq_padded", "def compute_rate_psnr(reference_uint8, mean_training, std_training, entropy_ae,\n bin_width, nb_vertically, path_to_reconstruction):\n # The function `svhn.svhn.preprocess_svhn` checks\n # that `reference_uint8.dtype` is equal to `numpy.uint8`\n # and `reference_uint8.ndim` is equal to 2.\n reference_float64 = svhn.svhn.preprocess_svhn(reference_uint8,\n mean_training,\n std_training)\n (nb_images, nb_pixels) = reference_uint8.shape\n \n # At training time, the decoder was fed with\n # latent variables perturbed by uniform noise.\n # However, at test time, the decoder is fed with\n # quantized latent variables.\n y = entropy_ae.encoder(reference_float64)[1]\n quantized_y = tls.quantization(y, bin_width)\n \n # In the function `tls.discrete_entropy`, `quantized_y`\n # is flattened to compute the entropy.\n disc_entropy = tls.discrete_entropy(quantized_y, bin_width)\n rate = entropy_ae.nb_y*disc_entropy/nb_pixels\n reconstruction_float64 = entropy_ae.decoder(quantized_y)[1]\n rec_rescaled_float64 = reconstruction_float64*std_training + \\\n numpy.tile(mean_training, (nb_images, 1))\n reconstruction_uint8 = tls.cast_float_to_uint8(rec_rescaled_float64)\n psnr = tls.mean_psnr(reference_uint8, reconstruction_uint8)\n tls.visualize_rows(reconstruction_uint8,\n 32,\n 32,\n nb_vertically,\n path_to_reconstruction)\n return (rate, psnr)", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def llr2_to_prob(llr):\n return 1 / (1 + math.pow(2, -llr))", "def psnr(y_true, y_pred):\n return 1/(10.0 * np.log(1.0 / (np.mean(np.square(y_pred - y_true)))) / 
np.log(10.0))", "def PSNR(ground_truth_images: np.ndarray, noisy_images: np.ndarray) -> List[float]:\n validate_inputs(ground_truth_images, noisy_images)\n\n psnr_acumulated = []\n\n quantity_of_images = ground_truth_images.shape[0]\n\n if need_to_normalize(ground_truth_images):\n ground_truth_images = normalize(ground_truth_images, \\\n interval=(0,255), data_type='int')\n \n if need_to_normalize(noisy_images):\n noisy_images = normalize(noisy_images, \\\n interval=(0,255), data_type='int')\n \n for i in range(quantity_of_images):\n psnr_image = psnr(\n ground_truth_images[i,:,:,0], \n noisy_images[i,:,:,0],\n data_range=256\n )\n psnr_acumulated.append(psnr_image)\n\n # psnr_acumulated = np.array(psnr_acumulated)\n\n # return psnr_acumulated.mean()\n return psnr_acumulated", "def SNR(S, S0):\n return np.var(S) / np.var(S - S0)", "def bdsnr(metric_set1, metric_set2):\n rate1 = [x[0] for x in metric_set1]\n psnr1 = [x[1] for x in metric_set1]\n rate2 = [x[0] for x in metric_set2]\n psnr2 = [x[1] for x in metric_set2]\n\n log_rate1 = map(lambda x: math.log(x), rate1)\n log_rate2 = map(lambda x: math.log(x), rate2)\n\n # Best cubic poly fit for graph represented by log_ratex, psrn_x.\n p1 = numpy.polyfit(log_rate1, psnr1, 3)\n p2 = numpy.polyfit(log_rate2, psnr2, 3)\n\n # Integration interval.\n min_int = max([min(log_rate1),min(log_rate2)])\n max_int = min([max(log_rate1),max(log_rate2)])\n\n # Integrate p1, and p2.\n p_int1 = numpy.polyint(p1)\n p_int2 = numpy.polyint(p2)\n\n # Calculate the integrated value over the interval we care about.\n int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)\n int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)\n\n # Calculate the average improvement.\n avg_diff = (int2 - int1) / (max_int - min_int)\n return avg_diff", "def compute_power(pvals, SNPs):\n\tnsnps = len(pvals)\n\tall_snps = np.arange(0, nsnps)\n\tpos = SNPs\n\tnegs = list(set(all_snps) - set(SNPs))\n\n\tpvals_rank = rank_array(pvals)\n\n\trocr = np.zeros((nsnps, 2))\n\tfor i in all_snps:\n\t\tv = pvals_rank[0:i] # test positives\n\t\tz = list(set(all_snps) - set(v)) # test negatives\n\n\t\tTP = len(set(v) & set(pos))\n\t\tFP = len(set(v) & set(negs))\n\t\tTN = len(set(z) & set(negs))\n\t\tFN = len(set(z) & set(pos))\n\n\t\tTPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)\n\n\t\trocr[i, :] = [FPR, TPR]\n\n\treturn rocr", "def RDF(self, dP, rx, fast=True):\n parts = np.zeros((len(dP), len(rx)))\n for i, dPi in enumerate(dP):\n w = np.sign(dPi[1])*np.sqrt(np.sqrt(np.abs(dPi[1])))\n parts[i,:] = w*self.apnl(dPi, rx, fast=fast)\n return np.sum(parts, axis=0)", "def get_psnr(self, predictions, ground_truth):\n batch_size, _, _, _ = predictions.shape\n pred = predictions.detach().cpu().numpy()\n gt = ground_truth.detach().cpu().numpy()\n\n return skimage.measure.compare_psnr(gt, pred, data_range=2)", "def lpointbiserialr(x,y):\r\n TINY = 1e-30\r\n if len(x) <> len(y):\r\n raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. 
ABORTING.'\r\n data = pstats.abut(x,y)\r\n categories = pstats.unique(x)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.abut(categories,range(2))\r\n recoded = pstats.recode(data,codemap,0)\r\n x = pstats.linexand(data,0,categories[0])\r\n y = pstats.linexand(data,0,categories[1])\r\n xmean = mean(pstats.colex(x,1))\r\n ymean = mean(pstats.colex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/samplestdev(pstats.colex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float\r\n return rpb, prob", "def PSNR(orimg, estimg, pattern):\n PSNR = [0]*3\n _, mask = keep_measures(orimg[:, :, 0], pattern)\n for i in range(3):\n diff = orimg[:,:,i] - estimg[:,:,i]\n PSNR[i] = 10*np.log10(255**2/(np.linalg.norm((1-mask[:,:,i])*diff)**2/(1-mask[:,:,i]).sum()))\n \n return tuple(PSNR)", "def pv(rate, n_years):\n return 1 / fv(rate, n_years)", "def psnr(im1, im2):\n\n def log10(real_number):\n \"\"\" Calculate the base-ten log of a given real number.\n\n Args:\n real_number: a real number.\n Returns:\n the base-ten log of the given real number.\n \"\"\"\n numerator = tf.math.log(real_number)\n denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))\n return numerator / denominator\n\n mse = tf.reduce_mean(tf.math.squared_difference(im1, im2))\n result = tf.constant(1, dtype=tf.float32) / mse\n result = tf.math.multiply(tf.constant(10, dtype=tf.float32), log10(result))\n return result", "def func_Ip_318(pp, pd):\n return pp/(np.pi*(pd/2)**2)", "def npairs(data1, data2, rbins, period=None):\n \n #work with arrays!\n data1 = np.asarray(data1)\n if data1.ndim ==1: data1 = np.array([data1])\n data2 = np.asarray(data2)\n if data2.ndim ==1: data2 = np.array([data2])\n rbins = np.asarray(rbins)\n if rbins.size ==1: rbins = np.array([rbins])\n \n #Check to make sure both data sets have the same dimension. 
Otherwise, throw an error!\n if np.shape(data1)[-1]!=np.shape(data2)[-1]:\n raise ValueError(\"data1 and data2 inputs do not have the same dimension.\")\n return None\n \n #Process period entry and check for consistency.\n if period is None:\n period = np.array([np.inf]*np.shape(data1)[-1])\n else:\n period = np.asarray(period).astype(\"float64\")\n if np.shape(period) == ():\n period = np.array([period]*np.shape(data1)[-1])\n elif np.shape(period)[0] != np.shape(data1)[-1]:\n raise ValueError(\"period should have len == dimension of points\")\n return None\n \n tree_1 = cKDTree(data1)\n tree_2 = cKDTree(data2)\n \n n = tree_1.count_neighbors(tree_2,rbins,period=period)\n \n return n", "def get_n1(r,N):\n n1 = N - np.sum(r)\n return n1", "def pxrdf(self):\n \n rank = range(len(self.theta2)) #np.argsort(self.theta2)\n PL = []\n last = 0\n for i in rank:\n if self.xrd_intensity[i] > 0.01:\n angle = self.theta2[i]\n if abs(angle-last) < 1e-4:\n PL[-1][-1] += self.xrd_intensity[i]\n else:\n PL.append([angle, self.d_hkls[i], \\\n self.hkl_labels[i][0][\"hkl\"][0], \\\n self.hkl_labels[i][0][\"hkl\"][1], \\\n self.hkl_labels[i][0][\"hkl\"][2], \\\n self.xrd_intensity[i]])\n last = angle\n\n PL = (np.array(PL))\n PL[:,-1] = PL[:,-1]/max(PL[:,-1])\n self.pxrd = PL\n # print(PL[0],PL[-1])", "def batch_psnr(test_image, target_image, max=1.):\n psnr = 0\n num_images = test_image.shape[0]\n for i in range(num_images):\n psnr += calc_psnr(test_image[i], target_image[i], max=max)\n psnr /= num_images\n return psnr", "def interpolation_array(a1, a2):\n return (a1+a2)/2.", "def cal_pn(grams_set, grams, candidate, reference):\n count = 0\n for gram in grams_set:\n # print(gram)\n count += count_clip(gram, grams, reference)\n # calculate log() for p, so '+10**-8' avoid 'p==0'\n p = count / len(grams) + 10**-8 \n return p", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculateP(SD, numDiff):\n return numDiff/SD", "def perp_vector(p, q, r):\n v = cross(q - r, q - p)\n return v / mod(v) + q", "def non_param_unpaired_ci(sample1, sample2, alpha=0.05):\n n1 = len(sample1)\n n2 = len(sample2)\n N = norm.ppf(1 - alpha/2)\n diffs = sorted([i-j for i in sample1 for j in sample2])\n k = np.math.ceil(n1*n2/2 - (N * (n1*n2*(n1+n2+1)/12)**0.5))\n CI = (round(diffs[k-1], 3), round(diffs[len(diffs)-k], 3))\n return CI", "def tf_psnr(im1, im2):\n mse = tf.losses.mean_squared_error(labels=im2 * 255.0, predictions=im1 * 255.0)\n return 10.0 * (tf.log(255.0 ** 2 / mse) / tf.log(10.0))", "def robbins(counts):\n return float(singles(counts))/counts.sum()", "def compare_sums_ks(array1, array2):\n return stats.ks_2samp(array1, array2)", "def NPV(B,C,BV,CV,d,pb,pc):\n b=[BV[0] if x=='L' else BV[1] for x in B] #decoding revenue\n c=[CV[0] if x=='L' else CV[1] for x in C] #decoding cost\n z=[b_i - c_i for b_i, c_i in zip(b, c)] #profit at each time\n npv=np.npv(d, z)\n pnpv=pb*pc\n return (npv,pnpv)", "def prodoftwoterms(self, ii, ll, r):\n prod = 1\n for k in range(r):\n prod = prod * (ii-k) * (ll-k) \n return prod", "def compute_ap(ranks, nres):\n\n # number of images ranked by the system\n nimgranks = len(ranks)\n\n # accumulate trapezoids in PR-plot\n ap = 0\n\n recall_step = 1. 
/ nres\n\n for j in np.arange(nimgranks):\n rank = ranks[j]\n\n if rank == 0:\n precision_0 = 1.\n else:\n precision_0 = float(j) / rank\n\n precision_1 = float(j + 1) / (rank + 1)\n\n ap += (precision_0 + precision_1) * recall_step / 2.\n\n return ap", "def sample_rate(P1, P2):\n v = (P1[0] - P2[0], P1[1] - P2[1], P1[2] - P2[2])\n # Project v onto the xy plane\n # xvect is a unit vector on that plane\n normalized = (1. / np.sqrt(2), 1. / np.sqrt(2), 0.)\n \n angle = np.dot(normalized, v) / modulus(v)\n \n # We need 1 / cosA\n return 1. / np.cos(angle)", "def aks_2samp (data1,data2):\r\n j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE\r\n j2 = 0 # N.zeros(data2.shape[1:])\r\n fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)\r\n fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)\r\n n1 = data1.shape[0]\r\n n2 = data2.shape[0]\r\n en1 = n1*1\r\n en2 = n2*1\r\n d = N.zeros(data1.shape[1:],N.float_)\r\n data1 = N.sort(data1,0)\r\n data2 = N.sort(data2,0)\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if abs(dt) > abs(d):\r\n d = dt\r\n# try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = aksprob((en+0.12+0.11/en)*N.fabs(d))\r\n# except:\r\n# prob = 1.0\r\n return d, prob", "def pi(self, other):\n\n s = len(self)\n r = len(other)\n\n if s != r:\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n d = 0\n for i in range(s):\n d += self[i] * other[i]\n\n return d", "def getRPSA(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n return getPSA(ChargeSA)/temp", "def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr", "def fdr_correction(pvals):\r\n tmp = array(pvals).astype(float) # this converts Nones to nans\r\n return tmp * tmp.size / (1. 
+ argsort(argsort(tmp)).astype(float))", "def lks_2samp (data1,data2):\r\n j1 = 0\r\n j2 = 0\r\n fn1 = 0.0\r\n fn2 = 0.0\r\n n1 = len(data1)\r\n n2 = len(data2)\r\n en1 = n1\r\n en2 = n2\r\n d = 0.0\r\n data1.sort()\r\n data2.sort()\r\n while j1 < n1 and j2 < n2:\r\n d1=data1[j1]\r\n d2=data2[j2]\r\n if d1 <= d2:\r\n fn1 = (j1)/float(en1)\r\n j1 = j1 + 1\r\n if d2 <= d1:\r\n fn2 = (j2)/float(en2)\r\n j2 = j2 + 1\r\n dt = (fn2-fn1)\r\n if math.fabs(dt) > math.fabs(d):\r\n d = dt\r\n try:\r\n en = math.sqrt(en1*en2/float(en1+en2))\r\n prob = ksprob((en+0.12+0.11/en)*abs(d))\r\n except:\r\n prob = 1.0\r\n return d, prob", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y", "def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)", "def ComputeNrb(self):\r\n pass", "def calculate_rn_ratios(vn_event_arrays):\n vn_event_arrays = array(vn_event_arrays)\n rn_arrays = []\n for iorder in range(3, 6):\n # compute r2, r3, r4\n rn_array = []\n for itrig in range(3, len(vn_event_arrays[0, :, 0])):\n pT_trig = real(vn_event_arrays[0, itrig, 0])\n dN_trig = real(vn_event_arrays[:, itrig, 1])\n Qn_trig_array = dN_trig*vn_event_arrays[:, itrig, iorder]\n nev = len(Qn_trig_array)\n\n denorm2_dN = dN_trig*(dN_trig - 1.)\n denorm2_array = abs(Qn_trig_array)**2. - dN_trig\n\n for iasso in range(0, itrig+1):\n pT_asso = real(vn_event_arrays[0, iasso, 0])\n dN_asso = real(vn_event_arrays[:, iasso, 1])\n Qn_asso_array = dN_asso*vn_event_arrays[:, iasso, iorder]\n\n num_dN = dN_trig*dN_asso\n num_array = real(Qn_asso_array*conj(Qn_trig_array))\n if iasso == itrig:\n num_dN -= dN_asso\n num_array = (real(Qn_asso_array*conj(Qn_trig_array))\n - dN_asso)\n\n denorm1_dN = dN_asso*(dN_asso - 1.)\n denorm1_array = abs(Qn_asso_array)**2. - dN_asso\n\n rn_jackknife = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n\n num = mean(num_array[array_idx])/mean(num_dN[array_idx])\n denorm1 = (mean(denorm1_array[array_idx])\n /mean(denorm1_dN[array_idx]))\n denorm2 = (mean(denorm2_array[array_idx])\n /mean(denorm2_dN[array_idx]))\n\n if denorm1 > 0. 
and denorm2 > 0.:\n rn_jackknife[iev] = num/sqrt(denorm1*denorm2)\n\n rn_mean = mean(rn_jackknife)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_jackknife - rn_mean)**2.))\n rn_array.append([pT_trig - pT_asso, rn_mean, rn_err])\n rn_arrays.append(rn_array)\n rn_arrays = array(rn_arrays)\n return(rn_arrays)", "def hps(self, arr):\n r = arr\n d2 = []\n d3 = []\n i = 0\n # Diezmar en 2\n for v in arr:\n if i % 2 == 0:\n d2.append(v)\n i += 1\n # Diezmar en 3\n i = 0\n for v in arr:\n if i % 3 == 0:\n d3.append(v)\n i += 1\n d2 = np.array(d2)\n d3 = np.array(d3)\n # Multiplicar por d2\n i = 0\n for v in d2:\n r[i] = r[i] * v\n i += 1\n # Multiplicar por d3\n i = 0\n for v in d3:\n r[i] = r[i] * v\n i += 1\n return r", "def _ppndf(cum_prob):\n SPLIT = 0.42\n A0 = 2.5066282388\n A1 = -18.6150006252\n A2 = 41.3911977353\n A3 = -25.4410604963\n B1 = -8.4735109309\n B2 = 23.0833674374\n B3 = -21.0622410182\n B4 = 3.1308290983\n C0 = -2.7871893113\n C1 = -2.2979647913\n C2 = 4.8501412713\n C3 = 2.3212127685\n D1 = 3.5438892476\n D2 = 1.6370678189\n # ====== preprocess ====== #\n cum_prob = np.array(cum_prob)\n eps = np.finfo(cum_prob.dtype).eps\n cum_prob = np.clip(cum_prob, eps, 1 - eps)\n adj_prob = cum_prob - 0.5\n # ====== init ====== #\n R = np.empty_like(cum_prob)\n norm_dev = np.empty_like(cum_prob)\n # ====== transform ====== #\n centerindexes = np.argwhere(np.abs(adj_prob) <= SPLIT).ravel()\n tailindexes = np.argwhere(np.abs(adj_prob) > SPLIT).ravel()\n # do centerstuff first\n R[centerindexes] = adj_prob[centerindexes] * adj_prob[centerindexes]\n norm_dev[centerindexes] = adj_prob[centerindexes] * \\\n (((A3 * R[centerindexes] + A2) * R[centerindexes] + A1) * R[centerindexes] + A0)\n norm_dev[centerindexes] = norm_dev[centerindexes] /\\\n ((((B4 * R[centerindexes] + B3) * R[centerindexes] + B2) * R[centerindexes] + B1) * R[centerindexes] + 1.0)\n #find left and right tails\n right = np.argwhere(cum_prob[tailindexes] > 0.5).ravel()\n left = np.argwhere(cum_prob[tailindexes] < 0.5).ravel()\n # do tail stuff\n R[tailindexes] = cum_prob[tailindexes]\n R[tailindexes[right]] = 1 - cum_prob[tailindexes[right]]\n R[tailindexes] = np.sqrt((-1.0) * np.log(R[tailindexes]))\n norm_dev[tailindexes] = ((\n (C3 * R[tailindexes] + C2) * R[tailindexes] + C1) * R[tailindexes] + C0)\n norm_dev[tailindexes] = norm_dev[tailindexes] / (\n (D2 * R[tailindexes] + D1) * R[tailindexes] + 1.0)\n # swap sign on left tail\n norm_dev[tailindexes[left]] = norm_dev[tailindexes[left]] * -1.0\n return norm_dev", "def rmse2 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = 0.0\n for x, y in z :\n v += sqre_diff(x, y)\n return math.sqrt(v / s)", "def RPS(y_true, y_pred) -> float:\n output = 0.\n data_num = len(y_true)\n for i in range(data_num):\n times = len(y_true[i]) - 1 \n cumulative_sum = 0.\n score = 0.\n for time in range(times):\n cumulative_sum += y_true[i,time] - y_pred[i,time]\n score += cumulative_sum ** 2\n score /= times\n output += score\n \n output /= data_num\n return output", "def photon_fraction(r, r1, r2):\n return rotate_phasor(r, r1, r2).real", "def prodi(items: Iterable[float]) -> float:\n p: float = 1\n for n in items:\n p *= n\n return p", "def tirageRnp1CondRn(self, rn):\n\n proba = np.zeros(shape=(3))\n if rn == 0.:\n proba[0] = self.__alpha0/self.__D0\n proba[1] = self.__beta /self.__D0\n \n elif rn == 1.:\n proba[0] = self.__beta /self.__D1\n proba[1] = self.__alpha1/self.__D1\n else:\n Dr1 = 1.5 + rn - rn*rn\n proba[0] = (1.-rn) / Dr1\n proba[1] = rn / Dr1\n proba[2] = 1. 
- (proba[0]+proba[1])\n \n typeSample = random.choices(population=['0.', '1.', 'F'], weights=proba)[0]\n \n if typeSample != 'F':\n rnp1 = float(typeSample)\n else:\n if rn == 0.:\n rnp1 = self.__rv_pente0.rvs()\n elif rn == 1.:\n rnp1 = self.__rv_pente1.rvs()\n else:\n rnp1 = self.__rv_triangle.rvs(self.__alpha0, self.__alpha1, self.__beta, rn)\n\n return rnp1", "def predict_proba(self):\n if self.rank_prob is None:\n raise ValueError('No results available. Did you already call predict(...)?')\n\n return np.array([sum(map(lambda x: x[1], result)) / len(result) for result in self.rank_prob])", "def Pp(nccd):\n return (128.1-56.9) * (nccd - 1) / (6-1) + 56.9", "def rmse1 (a, p) :\n s = len(a)\n i = 0\n v = 0.0\n while i != s :\n v += sqre_diff(a[i], p[i])\n i += 1\n return math.sqrt(v / s)", "def rmse4 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = sum(map(lambda (x, y) : sqre_diff(x, y), z), 0.0)\n return math.sqrt(v / s)", "def non_param_paired_ci(sample1, sample2, alpha):\n n = len(sample1)\n N = norm.ppf(1 - alpha/2)\n diff_sample = sorted(list(map(operator.sub, sample2, sample1)))\n averages = sorted([(s1+s2)/2 for i, s1 in enumerate(diff_sample)\n for _, s2 in enumerate(diff_sample[i:])])\n k = np.math.ceil(n*(n+1)/4 - (N * (n*(n+1)*(2*n+1)/24)**0.5))\n CI = (round(averages[k-1], 3), round(averages[len(averages)-k], 3))\n return CI", "def calculate_vn_arrays_for_rn_ratios(data):\n pT_boundaries = [0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0]\n npT = 50\n vn_arrays = []\n for ipT in range(len(pT_boundaries)-1):\n pT_low = pT_boundaries[ipT]\n pT_high = pT_boundaries[ipT + 1]\n pT_mid = (pT_low + pT_high)/2.\n vn_array = calcualte_inte_vn(pT_low, pT_high, data)\n vn_array.insert(0, pT_mid)\n vn_arrays.append(vn_array)\n return(vn_arrays)", "def pxrd(self):\n rank = range(len(self.theta2)) #np.argsort(self.theta2)\n PL = []\n last = []\n for i in rank:\n if self.xrd_intensity[i] > 0.01:\n angle = np.degrees(self.theta2[i])\n if PL is None:\n PL.append([angle, self.d_hkl[i], \\\n self.hkl_list[i,0], self.hkl_list[i,1], self.hkl_list[i,2], \\\n self.xrd_intensity[i]])\n elif abs(angle-last) < 1e-2:\n PL[-1][-1] += self.xrd_intensity[i]\n else:\n PL.append([angle, self.d_hkl[i], \\\n self.hkl_list[i,0], self.hkl_list[i,1], self.hkl_list[i,2], \\\n self.xrd_intensity[i]])\n last = angle\n PL = (np.array(PL))\n PL[:,-1] = PL[:,-1]/max(PL[:,-1])\n self.pxrd = PL", "def calc_snp_log_odds(readData): # do not modify this line\n no_snp = 1\n snp = 1\n for item in readData:\n snp = snp * ((item[0]*0.8) + (item[1]*0.2))\n no_snp = no_snp * (item[0])\n\n return (log(0.001) + log(snp))-(log(0.999) + log(no_snp))", "def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res", "def get_pd(self, R):\n assert R.ndim == 3\n n_samples, n_atoms, _ = R.shape\n n_pd = int(n_atoms * ((n_atoms - 1) / 2))\n R_pd = np.zeros((n_samples, n_pd))\n\n for i, r in enumerate(R):\n R_pd[i] = pdist(r)\n\n return R_pd", "def lcs_dp(A, B):\n m = len(A)\n n = len(B) \n # array for storing the intermediate calculations \n temp_arr = [[None]*(n+1) for ]", "def wR(r, rc):\n nr = norm_numba(r)\n return (1 - nr / rc) if nr / rc < 1.0 else 0.0", "def pamhRt(sps, ptype, pparms=[]):\n pt = pampt(int(sps), ptype, pparms)\n hrt = multiply(pt,1/float(np.sum(np.power(pt,2))))\n hrt = hrt[::-1]\n return hrt", "def _calculate_npv(\n net_sequestration_rasters, prices_by_year, discount_rate,\n baseline_year, target_raster_years_and_paths):\n for 
target_raster_year, target_raster_path in sorted(\n target_raster_years_and_paths.items()):\n\n valuation_factor = 0\n for years_since_baseline, year in enumerate(\n range(baseline_year, target_raster_year)):\n valuation_factor += (\n prices_by_year[year] / (\n (1 + discount_rate) ** years_since_baseline))\n\n def _npv(*sequestration_matrices):\n npv = numpy.empty(sequestration_matrices[0].shape,\n dtype=numpy.float32)\n npv[:] = NODATA_FLOAT32_MIN\n\n matrix_sum = numpy.zeros(npv.shape, dtype=numpy.float32)\n valid_pixels = numpy.ones(npv.shape, dtype=bool)\n for matrix in sequestration_matrices:\n valid_pixels &= ~utils.array_equals_nodata(matrix, NODATA_FLOAT32_MIN)\n matrix_sum[valid_pixels] += matrix[valid_pixels]\n\n npv[valid_pixels] = (\n matrix_sum[valid_pixels] * valuation_factor)\n return npv\n\n raster_path_band_tuples = [\n (path, 1) for (year, path) in net_sequestration_rasters.items() if\n year <= target_raster_year]\n\n pygeoprocessing.raster_calculator(\n raster_path_band_tuples, _npv, target_raster_path,\n gdal.GDT_Float32, NODATA_FLOAT32_MIN)", "def r0(self):\n return self.p[0] / self.p[1]", "def FPI_operator(weight, val_pre, val_new, N_skip=0):\n for i in range(N_skip, size(val_pre)):\n val_new[i] = (1. - weight)*val_pre[i] + weight*val_new[i]\n return 0", "def lrt_2_pval(self, lrt):\n ind = (self.val < lrt)[::-1].argmax()\n pval = 1-self.cumprob[ind]\n return pval", "def PV(rate, nper, pmt, fv):\n if type(pmt) == int:\n pmt = np.array([pmt])\n else:\n pmt = np.array(pmt)\n if nper <= 0:\n print(\"nper needs to be greater than zero.\")\n elif nper != len(pmt) and sum(pmt) != 0:\n print(\"pmt vector length needs to match nper or be zero.\")\n else:\n pv_fv = fv / (1 + rate) ** nper\n fv_pmt = [(pmt[i - 1] / (1 + rate) ** i) for i in np.arange(1, len(pmt) + 1, 1)]\n return(sum(fv_pmt) + pv_fv)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def genshell(r1, r2, npt, ndim, rstate=None):\n x = rstate.standard_normal(size=(npt, ndim))\n xnorm = x / ((x**2).sum(axis=1)**.5)[:, None]\n # normed vector\n # radii are distributed like R^(ndim-1)\n # cumul (R^ndim-r1^ndim)/(r2^ndim-r1^ndim)=y\n rs = ((r2**ndim - r1**ndim) * rstate.uniform(size=npt) + r1**ndim)**(1. 
/\n ndim)\n return rs[:, None] * xnorm", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def der2_nppow(a: np.ndarray, b: Union[int, float, np.ndarray]) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n mina = np.min(a)\n if mina <= 0:\n print_stars(\"All elements of a must be positive in der2_nppow!\")\n sys.exit(1)\n\n if isinstance(b, (int, float)):\n b1 = b - 1.0\n a_pow_b = a ** b\n a_pow_b1 = a_pow_b / a\n log_a = np.log(a)\n return b * b1 * a_pow_b1 / a, b * a_pow_b1 * log_a, a_pow_b * log_a * log_a\n else:\n if a.shape != b.shape:\n print_stars(\n \"nppow: b is not a number or an array of the same shape as a!\")\n sys.exit(1)\n avec = a.ravel()\n bvec = b.ravel()\n a_pow_b = avec ** bvec\n a_pow_b1 = a_pow_b / avec\n b1 = bvec - 1.0\n log_avec = nplog(avec)\n der2_wrt_aa = bvec * b1 * a_pow_b1 / avec\n der2_wrt_ab = a_pow_b1 * (1.0 + bvec * log_avec)\n der2_wrt_bb = a_pow_b * log_avec * log_avec\n return der2_wrt_aa.reshape(a.shape), der2_wrt_ab.reshape(a.shape), der2_wrt_bb.reshape(a.shape)" ]
[ "0.67593676", "0.6583941", "0.6571986", "0.6554812", "0.6508913", "0.6467811", "0.6447499", "0.62350047", "0.6199426", "0.6098049", "0.60345674", "0.60345674", "0.5925496", "0.58263636", "0.58061653", "0.58047163", "0.57209736", "0.5715212", "0.56880504", "0.56373477", "0.56356364", "0.56283987", "0.5615351", "0.56080383", "0.5599237", "0.5599237", "0.5554132", "0.5546344", "0.5525891", "0.5510407", "0.54991865", "0.54877865", "0.54728484", "0.5469631", "0.5461569", "0.5452937", "0.5452823", "0.54506886", "0.54477817", "0.5415136", "0.5388864", "0.53775513", "0.53694713", "0.53639084", "0.53623545", "0.5353472", "0.5342497", "0.5323189", "0.53173304", "0.5311389", "0.5311389", "0.530961", "0.53088987", "0.530678", "0.5303516", "0.5291909", "0.528222", "0.5281832", "0.5277298", "0.52727234", "0.52638596", "0.5256279", "0.5241296", "0.52386916", "0.5223207", "0.5213289", "0.5204791", "0.5204452", "0.52000284", "0.51938194", "0.5182709", "0.5180898", "0.5180138", "0.5171253", "0.51683456", "0.5162181", "0.5159597", "0.51551855", "0.5152289", "0.51511616", "0.5149761", "0.51455677", "0.5133425", "0.5132798", "0.5131231", "0.512394", "0.5122225", "0.5115838", "0.5110967", "0.5108398", "0.5105289", "0.50915277", "0.5091073", "0.50901574", "0.5089957", "0.5084489", "0.5080078", "0.50777847", "0.50763226", "0.50690883" ]
0.54374504
39
MSE between two arrays
def mse(y, y_pred, verbose=True):
    mse_sum = 0
    for i in range(len(y)):
        mse_sum += mean_squared_error(y[i], y_pred[i])
    if verbose:
        print(f"Mean MSE {mse_sum / len(y)}")
    return mse_sum / len(y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mse(a, b):\n a = numpy(a)\n b = numpy(b)\n return ((a - b) ** 2).sum()", "def rmse_calc(arr1, arr2):\n assert arr1.shape==arr2.shape\n \n return np.sqrt(np.mean((arr2-arr1)**2))", "def MSE(a,b,axis):\n return ((a-b)**2).mean(axis=axis)", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def mse(A, B):\n return ((A - B) ** 2).mean(axis=0)", "def mse(self, image_a, image_b):\r\n data = numpy.sum((image_a.astype('float') - image_b.astype('float')) ** 2)\r\n data /= float(image_a.shape[0] * image_a.shape[1])\r\n return data", "def mse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((x1 - x2) ** 2, axis=axis)", "def mse ( target_array ):\n return np.mean ( ( target_array - np.mean ( target_array ) ) ** 2 )\n # End mse()", "def mse(x, y):\n\n return (x - y).pow(2).sum(dim=1, keepdim=True).mean() / x.size(1)", "def mse(self, X, Y):\n\t\treturn mse_k(X, to_1_of_K(Y))", "def mse(img1, img2):\n # TODO: implement this function.", "def negative_mse ( target_array ):\n return -1 * mse ( target_array )\n # End negative_mse()", "def mse (vec1, vec2):\n sum = 0.0 #Initializes sum to 0\n count = len(vec1) #Number of total elements in each vector\n for i in range(count):\n sum += (vec2[i]-vec1[i])**2 #Adds the square of the difference between the values at each position in the two vectors\n return sum/count", "def mse(mat1, mat2):\n\tmse = 0\n\tw, h = mat1.shape\n\tif mat1.shape != mat2.shape:\n\t\treturn -1\n\tprint(\"inside mse\")\n\tprint(mat1)\n\tprint(mat2)\n\tfor i in range(w):\n\t\tfor j in range(h):\n\t\t\tmse += \tpow((int(mat1[i,j]) - int(mat2[i,j])), 2)\n\treturn mse/ (w*h)", "def sse(x, y):\n return sum(se(x, y))", "def MSE(self, imageA, imageB):\n return np.mean((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)", "def d_mse(x, y):\n\n return 2 * (x - y) / x.size(0) / x.size(1)", "def rmse(x: np.ndarray, y: np.ndarray):\n x, y = np.copy(x), np.copy(y)\n if x.ndim > 1:\n return np.sqrt(np.nanmean((x-y)**2, axis=1))\n return np.sqrt(np.nanmean((x-y)**2))", "def mse(image1, image2):\n err = np.sum((image1 - image2) ** 2)\n err /= float(image1.shape[0] * image1.shape[1])\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err", "def rmse(a, b):\n\n n = len(a)\n return np.linalg.norm(a - b) / np.sqrt(n)", "def mse(self, data, *args, **kwargs):\n return self._mse(np.array(data), *args, **kwargs)", "def calculate_mse(img0, img1):\n mse = skm.mean_squared_error(img0, img1)\n return mse", "def computeMse(data_target, Y):\n if data_target.shape != Y.shape:\n print \"the shapes does not correspond\",\n print data_target.shape,\n print Y.shape\n exit(-1)\n return np.sum(np.square(data_target - Y) / Y.shape[0])", "def SSE(pointsA, pointsB):\n return sum(array(pointsA[:, 0:3]-pointsB[:, 0:3])**2.0)", "def mse(img1, img2):\n err = (np.square(img1 - img2)).mean(axis=None)\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err", "def rmse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(mse(x1, x2, axis=axis))", "def compute_mse(y, tx, w):\n print(\"y.shape = \", y.shape)\n print(\"tx.shape = \", tx.shape)\n e = y - tx.dot(w)\n print(\"e.shape = \", e.shape)\n mse = e.T.dot(e) / (2 * len(e))\n return mse", "def mse(actual,expected):\n return np.mean(se(actual,expected))", "def mae(self, x_train, y_train):\n # number of training examples\n m = 
x_train.shape[0]\n error = 0\n for pair, r in zip(x_train, y_train):\n u, i = pair\n error += abs(r - np.dot(self.P[u], self.Q[i]))\n return error / m", "def mse(datax,datay,w):\n return np.mean((datax.dot(w.T)-datay)**2)", "def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:\n return np.mean(np.power(y_true - y_pred, 2))", "def mse(image_a, image_b):\n # Credit Adrian Rosebrock\n # https://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((image_a.astype(\"float\") - image_b.astype(\"float\")) ** 2)\n err /= float(image_a.shape[0] * image_a.shape[1])\n return err", "def MSE(y,yhat):\r\n #\r\n y = np.asarray(y)\r\n yhat = np.asarray(yhat)\r\n if y.size != yhat.size:\r\n raise(ValueError(\"y and yhat should be of same size now\\n\\\r\n size(y) = %d and size(yhat) = %d\"%(y.size,yhat.size)))\r\n N = yhat.size\r\n y = y.reshape(N,)\r\n yhat = yhat.reshape(N,)\r\n \r\n res = y - yhat\r\n sse = np.sum(res**2) #sum squared errors\r\n MSE = sse/N\r\n return(MSE)", "def mse(predicted, actual):\n diff = predicted - actual\n return np.average(diff * diff, axis=0)", "def rmse2 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = 0.0\n for x, y in z :\n v += sqre_diff(x, y)\n return math.sqrt(v / s)", "def compute_MSE(e):\n\n return 1/2*np.mean(e**2)", "def calculate_mse(e):\n return 1/2*np.mean(e.dot(e))", "def mse(Y_truth, Y_new):\n Y_new_mean = np.mean(Y_new, axis = 1)[:,None]\n mse_calc = np.mean((Y_new_mean - Y_truth[:,None]) ** 2)\n return mse_calc", "def _mse(self):\n error = self._input * self._weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_/self._input.shape[0]", "def evse(self, data, *args, **kwargs):\n darr = np.array(data)\n d = darr if len(darr.shape) == 1 else darr[0] / darr[1]\n return (d - self.evs(darr, *args, **kwargs))**2", "def mse(result, expected):\n total_square_sum = 0\n for index1 in range(0, len(result)):\n total_square_sum += (result[index1] - expected[index1]) ** 2\n return total_square_sum / float(len(result))", "def compute_mse(y, tx, w):\n e = y-tx@w\n return 1/(2*y.shape[0])*e.transpose()@e", "def rmse(x, y):\n return mse(x, y) ** .5", "def _mse(self, weights):\n error = self._input * weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_ / self._input.shape[0]", "def compute_mse(y, tx, w):\n e = y[:, np.newaxis] - tx @ w\n return (e * e).sum() / (2.0 * len(y))", "def _rmses(A, X, Y):\n return npext.rms(Y - np.dot(A, X), axis=0)", "def sse(matrix,motif):\n return sum([site_error(matrix,site)**2\n for site in motif])", "def mse(response_vector, prediction_vector):\n return np.power(response_vector - prediction_vector, 2).mean()", "def _mse_distance(feature_map_1: np.ndarray,\n feature_map_2: np.ndarray) -> np.ndarray:\n dist = np.mean((feature_map_1 - feature_map_2) ** 2, axis=-1)\n return dist", "def mae(y_true: np.ndarray, y_pred: np.ndarray):\n return np.mean(np.abs(y_true - y_pred))", "def mse(X, Y, W):\n\n # TODO\n mse = np.sum((X@W-Y)**2)/(2*X.shape[0])\n # END TODO\n\n return mse", "def err_rmse(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(err_mse(x1, x2, axis=axis))", "def rmse(y_preds: ndarray, y_actual: ndarray) -> float:\n\n return np.sqrt(np.mean(np.power(y_preds - y_actual, 2)))", "def mse(y, yhat):\n return 0.5 * jnp.mean((y - yhat)**2)", "def error_MSE(resid):\n if resid.ndim == 2:\n return (norm(np.asarray(resid).ravel())**2)/float(resid.shape[1])\n elif resid.ndim == 1:\n 
return (norm(np.asarray(resid).ravel())**2)\n else:\n raise Exception(\"array passed to error_MSE has incorrect shape\")", "def calculate_mse(e):\r\n return 1/2*np.mean(e**2)", "def msre(x1, x2, axis=0):\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((((x1 - x2) ** 2) / x1), axis=axis)", "def MSE(X, y, w):\r\n n = X.shape[0]\r\n f = X @ w\r\n J = np.sum(np.power((y - f), 2)) / n\r\n return J", "def compute_mse(theta_0, theta_1, data):\n\n mse=0\n sum=0\n quad=0\n i=0\n\n while(i < len(data)):\n h = theta_0 + (theta_1*data[i][0]) \n aux = h - data[i][1]\n quad = aux**2\n sum = sum + quad\n i = i + 1\n\n mse = sum/len(data)\n\n return mse", "def mse(targets: List[float], preds: List[float]) -> float:\n return mean_squared_error(targets, preds)", "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "def _mse2(self, trace, **inputs):\n exp = np.dot(inputs['gwas_gen'],\n trace['beta_med'].mean(axis=0).T)\n phen_pred = exp * trace['alpha'].mean()\n mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n return mse", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def multi_mse(true, pred):\n try:\n pred = [t.mean() for t in pred]\n except:\n pass\n\n pred = np.array([p.numpy() for p in pred]).squeeze()\n return tf.keras.metrics.mean_squared_error(true.to_numpy().T, pred)", "def rmse(a,b):\n \n ### Import modules\n import numpy as np\n \n ### Calculate RMSE\n rmse_stat = np.sqrt(np.mean((a - b)**2))\n \n return rmse_stat", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def rmse(y_true: np.ndarray, y_pred: np.ndarray):\n return np.sqrt(np.mean(np.power(y_true - y_pred, 2)))", "def RMSE(observed: np.ndarray, targets: np.ndarray) -> np.float64:\n\treturn np.sqrt(np.mean((observed-targets)**2))", "def eeg_diss_t(array1,array2):\t\n\t# first, create scaled array (i.e. 
for each time-point, divide the value by its instantaneous rms value to get unitary strength)\n\tv1 = array1/eeg_rms(array1)\n\tv2 = array2/eeg_rms(array2)\t\n\tdiss = np.sqrt(np.mean((v1-v2)**2,axis=0))\n\treturn diss", "def inner(self, a: np.ndarray, b: np.ndarray) -> float:\n return a.T @ (self.mass @ b)", "def rmsd(array1, array2):\n total = 0\n for n1, n2 in zip(array1, array2):\n total += (n1 - n2) ** 2\n total /= len(array1)\n\n return math.sqrt(total)", "def _mse(self, trace, **inputs):\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse", "def mse(observed, predicted):\n return np.sqrt(np.mean((observed - predicted)**2))", "def rmse(y_true, y_pred): # -> Any:\n ...", "def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))", "def SMAPE(y_true, y_pred):\n return smape(y_true, y_pred) / 2", "def _calc_msve(self):\n v = []\n for state in self._env.state_iterator():\n feature_vector = self._features.vector(state)\n v.append(utils.state_value(feature_vector, self.theta))\n\n self.msve.append(utils.rmse(v, self._true_values))", "def mse_g(datax,datay,w):\n n,d=datax.shape\n return (2*datax.T).dot((datax.dot((w.T)-datay)))\n #retrouner les adjustement des poids:donc taille d*1", "def calculate_mse(in_img, out_img):\n error = 0\n for x in range(in_img.shape[0]):\n for y in range(in_img.shape[1]):\n error += (in_img[x, y] - out_img[x, y]) ** 2\n return error / (in_img.shape[0] * in_img.shape[1])", "def mse(self, x_tensors=None):\n\n return self.se(x_tensors)/self.y.size", "def rmse(X, Y):\n\n assert X.shape == Y.shape\n\n N = X.shape[0]\n\n if N < 9:\n print(\"Not enough points. {} datapoints given. 
At least 9 is required\".format(N))\n return\n\n diff = X - Y\n diff = diff**2\n rmse = np.sqrt(diff.mean())\n\n le = rmse * (1.0 - np.sqrt(1-1.96*np.sqrt(2.0)/np.sqrt(N-1)))\n ue = rmse * (np.sqrt(1 + 1.96*np.sqrt(2.0)/np.sqrt(N-1))-1)\n\n return rmse, le, ue", "def nmse(actual: np.ndarray, predicted: np.ndarray):\n return np.mean(np.square(actual - predicted)) / (np.mean(actual) * np.mean(predicted) + np.finfo(float).eps)", "def mse_k(self, X, Y):\n\t\treturn np.power(Y - self.predict_soft(X), 2).sum(1).mean(0)", "def _RMSE2(phen, dstack, dates, nan_replace, nGS):\n # original data - dstack\n xarray = xr.DataArray(dstack)\n xarray.coords['dim_0'] = dates.dt.dayofyear\n # sort basds according to day-of-the-year\n xarray = xarray.sortby('dim_0')\n if nan_replace is not None:\n xarray = xarray.where(xarray.values != nan_replace)\n # xarray.values = np.apply_along_axis(_fillNaN, 0, xarray.values)\n x = xarray.dim_0.values\n y = xarray.values\n\n xnew = np.linspace(np.min(x), np.max(x), nGS, dtype='int16')\n # change shape from 3D to 2D matrix\n y2 = y.reshape(y.shape[0], (y.shape[1] * y.shape[2]))\n ynew = phen.reshape(phen.shape[0], (y.shape[1] * y.shape[2]))\n\n rmse = np.zeros((y.shape[1] * y.shape[2]))\n for i in tqdm(range(y.shape[1] * y.shape[2])):\n # print(i)\n rmse[i] = _RMSE(x, y2[:, i], xnew, ynew[:, i])\n\n # reshape from 2D to 3D\n return rmse.reshape(1, phen.shape[1], y.shape[2])", "def rmse(y_hat, y):\n\tif type(y) == list:\n\t\tpass\n\telse:\n\t\ty = y.values.tolist()\n\tm = len(y)\n\tsum = 0\n\tfor i in range(m):\n\t\tsum += ((y_hat[i] - y[i]) ** 2 / m)\n\terror = np.sqrt(sum)\n\treturn error", "def es(d1, d2, verbose=False):\n\n d1 = assure_numpy_array(d1)\n d2 = assure_numpy_array(d2)\n\n es, pvalue = stats.epps_singleton_2samp(d1, d2)\n\n return es, pvalue", "def MSE(ratings, range):\n\n def squared_err(pair):\n (r, rP) = pair\n return (r-rP)**2\n\n return (1/len(ratings)) * sum(map(squared_err, ratings))", "def rmse4 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = sum(map(lambda (x, y) : sqre_diff(x, y), z), 0.0)\n return math.sqrt(v / s)", "def _RMSE(x, y, xnew, ynew):\n\n inds = np.isnan(ynew) # check if array has NaN values\n inds2 = np.isnan(y)\n if inds.any(): # check is all values are NaN\n return np.nan\n else:\n if inds2.any():\n y = _fillNaN(y)\n ypred2 = np.interp(x, xnew, ynew)\n\n return np.sqrt(mean_squared_error(ypred2, y))", "def calc_mse(data, ax=0):\n return ((data[:, 0] - data[:, 1]) ** 2).mean(axis=ax)", "def mse_loss(self, x, y):\n loss = tf.reduce_mean(tf.square(x - y))\n return loss", "def batch_mse_torch(estimation, origin):\n mse1 = torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])\n mse2 = torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])\n return torch.stack((mse1, mse2),1).min(1)[0]", "def compute_mse(y, tx, w):\n e = y - tx@w\n mse = e.T.dot(e) /(2*len(e))\n return mse", "def mse(o, r):\r\n\r\n return np.mean(np.square((np.abs(o).astype(float) - np.abs(r).astype(float))))", "def calc_rmse(data1, data2):\n num_users = len(data1)\n\n SE = 0 #the accumulated Squared Error\n num_total = 0 #the accumulated number of ratings evaluated\n for i in range(num_users):\n data1_dict = dict(data1[i])\n for movie, rating2 in data2[i]:\n #Make one of the datasets into a dictionary to make the search more efficient\n rating1 = data1_dict.get(movie, -1)\n SE += (rating1-rating2)**2\n num_total += 1\n\n if rating1 == -1:\n print('Could not find rating for movie %i at user %i in data1'%(movie, i))\n rmse = 
np.sqrt(SE/num_total)\n return rmse", "def seToSE( x ):\n x = asarray(x,dtype=float)\n if x.shape != (6,):\n raise ValueError(\"shape must be (6,); got %s\" % str(x.shape))\n #\n return expM(screw(x))", "def rmse(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions)\n return mean_squared_error(true, predictions) ** 0.5", "def se(actual,expected):\n return np.power(np.subtract(actual,expected),2)", "def compute_mse(y, tx, w):\n e = y - tx.dot(w)\n mse = e.dot(e) / (2 * len(e))\n return mse", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum" ]
[ "0.7542618", "0.7358524", "0.71128005", "0.7101528", "0.7070503", "0.70539194", "0.70428056", "0.7029061", "0.70196426", "0.6944558", "0.6933053", "0.68064827", "0.67791283", "0.67727405", "0.67626035", "0.67504066", "0.6732543", "0.67134595", "0.6677729", "0.65477544", "0.6540024", "0.65312886", "0.6494742", "0.647205", "0.64575917", "0.6448081", "0.6442453", "0.64346504", "0.6373121", "0.63409764", "0.6311388", "0.6286937", "0.6268435", "0.6262911", "0.62475204", "0.6246847", "0.6241172", "0.62371665", "0.6235957", "0.6207015", "0.61829513", "0.6168113", "0.6167312", "0.6161691", "0.6155055", "0.6146976", "0.61378515", "0.6131291", "0.61266136", "0.61234754", "0.61002", "0.6097526", "0.60884076", "0.60776216", "0.60710496", "0.60680026", "0.6061198", "0.60608155", "0.6058712", "0.60446", "0.60404336", "0.60041565", "0.60018414", "0.5986405", "0.5977825", "0.5961908", "0.5961908", "0.5942561", "0.5934924", "0.5928533", "0.591032", "0.59046334", "0.59030765", "0.5896663", "0.5895796", "0.58899933", "0.5856721", "0.58528084", "0.58493775", "0.5848716", "0.58346295", "0.58315736", "0.5824369", "0.58103424", "0.57983696", "0.57931346", "0.5787641", "0.5787304", "0.5769008", "0.57661957", "0.5761749", "0.57600445", "0.5757978", "0.5748516", "0.5747093", "0.5743082", "0.5743081", "0.5738399", "0.5736687", "0.5734309", "0.5724279" ]
0.0
-1
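A minimal usage sketch for the mse document in the row above, assuming mean_squared_error refers to sklearn.metrics.mean_squared_error and that y / y_pred are equal-length stacks of same-shaped 2D arrays; the helper name and the sample data below are illustrative and are not part of the dataset row.

# Usage sketch (assumptions noted above); each y[i] is one 2D array.
import numpy as np
from sklearn.metrics import mean_squared_error

def mean_mse(y, y_pred):
    # Average the per-sample MSE over the batch, as the row's document does.
    return sum(mean_squared_error(a, b) for a, b in zip(y, y_pred)) / len(y)

rng = np.random.default_rng(0)
y = rng.random((4, 32, 32))                        # four reference "images"
y_pred = y + 0.01 * rng.standard_normal(y.shape)   # slightly perturbed predictions
print(mean_mse(y, y_pred))                         # small positive float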
SSIM between two arrays of images
def ssim(y, y_pred, verbose=True):
    ssim_sum = 0
    for i in range(len(y)):
        ssim_sum += structural_similarity(y[i], y_pred[i])
    if verbose:
        print(f"Mean SSIM {ssim_sum / len(y)}")
    return ssim_sum / len(y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform_images(img1,img2):", "def computeSSIM(img1, img2, pad_y=0, pad_x=0):\n if pad_y != 0 and pad_x != 0:\n img1_u = (np.clip(img1, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)[pad_y:-pad_y, pad_x:-pad_x, ...]).astype(dtype=np.uint8)\n else:\n img1_u = (np.clip(img1, 0, 255.0)).astype(dtype=np.uint8)\n img2_u = (np.clip(img2, 0, 255.0)).astype(dtype=np.uint8)\n return ssim(img1_u, img2_u)", "def get_ssim(img1, img2):\r\n img1 = cv2.cvtColor(numpy.array(img1), cv2.COLOR_GRAY2BGR)\r\n img2 = cv2.cvtColor(numpy.array(img2), cv2.COLOR_GRAY2BGR)\r\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\r\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\r\n s_value = ssim(img1, img2)\r\n return s_value", "def _execute_with_array_two_images(self, image1, image2):\n\t\tpil_image1 = [PIL.Image.fromarray((image1*255.0).astype('uint8'))]\n\t\tpil_image2 = [PIL.Image.fromarray((image2*255.0).astype('uint8'))]\n\t\tprint(pil_image1)\n\t\tprint(pil_image2)\n\t\tfor operation in self.operations:\n\t\t\tr = np.round(random.uniform(0, 1), 1)\n\t\t\tif r <= operation.probability:\n\t\t\t\tnew_seed = random.random()\n\t\t\t\trandom.seed(new_seed)\n\t\t\t\tpil_image1 = operation.perform_operation(pil_image1)\n\t\t\t\trandom.seed(new_seed)\n\t\t\t\tpil_image2 = operation.perform_operation(pil_image2)\n\n\t\t# numpy_array1 = np.asarray(pil_image1).astype('float32')/255.0\n\t\t# numpy_array2 = np.asarray(pil_image2).astype('float32')/255.0\n\t\tnumpy_array1 = np.array(pil_image1[0]).astype(np.float32)\n\t\tnumpy_array2 = np.array(pil_image2[0]).astype(np.float32)\n\n\t\treturn numpy_array1,numpy_array2", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] 
= (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def mse(img1, img2):\n # TODO: implement this function.", "def transform(self, images):\n return np.array([self.transform_single(i) for i in images])", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return 
imgs", "def normalise(image):", "def concat_images_vert(imga, imgb):\n ha,wa = imga.shape[:2]\n hb,wb = imgb.shape[:2]\n max_width = np.max([wa, wb])\n total_height = ha+hb\n new_img = np.zeros(shape=(total_height, max_width, 3), dtype=np.uint8)\n new_img[:ha,:wa]=imga\n #new_img[:hb,wa:wa+wb]=imgb\n new_img[ha:ha+hb,:wb]=imgb\n return new_img", "def norm_and_stack(images):\n imagestack = np.dstack(tuple([cv2.imread(image, cv2.IMREAD_UNCHANGED) for image in images]))\n mean = np.mean(imagestack)\n std = np.std(imagestack)\n new_im = (imagestack - mean)/std \n \n return new_im, mean, std", "def appendimages(im1, im2):\n row1 = im1.shape[0]\n row2 = im2.shape[0]\n\n if row1 < row2:\n im1 = concatenate((im1, zeros((row2 - row1, im1.shape[1]))), axis=0)\n elif row1 > row2:\n im2 = concatenate((im2, zeros((row1 - row2, im2.shape[1]))), axis=0)\n\n return concatenate((im1, im2), axis=1)", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def calculate_ssim(img0, img1, data_range=None):\n ssim = skm.structural_similarity(img0, img1, data_range=data_range)\n return ssim", "def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL", "def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL", "def joinImages(imgs):\n d = imgs.shape[0]\n h, w = imgs.shape[1], imgs.shape[2]\n colour = imgs.shape[3]\n img = np.zeros((h, w * d, colour))\n for idx, image in enumerate(imgs):\n i = idx\n img[0:h, i * w:i * w + w, :] = image\n return ((img * 255.) 
+ 1) * 2", "def test_separate_ims():\n\n df1, df2 = setup()\n\n # Test 1\n im = separate_ims(df1)\n size = df1['imdims'][0]\n assert im.size == (size[0]*2, size[1])\n\n # Test 2\n im = separate_ims(df2)\n size = df2['imdims'][0]\n assert im.size == (size[0], size[1])", "def set_images(self, first, second):\n self.first = remove_extrema(first.T)\n self.second = remove_extrema(np.flipud(second.T))\n\n if self.first.shape != self.second.shape:\n LOG.warn(\"Shape {} of {} is different to {} of {}\".\n format(self.first.shape, self.first, self.second.shape, self.second))\n\n self.slider.setRange(0, self.first.shape[0])\n self.slider.setSliderPosition(self.first.shape[0] / 2)\n self.update_image()", "def concat_images(imga, imgb):\n ha, wa = imga.shape[:2]\n hb, wb = imgb.shape[:2]\n max_height = np.max([ha, hb])\n total_width = wa + wb\n\n new_img = np.zeros(shape=(max_height, total_width))\n new_img -= 1\n\n new_img[:ha, :wa] = imga\n new_img[:hb, wa:wa + wb] = imgb\n\n return new_img", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def single_channel_stacking_unlimited(tifs):\n results=[]\n \n for i in range(len(tifs)-1):\n r1=gdal_array.LoadFile(tifs[i])\n r2=gdal_array.LoadFile(tifs[i+1])\n print(tifs[i])\n print(tifs[i+1])\n result=ird.similarity(r1,r2 , numiter=1, order=1)\n print(result['tvec'])\n print(result['scale'])\n print(result['angle'])\n results.append(result)\n \n \n print(i)\n \n x0y0=(0,0)\n x_max_y_max=(r1.shape[1], r1.shape[0])\n cords=np.array([[x0y0[0], x_max_y_max[0],x0y0[1], x_max_y_max[1] ]])\n plt.scatter((cords[0,0], cords[0, 1]), (cords[0,2], cords[0, 3]))\n\n \n\n for i in range(len(tifs)-1):\n \n print(i)\n scale=0\n tvec_x=0\n tvec_y=0\n angle=0\n x0y0=(0,0)\n x_max_y_max=(r1.shape[1], r1.shape[0])\n \n for j in range(i+1):\n print(j)\n result=results[j]\n scale=result['scale']\n tvec_x=tvec_x+result['tvec'][1]\n tvec_y=tvec_y+result['tvec'][0]\n angle=angle+result['angle']\n M=Affine.translation(tvec_x,tvec_y )*Affine.scale(scale)*Affine.rotation(angle)\n print(M)\n x0y0=M*x0y0\n x_max_y_max=M*x_max_y_max\n \n cords=np.append(cords, [[x0y0[0], x_max_y_max[0],x0y0[1], x_max_y_max[1]]], axis=0)\n print(x0y0)\n print(x_max_y_max)\n \n plt.scatter((cords[i+1,0], cords[i+1, 1]), (cords[i+1,2], cords[i+1, 3]))\n \n \n xmin=np.min(cords[:,0:2])\n xmax=np.max(cords[:,0:2])\n ymin=np.min(cords[:,2:])\n ymax=np.max(cords[:,2:])\n \n print(cords)\n cords[:,0:2]=cords[:,0:2]-xmin\n cords[:,2:]=cords[:,2:]-ymin\n \n print(xmin,xmax, ymin,ymax)\n print(cords)\n \n \n \n final_array_shape=(int(np.abs(ymin-ymax)), int(np.abs(xmin-xmax)))\n print(final_array_shape)\n \n raster=np.zeros(final_array_shape)\n avg_raster=np.zeros(final_array_shape)\n \n # Mzero=Affine.translation(int(cords[0,0])+1, int(cords[0,2])+1)\n tif_raster=gdal_array.LoadFile(tifs[0])\n ones_raster=np.full_like(tif_raster, 1) # ones_raster=np.full(tif_raster.shape, 1)\n \n pad_raster=np.zeros_like(raster)\n pad_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=tif_raster\n pad_ones_raster=np.zeros_like(raster)\n pad_ones_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=ones_raster\n \n \n pad_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2]),int(cords[0,0])), bgval=0)\n pad_raster=cut_transformed_array_borders(pad_raster)\n pad_ones_raster=ird.transform_img(pad_ones_raster,tvec=(int(cords[0,2]),int(cords[0,0])), 
bgval=0)\n pad_ones_raster=cut_transformed_array_borders(pad_ones_raster)\n # ones_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2])+1,int(cords[0,0])+1))\n # where_ones=np.where(pad_raster>0)\n # ones_raster[where_ones]=1\n raster=raster+pad_raster\n avg_raster=avg_raster+pad_ones_raster\n \n # for i in range(zero_raster.shape[0]):\n # for j in range(zero_raster.shape[1]):\n # xy=(j,i)\n # new_xy=Mzero*xy\n # new_xy=[new_xy[0], new_xy[1]]\n # new_xy[0]=int(new_xy[0])\n # new_xy[1]=int(new_xy[1])\n \n # raster[new_xy[1], new_xy[0]]=zero_raster[i,j]\n # avg_raster[new_xy[1], new_xy[0]]=avg_raster[new_xy[1], new_xy[0]]+1\n \n for k,tif in enumerate(tifs, start=1):\n print(tif)\n if k==1:\n continue\n scale=1\n tvec_x=0\n tvec_y=0\n angle=0\n \n for r in range(k-1):\n result=results[r]\n scale=scale*result['scale']\n tvec_x=tvec_x+result['tvec'][1]\n tvec_y=tvec_y+result['tvec'][0]\n angle=angle+result['angle']\n tvec_x=tvec_x-xmin\n tvec_y=tvec_y-ymin\n M=Affine.translation(tvec_x,tvec_y )*Affine.scale(scale)*Affine.rotation(angle)\n print(M)\n x0y0=M*x0y0\n x_max_y_max=M*x_max_y_max\n \n tif_raster=gdal_array.LoadFile(tif)\n \n \n # for i in tqdm(range(tif_raster.shape[0]), desc=\"transforming: \"+tif):\n # for j in range(tif_raster.shape[1]):\n # xy=(j,i)\n # new_xy=M*xy\n # new_xy=[new_xy[0], new_xy[1]]\n # new_xy[0]=int(new_xy[0])\n # new_xy[1]=int(new_xy[1])\n \n # raster[new_xy[1], new_xy[0]]=raster[new_xy[1], new_xy[0]]+tif_raster[i,j]\n # avg_raster[new_xy[1], new_xy[0]]=avg_raster[new_xy[1], new_xy[0]]+1\n \n \n pad_raster=np.zeros_like(raster)\n pad_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=tif_raster\n ones_raster=np.full_like(tif_raster, 1)\n pad_ones_raster=np.zeros_like(raster)\n pad_ones_raster[0:tif_raster.shape[0],0:tif_raster.shape[1]]=ones_raster\n \n \n \n pad_raster=ird.transform_img(pad_raster,scale=scale, angle=angle, tvec=(tvec_y, tvec_x), mode='constant', bgval=0)\n pad_ones_raster=ird.transform_img(pad_ones_raster,scale=scale, angle=angle, tvec=(tvec_y, tvec_x), mode='constant', bgval=0)\n # ones_raster=ird.transform_img(pad_raster,tvec=(int(cords[0,2])+1,int(cords[0,0])+1))\n # where_ones=np.where(pad_raster>0)\n # ones_raster[where_ones]=1\n raster=raster+pad_raster\n # avg_raster=avg_raster+ones_raster\n avg_raster=avg_raster+pad_ones_raster\n \n # left_border=xmin\n # upper_border=ymax \n # print(raster.shape)\n\n\n \n\n\n\n\n\n plt.show()\n plt.close() \n \n gtz=np.where(avg_raster>0)\n \n raster[gtz]=raster[gtz]/avg_raster[gtz]\n basename=os.path.basename(tif)\n gdal_array.SaveArray(raster, os.path.dirname(os.path.abspath(tif))+\"/stacked/\"+basename[:-16]+\"_py_corr_stackeg_big_.tif\")\n \n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(15,10)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n 
plt.close()\n \n \n plt.imshow(avg_raster)\n plt.show()\n plt.close()\n \n plt.imshow(raster)\n plt.show()\n plt.close()", "def data_augmentation_and_vectorization(self,imlist, lb,im_labels, average_image = None):\n\t\tX,Y,X_original = [] ,[], []\n\n\t\ti = 0\n\t\tfor im in imlist:\n\t\t\tim=Image.fromarray(im,mode=self.mode)\n\t\t\t#try:\n\t\t\t#im_ini = im\n\t\t\tim_original = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t#im = self.substract_average_image(im, average_image)\n\t\t\t#print 'i:{} is a: {}' .format(i,im_labels[i])\n\t\t\t#im.show()\n\t\t\tX_original.append(im_original)\n\n\t\t\t#Rotations \n\t\t\t#im_r = im.rotate(15)\n\t\t\t# im_r_2 = im.rotate(-15)\n\t\t\t# im_r_3 = im.rotate(180)\n\t\t\t#im_r.show()\n\t\t\t#im_r_2.show()\n\n\t\t\t#Filters\n\t\t\t#im_f = im_ini.filter(ImageFilter.DETAIL)\n\t\t\t#im_f = im.filter(ImageFilter.FIND_EDGES)\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#Uncomment this if you want to use cross-correlate for 2D arrays http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.correlate2d.html\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# im = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t\t# im = (im - im.mean()) / im.std()\n\n\t\t\tif self.mode == 'L':\n\t\t\t\t# im = np.asarray(im, dtype='float64')\n\t\t\t\t# im = filters.sobel(im)\n\t\t\t\t#im = filters.roberts(im)\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t#im = np.asarray(im, dtype=np.uint8)\n\t\t\t#print im.shape\n\t\t\t#print im.shape\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#im = self.flaten_aux(im)\n\t\t\t#print im.shape\n\t\t\t#im = data.coins() # or any NumPy arr\n\t\t\t#print im.shape\n\t\t\t#image = data.coins() # or any NumPy array!\n\t\t\t#print im\n\t\t\t#im = filter.sobel(im)\n\t\t\t#im = filter.roberts(im)\n\n\t\t\t# im_original = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t# im_original = np.asarray(im_original, dtype=theano.config.floatX)\n\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t# im = (im_original - im_original.mean()) / im_original.std()\n\t\t\t#print im.shape\n\t\t\t#print edges\n\t\t\t# edges = np.asarray(edges, dtype=np.uint8)\n\t\t\t#Image.fromarray(edges,mode=self.mode).show()\n\n\t\t\t#print edges\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX) / 256.\n\n\t\t\t#print edges.shape\n\t\t\t# io.imshow(im)\n\t\t\t# io.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t# plt.suptitle(im_labels[i], size=16)\n\t\t\t# plt.imshow(im, cmap=plt.cm.gray, interpolation='nearest')\n\t\t\t# plt.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#print im.shape\n\t\t\t#self.reconstructImage(im).show()\n\n\t\t\t#im_r = np.asarray(im_r, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_2 = np.asarray(im_r_2, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_3 = np.asarray(im_r_3, dtype=theano.config.floatX) / 256.\n\t\t\t#im_f = np.asarray(im_f, dtype=theano.config.floatX) / 256.\n\t\t\t\n\t\t\t#im = im.transpose(2, 0, 1)\n\t\t\t#X.append(np.array(im, dtype=theano.config.floatX))\n\t\t\t#X.append(np.array(im_raw, 
dtype=theano.config.floatX))\n\t\t\t#X.append(im)\n\t\t\tX.append(im)\n\t\t\t# if i % 100 == 0:\n\t\t\t# \tX.append(im)\n\t\t\t#X.append(im_r)\n\t\t\t# X.append(im_r_2)\n\t\t\t# X.append(im_r_3)\n\t\t\t#X.append(im_f)\n\t\t\t#X_original.append(im)\n\n\t\t\t# X.append(np.array(im_r, dtype=theano.config.floatX))\n\t\t\t# X.append(np.array(im_r_2, dtype=theano.config.floatX))\n\n\t\t\t#Uncomment this if you want to work with monochrome\n\t\t\t# im = im.convert('L')\n\t\t\t# pixels_monochrome = np.array(list(im.getdata()), dtype=np.float)\n\t\t\t\t\t\t\n\t\t\t# # scale between 0-1 to speed up computations\n\t\t\t# min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1), copy=True)\n\t\t\t# pixels_monochrome = min_max_scaler.fit_transform(pixels_monochrome)\n\n\t\t\t# X.append(pixels_monochrome)\n\n\t\t\t#Y.append(lb.transform([im_labels[i]])[0][0])\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t\n\t\t\tlabel = lb.transform([im_labels[i]])[0][0]\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t# label_vector = lb.transform([im_labels[i]])[0]\n\t\t\t# label = np.where( label_vector == 1 )[0][0]\n\t\t\t# print \"Label: {}\".format(label)\n\t\t\t#print label\n\t\t\t#Y.append(label)\n\t\t\tY.append(label)\n\t\t\t#Y.append(im_labels[i])\t\n\n\t\t\t\n\t\t\t#Y.append(label)\t\n\t\t\t# Y.append(label)\t\n\t\t\t# except Exception, e:\n\t\t\t# \tprint e\n\t\t\t# \t#raise e\n\n\t\t\t# if i == 30:\n\t\t\t# \tbreak\n\n\t\t\ti += 1\n\t\t\tif self.verbose:\n\t\t\t\tsys.stdout.write(\"\\r Process: {0}/{1}\".format(i, len(imlist)))\n\t\t\t\tsys.stdout.flush()\n\t\t\n\t\t# output = open(self.data_path + 'X_original.pkl', 'wb')\n\t\t# cPickle.dump(X_original, output,protocol=-1)\n\t\t# output.close()\n\n\t\treturn X,Y", "def similarImages2(self,X,start_i,stop_i,cpu_index):\n\n\t\tduplicated_items = []\n\t\titems_list = []\n\t\t# print \"this is going to iterate for:{}\".format(len(xrange(start_i,stop_i)))\n\t\tfor i in xrange(start_i,stop_i):\n\t\t\ta = X[i]\n\n\t\t\t#print a\n\t\t\t# We just need to fill the upper diagonal\n\t\t\t# Remember to uncomment this if you are not using the autoencoder\n\t\t\t#for j in xrange(i,len(X)):\n\t\t\t#print \"this is going to iterate for:{}\".format(len(xrange(i,len(X))))\n\t\t\tfor j in xrange(i,len(X)):\n\t\t\t\taux = {'item_id_x' : None , 'item_id_y' : None , 'distance' : None}\n\t\t\t\tb = X[j]\n\t\t\t\t#uncomment this if you want to use the cosine distance\n\t\t\t\td = self.cosine_distance(a,b)\n\t\t\t\t#Uncomment this if you want to use cross-correlate for 2D arrays http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.correlate2d.html\n\t\t\t\t# c = c2d(a, b, mode='same')\n\t\t\t\t# d = c.max()\n\t\t\t\t#Uncomment this for euclidean distance\n\t\t\t\t# d = dist.euclidean(a, b)\n\t\t\t\t#d = dist.cityblock(a, b)\n\n\t\t\t\t# Uncomment this if you want to use the euclidean distance\n\t\t\t\t# if d == 0 and i != j:\n\t\t\t\t# \tduplicated_items.append(self.im_index[j])\n\n\t\t\t\t# if i == j or d == 0:\n\t\t\t\t# \td = -np.inf\n\n\t\t\t\t# Uncomment this if you are going to use the cosine distance\n\t\t\t\tif d == 1 and i != j:\n\t\t\t\t\t#print \"Im1 {} Im2 {}\".format(self.im_index[i], self.im_index[j])\n\t\t\t\t\tduplicated_items.append(self.im_index[j])\n\n\t\t\t\tif i == j or d == 1:\n\t\t\t\t\td = -np.inf\n\n\t\t\t\taux['item_id_x'] = self.im_index[i]\n\t\t\t\taux['item_id_y'] = self.im_index[j]\n\t\t\t\taux['distance'] = d\n\t\t\t\titems_list.append(aux)\n\t\t#print 
len(duplicated_items)\n\t\tself.appendToCSV(items_list,addHeader=False,file_name='_data_'+str(cpu_index)+'.csv',duplicates=False)\n\t\t\n\t\tprint \"cpu_index:{} num_iterations:{}\".format(cpu_index,len(items_list))\n\n\t\tif len(duplicated_items) > 0:\n\t\t\tself.appendToCSV(duplicated_items,addHeader=False,file_name='_duplicated_items_'+str(cpu_index)+'.csv',duplicates=True)", "def immerge(images, row, col):\n\n h, w = images.shape[1], images.shape[2]\n if images.ndim == 4:\n img = np.zeros((h * row, w * col, images.shape[3]))\n elif images.ndim == 3:\n img = np.zeros((h * row, w * col))\n for idx, image in enumerate(images):\n i = idx % col\n j = idx // col\n img[j * h:j * h + h, i * w:i * w + w, ...] = image\n\n return img", "def prepare_images(images):\n images = color.rgb2lab(images)\n\n l = images[:,:,:,:1]/100.\n ab = images[:,:,:,1:]/200. + 0.5\n\n return l, ab", "def collate_images(images: Union[Dict[str, np.ndarray], Iterable[np.ndarray]],\n skip_commented: bool = False) -> np.ndarray:\n if not isinstance(images, dict):\n images = {f'{i:03d}': img for i, img in enumerate(images)}\n\n if skip_commented:\n images = {k: v for k, v in images.items() if '#' not in k}\n\n n = len(images)\n dim0, dim1 = _get_consistent_shape(images.values())\n k0, k1 = _find_grid(n, dim0, dim1)\n\n output = np.zeros((k0 * dim0, k1 * dim1, 3), dtype='uint8')\n\n for i, (name, img) in enumerate(images.items()):\n pos0 = i // k1\n pos1 = i % k1\n img = homogenize_image(img)\n img = add_caption(img, caption=name)\n output[pos0 * dim0:(pos0 + 1) * dim0, pos1 * dim1:(pos1 + 1) * dim1, :] = img\n\n return output", "def SSIM(ground_truth_images: np.ndarray, noisy_images: np.ndarray) -> List[float]:\n validate_inputs(ground_truth_images, noisy_images)\n\n ssim_accumulated = []\n\n quantity_of_images = ground_truth_images.shape[0]\n\n if need_to_normalize(ground_truth_images):\n ground_truth_images = normalize(ground_truth_images, \\\n interval=(0,255), data_type='int')\n \n if need_to_normalize(noisy_images):\n noisy_images = normalize(noisy_images, \\\n interval=(0,255), data_type='int')\n\n for i in range(quantity_of_images):\n ssim_image = ssim(\n ground_truth_images[i,:,:,0], \n noisy_images[i,:,:,0],\n data_range=256\n )\n ssim_accumulated.append(ssim_image)\n \n return ssim_accumulated\n # ssim_accumulated = np.array(ssim_accumulated)\n\n # return ssim_accumulated.mean()", "def appendimages(im1,im2):\n # select the image with the fewest rows and fill in enough empty rows\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1,zeros((rows2-rows1,im1.shape[1]))),axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2,zeros((rows1-rows2,im2.shape[1]))),axis=0)\n # if none of these cases they are equal, no filling needed.\n return np.concatenate((im1,im2), axis=1)", "def read_images(image_info, image_dims):\r\n num_examples = len(image_info)\r\n num_pixels = int(image_dims[0]*image_dims[1]*image_dims[2])\r\n locations, classes = zip(*image_info)\r\n output_array = np.zeros((num_examples, num_pixels+1), dtype=np.float32)\r\n for entry in range(num_examples):\r\n if entry % 100 == 0:\r\n print('reading image: '+str(entry)+'/'+str(num_examples))\r\n output_array[entry, 0] = classes[entry] # image classes\r\n input_image = skio.imread(locations[entry], as_grey=False) # read in a grayscale image\r\n output_image = sktf.resize(input_image, image_dims) # interpolate down to image_dims (including channels)\r\n \"\"\"normalize images by color channel, with fuzz factor to avoid 
div0\"\"\"\r\n maxpx = np.zeros((1, image_dims[2])) # store max/min for each channel\r\n minpx = np.zeros((1, image_dims[2]))\r\n for i in range(image_dims[2]): # find max/min for each channel\r\n maxpx[0, i] = np.max(output_image[:, :, i])\r\n if maxpx[0, i] == float(0):\r\n maxpx[0, i] = 1e-12 # fuzz factor\r\n minpx[0, i] = np.min(output_image[:, :, i])\r\n \"\"\"flatten and store\"\"\"\r\n for i in range(image_dims[2]):\r\n output_array[entry, 1+i*(image_dims[0]*image_dims[1]):1+(i+1)*(image_dims[0]*image_dims[1])] = \\\r\n np.ravel((output_image[:, :, i] - minpx[0, i]) / (maxpx[0, i] - minpx[0, i]))\r\n return output_array", "def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output", "def appendimages(im1,im2):\n # select the image with the fewest rows and fill in enough empty rows\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1,zeros((rows2-rows1,im1.shape[1]))),axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2,zeros((rows1-rows2,im2.shape[1]))),axis=0)\n # if none of these cases they are equal, no filling needed.\n return np.concatenate((im1,im2), axis=1)", "def problem2():\n \n pts_array, feats_array = p2.load_pts_features('data/pts_feats.npz')\n\n # points and features for image1 and image2\n pts1, pts2 = pts_array\n fts1, fts2 = feats_array\n\n # Loading images\n img1 = Image.open('data/img1.png')\n img2 = Image.open('data/img2.png')\n\n im1 = np.array(img1)\n im2 = np.array(img2)\n\n plt.figure(1)\n plt.subplot(1, 2, 1)\n plt.imshow(im1)\n plt.plot(pts1[:, 0], pts1[:, 1], 'ro', markersize=1.3)\n plt.subplot(1, 2, 2)\n plt.imshow(im2)\n plt.plot(pts2[:, 0], pts2[:, 1], 'ro', markersize=1.3)\n\n # display algined image\n H, ix1, ix2 = p2.final_homography(pts1, pts2, feats_array[0],\n 
feats_array[1])\n\n pts1 = pts1[ix1]\n pts2 = pts2[ix2]\n\n plt.figure(2)\n plt.subplot(1, 3, 1).set_title('Image 1')\n plt.imshow(im1)\n plt.plot(pts1[:, 0],\n pts1[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 2).set_title('Image 2')\n plt.imshow(im2)\n plt.plot(pts2[:, 0],\n pts2[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 3).set_title('Algined image 1')\n\n H_inv = np.linalg.inv(H)\n H_inv /= H_inv[2, 2]\n im3 = img1.transform(size=(im1.shape[1], im1.shape[0]),\n method=Image.PERSPECTIVE,\n data=H_inv.ravel(),\n resample=Image.BICUBIC)\n\n plt.show()", "def appendimages(im1,im2):\n # select the image with the fewest rows and fill in enough empty rows\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1,np.zeros((rows2-rows1,im1.shape[1]))),axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2,np.zeros((rows1-rows2,im2.shape[1]))),axis=0)\n # if none of these cases they are equal, no filling needed.\n return np.concatenate((im1,im2), axis=1)", "def cast_and_normalise_images(images):\n images = (tf.cast(images, tf.float32) / 255.0) - 0.5\n return images", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' 
* 50)", "def concat_images(imga, imgb, xoffset=0, yoffset=0, direction='horizontal',\n ontop=True, adjust_z=False, center_offset=True):\n if direction == 'horizontal':\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n offset = (abs(yoffset), abs(xoffset))\n\n if center_offset:\n new_offset = np.subtract(center_a, np.add(center_b, offset))\n\n if (max_dim == imgb.shape).all():\n tmp = np.copy(imgb)\n imgb = np.copy(imga)\n imga = np.copy(tmp)\n ontop = toggle(ontop)\n xoffset *= -1\n yoffset *= -1\n\n # elif not (max_dim == imga.shape).all():\n # for i, m in enumerate(max_dim):\n # if m not in imga.shape:\n # new_offset[i] = center_a[i] - (center_b[i] + offset[i])\n # else:\n # new_offset[i] = center_a[i] + offset[i] - center_b[i]\n\n new_offset[new_offset > 0] = 0\n center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(new_offset)), np.nan)\n\n Sa0 = slice(int(center_new[0] - imga.shape[0]/2 + 0.5),\n int(center_new[0] + imga.shape[0]/2 + 0.5))\n Sa1 = slice(int(center_new[1] - imga.shape[1]/2 + 0.5),\n int(center_new[1] + imga.shape[1]/2 + 0.5))\n Sb0 = slice(int(center_new[0] + abs(yoffset) - imgb.shape[0]/2 + 0.5),\n int(center_new[0] + abs(yoffset) + imgb.shape[0]/2 + 0.5))\n Sb1 = slice(int(center_new[1] + abs(xoffset) - imgb.shape[1]/2 + 0.5),\n int(center_new[1] + abs(xoffset) + imgb.shape[1]/2 + 0.5))\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n\n imga = imga[::ydir, ::xdir]\n imgb = imgb[::ydir, ::xdir]\n\n if adjust_z:\n top_img = 1 * new_img\n top_img[Sa0, Sa1] = imga\n top_img[Sb0, Sb1] = imgb\n low_img = 1 * new_img\n low_img[Sb0, Sb1] = imgb\n low_img[Sa0, Sa1] = imga\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb -= add\n\n if ontop:\n new_img[Sa0, Sa1] = imga\n new_img[Sb0, Sb1] = imgb\n else:\n new_img[Sb0, Sb1] = imgb\n new_img[Sa0, Sa1] = imga\n\n return new_img[::ydir, ::xdir]", "def test_CCI_SM_v33_025Img_img_reading_2D():\n parameter = ['sm']\n img_c = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"combined\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_c = img_c.read()\n\n assert sorted(image_c.data.keys()) == sorted(parameter)\n assert image_c.data['sm'].shape == (720, 1440)\n assert image_c.lon[0, 0] == -179.875\n assert image_c.lon[0, 1439] == 179.875\n assert image_c.lat[0, 0] == 89.875\n assert image_c.lat[719, 0] == -89.875\n assert abs(image_c.data['sm'][203, 693] - 0.23484) <= 1e-5\n assert image_c.lon.shape == image_c.lat.shape == (720, 1440)\n\n\n parameter = ['sm']\n img_a = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"active\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_a = img_a.read()\n\n assert sorted(image_a.data.keys()) == sorted(parameter)\n assert image_a.data['sm'].shape == (720, 1440)\n assert image_a.lon[0, 0] == -179.875\n assert image_a.lon[0, 1439] == 179.875\n assert image_a.lat[0, 0] == 89.875\n assert image_a.lat[719, 0] == -89.875\n assert abs(image_a.data['sm'][203, 693] - 67.70157) 
<= 1e-5\n assert image_a.lon.shape == image_a.lat.shape == (720, 1440)\n\n\n parameter = ['sm']\n img_p = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"passive\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_p = img_p.read()\n\n assert sorted(image_p.data.keys()) == sorted(parameter)\n assert image_p.data['sm'].shape == (720, 1440)\n assert image_p.lon[0, 0] == -179.875\n assert image_p.lon[0, 1439] == 179.875\n assert image_p.lat[0, 0] == 89.875\n assert image_p.lat[719, 0] == -89.875\n assert abs(image_p.data['sm'][203, 693] - 0.322685) <= 1e-5\n assert image_p.lon.shape == image_p.lat.shape == (720, 1440)", "def imageAvg(img1, img2):\n return myimg.imageAvg(img1.tolist(), img2.tolist())", "def appendimages(im1, im2):\n\n # select the image with the fewest rows and fill in enough empty rows\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n\n if rows1 < rows2:\n im1 = concatenate((im1, zeros((rows2-rows1, im1.shape[1]))), axis=0)\n elif rows1 > rows2:\n im2 = concatenate((im2, zeros((rows1-rows2, im2.shape[1]))), axis=0)\n # if none of these cases they are equal, no filling needed.\n assert (im1.shape[0] != im2.shape[0])\n return concatenate((im1, im2), axis=1)", "def concat_images_horiz(imga, imgb):\n ha,wa = imga.shape[:2]\n hb,wb = imgb.shape[:2]\n max_height = np.max([ha, hb])\n total_width = wa+wb\n new_img = np.zeros(shape=(max_height, total_width, 3), dtype=np.uint8)\n new_img[:ha,:wa]=imga\n new_img[:hb,wa:wa+wb]=imgb\n return new_img", "def im2(data1, data2, xlab='', ylab='', tit='', bar=False, newfig=True, \\\n cl=None, x=[], y=[], fontsize=16):\n from pylab import figure, subplot, colorbar, xlabel, ylabel, title, clim\n from nsdata import imshow\n\n if newfig: figure()\n subplot(211)\n imshow(data1,x=x,y=y); \n if clim<>None: clim(cl)\n if bar: colorbar()\n xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)\n title(tit)\n\n subplot(212)\n imshow(data2,x=x,y=y); \n if clim<>None: clim(cl)\n if bar: colorbar()\n xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)\n\n return", "def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image", "def merge(images, size, c_dim):\n h, w = images.shape[1], images.shape[2]\n \n img = np.zeros((h*size[0], w*size[1], c_dim))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h : j * h + h,i * w : i * w + w, :] = image\n #cv2.imshow(\"srimg\",img)\n #cv2.waitKey(0)\n \n return img", "def problem1():\n\n img = load_image(\"data/a1p1.png\")\n display_image(img)\n\n save_as_npy(\"a1p1.npy\", img)\n\n img1 = load_npy(\"a1p1.npy\")\n display_image(img1)\n\n img2 = mirror_horizontal(img1)\n display_image(img2)\n\n display_images(img1, img2)", "def convert_to_image_multiprocessing(pers_diag_array):\n pixels = [50,50]\n spread = 0.15\n max_death = 3.9096455574035645\n pim = PersImage(specs={\"minBD\": 0, \"maxBD\": max_death}, spread=spread, pixels=pixels, verbose=False)\n img = pim.transform(pers_diag_array)\n return img", "def transform_single_imgs(\n self, imgs, confounds=None, sample_mask=None, 
copy=True\n ):\n raise NotImplementedError()", "def transform(self, previousimage):", "def _imequalize(self, results):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.imequalize(img).astype(img.dtype)", "def concat_3dimages(imga, imgb, xoffset=0, yoffset=0, zoffset=0,\n transpose=True, ontop=True, center_offset=True,\n adjust_z=(0, 1)):\n if transpose:\n print(\"Transpose images\")\n imga = np.transpose(imga, axes=(0, 2, 1))\n imgb = np.transpose(imgb, axes=(0, 2, 1))\n\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n offset = (abs(zoffset), abs(yoffset), abs(xoffset))\n\n if center_offset:\n new_offset = np.subtract(center_a, np.add(center_b, offset))\n else:\n new_offset = np.array(offset)\n\n if (max_dim == imgb.shape).all():\n tmp = np.copy(imgb)\n imgb = np.copy(imga)\n imga = np.copy(tmp)\n ontop = toggle(ontop)\n xoffset *= -1\n yoffset *= -1\n zoffset *= -1\n\n new_offset[new_offset > 0] = 0\n center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(new_offset)), np.nan)\n\n Sa0 = slice(int(center_new[0] - imga.shape[0]/2 + 0.5),\n int(center_new[0] + imga.shape[0]/2 + 0.5))\n Sa1 = slice(int(center_new[1] - imga.shape[1]/2 + 0.5),\n int(center_new[1] + imga.shape[1]/2 + 0.5))\n Sa2 = slice(int(center_new[2] - imga.shape[2]/2 + 0.5),\n int(center_new[2] + imga.shape[2]/2 + 0.5))\n Sb0 = slice(int(center_new[0] + abs(zoffset) - imgb.shape[0]/2 + 0.5),\n int(center_new[0] + abs(zoffset) + imgb.shape[0]/2 + 0.5))\n Sb1 = slice(int(center_new[1] + abs(yoffset) - imgb.shape[1]/2 + 0.5),\n int(center_new[1] + abs(yoffset) + imgb.shape[1]/2 + 0.5))\n Sb2 = slice(int(center_new[2] + abs(xoffset) - imgb.shape[2]/2 + 0.5),\n int(center_new[2] + abs(xoffset) + imgb.shape[2]/2 + 0.5))\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n zdir = np.sign(zoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n if zdir == 0:\n zdir = 1\n\n imga = imga[::zdir, ::ydir, ::xdir]\n imgb = imgb[::zdir, ::ydir, ::xdir]\n\n if adjust_z:\n for ix in adjust_z:\n top_img = 1 * new_img[ix]\n top_img[Sa1, Sa2] = imga[ix]\n top_img[Sb1, Sb2] = imgb[ix]\n low_img = 1 * new_img[ix]\n low_img[Sb1, Sb2] = imgb[ix]\n low_img[Sa1, Sa2] = imga[ix]\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb[ix] -= add\n\n print(\"new_img shape: \", new_img.shape)\n\n if ontop:\n new_img[Sa0, Sa1, Sa2] = imga\n new_img[Sb0, Sb1, Sb2] = imgb\n else:\n new_img[Sb0, Sb1, Sb2] = imgb\n new_img[Sa0, Sa1, Sa2] = imga\n\n if transpose:\n print(\"Transpose back\")\n return np.transpose(new_img[::zdir, ::ydir, ::xdir], axes=(0, 2, 1))\n else:\n return new_img[::zdir, ::ydir, ::xdir]", "def unmold_image(normalized_images, config):\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)", "def test_2d_inputs():\n reseed()\n\n base_img1 = np.array([[0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1]], dtype=np.uint8)\n base_img2 = np.array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 1, 0, 0]], dtype=np.uint8)\n\n base_img1_flipped = np.array([[1, 1, 0, 0],\n [1, 1, 0, 0],\n [1, 1, 1, 0]], dtype=np.uint8)\n base_img2_flipped = np.array([[1, 1, 0, 0],\n [1, 1, 1, 0],\n [0, 0, 1, 0]], dtype=np.uint8)\n\n images = np.array([base_img1, base_img2])\n images_flipped = np.array([base_img1_flipped, 
base_img2_flipped])\n images_list = [base_img1, base_img2]\n images_flipped_list = [base_img1_flipped, base_img2_flipped]\n images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]\n images_flipped_list2d3d = [base_img1_flipped, base_img2_flipped[:, :, np.newaxis]]\n\n aug = iaa.Fliplr(1.0)\n noaug = iaa.Fliplr(0.0)\n\n # one numpy array as input\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_flipped)\n\n observed = noaug.augment_images(images)\n assert np.array_equal(observed, images)\n\n # list of 2d images\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_flipped_list)\n\n observed = noaug.augment_images(images_list)\n assert array_equal_lists(observed, images_list)\n\n # list of images, one 2d and one 3d\n observed = aug.augment_images(images_list2d3d)\n assert array_equal_lists(observed, images_flipped_list2d3d)\n\n observed = noaug.augment_images(images_list2d3d)\n assert array_equal_lists(observed, images_list2d3d)", "def diffSmoothImages(imgA, imgB):\n\n smoothImgA = smoothImage(imgA)\n smoothImgB = smoothImage(imgB)\n\n return diffImages(smoothImgA, smoothImgB)", "def vis_imgs2(X, y_, y, path):\n if y.ndim == 2:\n y = y[:,:,np.newaxis]\n if y_.ndim == 2:\n y_ = y_[:,:,np.newaxis]\n assert X.ndim == 3\n tl.vis.save_images(np.asarray([X[:,:,0,np.newaxis],\n X[:,:,1,np.newaxis], X[:,:,2,np.newaxis], y_, y]), size=(1, 5),\n image_path=path)", "def divi_img(x, h1, h2):\n return x[:, :, :, h1:h2]", "def mse(image1: np.ndarray, image2: np.ndarray) -> np.ndarray:\n return np.sqrt(np.power((image1 - image2), 2).mean(axis=(-1, -2)))", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def _augment_images(self, images, random_state, parents, hooks):\n nb_images = len(images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i in sm.xrange(nb_images):\n if samples[i] == 1:\n if self.axis == 1:\n images[i] = np.fliplr(images[i])\n elif self.axis == 0:\n images[i] = np.flipud(images[i])\n self.samples = samples\n return images", "def vimage(cat1, cat2, dmax, psize, fwhm):\n\n NHALF = int(dmax/psize)\n NSIDE = 2*NHALF+1\n mshift = (NHALF+0.5)*psize\n img = np.zeros((NSIDE,NSIDE))\n x2s, y2s = cat2[:,0], cat2[:,1]\n for x1, y1 in cat1:\n ok = (x2s > x1-mshift) & (x2s < x1+mshift) & \\\n (y2s > y1-mshift) & (y2s < y1+mshift)\n for x2, y2 in cat2[ok]:\n ix = NHALF+int(round((x2-x1)/psize))\n iy = NHALF+int(round((y2-y1)/psize))\n img[iy,ix] += 1\n\n # smooth image\n img = gaussian_filter(img,fwhm/psize/2.3548,mode='constant')\n\n # identify maximum pixel\n ind = np.arange(NSIDE)\n ix, iy = np.meshgrid(ind, ind)\n peak = img == img.max()\n #if len(ix[peak]) > 1:\n # raise Exception(\"Found more than one maximum pixel\")\n\n # now have first approximation to the shift\n ixp = ix[peak][0]\n iyp = iy[peak][0]\n xp = psize*(ixp-NHALF)\n yp = psize*(iyp-NHALF)\n if ixp == 0 or ixp == NSIDE-1 or iyp == 0 or iyp == NSIDE-1:\n # max pixel at edge of array. 
Just return pixel position\n # as \"refined\" position\n xr = xp\n yr = yp\n\n else:\n # Make a quadratic approx to refine the peak position.\n # Estimate first and second partial derivatives from\n # 3x3 pixels centred on peak\n fx = (img[iyp,ixp+1] - img[iyp,ixp-1])/2.\n fy = (img[iyp+1,ixp] - img[iyp-1,ixp])/2.\n fxx = img[iyp,ixp-1] + img[iyp,ixp+1] - 2*img[iyp,ixp]\n fyy = img[iyp-1,ixp] + img[iyp+1,ixp] - 2*img[iyp,ixp]\n fxy = (img[iyp+1,ixp+1] + img[iyp-1,ixp-1] -\n img[iyp+1,ixp-1] - img[iyp-1,ixp+1])/4.\n b = np.array((fx,fy)).T\n A = np.array(((fxx,fxy),(fxy,fyy)))\n x = solve(A,b)\n xr = xp - psize*x[0]\n yr = yp - psize*x[1]\n return (img, xp,yp,xr,yr)", "def stitch_images(images, margin=5, cols=5):\n n, w, h, = images.shape\n n_rows = max(1, int(math.ceil(n / cols)))\n n_cols = min(n, cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w), dtype=images.dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= n:\n break\n\n stitched_images[(h + margin) * row : (h + margin) * row + h,\n (w + margin) * col : (w + margin) * col + w] = images[img_idx]\n\n return stitched_images", "def _ssim_for_multiscale(img1, img2, max_val=255, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03):\n\n _, height, width, _ = img1.shape\n\n # Filter size can't be larger than height or width of images.\n #size = tf.min(filter_size, height, width)\n size = filter_size\n\n # Scale down sigma if a smaller filter size is used.\n sigma = size * filter_sigma / filter_size if filter_size else 0\n\n if filter_size:\n window = broadcast_to(tf.reshape(_f_special_gauss(size, sigma),\n (size, size, 1, 1)), (size, size, 3,1))\n mu1 = conv(img1, window)\n mu2 = conv(img2, window)\n sigma11 = conv(img1 * img1, window)\n sigma22 = conv(img2 * img2, window)\n sigma12 = conv(img1 * img2, window)\n else:\n # Empty blur kernel so no need to convolve.\n mu1, mu2 = img1, img2\n sigma11 = img1 * img1\n sigma22 = img2 * img2\n sigma12 = img1 * img2\n\n mu11 = mu1 * mu1\n mu22 = mu2 * mu2\n mu12 = mu1 * mu2\n sigma11 -= mu11\n sigma22 -= mu22\n sigma12 -= mu12\n\n # Calculate intermediate values used by both ssim and cs_map.\n c1 = (k1 * max_val) ** 2\n c2 = (k2 * max_val) ** 2\n v1 = 2.0 * sigma12 + c2\n v2 = sigma11 + sigma22 + c2\n ssim = tf.reduce_mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))\n cs = tf.reduce_mean(v1 / v2)\n return ssim, cs", "def augment_images(images, measurements, correction=0.0):\r\n aug_imgs, aug_msrs = [], []\r\n for image, measurement, in zip(images, measurements):\r\n corr_msr = measurement + correction\r\n aug_imgs.append(image)\r\n aug_msrs.append(corr_msr)\r\n aug_imgs.append(cv2.flip(image, 1))\r\n aug_msrs.append(corr_msr*-1)\r\n return aug_imgs, aug_msrs", "def SSIM(self, imageA, imageB):\n return measure.compare_ssim(imageA, imageB)", "def hard_blending(im1, im2):\n assert(im1.shape == im2.shape)\n h, w, c = im1.shape\n new_im = im2.copy()\n new_im[:,:(w//2),:] = im1[:,:(w//2),:]\n return new_im", "def immerge(images, row, col):\n\n if images.ndim == 4:\n c = images.shape[3]\n elif images.ndim == 3:\n c = 1\n\n h, w = images.shape[1], images.shape[2]\n if c > 1:\n img = np.zeros((h * row, w * col, c))\n else:\n img = np.zeros((h * row, w * col))\n for idx, image in enumerate(images):\n i = idx % col\n j = idx // col\n img[j * h:j * h + h, i * w:i * w + w, ...] 
= image\n\n return img", "def verticalConcat(image1, image2):\n shape1 = image1.shape\n shape2 = image2.shape\n if shape1[1] > shape2[1]:\n resizeMaintainAspectRatio(image2, width=shape1[1])\n elif shape2[1] > shape1[1]:\n resizeMaintainAspectRatio(image1, width=shape2[1])\n\n return np.hstack((image1, image2))", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def stitch_images(images, margin=5, cols=5):\n if len(images) == 0:\n return None\n\n h, w, c = images[0].shape\n n_rows = int(math.ceil(len(images) / cols))\n n_cols = min(len(images), cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= len(images):\n break\n\n stitched_images[(h + margin) * row : (h + margin) * row + h,\n (w + margin) * col : (w + margin) * col + w, :] = images[img_idx]\n\n return stitched_images", "def immerge(images, n_rows=None, n_cols=None, padding=0, pad_value=0):\n # 将几张小图片整合到一张大图片中(大图片每行每列会显示好几张小图片)\n images = np.array(images)\n n = images.shape[0]\n if n_rows:\n n_rows = max(min(n_rows, n), 1)\n n_cols = int(n - 0.5) // n_rows + 1\n elif n_cols:\n n_cols = max(min(n_cols, n), 1)\n n_rows = int(n - 0.5) // n_cols + 1\n else:\n n_rows = int(n ** 0.5)\n n_cols = int(n - 0.5) // n_rows + 1\n\n h, w = images.shape[1], images.shape[2]\n shape = (h * n_rows + padding * (n_rows - 1),\n w * n_cols + padding * (n_cols - 1))\n if images.ndim == 4:\n shape += (images.shape[3],)\n img = np.full(shape, pad_value, dtype=images.dtype)\n\n for idx, image in enumerate(images):\n i = idx % n_cols\n j = idx // n_cols\n img[j * (h + padding):j * (h + padding) + h,\n i * (w + padding):i * (w + padding) + w, ...] 
= image\n\n return img", "def motionDeflicker(imgs):\n b = [x[:,:,0] for x in imgs] \n g = [x[:,:,1] for x in imgs] \n r = [x[:,:,2] for x in imgs] \n b_corrected = single_deflicker(b)\n g_corrected = single_deflicker(g)\n r_corrected = single_deflicker(r)\n return cv2.merge((np.uint8(b_corrected),np.uint8(g_corrected),np.uint8(r_corrected)))", "def denormalize(images, min_, max_):\n return [((i + 1) / 2 * (max_ - min_)) + min_ for i in images]", "def postprocess(self, images):\n if not isinstance(images, np.ndarray):\n raise ValueError(f'Images should be with type `numpy.ndarray`!')\n\n if images.ndim != 4 or images.shape[1] != self.image_channels:\n raise ValueError(f'Input should be with shape [batch_size, channel, '\n f'height, width], where channel equals to '\n f'{self.image_channels}!\\n'\n f'But {images.shape} is received!')\n images = (images - self.min_val) * 255 / (self.max_val - self.min_val)\n images = np.clip(images + 0.5, 0, 255).astype(np.uint8)\n images = images.transpose(0, 2, 3, 1)\n if self.image_channels == 3 and self.channel_order == 'BGR':\n images = images[:, :, :, ::-1]\n\n return images", "def join_images_horizontally(images):\n array = np.concatenate((images[0], images[1]), axis=1)\n return Image.fromarray(np.uint8(array))", "def PImageAdd (in1Image, in2Image, outImage, err, \\\n chkPos=False, factor1=1.0, factor2=1.0):\n ################################################################\n # Checks\n if not Image.PIsA(in1Image):\n raise TypeError,\"in1Image MUST be a Python Obit Image\"\n if not Image.PIsA(in2Image):\n raise TypeError,\"in2Image MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n #\n # Clone output from input 1\n in1Image.Clone (outImage, err)\n # Open images\n Image.POpen (in1Image, Image.READONLY, err)\n Image.POpen (in2Image, Image.READONLY, err)\n Image.POpen (outImage, Image.WRITEONLY, err)\n # Get input descriptor to see how many planes\n in1Desc = in1Image.Desc\n in2Desc = in2Image.Desc\n # Check compatibility\n ImageDesc.PCheckCompat (in1Desc, in2Desc, chkPos=chkPos)\n inDescDict = in1Desc.Dict\n ndim = inDescDict[\"naxis\"]\n inNaxis = inDescDict[\"inaxes\"]\n # Work buffer\n inImageArray = Image.PGetFArray(in1Image)\n ImageBuffer1 = FArray.PCopy(inImageArray, err)\n ImageBuffer2 = FArray.PCopy(inImageArray, err)\n\n # list of planes to loop over (0-rel)\n if (ndim>0) and (inNaxis[2]>0): \n planes = range(inNaxis[2])\n else:\n planes = [0]\n \n # Loop over planes\n for iPlane in planes:\n doPlane = [iPlane+1,1,1,1,1]\n # Get image planes\n Image.PGetPlane (in1Image, ImageBuffer1, doPlane, err)\n Image.PGetPlane (in2Image, ImageBuffer2, doPlane, err)\n\n # Scale\n FArray.PSMul(ImageBuffer1, factor1)\n FArray.PSMul(ImageBuffer2, factor2)\n\n # Add\n FArray.PAdd(ImageBuffer1, ImageBuffer2, ImageBuffer2)\n\n # Write output\n Image.PPutPlane (outImage, ImageBuffer2, doPlane, err)\n\n # end loop over planes\n # Close\n in2Image.Close(err)\n in2Image.Close(err)\n outImage.Close(err)\n # Error?\n if err.isErr:\n OErr.printErrMsg(err, \"Error subtracting Images\")\n # Write history\n in1History = History.History(\"history\", in1Image.List, err)\n in2History = History.History(\"history\", in2Image.List, err)\n outHistory = History.History(\"history\", outImage.List, err)\n # Copy Histories\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1, 
\"/ PImageAdd Input 1 History\",err)\n outHistory.Close(err)\n info = in1Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in1History, outHistory, err)\n #Not needed History.PCopy(in1History, outHistory, err)\n outHistory.Open(History.READWRITE, err)\n outHistory.WriteRec(-1, \"/ \",err)\n outHistory.WriteRec(-1, \"/ ****** PImageAdd Input 2 History\",err)\n outHistory.Close(err)\n info = in2Image.List.Dict\n # FITS? - copy header\n if (\"FileType\" in info) and (info[\"FileType\"][2][0]==0):\n History.PCopyHeader(in2History, outHistory, err)\n History.PCopy(in2History, outHistory, err)\n # Add this programs history\n outHistory.Open(History.READWRITE, err)\n outHistory.TimeStamp(\" Start Obit PImageAdd\",err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor1 = \"+str(factor1),err)\n outHistory.WriteRec(-1,OSystem.PGetPgmName()+\" factor2 = \"+str(factor2),err)\n outHistory.Close(err)", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def viz2(img1, interest_points1, img2, interest_points2, matches, PATCH_SIZE, threshold, min_sigma, max_sigma, num_sigma):\n \n\n\tfig = plt.figure(figsize=(10,5))\n\tax1 = fig.add_subplot(121)\n\tax2 = fig.add_subplot(122)\n\n #adding the two images to axes \n\tax1.imshow(img1, cmap='gray')\n\tax2.imshow(img2, cmap='gray')\n\n\tpositionimg1 = ax1.get_position()\n\tnew_pos = [positionimg1.x0+0.09, positionimg1.y0+0.025, \\\n\t\tpositionimg1.width / 1.1, positionimg1.height / 1.1] \n\tax1.set_position(new_pos)\n\n\tx1 = [a[1] for a in interest_points1] #blob detection x axis\n\ty1 = [a[0] for a in interest_points1] #blob detection y axis\n\ts1 = [a[2] for a in interest_points1] #blob detected at sigma \n \n\tx2 = [a[1] for a in interest_points2] #blob detection x axis\n\ty2 = [a[0] for a in interest_points2] #blob detection y axis\n\ts2 = [a[2] for a in interest_points2] #blob detected at sigma \n \n\tdifferences = [a[2] for a in matches]\n\n\n\tweighted_differences = normalize(differences)\n\n #iterating through the input list of matches\n\tfor coordinates, difference in zip(matches, weighted_differences):\n\t\tcord_a = (coordinates[0][1], coordinates[0][0]) #extracting coordinates for interest point in img1\n\t\tcord_b = (coordinates[1][1], coordinates[1][0]) #extracting coordinates for interest point in img2\n\t\tif difference <=0.33:\n\t\t\tcolor = \"green\"\n\t\telif difference > 0.33 and difference <= 0.66:\n\t\t\tcolor = \"yellow\"\n\t\telse:\n\t\t\tcolor = \"red\"\n\n\t#defining the path from cord_a to cord_b\n\t\tcon = ConnectionPatch(xyA=cord_a, xyB=cord_b, coordsA=\"data\", coordsB=\"data\",\n\t\t\t\t\t\t\t axesA=ax2, axesB=ax1, color=color) #arrowstyle='->')\n\t#adding line to axes2 \n\t\tax2.add_artist(con)\n\n #showing the image // can be changed to saving the image locally \n\tfor x, y, s in zip(x1, y1, s1):\n\t\tax1.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img1\n\tfor x, y, s in zip(x2, y2, s2):\n\t\tax2.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img2\n\tax1.axis('off')\n\tax2.axis('off')\n\ttitle = 'Patch Size=' + str(PATCH_SIZE) + ', Threshold=' + str(threshold) + ', min sigma=' + \\\n\tstr(min_sigma) + ', max sigma=' + str(max_sigma) + ', num sigma=' + str(num_sigma)\n\tplt.title(title, x=+0.1)\n\t#plt.show()\n\tplt.savefig(title+'.png')\n\n\n\treturn", "def compare_images(im1, im2):\n 
errors = (im1 - im2) / 255\n return np.mean(np.square(errors))", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def augment_images(images, measurements, correction=0.0):\n aug_imgs, aug_msrs = [], []\n for image, measurement, in zip(images, measurements):\n corr_msr = measurement + correction\n aug_imgs.append(image)\n aug_msrs.append(corr_msr)\n return aug_imgs, aug_msrs", "def _compute_ij_images_for_source_line(\n src_j0: int,\n src_x_image: np.ndarray,\n src_y_image: np.ndarray,\n src_i_min: int,\n src_j_min: int,\n dst_src_ij_images: np.ndarray,\n dst_x_offset: float,\n dst_y_offset: float,\n dst_x_scale: float,\n dst_y_scale: float,\n uv_delta: float\n):\n src_width = src_x_image.shape[-1]\n\n dst_width = dst_src_ij_images.shape[-1]\n dst_height = dst_src_ij_images.shape[-2]\n\n dst_px = np.zeros(4, dtype=src_x_image.dtype)\n dst_py = np.zeros(4, dtype=src_y_image.dtype)\n\n u_min = v_min = -uv_delta\n uv_max = 1.0 + 2 * uv_delta\n\n for src_i0 in range(src_width - 1):\n src_i1 = src_i0 + 1\n src_j1 = src_j0 + 1\n\n dst_px[0] = dst_p0x = src_x_image[src_j0, src_i0]\n dst_px[1] = dst_p1x = src_x_image[src_j0, src_i1]\n dst_px[2] = dst_p2x = src_x_image[src_j1, src_i0]\n dst_px[3] = dst_p3x = src_x_image[src_j1, src_i1]\n\n dst_py[0] = dst_p0y = src_y_image[src_j0, src_i0]\n dst_py[1] = dst_p1y = src_y_image[src_j0, src_i1]\n dst_py[2] = dst_p2y = src_y_image[src_j1, src_i0]\n dst_py[3] = dst_p3y = src_y_image[src_j1, src_i1]\n\n dst_pi = np.floor((dst_px - dst_x_offset)\n / dst_x_scale).astype(np.int64)\n dst_pj = np.floor((dst_py - dst_y_offset)\n / dst_y_scale).astype(np.int64)\n\n dst_i_min = np.min(dst_pi)\n dst_i_max = np.max(dst_pi)\n dst_j_min = np.min(dst_pj)\n dst_j_max = np.max(dst_pj)\n\n if dst_i_max < 0 \\\n or dst_j_max < 0 \\\n or dst_i_min >= dst_width \\\n or dst_j_min >= dst_height:\n continue\n\n if dst_i_min < 0:\n dst_i_min = 0\n\n if dst_i_max >= dst_width:\n dst_i_max = dst_width - 1\n\n if dst_j_min < 0:\n dst_j_min = 0\n\n if dst_j_max >= dst_height:\n dst_j_max = dst_height - 1\n\n # u from p0 right to p1, v from p0 down to p2\n det_a = _fdet(dst_p0x, dst_p0y, dst_p1x, dst_p1y, dst_p2x, dst_p2y)\n # u from p3 left to p2, v from p3 up to p1\n det_b = _fdet(dst_p3x, dst_p3y, dst_p2x, dst_p2y, dst_p1x, dst_p1y)\n\n if np.isnan(det_a) or np.isnan(det_b):\n # print('no plane at:', src_i0, src_j0)\n continue\n\n for dst_j in range(dst_j_min, dst_j_max + 1):\n dst_y = dst_y_offset + (dst_j + 0.5) * dst_y_scale\n for dst_i in range(dst_i_min, dst_i_max + 1):\n dst_x = dst_x_offset + (dst_i + 0.5) * dst_x_scale\n\n # TODO: use two other combinations,\n # if one of the dst_px<n>,dst_py<n> pairs is missing.\n\n src_i = src_j = -1\n\n if det_a != 0.0:\n u = _fu(dst_x, dst_y,\n dst_p0x, dst_p0y, dst_p2x, dst_p2y) / det_a\n v = _fv(dst_x, dst_y,\n dst_p0x, dst_p0y, dst_p1x, dst_p1y) / det_a\n if u >= u_min and v >= v_min and u + v <= uv_max:\n src_i = src_i0 + _fclamp(u, 0.0, 1.0)\n src_j = src_j0 + _fclamp(v, 0.0, 1.0)\n if src_i == -1 and det_b != 0.0:\n u = _fu(dst_x, dst_y,\n dst_p3x, dst_p3y, dst_p1x, dst_p1y) / det_b\n v = _fv(dst_x, dst_y,\n dst_p3x, dst_p3y, dst_p2x, dst_p2y) / 
det_b\n if u >= u_min and v >= v_min and u + v <= uv_max:\n src_i = src_i1 - _fclamp(u, 0.0, 1.0)\n src_j = src_j1 - _fclamp(v, 0.0, 1.0)\n if src_i != -1:\n dst_src_ij_images[0, dst_j, dst_i] = src_i_min + src_i\n dst_src_ij_images[1, dst_j, dst_i] = src_j_min + src_j", "def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):\n # Detect keypoints in each image\n keypoints = [] # keypoints[i] corresponds to imgs[i]\n for img in imgs:\n kypnts = corner_peaks(harris_corners(img, window_size=3),\n threshold_rel=0.05,\n exclude_border=8)\n keypoints.append(kypnts)\n # Describe keypoints\n descriptors = [] # descriptors[i] corresponds to keypoints[i]\n for i, kypnts in enumerate(keypoints):\n desc = describe_keypoints(imgs[i], kypnts,\n desc_func=desc_func,\n patch_size=patch_size)\n descriptors.append(desc)\n # Match keypoints in neighboring images\n matches = [] # matches[i] corresponds to matches between\n # descriptors[i] and descriptors[i+1]\n for i in range(len(imgs)-1):\n mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)\n matches.append(mtchs)\n\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n\n return panorama", "def myHybridImages(lowImage: np.ndarray, lowSigma: float, highImage: np.ndarray, highSigma: float) -> np.ndarray:\n\n # Your code here.\n lowFilteredImage = convolve(lowImage, makeGaussianKernel(lowSigma))\n print(\"the picture should be below\")\n plt.imshow(lowFilteredImage)\n #plt.show()\n print(\"the picture should be upper\")\n \n highFilteredImage = highImage - convolve(highImage, makeGaussianKernel(highSigma)\n plt.imshow(highFilteredImage)\n plt.show()\n hybridImage = lowFilteredImage + highFilteredImage\n #print(lowFilteredImage)\n #print(highFilteredImage)\n #print(hybridImage)\n return hybridImage", "def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', 
points1, '%d', delimiter=' ') # X is an array\n np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None", "def from_0_1_to_m1_1(images):\n\n # shifting from [0, 1) to [-1, 1) is equivalent to assuming 0.5 mean\n mean = 0.5\n proimages = (images - mean) / mean\n\n return proimages", "def image_comparison(unaligned_image_ccd_lst,aligned_image_ccd_lst,stacked_img_ccd,outputs_path,obsdate):\n source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n source_image_hdr = source_hdu.header\n run_filename = source_image_hdr['RUN'].strip(' ')\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n # compare unaligned vs aligned images\n for i, unaligned_img in enumerate(unaligned_image_ccd_lst[1:]):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n # source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n image_hdr = unaligned_img.header\n run_filename = image_hdr['RUN'].strip(' ')\n target_name = image_hdr['FIELD'].strip(' ')\n exptime = image_hdr['EXPTIME']\n chip_num = image_hdr['CHIP']\n \n show_image(unaligned_img, cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n show_image(aligned_image_ccd_lst[i], cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"unaligned_vs_aligned_{}-{}-{}-{}.jpg\".format(run_filename,target_name,chip_num,exptime),dpi=900)\n plt.show()\n \n # compare source image to stacked image\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n show_image(unaligned_image_ccd_lst[0], cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Source Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n show_image(stacked_img_ccd, cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Stacked Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"source_vs_stacked_{}-{}-{}.jpg\".format(target_name,chip_num,exptime),dpi=900)\n plt.show()", "def __call__(self, src, label):\r\n # img = mx.nd.image.to_tensor(src)\r\n # img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n src = mx.nd.array(src)\r\n img = mx.nd.image.to_tensor(src)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n return img, mx.nd.array(label, dtype=img.dtype)", "def same_landmark_images(path_1: str, path_2: str) -> float:\n img_1_greyscale = read_image_greyscale(path_1)\n img_2_greyscale = read_image_greyscale(path_2)\n img_1_rgb_separated = np.array([read_image_color(path_1, component) for component in RGB_COMPONENTS])\n img_2_rgb_separated = np.array([read_image_color(path_2, component) for component in RGB_COMPONENTS])\n\n similarity_hog = similarity_two_images_hog(img_1_greyscale, img_2_greyscale)\n similiarities_rgb = np.array([similarity_two_images_color(img_1_rgb_separated[i], img_2_rgb_separated[i])\n for i in range(0, len(RGB_COMPONENTS))])\n similarity_color = np.mean(similiarities_rgb)\n\n similarity_percentage = np.average([similarity_hog, similarity_color], weights=[1.2, 1])\n return float(similarity_percentage)", "def pre_process(self, images: Union[np.ndarray, List]) -> np.ndarray:\n images = 
validate_image(images)\n image_sizes = []\n image_arr = []\n for image in images:\n image_sizes.append(image.shape)\n image = resize(image,\n height=self.in_h,\n width=self.in_w)\n image = normalize(image)\n image_arr.append(image)\n image_arr = np.array(image_arr)\n return image_arr, image_sizes", "def compute_psnr_and_ssim(image1, image2, border_size=0):\r\n if len(image1.shape) == 2:\r\n image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)\r\n if len(image2.shape) == 2:\r\n image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)\r\n\r\n if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:\r\n return None\r\n\r\n image1 = trim_image_as_file(image1)\r\n image2 = trim_image_as_file(image2)\r\n\r\n if border_size > 0:\r\n image1 = image1[border_size:-border_size, border_size:-border_size, :]\r\n image2 = image2[border_size:-border_size, border_size:-border_size, :]\r\n\r\n psnr = peak_signal_noise_ratio(image1, image2, data_range=255)\r\n ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,\r\n sigma=1.5, data_range=255)\r\n return psnr, ssim", "def mse(self, image_a, image_b):\r\n data = numpy.sum((image_a.astype('float') - image_b.astype('float')) ** 2)\r\n data /= float(image_a.shape[0] * image_a.shape[1])\r\n return data", "def load_images(input_img: str, output_img: str) -> Union[np.ndarray, np.ndarray]:\r\n input_img = Image.open(input_img).convert(\"RGB\")\r\n output_img = Image.open(output_img).resize(input_img.size).convert(\"RGB\")\r\n return np.array(input_img).copy(), np.array(output_img).copy()", "def transform(self, images: Sequence[np.ndarray]) -> List[Optional[torch.Tensor]]:\n for img in images:\n if img is not None:\n assert img.dtype == np.float32\n assert img.shape[2] == 1, img.shape\n\n T = A.Compose([\n A.Resize(48, 48),\n AT.ToTensor(),\n ])\n images = [T(image=img)['image'].expand(3, -1, -1) if img is not None else None\n for img in images]\n return images", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug", "def reconstruct_image(img_a, nnf):\r\n final_img = np.zeros_like(img_a)\r\n size = nnf.shape[0]\r\n scale = img_a.shape[0] // nnf.shape[0]\r\n for i in range(size):\r\n for j in range(size):\r\n x, y = nnf[i, j]\r\n if final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)].shape == img_a[scale * y:scale * (y + 1),\r\n scale * x:scale * (x + 1)].shape:\r\n final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)] = img_a[scale * y:scale * (y + 1),\r\n scale * x:scale * (x + 1)]\r\n return final_img", "def gradient_merge_arrays(cls, image_one, image_two):\n if image_one.shape != image_two.shape:\n raise AttributeError(\"shapes do not match: {} vs {}\".format(image_one.shape, image_two.shape))\n height = image_one.shape[0]\n vector_one = numpy.array([1.0 - float(i + 1) / (height + 1) for i in range(height)])\n vector_two = numpy.array([float(i + 1) / 
(height + 1) for i in range(height)])\n return (image_one * vector_one[:, numpy.newaxis]) + (image_two * vector_two[:, numpy.newaxis])", "def preprocess(imgs):\n imgs_p = np.ndarray((len(imgs), img_rows, img_cols), dtype=np.float32)\n for i in range(len(imgs)):\n imgs_p[i] = imgs[i].reshape((img_rows, img_cols))/255.\n\n imgs_p = imgs_p[..., np.newaxis]\n\n # Perform data normalization\n mean = imgs_p.mean()\n std = imgs_p.std()\n imgs_p -= mean\n imgs_p /= std\n\n return imgs_p", "def SSIM(img1, img2, max_val=255, filter_size=11,\n filter_sigma=1.5, k1=0.01, k2=0.03, weights=None):\n if img1.shape != img2.shape:\n raise RuntimeError('Input images must have the same shape (%s vs. %s).',\n img1.shape, img2.shape)\n\n if DEBUG:\n cv2.imshow(\"a\", img1)\n cv2.imshow(\"b\", img2)\n cv2.waitKey(0)\n\n height, width = img1.shape\n\n # Filter size can't be larger than height or width of images.\n size = min(filter_size, height, width)\n\n # Scale down sigma if a smaller filter size is used.\n sigma = size * filter_sigma / filter_size if filter_size else 0\n\n # this uses a magic standard dev calculation:\n # see 1) http://matlabtricks.com/post-19/calculating-standard-deviation-using-minimal-memory\n # 2) http://matlabtricks.com/post-20/calculate-standard-deviation-case-of-sliding-window\n\n if filter_size:\n window = np.reshape(_FSpecialGauss(size, sigma), (size, size))\n mu1 = signal.fftconvolve(img1, window, mode='valid')\n mu2 = signal.fftconvolve(img2, window, mode='valid')\n sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')\n sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')\n sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')\n else:\n # Empty blur kernel so no need to convolve.\n mu1, mu2 = img1, img2\n sigma11 = img1 * img1\n sigma22 = img2 * img2\n sigma12 = img1 * img2\n\n mu11 = mu1 * mu1\n mu22 = mu2 * mu2\n mu12 = mu1 * mu2\n sigma11 -= mu11\n sigma22 -= mu22\n sigma12 -= mu12\n\n # Calculate intermediate values used by both ssim and cs_map.\n c1 = (k1 * max_val) ** 2\n c2 = (k2 * max_val) ** 2\n v1 = 2.0 * sigma12 + c2\n v2 = sigma11 + sigma22 + c2\n\n ssim = (((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2))\n\n if DEBUG:\n cv2.imshow(\"ssim\", ssim)\n cv2.waitKey(0)\n\n\n if weights is not None:\n \n padding = math.floor(size/2)\n weights = weights[padding:-padding, padding:-padding]\n\n if DEBUG:\n cv2.imshow(\"weighted ssim\", ssim*weights)\n cv2.waitKey(0)\n\n ssim = np.average(ssim, weights=weights)\n else:\n ssim = np.mean(ssim)\n\n cs = np.mean(v1 / v2)\n return ssim, cs" ]
[ "0.74889344", "0.67036545", "0.6571887", "0.65122443", "0.64808476", "0.6370916", "0.6308732", "0.62787664", "0.62620807", "0.62355393", "0.6224311", "0.6201802", "0.61937666", "0.61699677", "0.6163589", "0.6148197", "0.60919136", "0.60919136", "0.6080456", "0.6079969", "0.6039764", "0.60374504", "0.6019286", "0.6006788", "0.59876794", "0.59782344", "0.5961141", "0.5925856", "0.5920038", "0.5902386", "0.58810735", "0.5875467", "0.5865723", "0.5860493", "0.58420074", "0.58343184", "0.5828903", "0.5827729", "0.5826608", "0.58257526", "0.5821733", "0.58199394", "0.58144623", "0.58144575", "0.58137923", "0.58102065", "0.58029544", "0.5796616", "0.57921016", "0.5781502", "0.57807744", "0.57779723", "0.57639796", "0.5757464", "0.5750101", "0.5748335", "0.57409906", "0.57393956", "0.5733842", "0.5730414", "0.5726348", "0.57186025", "0.57040983", "0.5695031", "0.5676291", "0.56715745", "0.56671125", "0.5656305", "0.56542486", "0.56513816", "0.5651131", "0.564937", "0.56478584", "0.5641953", "0.5633956", "0.56214345", "0.5614278", "0.5606792", "0.5605722", "0.5596196", "0.55886346", "0.5587004", "0.5586452", "0.55838925", "0.55799425", "0.55794764", "0.5578989", "0.5573706", "0.55577964", "0.5557148", "0.5552089", "0.55486596", "0.5547518", "0.5538414", "0.5537399", "0.55307996", "0.5529967", "0.5529179", "0.5526105", "0.55252606", "0.552021" ]
0.0
-1
The names of the roles performed by the model. This is required by QtQuick
def roleNames(self): return self._roles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roles(self):\n return self._roles", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def object_role_names(self):\n return [object_role.name for object_role in self.object_roles]", "def roles(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"roles\")", "def getRoles(self):", "def roles(self):\r\n return self._roles_str.split(\",\")", "def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]", "def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles", "def roles(self) -> List[str]:\n\n role_list = []\n for spec in self.specs.values():\n role = spec.role()\n if role not in role_list:\n role_list.append(role)\n return role_list", "def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []", "def roles(self) -> Optional[Sequence['outputs.AssessmentRole']]:\n return pulumi.get(self, \"roles\")", "def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)", "def listRoleInfo(self):\n return self._roles.values()", "def list(self):\n return self.client.find_all_roles()", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def present_roles(self):\n print(\"User\" + str(self.unique_id) + \": roles=\")\n for group in self._roles:\n print(\"\\tGroup\" + str(group) + \" -> [\"\n + self.get_role_from_type(group, roles_influence) + \", \"\n + self.get_role_from_type(group, roles_neighbors) + \", \"\n + self.get_role_from_type(group, roles_activities) + \", \"\n + self.get_role_from_type(group, roles_attitude) + \"]\")\n print('')", "def get_roles(role):", "def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role", "def get_roles():\r\n global _roles\r\n return _roles", "def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': 
u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def role_strings(self):\n return [s[RoleInfo.STRING] for s in [v for item in self.role_strings_info.values() for v in item] if s[RoleInfo.STRING]]", "def list_roles(self, hints):\n raise exception.NotImplemented() # pragma: no cover", "def get_granted_roles(self):", "def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "async def roles(self, ctx):\n\n pass", "def roles(self):\n # type: (...) -> Set[Role]\n return self._roles", "def editor_role_values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"editor_role_values\")", "def role(self) -> str:\n return pulumi.get(self, \"role\")", "def get_roles(self):\n return [role.role_id for role in self.roles if role]", "def editor_role_values(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"editor_role_values\")", "def get_roles(self):\n path = \"%s/services/impala/roles\" % self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]", "def getRoles(self):\n\t\tpayload = ''\n\t\tif self.Roles:\n\t\t\tif type(self.Roles) != int:\n\t\t\t\tfor x in range(0,len(self.Roles)):\n\t\t\t\t\tpayload += \"%s\" % (self.Roles[x])\n\t\t\t\treturn self.Roles\n\t\t\telse:\n\t\t\t\treturn None", "def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)", "def listRoles(self):\n return self._client.listRoles()", "def get_roles():\n return config.get_cfg_storage(ID_ROLE)", "def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")", "def test_list_roles(self):\n pass", "def roles_fieldset(self):\n for role in self._get_roles():\n yield (\n role.id,\n role.name,\n list(tuskar_ui.forms.fieldset(\n self, prefix=get_field_name_from_role_id_and_flavor_id(\n str(role.id)))),\n )", "def __repr__(self):\n return '<Role %r>' % self.name", "def roles(self):\n db = self['__store'].db\n my_roles = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n groups.id\n from `groups`, subgroups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'U'\n \"\"\",\n self._id)\n }\n return my_roles", "def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')", "def getRoleString(self):\n return _libsbml.SpeciesReferenceGlyph_getRoleString(self)", "def getRole(self):\n return _libsbml.ReferenceGlyph_getRole(self)", "def roles(self):\n roles = self.request.POST.get(\"roles\", \"\")\n # Remove all spaces from the string and extra trailing or leading commas\n roles = re.sub(r\"[\\s+]\", \"\", roles).strip(\",\")\n # Return a set of the roles mentioned in the request\n return set(roles.lower().split(\",\")) if roles else set()", "def role(self):\n return self._role", "def role(self):\n return self._role", "def role(self):\n return self._role", "def __repr__(self):\n return '<Role({name})>'.format(name=self.name)", "def manageableRoles(self):\n return roleinfo.AUTHOR_ROLES", "def role(self):\n return ['Server', 
'Client'][self.is_client()]", "def getRoles(context):\n\n pmemb = getToolByName(getSite(), 'portal_membership')\n roles = [role for role in pmemb.getPortalRoles() if role != 'Owner']\n return SimpleVocabulary.fromValues(roles)", "def list_roles(self):\n resp, body = self.get(\"roles\")\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])", "def listRoleIds(self):\n return self._roles.keys()", "def _generateRoleName(self, obj, **args):\n # Subclasses must override this.\n return []", "def __repr__(self):\n return \"Role(name=%r, permissions=%r)\" % (self.name, self._permissions)", "def get(self):\n return self._roles.get(self._id)", "def test_list_role(self):\n pass", "def getRole(self):\n return _libsbml.SpeciesReferenceGlyph_getRole(self)", "def roles(self):\n role_ids = self.role_ids\n if role_ids is None:\n roles = None\n else:\n roles = sorted(create_partial_role_from_id(role_id) for role_id in self.role_ids)\n \n return roles", "def system_roles(self) -> api.SystemRoles:\n return self._get_model(model=api.SystemRoles)", "def editor_role_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"editor_role_values\")", "def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)", "def test_roles_widget(self, admin_dashboard):\n admin_roles_tab = admin_dashboard.select_roles()\n expected_dict = self._role_el.ROLE_SCOPES_DICT\n actual_dict = admin_roles_tab.get_role_scopes_text_as_dict()\n assert admin_dashboard.tab_roles.member_count == len(expected_dict)\n assert expected_dict == actual_dict, (\n messages.AssertionMessages.\n format_err_msg_equal(expected_dict, expected_dict))", "def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)", "def role(self):\n\n return self._role", "def test03_perm_roles(self):\n print_ln('test16_perm_roles')\n \n try:\n pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*'))\n for perm in pList: \n print_ln(\"Role Perm obj name=\" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id)\n rList = review.perm_roles(perm)\n for role in rList:\n print_ln(\"Assigned role=\" + role, 1)\n except Exception as e:\n self.fail('test16_perm_roles failed, exception=' + e.msg)", "def list_roles(self, name_filter=None):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n\n org_filter = None\n resource_type = 'role'\n if self.client.is_sysadmin():\n resource_type = 'adminRole'\n org_filter = 'org==%s' % self.resource.get('href')\n\n query = self.client.get_typed_query(\n resource_type,\n query_result_format=QueryResultFormat.RECORDS,\n equality_filter=name_filter,\n qfilter=org_filter)\n result = []\n for r in list(query.execute()):\n result.append(\n to_dict(\n r,\n resource_type=resource_type,\n exclude=['org', 'orgName']))\n return result", "def get_role(self):\n return self.role", "async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)", "def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')", "def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()", "async def list_roles(self, ctx: commands.Context):\n all_roles = await self.config.guild(ctx.guild).autoroles()\n maybe_not_found = []\n message = \"\"\n for 
role in all_roles:\n fetched_role = ctx.guild.get_role(role)\n if not fetched_role:\n maybe_not_found.append(role)\n continue\n message += \"- {name} (`{id}`).\\n\".format(name=fetched_role.name, id=fetched_role.id)\n if maybe_not_found:\n clean_list = list(set(all_roles) - set(maybe_not_found))\n await self.config.guild(ctx.guild).autoroles.set(clean_list)\n message += \"\\nSome roles has been removed since I was unable to find them.\"\n if message:\n for line in pagify(message):\n await ctx.send(line)\n else:\n await ctx.send(\"No role has been added.\")", "def getRoleInfo(self, role):", "def _get_role(self):\n return self.__role", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")", "def getRoles():\n return jsonify(listRoles(ROLES_DIR))", "def _determine_roles(self):\n if self._roles_callback is not None:\n return self._roles_callback()\n else:\n return self._current_roles", "def test_list_namespaced_role(self):\n pass", "def roles(self, user):\n return {}", "async def command_rolecall(self, context):\n print(self._fetch_category_roles(context))\n print(self._fetch_category_roles(context, COSMETIC_CATEGORY_NAME))", "def server_roles(self) -> Sequence[str]:\n return pulumi.get(self, \"server_roles\")", "def roles_str(person: Member, roles: commands.Greedy[Role]) -> str:\n message = \"role\" if len(roles) == 1 else \"roles\"\n roleIds = [role.name for role in roles]\n\n return f\"{message} for {person}: {roleIds}\"", "def role(self):\n return MacroView.Unknown", "def get_roles(self) -> requests.models.Response:\n return self.get('v1/roles')", "def main_role_list(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n roles = client.list_roles(opts[\"formation\"])\n click.echo(\n tabulate.tabulate(\n [{\"Name\": i.name, \"Id\": i.id_} for i in roles],\n headers=\"keys\",\n ),\n )", "def get_role_choices(my_role):\n roles = get_all_roles(my_role)\n for r in roles :\n yield ( r.id, u'%s (%s)' % (r.description, r.name) )", "def admin_role_values(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"admin_role_values\")", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def roles(self, roles):\n\n self._roles = roles", "def getRole(self, desired=None):\n return {\"roleName\":\"hasici\",\n \"roleTitle\":\"Soptici\"}", "def test_ipam_roles_list(self):\n pass", "def tags(self):\n return ['HostRoles/component_name', \\\n 'HostRoles/host_name', \\\n 'HostRoles/cluster_name']", "def admin_role_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_role_values\")" ]
[ "0.7476474", "0.73596126", "0.73596126", "0.73139435", "0.7310977", "0.7254139", "0.7241115", "0.7161311", "0.7130106", "0.71001154", "0.7057589", "0.7041748", "0.7031347", "0.69231516", "0.69207555", "0.68998986", "0.6880031", "0.6854992", "0.6836892", "0.6824553", "0.67943746", "0.67850405", "0.67571616", "0.6625915", "0.6618259", "0.6588489", "0.6525221", "0.65175635", "0.65133345", "0.6500767", "0.64986145", "0.6488851", "0.64763224", "0.6473898", "0.6468332", "0.64682496", "0.646327", "0.64257795", "0.6394947", "0.63734347", "0.6371293", "0.63559914", "0.63517547", "0.6329341", "0.6303447", "0.62952393", "0.62931556", "0.6276794", "0.6262866", "0.6262866", "0.6262866", "0.62607074", "0.62435013", "0.62400657", "0.6234086", "0.62319225", "0.6195197", "0.61873126", "0.6178451", "0.61781514", "0.6163298", "0.61514837", "0.61236954", "0.6118509", "0.6110189", "0.6104422", "0.60945666", "0.6078097", "0.6074281", "0.6067984", "0.60614264", "0.6060974", "0.6059903", "0.60339606", "0.60275984", "0.6026955", "0.6000086", "0.5999138", "0.5997116", "0.5997116", "0.5997116", "0.5972029", "0.5964568", "0.5959036", "0.5958831", "0.5954777", "0.5953275", "0.59446657", "0.5932262", "0.5916635", "0.58902806", "0.5878238", "0.5865861", "0.5863661", "0.5863661", "0.5863661", "0.5839563", "0.58100826", "0.57920593", "0.5778132" ]
0.7644932
0
Save the instrument script to a file
def export(self, path): path = path[7:] if path[0] == "/" and path[2] == ":": path = path[1:] # Windows fix with open(path, "w") as outfile: outfile.write(self.script)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"", "def save( self, save_path=\"/home/rts2obs/.rts2scripts\", save_file=None ):\n\n self.id = self.create_target_api()\n\n commer=rts2comm()\n\t # the following line should be handle by rts2.ini and not here. \n #commer.setscript(self.id, script=\"exe /home/rts2obs/.local/bin/targetscript.py\")\n\n if save_file is None:\n save_file = \"{}.json\".format( self.name )\n fpath = os.path.join( save_path, save_file )\n\n with open(fpath, 'w') as fd:\n json.dump( self.dictify(), fd, indent=2 )", "def save(self, filename):\n pass", "def save(self):\n fn=os.environ['VMEWORKDIR'] +\"/WORK/\"+\"phase.ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)", "def to_file(self, file_path, smirnoff_data):\n pass", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def Save(self):\n if not self.simFilePath:\n path = self.PromptPathSaveAs()\n if not path: return\n else: self.simFilePath = path\n \n #Why bother doing new code if the cmd interface does it already\n if self.shell.interp != self.sim42interp:\n self.UseCommandInterface(True)\n self.shell.run('store %s' %self.simFilePath)\n #self.sim42interp.cmd.Store(self.simFilePath)\n\n self.SetTitle('Simulation --> ' + self.simFilePath)", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def save(self, export_path: str):", "def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)", "def save(self, filename):\n pass", "def save(self, filename):\n raise NotImplementedError", "def save(self, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def save(self, file_path, filename=\"tracer\"):\r\n with open(path.join(file_path, f\"{filename}.pickle\"), \"wb\") as f:\r\n pickle.dump(self, f)", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def saver(filename = None):\n save(self, filename)", "def write_to_file(self, filename: str) -> None:", "def save(self, fname):\n pass", "def save(self, file=\"\"):\n if not file:\n now = datetime.now()\n file = self.optimizer + \" \" + \\\n self.transformation + \" \" + str(now) + \".json\"\n output = open(file, 'w')\n result = self.get_result_dict()\n output.write(json.dumps(result, indent=4))\n output.close()\n return", "def saveAs(self):\n self.saveFile()", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as 
{save_path}')", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def save_to_file(self, filename: str):\n prepare = asdict(self)\n for sequencer in prepare['Sequencers']:\n for step in sequencer['Sequence']:\n if 'Name' in step.keys() and step['Name'] == '':\n step.pop('Name')\n if 'StartingFrom' in step.keys():\n step['Repeat'] = {}\n step['Repeat']['StartingFrom'] = step['StartingFrom']\n step['Repeat']['Count'] = step['Count']\n step.pop('StartingFrom')\n step.pop('Count')\n pprint.sorted = lambda x, key=None: x\n text: str = pprint.pformat(prepare, indent=0)\n text = text.replace(r\"'\", \"\")\n text = text[1:-1]\n f = open(filename, \"w\", encoding='utf-8')\n f.write(text)", "def save_to_file(self, string):\n with open(self.output_path, \"w\") as text_file:\n text_file.write(string)\n print \"Saved to file \" + self.output_path", "def SAV(self, loc):\n cmd = f\"*SAV {loc}\"\n self.instr.write(cmd)", "def save_simulation_file(self):\n a = self.ui.inputfile.text()\n a = self.get_root_file_name(a)\n a = a.split('_a.txt')\n output_suffix = self.ui.output_suffix.text()\n simfile_name = self.input_dir+'/'+sgGL.SIMFILES_PATH + a[0] + '_' +\\\n sgcom.create_file_suffix(self.algorithm,output_suffix,self.ciclos)+\\\n '.sim'\n simulation_selected_filename = QtGui.QFileDialog.getSaveFileName(self,\n \"Save simulation parameters\",\n simfile_name)\n if len(simulation_selected_filename)>0:\n simulation_params.write2file(simulation_selected_filename)", "def save(self, filename: str):\n dump(self, filename)", "def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)", "def saveOutput(self,code):\r\n\t\tCodeSaver().save(code,self.savePath)", "def saveto(file, tmpfile):\n args = {\"file\": file, \"tmpfile\": tmpfile}\n send_command(\"saveto\", args)", "def save_calibration(filename):\n pass", "def save(self,filename):\n f = open(filename,'w')\n f.write('Test results for %s v%s\\n' % (self.description,self.version))\n f.write('Series ran by %s\\n\\n' % self.person_name)\n for result in self.values():\n f.write('%-70s : %s\\n' % (result.id,result.outcome))\n if result.outcome != Result.PASS:\n for (kind, annotation) in result.annotations.items():\n f.write('%s:\\n%s\\n' % (kind, as_utf8(annotation)))\n f.write('\\n')\n f.write('\\n\\nPasses: %i\\n' % self.get_pass_count())\n f.write('Fails: %i\\n' % self.get_fail_count())\n f.write('Errors: %i\\n' % self.get_error_count())\n f.write('Untested: %i\\n' % self.get_untested_count())\n f.write('Skipped: %i\\n' % self.get_skipped_count())\n f.close()", "def save(self, filepath=None):\n raise NotImplementedError()", "def save(self):\n # TODO: save the file", "def save_postscript(self, filename):\n with open(filename, 'w') as savefile:\n savefile.write(self.canvas.postscript())", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self.tape, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save_file(self, filename):\n if self.t3data:\n np.savetxt(filename, self.t3data)\n else:\n self.export_to_ascii()", "def save_script(title):\n script = title_html(title)\n script = script.replace('</b>','')\n script = script.replace('<b>','\\n')\n\n cwd = os.getcwd()\n filepath = os.path.join(cwd,'scripts','%s.txt' % title)\n file = open(filepath, 'w')\n file.write(script)\n file.close()", "def save_output(self, output_file_path):\r\n self.output_file.save(output_file_path)", "def save(self, output, data):", "def make_file(self):\n\n f = 
open(get_output_path(), \"w\")\n \n f.write(self.export())\n \n f.close()\n\n return self", "def saveCodeToFile():\n save_interface = Tk()\n save_interface.filename = filedialog.asksaveasfilename(initialdir = os.getcwd(), defaultextension=\".btu\", title = \"Save as\",filetypes = ((\"Bit Tune Image File\",\"*.btu\"),(\"All Files\",\"*.*\")))\n save_interface.destroy()\t\n\n btuCode = notes_text.get( \"1.0\", \"end-1c\" )\n\n with open (save_interface.filename,'w') as f:\n f.write(str(btuCode))", "def save(self):\n super(YacoFile, self).save(self._filename)", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def save_mc(filename):\n global simulator\n if simulator is None:\n print \"program is not started\"\n else:\n simulator.save_mc(filename)", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def save(self):\n with open(\"samples.txt\", \"a\") as f:\n f.write(str(self) + \"\\n\")", "def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)", "def save_output(file, option):\n if not os.path.isfile(file):\n raise AssertionError()\n directory = os.path.join(os.getcwd(), 'estimation_output')\n os.rename(file, option)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n os.rename(os.path.join(os.getcwd(), option),\n os.path.join(directory, option))", "def intf_MMSAVE(E):\n global SAVEFILE\n with open(SAVEFILE,'w') as f:\n f.write( MMEL.simplistic_mm_save_format() )\n print(\"Model script written to: %s\\n\" % SAVEFILE)", "def save(self, fname, snver=None):\n self._io.save(fname)", "def to_file(self, outfile):\n\n with open(outfile, \"w\") as outf:\n outf.write(self.to_string())", "def save(self, filename=None):\n exporter = aspecd.io.AdfExporter()\n exporter.target = filename\n exporter.export_from(self)", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def store(self, filename):", "def save(self, filename:str):\n dump(self, filename=filename)", "def saveToFile(self, filePath):\n d = self.save()\n with open(filePath, 'wb') as f:\n f.write(d)", "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "def write_script(script, game_title):\n try:\n script_name = '{}{}.sh'.format(roms_directory, game_title.replace(\":\", \"\"))\n print('Writing {} to disk...'.format(script_name))\n f = open(script_name, \"w+\")\n f.write(script)\n f.close()\n\n st = os.stat(script_name)\n os.chmod(script_name, st.st_mode | stat.S_IEXEC)\n except Exception as write_exception:\n print(write_exception)", "def do_save(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.save_to_file(**cmd_args)\n if success:\n self.console_print(\"Yippee! saved successfully!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! 
You can try again tho.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "def savefile(self, x, o):\n self.sep('save')\n with open(o, 'w') as f:\n f.write(x)\n sys.exit('all done (%s bytes).. saved as %s' % (len(x), o))", "def save(self, filename):\n result = self.render()\n\n with open(filename, 'w') as f:\n f.write(result)", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def save(self):\n file = open(self.path, 'w')\n self.parser.write(file)\n file.close()", "def _toFile(self):\n pass", "def save():\n click.echo(\"Not implemented yet. In the future, this command will be used for saving.\")\n sys.exit(-2)", "def save_to_file(self):\n # Create a new file name based off date and time\n file_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S_RTI_CFG.txt\")\n file_path = os.path.expanduser(\"~\\\\Desktop\\\\\"+file_name)\n\n file = open(file_path, 'w')\n file.write(self.commandFileTextBrowser.toPlainText())\n file.close()\n\n self.parent.statusBar().showMessage('File saved to ' + file_path)", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def save_enu(self, filename):\n x, y, z = self.get_coords_enu()\n coords = np.vstack([x, y, z]).T\n np.savetxt(filename, coords, fmt=b'%.12e')", "def save(self):\n return self.save_as(self.filename)", "def saveFile(self):\n fName = str(self.ui.lineEditPath.text()) + '/' + \\\n str(self.ui.lineEditFileName.text())\n try:\n self.caller.raw.save(fName)\n self.caller.setRaw(fName, self.parent)\n except IOError as e:\n self.error = True\n sys.stderr.write(\"Could not save!\\n\")\n sys.stderr.write(str(e))\n finally:\n self.e.set()", "def save_file(self, *args):\n if args[0] == 'Save':\n filename = args[0]\n\n # Checks if user input is valid or null for filename. 
if null, assigns a default filename\n if args[1].text_field.text:\n filename = args[1].text_field.text\n\n f = open('output/' + filename + '.txt', 'w')\n f.write('\\nRegister Content: \\n')\n for k, v in REGISTER.items():\n f.write('\\n' + k.upper() + ':' + ' ' + v.upper() + '\\n')\n\n i = 0\n f.write('\\nMemory Content: \\n')\n while i < len(RAM):\n f.write(f'\\n{RAM[i]} {RAM[i + 1]}')\n i += 2\n\n toast('File saved in output folder as ' + filename + '.txt')\n f.close()\n\n else:\n toast('File save cancelled')", "def saveTrans(self):\n modtranDataDir = os.getenv('MODTRAN_DATADIR')\n outputfile = '{0}/{1}_final.plt'.format(\n self.outfilename, self.outfilename)\n outputpath = os.path.join(modtranDataDir, outputfile)\n with open(outputpath, 'w') as transmf:\n transmf.write('$ FINAL ATMOSPHERE TRANSMISSION\\n')\n for val in range(len(self.modtran_wl)):\n data = '\\t'.join('{0:f}'.format(self.transmittance[run][val])\n for run in range(len(self.modtran_wl)))\n line = '{0}\\t{1}\\n'.format(self.modtran_wl[val], data)\n transmf.write(line)", "def save(self, output_path):\n with open(output_path, \"wb\") as file:\n dill.dump(self, file)", "def magic_save(self,parameter_s = ''):\n\n args = parameter_s.split()\n fname,ranges = args[0], args[1:]\n if not fname.endswith('.py'):\n fname += '.py'\n if os.path.isfile(fname):\n ans = raw_input('File `%s` exists. Overwrite (y/[N])? ' % fname)\n if ans.lower() not in ['y','yes']:\n print 'Operation cancelled.'\n return\n cmds = ''.join(self.extract_input_slices(ranges))\n f = file(fname,'w')\n f.write(cmds)\n f.close()\n print 'The following commands were written to file `%s`:' % fname\n print cmds", "def saveFile(self, data, filelocation):\n with open(filelocation, 'w+') as f:\n f.write(data)", "def call(self, *args):\n self.formula.to_file(self.output_file)", "def save_stowage_plan(self, path):\n with open(str(path) + \"_StowagePlan.txt\", 'w') as stowage_plan:\n stowage_plan.write('Stowage Plan and Loading Sequence \\n')\n stowage_plan.write(self._get_grid_representations())\n\n # Write Loading Sequence\n with open(str(path) + \"_LoadingSequence.txt\", 'w') as loading_seq:\n loading_seq.write(self.loading_sequence)", "def saveMacro(self):\r\n\t\tCodeSaver().save('Loadfile(\"'+ self.savePath + '\")',self.macroPath)", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def save_to(self, filename):\n from .io import save\n return save(self, filename)", "def save(self, filename):\n if self.model.convert_to_format == \"python\":\n # We currently cannot save models in the 'python' format\n raise NotImplementedError(\n \"\"\"\n Cannot save simulation if model format is python.\n Set model.convert_to_format = 'casadi' instead.\n \"\"\"\n )\n # Clear solver problem (not pickle-able, will automatically be recomputed)\n if (\n isinstance(self._solver, pybamm.CasadiSolver)\n and self._solver.integrator_specs != {}\n ):\n self._solver.integrator_specs = {}\n\n if self.op_conds_to_built_solvers is not None:\n for solver in self.op_conds_to_built_solvers.values():\n if (\n isinstance(solver, pybamm.CasadiSolver)\n and solver.integrator_specs != {}\n ):\n solver.integrator_specs = {}\n\n with open(filename, \"wb\") as f:\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)", "def save_arch(model, save_folder):\n with open(save_folder + '/architecture.txt','w') as a_save:\n model.summary(print_fn=lambda x: a_save.write(x + '\\n'))", "def save_file(filepath):\n # Store context to workfile before save\n context = {\n \"project\": 
api.Session[\"AVALON_PROJECT\"],\n \"asset\": api.Session[\"AVALON_ASSET\"],\n \"task\": api.Session[\"AVALON_TASK\"]\n }\n save_current_workfile_context(context)\n\n # Execute george script to save workfile.\n george_script = \"tv_SaveProject {}\".format(filepath.replace(\"\\\\\", \"/\"))\n return CommunicationWrapper.execute_george(george_script)", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save(self, inst):\n n = inst.dimensions[\"n\"]\n with open(self.location, \"wt\") as f:\n f.write(f\"measurements: {n}\\n\")\n f.write(f\"time temperature\\n\")\n for time, temp in zip(inst.time, inst.temperature):\n f.write(f\"{time:4} {temp:12}\\n\")", "def save_specs(self, filename):\n pass", "def save_specs(self, filename):\n pass", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def save(self,fileInfo):\n fileRep = FileRep(fileInfo)\n fileRep.load(factory={'SCPT':Scpt})\n fileRep.unpackRecords(set(('SCPT',)))\n fileRep.indexRecords(set(('SCPT',)))\n #--Add scripts\n for className in self.classStats.keys():\n print className\n id = 'wr_lev%sGS' % (className,)\n script = fileRep.getRecord('SCPT',id,Scpt)\n script.setCode(self.getScript(className))\n #--Done\n fileRep.sortRecords()\n fileRep.safeSave()", "def writeScript(self, content):\n path = self.mktemp()\n with open(path, \"wb\") as f:\n f.write(content.encode(\"ascii\"))\n return self.FakeFilePath(path)", "def save(file_name):\n setup()\n plt.savefig(file_name)", "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close", "def clickSave(self, event):\n\n self.animation.event_source.stop()\n filepath = \"spectrum.csv\"\n try:\n filepath = backends.backend_macosx._macosx.choose_save_file('Save the data',filepath)\n except:\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n filepath = filedialog.asksaveasfilename()\n\n if filepath is not None:\n self.spectrometer.saveSpectrum(filepath, spectrum=self.lastSpectrum, \n whiteReference=self.whiteReference,\n darkReference=self.darkReference)\n\n self.animation.event_source.start()", "def make_inj_file(self, *args, **kwargs):\n options = self._optparser(*args, **kwargs)\n CMD = f'{self._exe} {options} --output {self._file}'\n return CallCommand(CMD)", "def outfile(self):\n\n return f\"{self.name}.run.out\"", "def save():", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def writeScript( script, writeDir=None ):\n fd, name = tempfile.mkstemp( suffix = '_pilotWrapper.py', prefix = 'DIRAC_', dir=writeDir )\n pilotWrapper = os.fdopen(fd, 'w')\n pilotWrapper.write( script )\n pilotWrapper.close()\n return name" ]
[ "0.70398337", "0.66688", "0.6626497", "0.6604999", "0.6573816", "0.6538797", "0.6495883", "0.6472696", "0.64520216", "0.64416486", "0.6366099", "0.6365805", "0.63635427", "0.63568205", "0.63320476", "0.63086146", "0.62524873", "0.62345755", "0.62287915", "0.62133014", "0.61727214", "0.61644655", "0.61644655", "0.61558706", "0.6154694", "0.6126699", "0.61175585", "0.61133033", "0.6108152", "0.61056674", "0.6097161", "0.6059274", "0.6048455", "0.60404015", "0.6037329", "0.60357845", "0.6034945", "0.6028947", "0.60267574", "0.60250723", "0.6021749", "0.6020448", "0.60085917", "0.6004178", "0.60033786", "0.6002534", "0.59991133", "0.5992697", "0.59878546", "0.5981467", "0.59631675", "0.5949428", "0.5930436", "0.592553", "0.5922732", "0.59156746", "0.59121877", "0.590854", "0.5908338", "0.5893197", "0.58892727", "0.58736557", "0.58712775", "0.58699733", "0.58631074", "0.58585006", "0.5858312", "0.5853066", "0.5840964", "0.5840111", "0.5839604", "0.58393997", "0.5835382", "0.58320934", "0.58240855", "0.582251", "0.5816901", "0.57999206", "0.5799465", "0.57972264", "0.579708", "0.57908386", "0.57885736", "0.57880896", "0.5784069", "0.5783853", "0.57768106", "0.5773843", "0.5773843", "0.5772664", "0.5772229", "0.5766933", "0.57665646", "0.57589626", "0.5752547", "0.5747244", "0.57423115", "0.5739765", "0.5739693", "0.57378155" ]
0.6128726
25
Save the current state to a file
def save(self, path, alignment, positions):
    path = path[7:]
    if path[-5:] != ".json":
        path += ".json"
    if path[0] == "/" and path[2] == ":":
        path = path[1:]  # Windows fix
    with open(path, "w") as outfile:
        value = {
            "angleCommand": self._angle_command,
            "horizontalCommand": self._horizontal_command,
            "verticalCommand": self._vertical_command,
            "origin": self._origin,
            "frameWidth": self._frame_width,
            "frameHeight": self._frame_height,
            "runs": [r.to_json() for r in self._runs],
            "alignment": alignment.to_dict(),
            "positions": positions.to_dict()
        }
        json.dump(value, outfile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveState(self, file):\n state = self.context.getState(getPositions=True, getVelocities=True, getParameters=True, getIntegratorParameters=True)\n xml = mm.XmlSerializer.serialize(state)\n if isinstance(file, str):\n with open(file, 'w') as f:\n f.write(xml)\n else:\n file.write(xml)", "def saveStateOfThisRun(self):\n with open('stateFile.json', 'w') as statefile:\n json.dump(self.fileTobeUploaded, statefile, indent=4)", "def _save_state(self, filename=\".logs_state.json\"):\n curr_state = self.current_state\n with open(join(self.logs_dir, filename), 'w') as fh:\n json.dump(curr_state, fh)", "def _save_state(self) -> None:\n state_file = self._get_state_file()\n logger.info(\"Saving state to %s\", state_file)\n\n data = {}\n data[\"version\"] = mopidy.__version__\n data[\"state\"] = CoreState(\n tracklist=self.tracklist._save_state(),\n history=self.history._save_state(),\n playback=self.playback._save_state(),\n mixer=self.mixer._save_state(),\n )\n storage.dump(state_file, data)\n logger.debug(\"Saving state done\")", "def save_state(self):\n state_dir = path.dirname(self.state_filename)\n\n if not path.isdir(state_dir):\n os.makedirs(state_dir)\n\n with open(self.state_filename, 'w') as df:\n log.debug(\"Saving state of program %s to %s\" % (self.name, self.state_filename))\n yaml.safe_dump(self.state, df, default_flow_style=False)", "def saveState(self,filename=None):\n # For now we just use pickle for convenience. In the future, could use np.savez or HDF5 (or FITS)\n if filename is None:\n if self.statefile:\n filename = self.statefile\n else:\n filename = self.filename + '.cysolve.pkl'\n orig_statefile = self.statefile\n orig_ar = self.ar\n self.ar = None\n fh = open(filename,'w')\n cPickle.dump(self,fh,protocol=-1)\n fh.close()\n self.ar = orig_ar\n self.statefile = orig_statefile\n print \"Saved state in:\", filename", "def save(self, filename):\n\n torch.save(self.state_dict(), filename)", "def save_state(self):\n pass", "def writeState(self, saveState: ghidra.framework.options.SaveState) -> None:\n ...", "def write_state_file(self, state):\r\n with open(StudentModuleHistoryCleaner.STATE_FILE, \"w\") as state_file:\r\n state_file.write(state)", "def _saveState(self, fname=None, save_backup=True):\n if fname is None:\n fname = self.filename\n filepath = Path(fname).resolve()\n\n # it is good to backup this file in caseit exists\n if save_backup:\n if filepath.exists(): # pylint: disable=no-member\n # gets folder/filename.* and transforms into folder/filename_{timestamp}.json\n filepath_backup = Path(filepath).with_name(\n \"{}_{}.json\".format(filepath.stem, timestamp_string()))\n logger.debug(\"Backup %s to %s\", filepath, filepath_backup)\n shutil.copy2(filepath, filepath_backup)\n\n # save to filepath, overwriting\n filepath.touch() # pylint: disable=no-member\n with open(filepath, 'w') as file:\n json_state = self.__toJSON()\n file.write(json.encode(json_state))\n self.__sha256__ = json_state[\"__sha256__\"]\n logger.debug(\"%s's sha: %s\", fname, json_state[\"__sha256__\"])", "def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)", "def saveState( self, state ):\n with open( self.settings.statusFilepath(), 'w' ) as statusFile:\n json.dump( {\n 'state': state\n }, statusFile )", "def savestate(self, state):\n pass", "def save_state(self) -> None:\n raise NotImplementedError(\"Save state is is not implemented.\")", "def saveState(self) -> None:\n # TODO: Saves State\n pass", "def save(self, file_name):\n\n self._state.save(file_name)", 
"def save_state(self, training_state: _TrainingState, fname: str):\n with open(fname, \"wb\") as fp:\n pickle.dump(training_state, fp)", "def save(self, to_path):\n with open(to_path, 'wb') as f:\n torch.save(self.state_dict(), f)", "def save(self, to_path):\n with open(to_path, 'wb') as f:\n torch.save(self.state_dict(), f)", "def save(self):\n # TODO: save the file", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def save_state(self):\n\t\tf = open('output.csv', 'a')\n\t\tstate = ';'.join([str(datetime.now()), str(self.thin._actuation_value), str(self.thin.temperature), str(self.thin.presence), str(self.outside.temperature)])\n\t\tprint(state)\n\t\tf.write(state + '\\n')\n\t\tf.close()", "def write_to_file(self):\n\t\tfile = open(\"states.txt\", \"w\")\n\t\t\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tfile.write(pointer.state + \"\\t\" + pointer.info)\t\n\t\t\tpointer = pointer.next\n\n\t\tfile.close()", "def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def save_map(self, filename):\n with open(filename, 'wb') as file:\n pickle.dump(self.current_obstacles, file)\n pickle.dump(self.current_goal, file)\n pickle.dump(getstate(), file)", "def on_save(self):\n filename = QtGui.QFileDialog.getSaveFileName(self, \"Save file\", \"\", \"*.scc\")\n if filename == \"\":\n return\n print(\"Save file \", filename)\n f = open(filename, mode=\"wb\")\n state = self.mdl.cmp.get_state()\n pickle.dump(state, f, pickle.HIGHEST_PROTOCOL)\n f.close()", "def save_checkpoint(self, filename=None):\n filename = os.path.join(self.args.checkpoint_dir, filename)\n state = {\n 'epoch': self.current_epoch + 1,\n 'iteration': self.current_iter,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_MIou':self.best_MIou\n }\n torch.save(state, filename)", "def save(self):\n torch.save(self.state_dict(), self.checkpoint_path)\n with open(self.config_path, 'w') as f:\n print(self, file=f)", "def save_checkpoint(self, filename='checkpoint.pth'):\n torch.save(self.state_dict(), filename)", "def _save_state(self):\n with open(os.path.join(self._workdir, '.git', 'drover'), 'wb') as f:\n cPickle.dump(self, f)", "def save(self, filename):\n pass", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def save(self, fname):\n pass", "def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' 
+ time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def saveTS(tournament, fileName):\n fd = open(fileName)\n pickle.dump(tournament, fd)\n TournamentSystem._logger.debug(\"Dumped game state to %s\", fileName)", "def write_to_file(self):\n with open('learning_player/states.json') as json_file:\n states = json.load(json_file)\n\n states.append(self.__dict__)\n with open('learning_player/states.json', 'w') as json_file:\n json.dump(states, json_file)", "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "def save_state(self, file):\n np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,\n z_best=self.z_best, ll_best=self.ll_best, log=self.log)", "def save(self, f, save_states_and_transitions = False):\n if save_states_and_transitions:\n self.A.save(f)\n for state in self.S:\n state.save(f)\n\n if self.P is not None:\n for i in range(len(self.S)):\n self.S[i].prior = self.P[i]\n\n f.write('~h \"%s\"\\n' % self.identifier)\n f.write('<BEGINHMM>\\n')\n f.write('<NUMSTATES> %d\\n' % len(self.S))\n if self.left_to_right:\n _range_ = range(1, len(self.S) - 1)\n else:\n _range_ = range(len(self.S))\n for i in _range_:\n s = self.S[i]\n f.write('<STATE> %d\\n' % (i + 1))\n f.write('~s \"%s\"\\n' % str(s))\n f.write('~t \"%s\"\\n' % str(self.A))\n f.write('<ENDHMM>\\n')", "def save_state(self):\r\n state = {\r\n 'next_student_module_id': self.next_student_module_id,\r\n }\r\n with open(self.STATE_FILE, \"w\") as state_file:\r\n json.dump(state, state_file)\r\n self.say(\"Saved state: {}\".format(json.dumps(state, sort_keys=True)))", "def save():", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def save_state(actor, sfile):\n import json, base64\n state_file = open(sfile, 'wb')\n state_file.write(base64.b64encode(json.dumps(actor.dump_state()).encode()))", "def save(self):\n return self.save_as(self.filename)", "def save(self, file_name):\n saved_data = { \"start_config\" : self.start_config, \"action_storage\" : self.action_storage } \n with open(file_name, 'wb') as fh:\n pickle.dump(saved_data, fh)", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save(self, path: str):\n torch.save(self.model.state_dict(), path)", "def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)", "def save(state, settings):\n\n state_file = settings.paths('f_state')\n logger.info(f\"Writing flight state to file '{truncate_filepath(state_file)}'\")\n\n output = {}\n\n for key in ['aero']:\n output[key] = dict(getattr(state, key))\n\n with open(state_file, 'w') as fp:\n dump_pretty_json(output, fp)", "def save_state():\n logger.debug(\"called\")\n pwd_gate.save()\n preferences.save()\n shareBuffer.save()\n contacts.save()\n secrets.save()", "def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint", "def save(self,filename):\n with open(filename,'wb') as f:\n pickle.dump(self,f)", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = 
os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save(self, path):\n with path.open('wb') as f:\n torch.save(self.align.state_dict(), f)", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)", "def save(self, p):\n pickle.dump(p, open('save.dat', 'wb'))\n print(\"Game Saved!\")", "def save(self,filename): \n with open(filename, 'wb') as f:\n pickle.dump(self,f)", "def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint", "def save(self, filename):\n pickle.dump(self, open(filename + '.p', 'wb'), 2)", "def save(self, fp):\n fp.write(self.dump())", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(self, path):\n save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def save(self, filename: str):\n dump(self, filename)", "def save(self):\n\n pattern = '{}_{}_{}ep.pt' if self.checkpoint_filename_pattern is None else self.checkpoint_filename_pattern\n filename = pattern.format('sherlock1', time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.monitors['loss_train'].num_epochs)\n full_filename = self.full_path(filename)\n c = {\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitors': self.monitors,\n 'parent': self.parent,\n 'args': vars(args) # convert args to dict\n }\n torch.save(c, full_filename)\n if not args.tuning and args.delete and self.last_checkpoint is not None:\n os.remove(self.last_checkpoint)\n self.last_checkpoint = full_filename\n return filename", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def save(self, filename):\n raise NotImplementedError", "def saveAs(self):\n self.saveFile()", "def save(self,filename):\n f = open(filename, 'wb')\n pickle.dump(self,f)\n f.close()", "def get_bot_save_state_to_file(self):\n return self.bot_data_file[\"bot_status\"][\"save_to_file\"][\"save_state_to_file\"]", "def save(self, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def save(self):\n #test output\n pywikibot.output('PICKLING %s records at %s' % (len(self.historyDict),datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n with open(self.datfilename, 'wb') as f:\n pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)", "def save_game(self, path):\n try:\n file = open(path, \"wb\")\n for i in self.state_stack.states:\n i.on_save()\n 
pic.dump(self.state_stack, file)\n for i in self.state_stack.states:\n i.on_load()\n except IOError or pic.PicklingError as e:\n print(\"Game save error: {}\".format(e))", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def save_state(self, path='/home/lukas/weights/'):\r\n stuff_in_path = os.listdir(path)\r\n counter = 0\r\n for i in stuff_in_path:\r\n if 'parameters' in i:\r\n counter += 1\r\n with open(path + 'info.txt', mode='a') as f:\r\n f.write('counter: %i \\taccuracy: %.8f%% \\tloss: %.8f\\n' % (counter, returnList(self.accuracy)[-1] * 100, returnList(self.loss)[-1]))\r\n\r\n parameters = [ self.batchsize_train,\r\n self.iterator,\r\n self.n_hidden_layers,\r\n self.n_hidden_neurons,\r\n self.n_input_neurons,\r\n self.n_output_neurons,\r\n self.hid_transfer.__name__,\r\n self.out_transfer.__name__]\r\n try:\r\n print '[Network] Saving network status ...'\r\n np.save(path + 'parameters' + str(counter), parameters)\r\n np.save(path + 'weights' + str(counter), self.weights)\r\n np.save(path + 'bias' + str(counter), self.bias)\r\n np.save(path + 'weights_gradient' + str(counter), self.weights_gradient)\r\n np.save(path + 'bias_gradient' + str(counter), self.bias_gradient)\r\n np.save(path + 'loss' + str(counter), self.loss)\r\n np.save(path + 'accuracy' + str(counter), self.accuracy)\r\n np.save(path + 'r_weights' + str(counter), self.r_weights)\r\n np.save(path + 'r_bias' + str(counter), self.r_bias)\r\n print '\\033[92m' + '[Network] Network status succesfully saved' + '\\033[0m'\r\n\r\n except Exception as e:\r\n print '\\033[1m' + '\\033[91m' + '[Network] Could not correctly save network status:' + '\\033[0m'\r\n print e.message", "def write_state(self, state, file):\n try:\n with open(file, 'w') as f:\n json.dump(state, f)\n except IOError as e:\n logger.error(\n \"Cannot open state file '%s' for writing: %s\",\n file, e)\n except Exception as e:\n logger.error(\n \"Problem saving state to file '%s': %s\",\n file, e)\n logger.debug(\n \"State: %s\",\n state)\n else:\n logger.debug(\n \"State written to file '%s'.\",\n file)", "def save_(self):\n if not self._edited:\n return\n data = {'history': self.dump()}\n with open(os.path.join(os.path.dirname(self.arch_handler.dicomdir_path), self.SAVE_NAME), \"w\") as outfile:\n json.dump(data, outfile)\n self._edited = False", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def save():\n pass", "def saveState(self, fname):\n data = { 'ksize': self.ksize,\n 'alpha': self.alpha,\n 'id': self.node.id,\n 'neighbors': self.bootstrappableNeighbors() }\n if len(data['neighbors']) == 0:\n self.log.warning(\"No known neighbors, so not writing to cache.\")\n return\n with open(fname, 'wb') as f:\n pickle.dump(data, f)", "def save(self):\n file = open(self.path, 'w')\n self.parser.write(file)\n file.close()", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def write_to_disk(self):\n text_file = open(self.file_path, \"w\")\n text_file.write(str(self))\n text_file.close()\n # dump to pickle\n pickle.dump(self.blockchain, open(self.pickle_path, \"wb\"))", "def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 
'wb'), protocol=4)", "def save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)", "def save_model(self, path=\"/model\"):\n state = {\n 'epoch': self.epoch_counter,\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n torch.save(state, path)", "def save_snapshot(self,save_dir):\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n keys=[\n 'dna_size',\n 'pop_size',\n 'cross_rate',\n 'mutate_rate',\n 'eta_c',\n 'eta_m',\n 'mp_size',\n 'elitism',\n 'generations',\n 'pop',\n 'fitness',\n 'fitness_rank',\n 'mp',\n 'offspring',\n 'start_state',\n 'iters'\n ]\n running_states={}\n for key in keys:\n assert hasattr(self,key)\n running_states[key]=getattr(self,key)\n th.save(running_states,os.path.join(save_dir,'state_gen%d'%(self.iters)) )", "def save(self,\n filename):\n\n if self.model is None:\n raise ValueError('No model -- train or load model before saving!')\n\n # Check paths\n create_missing_folders([os.path.dirname(filename)])\n\n # Save settings\n logging.info('Saving settings to %s_settings.json', filename)\n\n settings = {'method': self.method,\n 'method_type': self.method_type,\n 'n_observables': self.n_observables,\n 'n_parameters': self.n_parameters,\n 'n_hidden': list(self.n_hidden),\n 'activation': self.activation}\n\n with open(filename + '_settings.json', 'w') as f:\n json.dump(settings, f)\n\n # Save state dict\n logging.info('Saving state dictionary to %s_state_dict.pt', filename)\n torch.save(self.model.state_dict(), filename + '_state_dict.pt')", "def saveState(self, state_fname):\n byte_string_state_fname = state_fname.encode('utf-8')\n nes_lib.saveState.argtypes = [c_void_p, c_char_p]\n nes_lib.saveState.restype = None\n return nes_lib.saveState(self.obj, byte_string_state_fname)", "def save(self):\n # Sanity checks\n assert len(self.actions) == len(self.rewards)\n assert len(self.actions) == len(self.normalized_states)\n assert len(self.actions) == len(self.states)\n\n data = {\n 'rewards': np.array(self.rewards),\n 'actions': np.array(self.actions),\n 'states': np.array(self.states),\n 'normalized_states': np.array(self.normalized_states),\n }\n\n np.savez('{}/full_log.npz'.format(self.log_folder), **data)\n np.savez('{}/states_rewards.npz'.format(self.log_folder),\n **{'states': data['states'], 'rewards': data['rewards']})\n np.savez('{}/normalized_states_rewards.npz'.format(self.log_folder),\n **{'states': data['normalized_states'], 'rewards': data['rewards']})", "def _save_transform_state(books):\n utils.clear_data()\n transform_state = {\n 'current_state': 1,\n 'books': books\n }\n with open(f'{PATH_TO_STATE}/current_state.json', 'w') as f:\n json.dump(transform_state, f)", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)" ]
[ "0.8169854", "0.802114", "0.79456836", "0.7888457", "0.78155065", "0.7731117", "0.7627129", "0.75945497", "0.7590737", "0.75843394", "0.7554501", "0.7541444", "0.74861246", "0.7482532", "0.74790096", "0.7468032", "0.7460777", "0.74439883", "0.7396383", "0.7396383", "0.7384259", "0.73599046", "0.7340877", "0.7337511", "0.7322932", "0.7275275", "0.7275275", "0.72600895", "0.7248595", "0.723948", "0.723844", "0.7204767", "0.71895725", "0.7180712", "0.71579134", "0.715667", "0.71564656", "0.71477884", "0.71274495", "0.70926625", "0.70598745", "0.7059481", "0.7058468", "0.7058238", "0.70559126", "0.7045866", "0.7038416", "0.70359176", "0.7025638", "0.70233303", "0.70178735", "0.70178735", "0.70154524", "0.7005868", "0.6989831", "0.69881904", "0.69879085", "0.6987863", "0.6978374", "0.6958464", "0.6954422", "0.6954422", "0.6953294", "0.6948387", "0.6946291", "0.69403535", "0.69354534", "0.6932474", "0.69318545", "0.6928552", "0.6908425", "0.69000596", "0.6895493", "0.6889111", "0.68802804", "0.6875444", "0.6870493", "0.6868576", "0.6854684", "0.68525845", "0.6851149", "0.6849362", "0.6844546", "0.68438506", "0.6838485", "0.6836639", "0.683281", "0.6832355", "0.68299216", "0.68285334", "0.6813674", "0.6810577", "0.6810441", "0.68089455", "0.6802123", "0.6800809", "0.6794791", "0.67913187", "0.6781757", "0.67808694", "0.67808694" ]
0.0
-1
Read the state from a file
def load(self, path, alignment, positions):
    path = path[7:]
    if path[-5:] != ".json":
        path += ".json"
    if path[0] == "/" and path[2] == ":":
        path = path[1:]  # Windows fix
    with open(path, "r") as infile:
        value = json.load(infile)
        self._angle_command = value["angleCommand"]
        self._horizontal_command = value["horizontalCommand"]
        self._vertical_command = value["verticalCommand"]
        self._frame_width = value["frameWidth"]
        self.frameWidthChanged.emit()
        self._frame_height = value["frameHeight"]
        self._origin = value["origin"]
        self.beginRemoveRows(QModelIndex(), 0, len(self._runs) - 1)
        self.endRemoveRows()
        if value["runs"]:
            self.beginInsertRows(QModelIndex(), 0, len(value["runs"]) - 1)
            self._runs = [SingleRun.from_json(self, r) for r in value["runs"]]
            self.endInsertRows()
        alignment.from_dict(value["alignment"])
        positions.from_dict(value["positions"])
        self.frameHeightChanged.emit()
        self.validChanged.emit()
        self.scriptChanged.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_state_file(self):\r\n with open(StudentModuleHistoryCleaner.STATE_FILE) as state_file:\r\n return state_file.read()", "def read_state(self, file):\n try:\n with open(file, 'r') as f:\n state = json.load(f)\n except IOError as e:\n logger.warning(\n \"No state restored, \"\n \"because there was a problem opening the state file '%s': %s\",\n file, e)\n return\n except json.JSONDecodeError as e:\n logger.error(\n \"Error reading state from file '%s': %s\",\n file, e)\n return\n else:\n logger.debug(\n \"State read from file '%s'.\",\n file)\n\n return state", "def read_file(self, filename):\n with open(filename, 'r') as file:\n for line in file:\n l = line.strip()\n\n if l == ST_POS0:\n self._state = ST_POS0\n elif l == ST_TRNS:\n self._state = ST_TRNS\n elif l == ST_POS1:\n self._state = ST_POS1\n else:\n self._parse_line(l)\n self._state = None", "def read_from_file(self, filename: str) -> None:", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def get_file_state(self, filename):\n # load the pickle file\n pkl_fid = open(OUTPUT_PATH + '/sio.pckl')\n sio_db = pickle.load(pkl_fid)\n pkl_fid.close()\n\n # get the file state for this filename\n return sio_db.file_state.get(filename)", "def load_state(self):\n return self.state.read()", "def ReadFile(filepath: str):\n\n with open(filepath, \"r\") as fin:\n lines = [x.split() for x in fin.readlines()]\n N, M = (len(lines) - 1) // 2, len(lines[0])\n \n if len(lines[N]) != 0:\n raise IOError(\"Invalid input file!\")\n\n state.Initialize(lines[:N], lines[N + 1:])", "def read_state(path: str):\n state = State(0, (0, 0), [])\n n = 0\n with open(path, \"r\") as file:\n first_line = True\n line_counter = 0\n bin_width = 0\n bin_height = 0\n lines = file.readlines()\n for line in lines:\n line_counter += 1\n values = line.strip().split(' ')\n # Ignore comments in the file\n if values[0] != \"%\":\n # bin size is in the first line\n if first_line:\n if len(values) == 2:\n bin_width, bin_height = values\n try:\n bin_width = int(bin_width)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {width} cannot be converted to int!')\n try:\n bin_height = int(bin_height)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {height} cannot be converted to int!')\n state.bin_size = (bin_width, bin_height)\n state.open_new_bin()\n else:\n raise IOError(f'Wrong format of first line: \\n\\t {line} should be of format: \\n\\t bin_width'\n f'bin_height')\n first_line = False\n else:\n if len(values) == 2:\n width, height = values\n try:\n width = int(width)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {width} cannot be converted to int!')\n try:\n height = int(height)\n except ValueError:\n print(f'File is not valid, in line {line_counter} {height} cannot be converted to int!')\n state.boxes_open.append(Box(width, height, n=n))\n n += 1\n elif len(values) == 5:\n width, height, box_x, box_y, bin_id = values\n while len(state.bins) < int(bin_id) + 1:\n state.bins.append(Bin(bin_width, bin_height))\n validation = state.bins[int(bin_id)].place_box_at_pnt(\n Box(int(width), int(height), n=n), Point(int(box_x), int(box_y)))\n n += 1\n if not validation:\n raise IOError(\n f'File contains no valid configuration, in line {line_counter} the box in bin {bin_id} with size {(width, height)} and position {(box_x, box_y)} is overlapping with some other box.')\n else:\n raise IOError(f'Wrong format of line {line_counter} should be of format: \\n\\t box_width '\n f'box_height 
box_x box_y bin_width bin_height bin_id \\n\\t or \\n\\t box_width '\n f'box_height')\n return state", "def read_from_file(self, flo):\n # get initial states\n match = re.match(r\"Initial State\\:\\s*\\{(.*)\\}\", flo.readline())\n self.initial_state = int(match.groups()[0])\n\n # get final states\n match = re.match(r\"Final States\\:\\s*\\{(.*)\\}\", flo.readline())\n self.final_states = [\n int(state) for state in match.groups()[0].split(',')]\n\n # get state count - we don't actually need this\n match = re.match(r\"Total States\\:\\s*(\\d*)$\", flo.readline())\n num_states = int(match.groups()[0])\n\n # get state names\n match = re.match(r\"State\\s*(.*)\\s*$\", flo.readline())\n symbol_names = [name.strip() for name in match.groups()[0].split()]\n\n # get transitions\n state_pattern = r\"(\\d*)\\s*\" + r\"\\s*\".join(\n r\"\\{(.*)\\}\" for _ in symbol_names)\n reo = re.compile(state_pattern)\n transitions = {}\n for state_string in flo.readlines():\n groups = reo.match(state_string).groups()\n from_state = int(groups[0])\n end_state_strings = groups[1:]\n transitions[from_state] = {}\n for symbol, end_states in zip(symbol_names, end_state_strings):\n if end_states:\n transitions[from_state][symbol] = [\n int(state) for state in end_states.split(\",\")]\n self.transitions = transitions\n\n symbol_names.remove(NULL) # get alphabet by removing null symbol\n self.alphabet = symbol_names", "def read_file(self, fp):\n try:\n self.steps = []\n f = open(fp, 'r')\n file_arr = f.read().splitlines()\n # Get number of processes.\n self.processes = int(file_arr.pop(0).split(' ')[0])\n # Get number of resources.\n self.resources = int(file_arr.pop(0).split(' ')[0])\n print(\"\\n%d processes and %d resources.\" % (self.processes, self.resources))\n # Load each step.\n for line in file_arr:\n line_arr = line.split(' ')\n # Get process num.\n p = int(line_arr[0].strip('p'))\n # Get request/release.\n if line_arr[1] == 'requests':\n re = 1\n else:\n re = 0\n # Get resource num.\n r = int(line_arr[2].strip('r'))\n # Store as tuple in our steps.\n self.steps.append((p, re, r))\n print(\"%d total steps in simulation.\\n\" % len(self.steps))\n self.state_string[0] = str(self.processes) + \" processes and \" + str(self.resources) + \" resources. 
\"\n self.state_string[1] = str(len(self.steps)) + \" total steps in simulation.\"\n except IOError:\n print(\"Cannot find the file at\", fp)", "def load(self):\n logger.debug('Loading state from file %s', self.file_path)\n\n with open(self.file_path, 'rb') as f:\n self.data = pickle.load(f)", "def readFromFile(filename):\n raise NotImplementedError", "def read(self, filename):\n raise NotImplementedError", "def loadState(self, file):\n if isinstance(file, str):\n with open(file, 'r') as f:\n xml = f.read()\n else:\n xml = file.read()\n self.context.setState(mm.XmlSerializer.deserialize(xml))", "def readState(self, saveState: ghidra.framework.options.SaveState) -> None:\n ...", "def ReadClientStateFile(self):\n if self.client_state_file is None:\n return\n file_timestamp = os.stat(self.client_state_file).st_mtime\n if file_timestamp == self.client_state_file_timestamp:\n return\n logging.info('load client state')\n file_contents = open(self.client_state_file).read()\n self._registered_tokens = json.loads(file_contents, strict=False)\n self.client_state_file_timestamp = file_timestamp", "def read_snapshot(self, fname):\n f = gzip.open(fname, 'rb')\n state = pickle.load(f)\n self._setstate(state)", "def read(path):", "def readState(f: TextIOWrapper) -> StateNode:\n table = []\n line = f.readline().strip()\n while len(line) > 0:\n table.append(line)\n line = f.readline().strip()\n line_lengths = [len(x) for x in table]\n\n # print(\"Table: \", table)\n # print(\"Lengths of table: \", line_lengths)\n\n if len(table) == 0:\n raise ValueError(\"State is missing first line of data!\")\n if min(line_lengths) != max(line_lengths):\n raise ValueError(\"State doesn't have all lines of equal size!\")\n return StateNode(\n table, \n (list(range(len(table))), list(range(len(table[0])))), \n ([], []), \n 0, \n None\n )", "def read(self, fname):\n return self.read_using_fguide(fname, self.fguide)", "def read_file(path_to_file):\n 8", "def load_state(self, fname: str) -> _TrainingState:\n training_state = None\n with open(fname, \"rb\") as fp:\n training_state = pickle.load(fp)\n return training_state", "def read(self, filename): # real signature unknown; restored from __doc__\n pass", "def _load_state(self):\n \n if os.path.isfile(self.histFile):\n with open(self.histFile,'rb') as hf:\n oldFile = hf.read()\n \n if os.path.isfile(oldFile):\n self.dbFile.Value = oldFile", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = 
variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def load(self, from_path):\n with open(from_path, 'rb') as f:\n self.load_state_dict(torch.load(f))", "def load(self, from_path):\n with open(from_path, 'rb') as f:\n self.load_state_dict(torch.load(f))", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def reload_state(self):\n\n log.debug(\"Reload state from file %s\" % self.state_filename)\n if path.isfile(self.state_filename):\n with open(self.state_filename) as sf:\n self.state = yaml.safe_load(sf)\n\n if self.state is None:\n log.debug(\"Statefile returned none\")\n else:\n log.debug(\"Statefile does not exist\")\n self.state = {}", "def load_state(self):\r\n try:\r\n state_file = open(self.STATE_FILE)\r\n except IOError:\r\n self.say(\"No stored state\")\r\n self.next_student_module_id = 0\r\n else:\r\n with state_file:\r\n state = json.load(state_file)\r\n self.say(\r\n \"Loaded stored state: {}\".format(\r\n json.dumps(state, sort_keys=True)\r\n )\r\n )\r\n self.next_student_module_id = state['next_student_module_id']", "def load(settings):\n\n state_file = settings.paths('f_state')\n logger.info(f\"Reading flight state from file '{truncate_filepath(state_file)}'...\")\n\n if not os.path.exists(state_file):\n raise IOError(f\"File '{state_file}' not found\")\n\n with open(state_file, 'r') as fp:\n state_dict = json.load(fp)\n\n state = FlightState()\n state.update_from_dict(**state_dict)\n return state", "def load_state(actor, sfile):\n import os.path, json, base64\n if os.path.isfile(sfile):\n state_file = open(sfile, 'rb')\n state = json.loads(base64.b64decode(state_file.read()).decode(\"ascii\"))\n actor.load_state(state)\n return True\n return False", "def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)", "def readFromFile(self, path):\n log(logging.DEBUG, \"Read from file: \" + path)\n with open(path, \"r\") as f:\n return f.read()", "def read(self, f):\n return self.parse(f.read())", "def load(self, file_name):\n\n self._state.load(file_name)", "def load_map(self, filename):\n with open(filename, 'rb') as file:\n self.current_obstacles = pickle.load(file)\n self.current_goal = pickle.load(file)\n try:\n setstate(pickle.load(file))\n except EOFError:\n print(\"No random state stored\")", "def read_file(file_path):\n scan = nib.load(filename=file_path)\n scan = scan.get_fdata()\n return scan", "def _read(fname):\n fpath = os.path.dirname(__file__)\n fpath = os.path.join(fpath, fname)\n with open(fpath, 'r') as file_:\n return file_.read()", "def load_model_state(filename: str) -> OrderedDictType[str, torch.Tensor]:\n return torch.load(filename)", "def read():\n # TODO", "def read_file(self):\n self._apply_shared_lock()\n\n self.handle = self._open_file_r()\n out = self._deserialize(self.handle)\n self.handle.close()\n\n self._release_lock()\n\n return out", "def get_state(self):\n try:\n json_data = open(self.state_file)\n data = json.load(json_data)\n self.state_timestamp = data[\"timestamp\"]\n json_data.close()\n\n except IOError:\n self.logger.info(\"'%s' not found: an initial state file will be create\" % \\\n self.state_file)\n data = {\"timestamp\": self.state_timestamp}\n with open(self.state_file, 'w') as 
out_file:\n json.dump(data, out_file, indent=4)\n out_file.close()", "def read (self, file):\n\t\tself.unpack (file.read (self.size()))", "def _read(self, file_name):\n f = open(file_name)\n lines = f.readlines()\n begin = 0\n end = 0\n while end < len(lines):\n op = ''\n for l in lines[begin:]:\n end += 1\n op = l.split()[0]\n if op in operations:\n self.operations.append(op)\n break\n if op == '=push':\n nfa = Automaton(lines[begin:end - 1])\n self.aut_to_push.append(nfa)\n begin = end\n f.close()", "def loadState(self):\n\t\tif not path.exists(STATEFILE):\n\t\t\tprint \"No previous statefile, assuming first run\"\n\t\t\tself.state['lastrun'] = datetime.datetime.now()-datetime.timedelta(days=365)\n\t\telse:\n\t\t\tsfile = open(STATEFILE,'r')\n\t\t\tself.state = cPickle.load(sfile)\n\t\tself.lastrun = self.state['lastrun']", "def load_input(filename: str) -> list:\n\n text_stream = io.open(filename, 'r', encoding='utf-8', errors='ignore', newline='\\n')\n \"\"\" Calls Python's io function to read the file with the specified name.\"\"\"\n\n initial_state = []\n for i in range(0, 4):\n initial_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The rstrip method removes all trailing whitespace of the string. The split \n method uses the given character as the delimiter to break down the string and \n return a list of the substrings. The map function takes that list, converts \n the substrings into integers and returns a map object, which is eventually \n converted into a list by the exterior call to the list function. \"\"\"\n\n \"\"\" A state is represented as a multi-layer list. The first layer contains \n the four rows, each of which is a second layer that consists of four tiles. \"\"\"\n\n blank_line = text_stream.readline()\n \"\"\" In the input file, there is a blank line in between the two states.\"\"\"\n\n goal_state = []\n for i in range(0, 4):\n goal_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The construct of this part is identical to the one above. \"\"\"\n\n text_stream.close()\n\n ret = [initial_state, goal_state]\n \"\"\" Returns the two lists that represent the initial and goal states, \n respectively. 
\"\"\"\n return ret", "def read_file(self, path):\n with open(path) as f:\n return self.read_file_obj(f)", "def load(self):\r\n self.read(self.filename)", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()", "def load(self, path):\n self.load_state_dict(torch.load(path))", "def load(self, path):\n self.load_state_dict(torch.load(path))", "def read_file(self,fname):\n try:\n self.raw=spiketrain.read_file(fname)\n except Exception:\n self.raw=None\n raise", "def _read_input_file(self):\n pass", "def read(fname):\n f = fabio.open(fname)\n data = f.data\n del f; # close file\n return data", "def readFile(self, fname):\n res = None\n with open(self.PATH + fname, 'rb') as handle:\n res = pickle.load(handle)\n return res", "def read_from_file(path):\n with io.open(path, 'rb') as ios:\n return read(ios)", "def read_file(self, filename):\n import pycbf\n self.cbf_handle = pycbf.cbf_handle_struct()\n self.cbf_handle.read_file(filename, pycbf.MSG_DIGEST)\n self.cbf_handle.rewind_datablock()", "def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()", "def Load(self, filename):\n\n self.sm['state'] = self.AddState\n self.sm['condition'] = self.AddCondition\n exec(open(filename).read(), self.sm)\n self.name = self.sm['name']\n if not self.name.isalnum():\n raise Exception(\"State machine name must consist of only alphanumeric\"\n \"characters.\")\n self.comment = self.sm['comment']", "def read(self, filename):\n f = open(filename, 'r')\n m = f.readline()\n n = f.readline()\n lst = []\n for line in f.readlines():\n lst.append(int(line))\n f.closed\n self.__init__(int(m), int(n), lst)", "def read_config(self, config_filename):", "def loadState(self, state_fname):\n byte_string_state_fname = state_fname.encode('utf-8')\n nes_lib.loadState.argtypes = [c_void_p, c_char_p]\n nes_lib.loadState.restype = c_bool\n return nes_lib.loadState(self.obj, byte_string_state_fname)", "def read_states():\n loc_file = open(loc_file_path, 'r')\n prev_file = open(prev_file_path, 'r')\n\n global loc, prev\n\n try:\n loc = json.loads(loc_file.read())\n prev = json.loads(prev_file.read())\n except json.decoder.JSONDecodeError:\n\n # 3d array of strings String[18][11][...] 
this 3d [] is used for storing which locations are at this address\n loc = [[[] for _ in range(11)] for _ in range(18)]\n\n prev = [] # empty list\n write_states()\n\n loc_file.close()\n prev_file.close()", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "async def load(self, file: IO) -> dict:", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def read_states(self, filename: str, comment: str = None) -> pd.DataFrame:\n self.states = self._parse(filename, comment=comment)\n self.states['name'] = self.states['name'].astype('str')", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def read_from_file(filename):\n with open(filename, \"r\") as f:\n f.readlines()", "def _load_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\ttry:\r\n\t\t\twith open(self._state_file, 'rb') as tmp:\r\n\t\t\t\tlogger.debug(\"There is a file.\")\r\n\t\t\t\ttmp_dict = pickle.load(tmp)\r\n\t\t\t\tlogger.debug(\"Dictionary loaded from file: %s\" % tmp_dict)\r\n\t\texcept IOError as e: # File doesn't exists\r\n\t\t\tlogger.debug(\"Exit - No file. Error message: %s\" % e)\r\n\t\t\ttmp_dict = {}\r\n\t\t\t\r\n\t\treturn tmp_dict", "def get_state_output(self, state: str) -> Dict[str, Any]:\n return read_yaml(self._dir_path / f'{state}.yaml')", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. 
(Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def get_state_as_file_object(self) -> io.BytesIO:\n raise NotImplementedError(\"Get state as file object not implemented.\")", "def load_state(self, X, file):\n self._initializing_corpus(X, file)\n self.loaded = True", "def read(filename):\n with open(os.path.join(os.path.dirname(__file__), filename)) as f:\n return f.read()", "def read (cls, fileName):\n out = cls ()\n \n with open(fileName) as fid:\n for line in fid:\n line = line.strip()\n \n if line == 'ENERGY':\n cls._scan_energy (out, fid)\n elif line == 'TRANSMISSION':\n cls._scan_matrix (out, fid, 'TE_op')\n elif line == 'CURRENT':\n cls._scan_matrix (out, fid, 'I_op')\n elif line == 'DOS':\n cls._scan_matrix (out, fid, 'DOS_op')\n elif line == 'n':\n cls._scan_matrix (out, fid, 'n_op')\n elif line == 'neq':\n cls._scan_matrix (out, fid, 'neq_op')\n \n return out", "def read_file(self, file):\n fd = open(file)\n data = fd.read()\n fd.close()\n return data", "def readFastaFile(filename):", "def test_read_file(self):\n restart_path = os.path.join(arc_path, 'arc', 'testing', 'restart(H,H2O2,N2H3,CH3CO2).yml')\n input_dict = read_file(restart_path)\n self.assertIsInstance(input_dict, dict)\n self.assertTrue('reactions' in input_dict)\n self.assertTrue('freq_level' in input_dict)\n self.assertTrue('use_bac' in input_dict)\n self.assertTrue('ts_guess_level' in input_dict)\n self.assertTrue('running_jobs' in input_dict)\n\n with self.assertRaises(InputError):\n read_file('nopath')", "def load(fp, *args, **kwargs): \n state = json.load(fp, *args, **kwargs)\n return unserialize(state)", "def pickle_read(file_path):\n\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def load(self, path):\n states = torch.load(path, map_location=lambda cpu, _: cpu)\n return states", "def load_state_dict_from_filename(filename, model):\n assert len(glob.glob(os.path.join(*[CHECKPOINT_DIR, filename]))) == 1\n\n # LOAD FILENAME\n\n # If state_dict in keys, use that as the loader\n right_dict = lambda d: d.get('state_dict', d)\n\n model.load_state_dict(right_dict(torch.load(\n os.path.join(*[CHECKPOINT_DIR, filename]))))\n return model", "def read_pickle(file_path):\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def read(self):\n\t\treturn self.input_file.read(1)", "def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def load(self, path):\n\n # Restore\n self.checkpoint.restore(path).expect_partial()\n print(\"> Loaded:\", path)\n\n # Read counters\n with open(self.counters_file) as f:\n data = json.load(f)\n return data[path]", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def _read_file(self, filename, start, end):\n dates, pnl = read_pnl_from_file(filename, start, end)\n if self._dates is None:\n self._dates = dates\n return pnl", "def read(filename):\n\n path = os.path.join(os.path.dirname(__file__), filename)\n\n with open(path) as f:\n return f.read()", "def load_state_file(statefile_path):\n logging.getLogger(\"root\").info(\"Loading state file\")\n f = open(statefile_path, 'r')\n 
json_content = f.read()\n f.close()\n state = json.loads(json_content)\n cloud_connector = configure_connector(state['provider'])\n deployment = Deployment()\n deployment.deserialize(state['deployment'], cloud_connector)\n return deployment, cloud_connector", "def _read(self, in_file):\n self.string = in_file.readline().decode().strip()", "def load(filename):\r\n wholeTract= nib.streamlines.load(filename) \r\n wholeTract = wholeTract.streamlines\r\n return wholeTract" ]
[ "0.79414666", "0.73601097", "0.73104596", "0.71004415", "0.70303845", "0.70303845", "0.701008", "0.6921969", "0.689979", "0.68704414", "0.6858411", "0.68411195", "0.68014264", "0.67528075", "0.67309463", "0.6665738", "0.66562074", "0.6639629", "0.6635025", "0.6634156", "0.6577014", "0.6534646", "0.6527201", "0.6480194", "0.6435091", "0.6340988", "0.63167", "0.6267828", "0.6267828", "0.62569803", "0.62553954", "0.62420225", "0.6240373", "0.62071055", "0.6198413", "0.61954784", "0.6190426", "0.61839676", "0.6179033", "0.61760396", "0.61525655", "0.61429566", "0.61373323", "0.6128654", "0.6112504", "0.61077", "0.6107693", "0.6091091", "0.6082868", "0.60668844", "0.60455644", "0.6029752", "0.6029752", "0.6026571", "0.6026571", "0.60243297", "0.60185075", "0.6016776", "0.59882313", "0.5979055", "0.59734344", "0.59728134", "0.5971362", "0.59675515", "0.5956102", "0.5951217", "0.5936499", "0.5932971", "0.5928543", "0.5919765", "0.59157866", "0.589288", "0.58903897", "0.58859926", "0.58786476", "0.5875954", "0.5875884", "0.5873743", "0.58681977", "0.58641803", "0.5858854", "0.5852774", "0.5835721", "0.5833352", "0.5831965", "0.5825689", "0.5817204", "0.58166593", "0.5815512", "0.58051825", "0.579827", "0.57978654", "0.57949495", "0.5790205", "0.5777698", "0.57703114", "0.5767684", "0.5765172", "0.5759709", "0.5749871", "0.5745622" ]
0.0
-1
The horizontal center of rotation.
def origin(self): return self._origin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def horiz_center(self):\n return self._horiz_center", "def horiz_center(self):\n return self._horiz_center", "def rotation_center_x(self, *args, **kwargs) -> Any:\n pass", "def center(self):\n return self.pos + self.axis / 2.0", "def rotation_center(self, *args, **kwargs) -> Any:\n pass", "def centerx(self):\n return self.left + self.width / 2", "def center(self):\n return (self.upper_right + self.lower_left) * 0.5", "def center(self):\n return np.array([0,0,1/self.C+self.pos()])", "def rotation_pivot_to_center(self):\n pass", "def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)", "def center(self):\n\n return (\n self.x() + (self.width() / 2),\n self.y() + (self.height() / 2)\n )", "def center(self):\n return self._center", "def horizontal_angle(cX):\n\n return atan(((FRAME_CENTER[0] + .5) - cX) / FOCAL_LENGTH)", "def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)", "def center(self):\n return self.center_x, self.center_y", "def get_center(self):\n return self.center", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def getcenter(self):\n return self.centro.cartesianas()", "def center(self):\n return Point(self.width/2, self.height/2)", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def vert_center(self):\n return self._vert_center", "def vert_center(self):\n return self._vert_center", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def center(self):\n return self['center']", "def getCenter(self):\n if self.__center is None:\n raise ValueError, \"Center is undefined.\"\n return self.__center", "def center(self):\n return (self.centerx, self.centery)", "def center(self):\n return self.centralizer(self)", "def heading(self):\n x, y = self._orient\n result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0\n result /= self._degreesPerAU\n return (self._angleOffset + self._angleOrient*result) % self._fullcircle", "def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2", "def centerAxis():\n dislin.center()", "def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]", "def center(self):\n if not hasattr(self, '_center'):\n self._center = np.unique(self.points, axis=0).mean(axis=0)\n return self._center", "def center_horizontal(self, rect):\n self.rect.centerx = rect.centerx", "def getCenter(self):\n return Point.average(self.points)", "def center(x):\n return x - x.mean()", "def pix_center(self):\n return self._pix_center", "def get_centre(self):\n return self.c", "def mean_centered(self):\n return self._scala.meanCentered()", "def get_center_point(self):\n raise NotImplementedError()", "def compute_platform_center(self):\n base = self.platform_vertices[1] - self.platform_vertices[0] # base of triangle, vector\n x = np.linalg.norm(base) # base length, scalar\n m = self.platform_vertices[0] + base/2 # midpoint on the base, vector\n cm = x/(2*np.sqrt(3)) # length from m to center c, scalar\n cm_dir = self.platform_vertices[2] - m # direction to center from midpoint, vector\n cm_vec = cm_dir*cm/np.linalg.norm(cm_dir) # make cm_dir a unit vector and multiply by the length, vector\n c = m + 
cm_vec # center position, vector\n return c", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def horizontal(self):\n return self._horizontal", "def center(self) -> Line:\n return Line(self.shape.pos, self.shape.pos + self.velocity)", "def getCentroid(self) -> Vec3:\n return self.centroid()", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def GetCenter(self):\n ...", "def get_center(self,lonlat=False):\n lon, lat = np.asarray(self.rotator.rots[0][0:2])*180/pi\n if lonlat: return lon,lat\n else: return pi/2.-lat*dtor, lon*dtor", "def Centroid(self):\n return Vector(self.centroid)", "def get_center_at_intersection(self) -> Point:\n return self._reference + (self.intersection - self.intersected_trajectory.start)", "def center(self):\n return self.map_.geom.center_skydir", "def _centre(self, period):\n if self.direction():\n mx = self.data[-1]\n else:\n mx = self.data[0]\n\n return ((mx // period) * period).squeeze()", "def getCenter(self):\r\n pixels = np.argwhere(self.array)\r\n center = np.mean(pixels.astype(np.float32), axis=0)\r\n return np.round(center).astype(np.int)", "def safe_north_point(self):\n ifMutexAcquire(self.use_mutex)\n try:\n x, y, z = self.read_magnetometer()\n except:\n x, y, z = 0,0,0\n finally:\n ifMutexRelease(self.use_mutex)\n\n # using the x and z axis because the sensor is mounted vertically\n # the sensor's top face is oriented towards the front of the robot\n\n heading = -atan2(-x, z) * 180 / pi\n\n # adjust it to 360 degrees range\n\n if heading < 0:\n heading += 360\n elif heading > 360:\n heading -= 360\n\n return heading", "def center_horizontal_paddle(self):\n self.top_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)", "def center(self, X):\n X = X - self.mu\n X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma)\n return X", "def get_center(self):\n\n x = np.array(self.x)\n y = np.array(self.y)\n return np.mean(x), np.mean(y)", "def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def get_center_scr(self):\r\n return self.rect.center", "def phase_center(self):\n try:\n rx_number = extract_channel_number(self.title)\n ph_center = (_np.array(self.GPRI_tx_coord) + _np.array(\n getattr(self, \"GPRI_rx{num}_coord\".format(num=rx_number)))) / 2\n return ph_center\n except AttributeError:\n return 0", "def center(self):\n x0, y0, width, height = self._rect_bbox\n return x0 + width / 2., y0 + height / 2.", "def calculateCenter(self):\n y_avg = int(sum(self.points[:,0])/float(len(self.points)))\n x_avg = int(sum(self.points[:,1])/float(len(self.points)))\n self.center = (x_avg, y_avg)\n return(x_avg,y_avg)", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "def horizontalMirror(self, mirror):\n mirror_x = self.x\n distToMir = self.y - mirror\n mirror_y = self.y - (2 * distToMir)\n return Point(mirror_x, mirror_y)", "def horizontalMirror(self, mirror):\n mirror_x = self.x\n distToMir = self.y - mirror\n mirror_y = self.y - (2 * distToMir)\n return 
Point(mirror_x, mirror_y)", "def getCentroid(self):\r\n return self._centroid", "def center_airfield(self):\n\n x = self.WINDOW_WIDTH / 2 - (Airfield.FIELD_WIDTH / 2)\n y = self.WINDOW_HEIGHT / 2 - (Airfield.FIELD_HEIGHT / 2)\n return (x, y)", "def circle_center(self):\n return self.container.width / 2, self.container.height / 2", "def raw_heading(self):\n\n self._heading = math.atan2(self._mag[X], self._mag[Y])\n\n if self._heading < 0:\n self._heading += 2*math.pi\n if self._heading > 2*math.pi:\n self._heading -= 2*math.pi\n\n self._heading_degrees = round(math.degrees(self._heading),2)\n\n return self._heading_degrees", "def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index(\"left_hip\")]\n right_hip = landmarks[self._landmark_names.index(\"right_hip\")]\n center = (left_hip + right_hip) * 0.5\n return center", "def get_center(self) -> Tuple[int, int]:\n raise NotImplementedError()", "def rot_center(self):\n loc = self.rect.center\n self.image = pygame.transform.rotate(self.current_sprite_alpha, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = loc", "def relativeRotation(self):\n return self.rotation()", "def get_horizontal_alignment ( self, object ):\n return self.horizontal_alignment", "def get_horizontal_alignment ( self, object ):\n return self.horizontal_alignment", "def getPosHeading(self) :\n\t\treturn (self.avatarNP.getX(), self.avatarNP.getY(), \\\n\t\t\tself.avatarNP.getZ(), (self.avatarNP.getHpr()[0])%360)", "def center(width, height):\n return width/2, height/2", "def worldToCameraCentricXform(self):\n return self.rotateAlignXform().dot(self.translateToOriginXform())", "def start_angle(self):\n return self._start_angle", "def start_angle(self):\n return self._start_angle", "def middle(self):\n return self.point_and_heading_at_offset(self.length/2)", "def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid", "def center(self):\n return np.sum(self.bbox, 0) / 2", "def get_arc_center(self):\n # First two anchors and handles\n a1, h1, h2, a2 = self.points[:4]\n # Tangent vectors\n t1 = h1 - a1\n t2 = h2 - a2\n # Normals\n n1 = rotate_vector(t1, TAU / 4)\n n2 = rotate_vector(t2, TAU / 4)\n try:\n return line_intersection(\n line1=(a1, a1 + n1),\n line2=(a2, a2 + n2),\n )\n except Exception:\n warnings.warn(\"Can't find Arc center, using ORIGIN instead\")\n return np.array(ORIGIN)", "def x(self) -> int:\n return self.data.x_centre >> 4", "def _get_pose_center(self, landmarks):\n left_hip = landmarks[self._landmark_names.index('left_hip')]\n right_hip = landmarks[self._landmark_names.index('right_hip')]\n center = (left_hip + right_hip) * 0.5\n return center", "def center(self) -> Point:\n return Point(*np.sum(self.normalized_array[:, :-1], axis=0))", "def centralAngle(self):\n global central_angle\n central_angle = always_redraw(\n lambda : Angle(radius_horiz, radius_ang, radius=0.25, stroke_color=YELLOW)\n )\n\n global central_angle_label\n central_angle_label = always_redraw(\n lambda : MathTex(\"x\", stroke_color=GREEN).scale(0.75).move_to(\n 
LEFT*5+UP*(0.3*self.x_max*np.sin(0.5*theta.get_value()*DEGREES))+RIGHT*(0.3*self.x_max*np.cos(0.5*theta.get_value()*DEGREES)))\n )\n\n self.play(Write(central_angle), Write(central_angle_label))", "def get_center(self):\n x = round(self.x_pos)\n y = round(self.y_pos)\n return [int(x),int(y)]", "def getFieldCenter(self):\n return (self.field_limits[:, 1] + self.field_limits[:, 0]) / 2", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def center(self, obj):\n return self.phy2abs.center(obj)", "def get_center_tile(self):\n mid_x = int(len(self.map)/2)\n mid_y = int(len(self.map[0])/2)\n return self.map[mid_x][mid_y]", "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "def get_center_coordinates(self):\n totalX = 0\n totalY = 0\n totalZ = 0\n for atom in self.get_atoms():\n totalX += atom.get_x()\n totalY += atom.get_y()\n totalZ += atom.get_z()\n \n xCenter = totalX / len(self.get_atoms())\n yCenter = totalY / len(self.get_atoms())\n zCenter = totalZ / len(self.get_atoms())\n \n return xCenter, yCenter, zCenter", "def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery" ]
[ "0.7565997", "0.7565997", "0.7496786", "0.74307644", "0.7374164", "0.73705566", "0.7350087", "0.7348408", "0.73358035", "0.71673024", "0.70586854", "0.70101935", "0.7006534", "0.69662935", "0.69427985", "0.6927156", "0.69266474", "0.68949395", "0.68809116", "0.68760055", "0.68594587", "0.68594587", "0.6800831", "0.6780894", "0.6742585", "0.6741191", "0.67367727", "0.6736065", "0.668955", "0.6656835", "0.6648877", "0.66458905", "0.6638921", "0.66294897", "0.65965027", "0.657744", "0.65522426", "0.6492935", "0.64847755", "0.64836645", "0.64256495", "0.6409578", "0.64094347", "0.6397117", "0.6380458", "0.6380458", "0.6380458", "0.6380458", "0.63612807", "0.6351234", "0.63486564", "0.63443345", "0.632641", "0.62962055", "0.6289787", "0.62844545", "0.62743986", "0.62707347", "0.626865", "0.6265117", "0.62642306", "0.6263561", "0.62508315", "0.6245446", "0.621627", "0.6211175", "0.6205268", "0.6200716", "0.6200716", "0.6188205", "0.61824566", "0.6170302", "0.6167921", "0.6157768", "0.6152479", "0.6145578", "0.61451197", "0.6133748", "0.6118049", "0.6118049", "0.6117937", "0.6106238", "0.6101902", "0.60997677", "0.60997677", "0.6098902", "0.6080054", "0.60692024", "0.606396", "0.60634154", "0.6053428", "0.6049619", "0.60423416", "0.60262895", "0.6013411", "0.60108703", "0.6004664", "0.5989681", "0.5979976", "0.59782773", "0.5975521" ]
0.0
-1
The physical width in mm of the reference frame in the photograph
def frameWidth(self): return self._frame_width
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWidth(self):\r\n width = 1\r\n if self.orientation == \"h\":\r\n width = self.size\r\n return width", "def getWidth(self):\n area = self.getArea()\n length = self.getLength()\n return area / length", "def get_width(self) -> int:\n return self.rsimulator.get_frame_width()", "def width(self):\n return (self.scene.shape[2] - self.size) // self.size + 1", "def getWidth(self):\n return frameWidth", "def getWidth(self) -> int:\n ...", "def width(self):\n return self.figure.scene.get_size()[0]", "def get_width(self) -> int:\n return int(self._surface.get_width())", "def getWidth(self):\n return _libsbml.Dimensions_getWidth(self)", "def get_frame_width(self) -> int:\n return self.__sim.frame_size()[0]", "def width(self) -> int:\n return self._image_data.width", "def width(self) -> float:\n return self._width", "def pixwidth(self):\n return self._labelWidth * self.transform.scale[0]", "def width(self) -> int:\r\n return self.rect_uv.w", "def filmWidth(self):\r\n cls = mxs.classof(self._nativePointer)\r\n width = None\r\n if cls == mxs.VRayPhysicalCamera:\r\n width = self._nativePointer.film_width\r\n\r\n elif cls == mxs.Physical:\r\n width = self._nativePointer.film_width_mm\r\n\r\n if not width:\r\n \r\n # If we failed to get a width from a camera, return the scene aperture setting.\r\n width = mxs.getRendApertureWidth()\r\n\r\n return width", "def get_width(self):\n return max(map(TextImage.get_width, self.frames))", "def width(self):\n self._updateExtents()\n return self._mWidth", "def getWidth(self):\n return self._image.width()", "def width(self):\n # type: () -> float\n return self._width", "def get_width ( self ):\n return self.width", "def w(self):\n return self.width", "def getWidth(self):\n return self.width", "def getWidth(self):\n return self.width", "def width(self):\n return _libsbml.Dimensions_width(self)", "def width(self) :\n return self.m_width", "def get_width(self):\n return self.width", "def frame_width(self) -> int:\n pass", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def getWidth(self):\n wsum = 0.0\n for quad in self._quadrilaterals:\n wsum = wsum + get_quad_width(quad)\n mwidth = (wsum / len(self._quadrilaterals)) / 1000.0\n return mwidth", "def width (self):\n return self._w", "def width(self):\n return self._get_mean_and_samples_attribute('width')", "def get_width(self):\n return self.__width", "def getWidth(self):\n return self._width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__size[0]", "def get_dimension_width(self):\n pass", "def calculate_width(self):\n return self.endX - self.startX", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def width(self):\n xmin, _, xmax, _ = self.viewport\n return self.parent.window_size[0] * (xmax - xmin)", "def width(self):\n return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)", "def 
width(self):\n return self.maxx - self.minx", "def width(self) -> int:\n return self.__width", "def width(self) -> int:", "def width(self) -> int:", "def width(self):\n xx = self.xx\n return max(xx) - min(xx)", "def width(self):\n return(self.SCREEN_W)", "def getNormalisedWidth( self, width ):\n\t\treturn int( self.waveread.getnframes() * float(width) )", "def width(self):\n return self['width']", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def getWidth(self):\n return constants.DEFAULT_WIDTH", "def get_width(self):\n width = np.size(self.img, 0)\n return width", "def width(self) -> int:\n return self._width", "def r_width(self) -> int:\n return math.ceil(self.t_width / REGION_DIM)", "def getWidth(self):\n return _tkCall(self.image.width)", "def full_frame_length(self):\n return self.height * self.width * 3", "def expectedWormLengthPixels(self):\n return self.expectedWormLength * self.pixelSize", "def width(self) -> int:\n\t\treturn self._raw_result['data']['width']", "def width(self):\n return self.x.max() - self.x.min()", "def twidth(self) -> int:\n return self.isize[0].to_pixels(self.parent.width)", "def width(self):\n return self.get_delta_value(self.X_INDEX)", "def getWidth(self):\n return DEFAULT_WIDTH", "def expectedWormWidthPixels(self):\n return self.expectedWormWidth * self.pixelSize", "def getWidth(self):\n\t\tif (self.position==[]):\n\t\t\treturn 0\n\t\treturn abs(self.position[1][0]-self.position[0][0])", "def getWidth(self):\n return _libsbml.BoundingBox_getWidth(self)", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width" ]
[ "0.7777585", "0.77179724", "0.768844", "0.7662584", "0.758126", "0.757602", "0.7558276", "0.749808", "0.7468271", "0.74638665", "0.7462626", "0.7425399", "0.7418057", "0.7411394", "0.7389598", "0.73888046", "0.7365202", "0.7350629", "0.73443985", "0.7315457", "0.73142976", "0.7312096", "0.7312096", "0.7303535", "0.72889966", "0.72856486", "0.72741985", "0.72655445", "0.72655445", "0.72655445", "0.72448266", "0.7242859", "0.72427285", "0.7236777", "0.722664", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.71925086", "0.7187157", "0.7176695", "0.7169686", "0.7155712", "0.7155712", "0.7155712", "0.7155712", "0.7126829", "0.7112234", "0.71089375", "0.71011484", "0.70967233", "0.70967233", "0.70826274", "0.7063288", "0.7061676", "0.7061453", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.7042986", "0.70257616", "0.7025643", "0.70133287", "0.7009644", "0.6982489", "0.69692385", "0.6964264", "0.6958919", "0.69520795", "0.6931992", "0.6926862", "0.6920544", "0.69187695", "0.691766", "0.69125277", "0.6907804", "0.69059896", "0.69059896", "0.69059896", "0.69059896", "0.69059896", "0.69059896", "0.69059896" ]
0.71373034
54
The physical height in mm of the reference in the photograph
def frameHeight(self): return self._frame_height
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getHeight(self):\r\n height = 1\r\n if self.orientation == \"v\":\r\n height = self.size\r\n return height", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def height(self):\n yy = self.yy\n return max(yy) - min(yy)", "def height(self):\n return self.__size[1]", "def height(self):\n\t\tpass", "def height(self):\n # type: () -> float\n return self._height", "def height (self):\n return self._h", "def height(self) :\n return self.m_height", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", "def height(self) -> int:\r\n return self.rect_uv.h", "def height(self) -> float:\n top = 0\n height_ = 0\n for part in self.line_parts:\n if part.state.rise > 0 and part.state.rise > top:\n top = part.state.rise\n if part.state.size > height_:\n height_ = part.state.size\n\n return height_ + self.top_margin + top", "def height(self):\n return self.client.call('GET', self.name + 'height')", "def getHeight(self):\n return _libsbml.Dimensions_getHeight(self)", "def height(self):\n self._updateExtents()\n return self._mHeight", "def height(self):\n return _libsbml.Dimensions_height(self)", "def height(self) -> int:\n return self._image_data.height", "def height(self):\n return self[\"height\"]", "def height(self):\n return self[\"height\"]", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def getHeight(self):\n return self._height", "def getHeight(self):\n return self._image.height()", "def height(self):\n return (self.__height)", "def height(self) -> int:\n return self.__height", "def get_height(self):\r\n return self.state['h']", "def geoidHeight(self):\n return self._geoidhgt", "def get_height():\n return resize.transforms[1].size", "def height(self):\n return (self.scene.shape[1] - self.size) // self.size + 1", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def height(self):\n return self.maxy - self.miny", "def get_dimension_height(self):\n pass", "def height(self):\r\n return self._height", "def footprint_height():", "def height(self) -> int:\n\t\treturn self._raw_result['data']['height']", "def get_height(self) -> int:\n return int(self._surface.get_height())", "def get_height(self):\n return self.__height", "def height(self):\n\n return self.__height", "def height(self):\n return self.config.get('resolution', {}).get('y',1080) #720", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def h(self):\n return self.height", "def height(self):\n return abs(self.end[1] - self.start[1])", "def getHeight(self):\n return _libsbml.BoundingBox_getHeight(self)", "def height(self) -> int:\n return self._height", "def height(self) -> int:\n return self._height", 
"def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def getHeights(self):\n if self.heights: return self.heights\n reader = self.getReader()\n subData = reader.findSubRecord('VHGT','LAND')\n if not subData: return None\n height0 = struct.unpack('f',subData[:4])[0]\n import array\n deltas = array.array('b',subData[4:4+65*65])\n iheights = array.array('i')\n iheights.append(0)\n for index in xrange(1,65*65):\n if index % 65:\n iheights.append(iheights[-1] + deltas[index])\n else:\n iheights.append(iheights[-65] + deltas[index])\n heights = self.heights = array.array('f')\n for index in xrange(65*65):\n heights.append(8*(height0 + iheights[index]))\n return self.heights", "def filmHeight(self):\r\n cls = mxs.classof(self._nativePointer)\r\n height = None\r\n if cls == mxs.VRayPhysicalCamera:\r\n\r\n # TODO: Why is that wrapped in a try except?\r\n try:\r\n height = self._nativePointer.film_height\r\n except AttributeError:\r\n pass\r\n\r\n elif cls == mxs.Physical:\r\n height = self._nativePointer.film_height_mm\r\n\r\n if not height:\r\n # If we failed to get a width from a camera, return the scene aperture setting.\r\n height = self.filmWidth() * (mxs.renderPixelAspect / mxs.getRendImageAspect())\r\n\r\n return height", "def get_height(self) -> int:\n return self.rsimulator.get_frame_height()", "def height(self):\n return self.y.max() - self.y.min()", "def pixheight(self):\n return self._labelHeight * self.y_sign * self.transform.scale[1]", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def height(self):\n return self.get_delta_value(self.Y_INDEX)", "def get_height(self,c):\r\n return self.h", "def get_height(self):\n height = np.size(self.img, 1)\n return height", "def calculate_height(self):\n return self.endY - self.startY", "def height(self) -> int:\n if self.props.max_height:\n max_height = UIMetric.parse(self.props.max_height).to_pixels(self.parent.height)\n return min(self.isize[1].to_pixels(self.parent.height), max_height)\n else:\n return self.isize[1].to_pixels(self.parent.height)", "def height(self, obj):\n if (obj.__class__.__name__ == 'PhysicalObject') or (issubclass(obj.__class__, laygo2.object.PhysicalObject)):\n return self.height(obj.bbox)\n else:\n _i = self.bbox(obj)\n return abs(_i[1, 1] - _i[0, 1])", "def getHeight(self):\n return _tkCall(self.image.height)", "def height(self):\n _, ymin, _, ymax = self.viewport\n return self.parent.window_size[1] * (ymax - ymin)", "def height(self):\n return(self.SCREEN_H)", "def height(self):\n return self.upper_right.y - self.lower_left.y", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def expected_height(self):\n\t\treturn self.expected_tile_height * TILE_SIZE", "def height(self):\n return len(self._pixels)", "def height(self) -> int:\n return self._obj[self.y_dim].size", "def height(self):\n return self.i_node.distance(self.n_node)", "def height_percent(self):\n return self.container['height_percent']", "def bottom_height_px(self):\n return self.bottom_pieces * PipePair.PIECE_HEIGHT", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) 
-> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")" ]
[ "0.7840666", "0.7595656", "0.7595656", "0.7595656", "0.751236", "0.7481456", "0.7390257", "0.7385717", "0.7385174", "0.73824626", "0.73762196", "0.73762196", "0.7343244", "0.73425585", "0.73327744", "0.7330128", "0.7315331", "0.7312233", "0.7302953", "0.7289399", "0.7289399", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72220486", "0.72027373", "0.7188635", "0.7187939", "0.71451724", "0.7142317", "0.7140948", "0.7133277", "0.7128497", "0.712768", "0.7123632", "0.71028376", "0.7089273", "0.7069279", "0.70632535", "0.7059561", "0.7054886", "0.7032563", "0.7029218", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.70164967", "0.7008045", "0.7005737", "0.6983937", "0.69794035", "0.69794035", "0.69774705", "0.69774705", "0.69774705", "0.69708055", "0.6949307", "0.6926627", "0.6919678", "0.6901192", "0.68949693", "0.68949693", "0.68949693", "0.68949693", "0.68937397", "0.68722206", "0.6871549", "0.68702686", "0.6869797", "0.6845333", "0.6814416", "0.681332", "0.68067247", "0.68029237", "0.67860556", "0.6775569", "0.67700595", "0.6760399", "0.6754858", "0.675285", "0.6714127", "0.66679084", "0.66679084", "0.66679084", "0.66679084", "0.66679084", "0.66679084" ]
0.0
-1
The outline of the command used to perform an angle run
def angleCommand(self): return self._angle_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd(self):", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli() -> None:", "def cli() -> None:", "def cmd_abor(args):", "def cmdline(self):\r\n raise NotImplementedError", "def command(facade, note):\n print facade, note", "def cli():\r\n pass", "def cli(ctx):", "def cli(ctx):", "def command():\n pass", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def cli():\n\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n ...", "def main(config):\n full_decor = \"=\"*80\n half_decor = \"=\"*35\n click.echo(\"\")\n click.echo(full_decor)\n click.echo( \"{} Raptors {}\".format( half_decor, half_decor))\n click.echo(full_decor)", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def commands():", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def cli(_):\n pass", "def cli(_):\n pass", "def cli(**_) -> None:\n pass", "def cli():\n return", "def cli() -> None:\n pass # pragma: no cover", "def command(self):\n raise NotImplementedError", "def usage():", "def usage():", "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def get_command(self):\n return 'figure'" ]
[ "0.64293647", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.62761986", "0.6247882", "0.6247882", "0.6243397", "0.6154068", "0.6151511", "0.608575", "0.60471576", "0.60471576", "0.60343117", "0.6013502", "0.6013502", "0.59673387", "0.59566283", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938367", "0.5938263", "0.58892316", "0.58822656", "0.58822656", "0.58822656", "0.58822656", "0.58822656", "0.58725893", "0.5868345", "0.5768645", "0.5768645", "0.57659", "0.575891", "0.5731838", "0.5723371", "0.57148767", "0.57148767", "0.5686686", "0.5680747", "0.5680747", "0.56680536" ]
0.6085803
34
The outline of the command used to perform a horizontal run
def horizontalCommand(self): return self._horizontal_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)", "def _display_command(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n output = self.outputs[idx]\n if output is None:\n self.screen.addstr('Waiting for command to run...')\n return\n\n # Set row limits\n top_line = self.top_lines[idx]\n top_line = 0 if len(output) < self.max_line else min(max(top_line, 0), len(output)-self.max_line)\n bottom_line = min(top_line+self.max_line, len(output)) # Last page may not be full\n self.top_lines[idx] = top_line\n\n # Crop output to fit screen height & width\n output = [line[:self.max_col-1] for line in output[top_line:bottom_line]]\n self.screen.addstr(b'\\n'.join(output))", "def cmd(self):", "def explainerdashboard_cli(ctx):", "def display_hline():\n for i in range(12):\n print(\"-\", end=\"\")\n print()", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def single_line():\n print (\"-------------------------------------------------------------\")", "def cmdline(self):\r\n raise NotImplementedError", "def get_command(self):\n return 'figure'", "def main():\n\n print('# <-- This is where the edge is')\n print('space_line(3, 5) -->')\n print(space_line(3, 5))", "def func(self):\n # check what aliases we have used\n if self.cmdstring == \"getinline\":\n self.switches.append(\"getinline\")\n if self.cmdstring == \"nextinline\":\n self.switches.append(\"nextinline\")\n if \"createline\" in self.switches:\n self.create_line()\n return\n if not self.args and not self.switches:\n self.display_line()\n return\n if not self.check_line:\n return\n if \"getinline\" in self.switches:\n self.join_line()\n return\n if \"nextinline\" in self.switches:\n self.next_in_line()\n return\n if \"dropout\" in self.switches:\n self.drop_out()\n return\n if \"dismiss\" in self.switches:\n self.dismiss()\n return\n if \"loop\" in self.switches:\n self.toggle_loop()\n return", "def display(self, grid):\n for i in range(grid.height):\n print(\"-\" + \"-------\"*grid.width)\n for j in range(grid.width):\n if not j:\n print(\"|\", end=\"\") # begin row with vertical line\n a = self.actions.get((i, j), ' ')\n print(\" %s |\" % a, end=\"\")\n print(\"\") # new line\n print(\"-\" + \"-------\"*grid.width, end='\\n\\n')", "def double_line():\n print (\"=============================================================\")", "def commands():", "def cli(ctx):", "def cli(ctx):", "def cli():\r\n pass", "def __call__(self):\n return '---'", "def cmd_h(self):\n self.delta = self.delta - 1\n self.get_text()", "def main(config):\n full_decor = \"=\"*80\n half_decor = \"=\"*35\n click.echo(\"\")\n click.echo(full_decor)\n click.echo( \"{} Raptors {}\".format( half_decor, half_decor))\n click.echo(full_decor)", "def get_command(self) -> str:\n return 'title'", "def subcommand_bar(self):\n print('bar')\n print(repr(c))", "def additional_command(self):\n pass", "def cli() -> None:", "def cli() -> None:", "def command():\n pass", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n 
print(\"-\"*32+'\\n')", "def cli():\n ...", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def verticalCommand(self):\n return self._vertical_command", "def cmdShell(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, clear: bool=True, command: Union[AnyStr, bool]=\"\",\n defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback:\n Script=None, dropCallback: Script=None, enable: bool=True, enableBackground:\n bool=True, enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float, float,\n float], bool]=None, isObscured: bool=True, manage: bool=True, noBackground:\n bool=True, numberOfHistoryLines: Union[int, bool]=0, numberOfPopupMenus: bool=True,\n numberOfSavedLines: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, preventOverride: bool=True, prompt: Union[AnyStr,\n bool]=\"\", statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def title_n(self):\n self.run_command('title_n')", "def cli():\n pass", "def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def cli():\n\n pass", "def clowder_command(cmd):\n\n return colored(cmd, attrs=['bold'])", "def cmd_help(args):", "def render(console: Console) -> None:\n console.print(Rule(\"[bold blue]CLI File Manager\", style=\"red\"))\n console.print(Panel(\"[white]Welcome to The [bold]BETTER[/bold] File manager\\nFor help type: `help` or `h`\",\n style=\"green\"))", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass" ]
[ "0.6144864", "0.60430384", "0.60299814", "0.59511566", "0.5867638", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5772128", "0.57363", "0.5722831", "0.56652844", "0.5649374", "0.5648787", "0.56325436", "0.56323457", "0.5575098", "0.5575098", "0.5575016", "0.55641055", "0.55586463", "0.55569386", "0.5542467", "0.5538011", "0.55303234", "0.551343", "0.551343", "0.5489705", "0.5489084", "0.5479798", "0.5478903", "0.5478903", "0.54788035", "0.54523325", "0.54516214", "0.54483956", "0.5445773", "0.5444476", "0.5435299", "0.5411175", "0.54096854", "0.54085255", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504", "0.5408504" ]
0.64010274
0
The outline of the command used to perform a vertical run
def verticalCommand(self): return self._vertical_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cmd(self):", "def vertical_line(t, n):\n lt(t)\n fd(t,n)\n rt(t)", "def cmdline(self):\r\n raise NotImplementedError", "def main(ctx):\n\n print(\"Mode:\", ctx.invoked_subcommand)", "def cli() -> None:", "def cli() -> None:", "def cli():\r\n pass", "def subcommand_bar(self):\n print('bar')\n print(repr(c))", "def _display_command(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n output = self.outputs[idx]\n if output is None:\n self.screen.addstr('Waiting for command to run...')\n return\n\n # Set row limits\n top_line = self.top_lines[idx]\n top_line = 0 if len(output) < self.max_line else min(max(top_line, 0), len(output)-self.max_line)\n bottom_line = min(top_line+self.max_line, len(output)) # Last page may not be full\n self.top_lines[idx] = top_line\n\n # Crop output to fit screen height & width\n output = [line[:self.max_col-1] for line in output[top_line:bottom_line]]\n self.screen.addstr(b'\\n'.join(output))", "def vertical_char(self):\n ...", "def cli():\n ...", "def vertical(self):\n return self._vertical", "def cli():\n\n pass", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def cli(ctx):", "def cli(ctx):", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def _render_vertical(self, gc, lx, ly, rx, ry, mx, my):\n mx = lx + (rx - lx) / 2.\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_v(gc, lx, ly, rx, mx, my)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_v(gc, lx, ly, rx, mx, my)", "def explainerdashboard_cli(ctx):", "def active_vertical_lines(self):\n val = ((self._block[1] & 0xF0) << 4) + self._block[0]\n return (val + 1) * 2", "def cli():\n return", "def func(self):\n # check what aliases we have used\n if self.cmdstring == \"getinline\":\n self.switches.append(\"getinline\")\n if self.cmdstring == \"nextinline\":\n self.switches.append(\"nextinline\")\n if \"createline\" in self.switches:\n self.create_line()\n return\n if not self.args and not self.switches:\n self.display_line()\n return\n if not self.check_line:\n return\n if \"getinline\" in self.switches:\n self.join_line()\n return\n if \"nextinline\" in self.switches:\n self.next_in_line()\n return\n if \"dropout\" in self.switches:\n self.drop_out()\n return\n if \"dismiss\" in self.switches:\n self.dismiss()\n return\n if \"loop\" in self.switches:\n self.toggle_loop()\n return", "def console(self, 
vm=None):\n raise NotImplementedError\n return \"\"", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli(_):\n pass", "def cli(_):\n pass", "def commands():", "def cli() -> None:\n pass # pragma: no cover", "def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)" ]
[ "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.5884879", "0.5875417", "0.58678025", "0.5753796", "0.5730497", "0.5730497", "0.5702172", "0.56211096", "0.56174815", "0.5616135", "0.5613818", "0.56094646", "0.56023824", "0.5591637", "0.5591637", "0.5587621", "0.5587621", "0.5581956", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5566054", "0.5501421", "0.5499678", "0.5423582", "0.5404848", "0.5385628", "0.5363775", "0.5340023", "0.5340023", "0.5340023", "0.5340023", "0.5340023", "0.52881205", "0.52881205", "0.52767867", "0.5265293", "0.52133995", "0.5202612" ]
0.6988323
0
The current number of runs
def count(self): return len(self._runs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_run_idx(self):\n return self.num_runs", "def number_of_launches(self):\n return self._number_of_launches", "def number_of_iterations(self) -> int:\n pass", "def num_runs(self):\n return len(self._h5[RUNS])", "def run(self) -> int:\n self._times_called += 1\n return self._times_called", "def number_of_iterations(self) -> int:\n return self._stats[\"iter_count\"]", "def counter(self) -> int:", "def counter(self) -> int:", "def run_number(self):\n return self._runNumber", "def num_trials(self):", "def num_launches(self):\n return len(self.launches)", "def number_of_iterations(self):\n return self._solution.nit", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1", "def count(self):\n return int()", "def count(self):\n return int()", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def counter(self) -> int:\n return self._counter", "def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0", "def tally(self):\n return self.count", "def number_of_iterations(self):\n return self._solution[\"iterations\"]", "def numRunningTotal(self):\n activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)\n return activeRuns", "def __len__(self):\n return self.nb_iterations", "def execution_count(self):\n if not self._execution_count:\n self.fill_heatmap()\n return self._execution_count", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def get_number_of_evaluation(self):\n return self.n_eval", "def count():", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def num_considered(self):\n return self._current", "def get_count(self):\r\n return self.count", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')", "def count_current():\n return current.count()", "def get_max_num_runs(self, db):\n res = db.session.query(func.max(db.ExperimentResult.run)).filter_by(experiment=self).first()\n if res is None or res[0] is None: return 0\n return res[0] + 1", "def count(self):\n # TODO not implemented yet\n return 0", "def count(self):\n with self._block:\n counter = re.search(r'count=(\\d+) ', repr(self))\n return int(counter.group(1))", "def execution_count(self) -> int:\n return pulumi.get(self, \"execution_count\")", "def num_steps(self) -> int:\n return self._num_steps", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns", "def reps(self) -> int:\n return self._reps", "def get_numpins(self):\n return self.numpins", "def get_count(self):\n\n\t\treturn self.__count", "def get_number_of_testing(self):\n return self.n_test", "def length(self):\n return self.counter", "def num_processes(self, new_value):", "def pycount(self):\n\n self.count += 1\n return self.count", "def num_steps(self):\n return self.torsoStepCount() + 1", "def count(self):\n return self.get_count()", "def count_one_round(self):\n\t\tself.round_count+=1\n\t\treturn self.round_count", "def 
counter(): # Local function\n nonlocal count\n if count < n:\n count += 1\n return count", "def current(self):\n return self.counter.count", "def count(self) -> int:\n return self.__count", "def count(self):\n return clone_counter._count", "def get_steps_num():\n return 0", "def result(self) -> int:\n return self._count", "def get_iterations_made(self):\n return self.iterations_made", "def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')", "def get_num_jobs(self):\n return str(self.num_jobs)", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def epoch(self):\n return len(self.history)", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def get_count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric", "def repeat_count(self):\n if hasattr(self, '_m_repeat_count'):\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None\n\n self._m_repeat_count = (self.repeat_count_m1 + 1)\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None", "def get_run_count(self, file_path) -> int:\n stat = self._file_stats.get(file_path)\n return stat.run_count if stat else 0", "def COUNTER_TOTAL():\n return 3", "def last_count(self):\n return self.__last_count", "def get_nb_results(self):\n return self.nb_results", "def iteration(self) -> int:\n return len(self._history) - 1", "def iter_count(self):\n return self._iter_count", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def getInstCount(self):\n return self.instCount", "def GetNumberOfResultsProcessed(self) -> int:\n return self.i", "def count() -> int:\n pass", "def num_timesteps(self):\n return self._num_timesteps", "def N ( self ) :\n return self.__N", "def current_node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"current_node_count\")", "def increment_counter(self) -> None:", "def next_run(self):\n self.load_run(run=self.run+1)", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def loopcount(self):\n return len(self.loopindices)" ]
[ "0.76630276", "0.75599295", "0.74130595", "0.7332917", "0.73038995", "0.72296184", "0.7181953", "0.7181953", "0.71678597", "0.7135644", "0.6975584", "0.6965288", "0.6940288", "0.6895412", "0.6895412", "0.68646985", "0.68560135", "0.68351436", "0.6822677", "0.68223685", "0.68212193", "0.6794721", "0.67787343", "0.6778261", "0.677366", "0.67671865", "0.675277", "0.67328835", "0.6730876", "0.6723504", "0.67155087", "0.669811", "0.66943604", "0.6682843", "0.66646785", "0.66620696", "0.6627053", "0.6627053", "0.66101605", "0.66017383", "0.6601529", "0.6591817", "0.65859437", "0.6578616", "0.6569222", "0.65688664", "0.6558056", "0.6548917", "0.6546039", "0.6531634", "0.65301186", "0.6529755", "0.6527138", "0.6526717", "0.65225977", "0.65217394", "0.64981574", "0.64973074", "0.6492074", "0.6487715", "0.6487715", "0.64862907", "0.6485155", "0.6485155", "0.6485155", "0.6485155", "0.6481281", "0.6481281", "0.6481281", "0.6479778", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.64720947", "0.6459298", "0.64518833", "0.64436793", "0.6443373", "0.64391637", "0.6434707", "0.6420886", "0.64146316", "0.63895935", "0.6388526", "0.6388119", "0.6366872", "0.63649887", "0.6354254", "0.6335572", "0.63289195", "0.6326199", "0.631613", "0.63151264", "0.6312535" ]
0.77989334
0
The current number of runs. This is required by QtQuick
def rowCount(self, index=QModelIndex()):
    return len(self._runs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_launches(self):\n return self._number_of_launches", "def next_run_idx(self):\n return self.num_runs", "def count(self):\n return len(self._runs)", "def num_launches(self):\n return len(self.launches)", "def num_trials(self):", "def num_runs(self):\n return len(self._h5[RUNS])", "def run_number(self):\n return self._runNumber", "def number_of_iterations(self) -> int:\n pass", "def run(self) -> int:\n self._times_called += 1\n return self._times_called", "def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')", "def repeat_count(self):\n if hasattr(self, '_m_repeat_count'):\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None\n\n self._m_repeat_count = (self.repeat_count_m1 + 1)\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None", "def execution_count(self):\n if not self._execution_count:\n self.fill_heatmap()\n return self._execution_count", "def num_steps(self) -> int:\n return self._num_steps", "def n_trials(self):\n return self.getGlobalVariableByName(\"ntrials\")", "def number_of_iterations(self) -> int:\n return self._stats[\"iter_count\"]", "def execution_count(self) -> int:\n return pulumi.get(self, \"execution_count\")", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1", "def count(self):\n return int()", "def count(self):\n return int()", "def n_tasks(self) -> int:\n pass", "def num_steps(self):\n return self.torsoStepCount() + 1", "def num_considered(self):\n return self._current", "def next_run(self):\n self.load_run(run=self.run+1)", "def get_steps_num():\n return 0", "def counter(self) -> int:", "def counter(self) -> int:", "def __len__(self):\n return self.nb_iterations", "def get_number_of_evaluation(self):\n return self.n_eval", "def getCount(self):\n return _osgAnimation.Target_getCount(self)", "def number_of_iterations(self):\n return self._solution.nit", "def get_number_of_testing(self):\n return self.n_test", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def tally(self):\n return self.count", "def get_count(self):\r\n return self.count", "def number_of_iterations(self):\n return self._solution[\"iterations\"]", "def get_max_num_runs(self, db):\n res = db.session.query(func.max(db.ExperimentResult.run)).filter_by(experiment=self).first()\n if res is None or res[0] is None: return 0\n return res[0] + 1", "def reps(self) -> int:\n return self._reps", "def n_steps(self) -> int:\n return len(self) - 1 # subtract the base metric", "def num_timesteps(self):\n return self._num_timesteps", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')", "def get_rows(self) -> int:\r\n return 1 + self.display.get_rows() + 1", "def counter(self) -> int:\n return self._counter", "def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns", "def getNrSamples(self): \r\n return self.numSamples", "def number_of_crew(self):\n return self._number_of_crew", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def iteration(self) -> int:\n return len(self._history) - 1", "def number_of_launches(self, number_of_launches):\n\n self._number_of_launches = number_of_launches", "def n_timesteps(self) -> int:\n return len(self.time)", "def length(self):\n return 
self.counter", "def count(self):\n # TODO not implemented yet\n return 0", "def num_tasks(self) -> int:\n return 1", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def numRunningTotal(self):\n activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)\n return activeRuns", "def count(self):\n return self.get_count()", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def getNIterations(self):\n return self.getOrDefault(self.nIterations)", "def count(self) -> int:\n return self.__count", "def instance_count(self) -> int:\n return pulumi.get(self, \"instance_count\")", "def number_of_steps(self) -> int:\n return len(self.step_points)", "def pauses(self):\n return len(self._times) - 1 if self._times else 0", "def number_of_sequences(self):\n return self.sequence_last() + 1", "def count_current():\n return current.count()", "def get_added_timesteps(self) -> int:\n return self._num_timesteps_added", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def get_number_of_activities(self):\n self.__load_activities_from_file_into_memory()\n return super().get_number_of_activities()", "def get_num_jobs(self):\n return str(self.num_jobs)", "def remaining(self) -> int:\n\n return self.window[1]", "def number_results(self):\n pass", "def max_trials(self) -> int:\n return self._max_trials", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def epoch(self):\n return len(self.history)", "def set_runs_per_restart(self, num):\n raise NotImplementedError()", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def number_of_sample_loops(self) -> int:\n return self.__number_of_sample_loops", "def num_worker(self):\n return self.config.get(\"jobs\", 4)", "def current(self):\n return self.counter.count", "def get_count(self):\n\n\t\treturn self.__count", "def first_loop_play_count(self) -> int:\n return self.__first_loop_play_count", "def get_iterations_made(self):\n return self.iterations_made", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count(self) -> int:\n return self._adapter.count()", "def get_numpins(self):\n return self.numpins", "def get_nb_results(self):\n return self.nb_results", "def N ( self ) :\n return self.__N" ]
[ "0.7597703", "0.7347741", "0.72782546", "0.71634763", "0.70761245", "0.70040226", "0.6842851", "0.6821602", "0.67603767", "0.65716016", "0.6482918", "0.6470233", "0.6462451", "0.6448073", "0.64193875", "0.64025664", "0.63683856", "0.6359015", "0.6359015", "0.6350764", "0.6343887", "0.6316601", "0.6309038", "0.63028514", "0.6281804", "0.6281804", "0.6267295", "0.62669116", "0.62647307", "0.6262893", "0.6235421", "0.62346673", "0.6233533", "0.62295324", "0.62286085", "0.62248886", "0.62224865", "0.6211253", "0.62110734", "0.6211063", "0.62012994", "0.6199173", "0.61973476", "0.61940086", "0.61708355", "0.61536586", "0.6138777", "0.6137542", "0.61315644", "0.61236084", "0.6120225", "0.6116099", "0.6114762", "0.6114762", "0.61114186", "0.6104779", "0.61020917", "0.61020917", "0.6089039", "0.607758", "0.6068578", "0.60617334", "0.6059614", "0.6052158", "0.6051191", "0.6034238", "0.6034238", "0.6034238", "0.6018183", "0.60085195", "0.60012776", "0.59973246", "0.5986905", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.598237", "0.59790415", "0.5978144", "0.5972302", "0.59671307", "0.5956613", "0.59530306", "0.59527665", "0.5950749", "0.5940981", "0.59390825", "0.59390825", "0.59390825", "0.59390825", "0.59372133", "0.59327817", "0.5928218", "0.59264463" ]
0.6444586
14