content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url, target_metadata=target_metadata, literal_binds=True) with context.begin_transaction(): context.run_migrations()
2,700
def euler719(n=10**12): """Solution for problem 719.""" return sum(i*i for i in range(2, 1 + int(math.sqrt(n))) if can_be_split_in_sum(i*i, i))
2,701
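The euler719 record above relies on a can_be_split_in_sum helper that is not included in the snippet. The sketch below is an assumption, not the original implementation: it follows the usual Project Euler 719 ("S-number") reading, where the decimal digits of the square are split into contiguous parts that must sum to its root.
def can_be_split_in_sum(number, target):
    """Return True if the decimal digits of `number` can be split into
    contiguous parts whose integer values sum to `target`."""
    if number == target:
        return True
    if number < 10:
        return False
    divisor = 10
    while divisor <= number:
        # Peel off the trailing digits as one part and recurse on the rest.
        head, tail = divmod(number, divisor)
        if tail <= target and can_be_split_in_sum(head, target - tail):
            return True
        divisor *= 10
    return False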
def mark_as_child(data): """ Marks the incoming data as child of celeryapplications """ kopf.adopt(data)
2,702
def inner(a, b): """ Inner product of two tensors. Ordinary inner product of vectors for 1-D tensors (without complex conjugation), in higher dimensions a sum product over the last axes. Note: Numpy argument out is not supported. On GPU, the supported dtypes are np.float16, and np.float32. On CPU, the supported dtypes are np.float16, np.float32, and np.float64. Args: a (Tensor): input tensor. If a and b are nonscalar, their last dimensions must match. b (Tensor): input tensor. If a and b are nonscalar, their last dimensions must match. Returns: Tensor or scalar, out.shape = a.shape[:-1] + b.shape[:-1]. Raises: ValueError: if x1.shape[-1] != x2.shape[-1]. Supported Platforms: Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore.numpy as np >>> a = np.ones((5, 3)) >>> b = np.ones((2, 7, 3)) >>> output = np.inner(a, b) >>> print(output) [[[3. 3. 3. 3. 3. 3. 3.] [3. 3. 3. 3. 3. 3. 3.]] [[3. 3. 3. 3. 3. 3. 3.] [3. 3. 3. 3. 3. 3. 3.]] [[3. 3. 3. 3. 3. 3. 3.] [3. 3. 3. 3. 3. 3. 3.]] [[3. 3. 3. 3. 3. 3. 3.] [3. 3. 3. 3. 3. 3. 3.]] [[3. 3. 3. 3. 3. 3. 3.] [3. 3. 3. 3. 3. 3. 3.]]] """ if F.rank(a) == 0 or F.rank(b) == 0: a = _expand(a, 1) b = _expand(b, 1) if F.rank(a) < F.rank(b): a, b = b, a return F.tensor_mul(a, b) _ = _check_shape_aligned(F.shape(a), F.shape(b)) aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1]) aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1]) a_aligned = F.reshape(a, aligned_shape_a) b_aligned = F.reshape(b, aligned_shape_b) res = _matmul_T(a_aligned, b_aligned) res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1]) return res
2,703
def create(): """ Creates settings and screens table. """ sql_exec( f'''CREATE TABLE settings ( {Settings.IP} text, {Settings.PASS} text, {Settings.PC} integer, {Settings.ENCRYPTION} integer )''' ) sql_exec("INSERT INTO settings VALUES (?, ?, ?, ?)", ( DEFAULTS[Settings.IP], DEFAULTS[Settings.PASS], DEFAULTS[Settings.PC], DEFAULTS[Settings.ENCRYPTION], ) ) sql_exec( f'''CREATE TABLE screens ( address text, top text, right text, bottom text, left text )''' ) add_screen({Screens.LEFT: None, Screens.TOP: None, Screens.RIGHT: None, Screens.BOTTOM: None}, 'main')
2,704
def cleanup_environment(): """ Shut down the ZEO server process running in another thread and clean up the temporary directory. """ global TMP_PATH SERV.terminate() shutil.rmtree(TMP_PATH) if os.path.exists(TMP_PATH): os.rmdir(TMP_PATH) TMP_PATH = None
2,705
def stern_warning(warn_msg: str) -> str: """Wraps warn_msg so that it prints in red.""" return _reg(colorama.Fore.RED, warn_msg)
2,706
def alt2temp_ratio(H, alt_units=default_alt_units): """ Return the temperature ratio (temperature / standard temperature for sea level). The altitude is specified in feet ('ft'), metres ('m'), statute miles, ('sm') or nautical miles ('nm'). If the units are not specified, the units in default_units.py are used. Examples: Calculate the temperature ratio at 8,000 (default altitude units) >>> alt2temp_ratio(8000) 0.94499531494013533 Calculate the temperature ratio at 8,000 m. >>> alt2temp_ratio(8000, alt_units = 'm') 0.81953843484296374 """ # function tested in tests/test_std_atm.py return alt2temp(H, alt_units, temp_units='K') / T0
2,707
def test_random_page_uses_given_language(mock_requests_get: Mock) -> None: """It selects the specified Wikipedia language edition.""" wikipedia.random_page(language="de") args, _ = mock_requests_get.call_args assert "de.wikipedia.org" in args[0]
2,708
def set_background_priority(isBackground: bool): """Start or stop running the process at background priority so the app does not interfere with system performance.""" processID = os.getpid() processHandle = ctypes.windll.kernel32.OpenProcess(ctypes.c_uint( 0x0200 | 0x0400), ctypes.c_bool(False), ctypes.c_uint(processID)) if isBackground: # PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 processMode = 0x00100000 else: # PROCESS_MODE_BACKGROUND_END = 0x00200000 processMode = 0x00200000 ctypes.windll.kernel32.SetPriorityClass( processHandle, ctypes.c_uint(processMode)) ctypes.windll.kernel32.CloseHandle(processHandle)
2,709
def test_medicationusage_2(base_settings): """No. 2 tests collection for MedicationUsage. Test File: medicationusageexample3.json """ filename = base_settings["unittest_data_dir"] / "medicationusageexample3.json" inst = medicationusage.MedicationUsage.parse_file( filename, content_type="application/json", encoding="utf-8" ) assert "MedicationUsage" == inst.resource_type impl_medicationusage_2(inst) # testing reverse by generating data from itself and create again. data = inst.dict() assert "MedicationUsage" == data["resourceType"] inst2 = medicationusage.MedicationUsage(**data) impl_medicationusage_2(inst2)
2,710
def save_labels(labels, fn, overwrite=True): """ Save the labels to the given filename. Raises ------ * NotImplementedError if `fn` is an unsupported file type. * AssertionError if `fn` exists and `not overwrite`. Parameters ---------- labels : numpy.ndarray ... fn : str Where to save the data. Supported file types: {'.npy'} overwrite : bool, optional Whether to overwrite an existing file with the same name. """ assert isinstance(fn, str), f"fn {fn} is not a string!" assert overwrite or not os.path.exists(fn), f"File {fn} exists!" if isinstance(labels, list): labels = np.array(labels) assert isinstance(labels, np.ndarray) if fn.endswith('.npy'): np.save(fn, labels) else: raise NotImplementedError(f"Unsupported file type: {fn}")
2,711
def getFiles(searchpattern): """Append paths of all files that match search pattern to existingFiles""" results = glob.glob(searchpattern) for f in results: if os.path.isfile(f): existingFiles.append(f)
2,712
def test_set_precision_collapse(geometry, mode, expected): """Lines and polygons collapse to empty geometries if vertices are too close""" actual = pygeos.set_precision(geometry, 1, mode=mode) if pygeos.geos_version < (3, 9, 0): # pre GEOS 3.9 has difficulty comparing empty geometries exactly # normalize and compare by WKT instead assert pygeos.to_wkt(pygeos.normalize(actual)) == pygeos.to_wkt( pygeos.normalize(expected) ) else: # force to 2D because GEOS 3.10 yields 3D geometries when they are empty. assert_geometries_equal(pygeos.force_2d(actual), expected)
2,713
def verify_inputs(data, action): """ Simple error checking for input data. """ try: # Verify action for which we are validating the field (create or update). verify_action(action) if not data: message = 'Field validation requires event type, data dict and a defined action; data is empty.' raise Exception(message) if not isinstance(data, dict): message = 'Field validation requires data to be of type dictionary.' raise Exception(message) except Exception as err: message = str(err) raise Exception(message)
2,714
def encode_message(key, message): """ Encodes the message (string) using the key (string) and pybase64.urlsafe_b64encode functionality """ keycoded = [] if not key: key = chr(0) # iterating through the message for i in range(len(message)): # assigning a key_character based on the given key key_character = key[i % len(key)] # each char of the message has the key_char added (in ascii values) # and is converted back to normal, and appended to the keycoded values keycoded.append( chr((ord(message[i]) + ord(key_character)) % 256) ) encoded = pybase64.urlsafe_b64encode( "".join(keycoded).encode() # convert to bytes object (builtin) ).decode() # back to text return encoded
2,715
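A decoding counterpart to the record above is just the inverse of each step. The sketch below is an assumption, not part of the original project: it uses the standard-library base64 module (whose urlsafe_b64decode matches pybase64's output) and round-trips correctly for messages whose characters fall in the 0-255 range.
import base64

def decode_message(key, encoded):
    """Hypothetical inverse of encode_message: strip the base64 layer, then
    subtract the repeating key characters modulo 256."""
    if not key:
        key = chr(0)
    keycoded = base64.urlsafe_b64decode(encoded.encode()).decode()
    decoded = []
    for i in range(len(keycoded)):
        key_character = key[i % len(key)]
        decoded.append(chr((ord(keycoded[i]) - ord(key_character)) % 256))
    return "".join(decoded)

# e.g. decode_message("secret", encode_message("secret", "hello")) == "hello"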
def test_file_open_close(): """ https://github.com/telegraphic/hickle/issues/20 """ import h5py f = h5py.File('test.hdf', 'w') a = np.arange(5) dump(a, 'test.hkl') dump(a, 'test.hkl') dump(a, f, mode='w') f.close() try: dump(a, f, mode='w') except hickle.hickle.ClosedFileError: print("Tests: Closed file exception caught")
2,716
def cfg(): """Configuration of argument parser.""" parser = argparse.ArgumentParser( description="Crawl Gardena smart system data and store results in InfluxDB", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) # TODO: error message when missing env variable parser.add_argument('--gardena-email', required=False, default=os.getenv('GARDENA_EMAIL'), help="Gardena email") # noqa parser.add_argument('--gardena-password', required=False, default=os.getenv('GARDENA_PASSWORD'), help="Gardena password") # noqa parser.add_argument('--gardena-application-id', required=False, default=os.getenv('GARDENA_APPLICATION_ID'), help="Gardena application id") # noqa parser.add_argument('--influxdb-host', required=False, default=os.getenv('INFLUXDB_HOST'), help="influx db host") # noqa parser.add_argument('--influxdb-port', required=False, default=os.getenv('INFLUXDB_PORT'), help="influx db port") # noqa parser.add_argument('--influxdb-user', required=False, default=os.getenv('INFLUXDB_USER'), help="influx db user") # noqa parser.add_argument('--influxdb-pass', required=False, default=os.getenv('INFLUXDB_PASS'), help="influx db password") # noqa parser.add_argument('--influxdb-db', required=False, default=os.getenv('INFLUXDB_DB'), help="influx db database") # noqa parser.add_argument('-v', '--verbose', action='store_true') return parser.parse_args()
2,717
def get_debugger(): """ Returns a debugger instance """ try: from IPython.core.debugger import Pdb pdb = Pdb() except ImportError: try: from IPython.Debugger import Pdb from IPython.Shell import IPShell IPShell(argv=[""]) pdb = Pdb() except ImportError: warnings.warn( 'pdb was selected as a debugger. If you want to use ipython as a debugger you have to "pip install radish-bdd[ipython-debugger]"' ) import pdb return pdb
2,718
def add_sibling(data, node_path, new_key, new_data, _i=0): """ Traversal-safe method to add a siblings data node. :param data: The data object you're traversing. :param node_path: List of path segments pointing to the node you're creating a sibling of. Same as node_path of traverse() :param new_key: The sibling key to create. :param new_data: The new data to be stored at the key. """ if _i < len(node_path) - 1: return add_sibling(data[node_path[_i]], node_path, new_key, new_data, _i + 1) else: data[new_key] = new_data
2,719
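A quick illustration of how the add_sibling record above walks a nested structure (the dictionary here is hypothetical):
data = {"a": {"b": {"c": 1}}}
add_sibling(data, ["a", "b", "c"], "d", 2)
# data is now {"a": {"b": {"c": 1, "d": 2}}}: "d" was created next to "c".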
def draw_pnl(ax, df): """ Draw p&l line on the chart. """ ax.clear() ax.set_title('Performance') index = df.index.unique() dt = index.get_level_values(level=0) pnl = index.get_level_values(level=4) ax.plot( dt, pnl, '-', color='green', linewidth=1.0, label='Performance' ) def perc(val): return '{:.2f}'.format(val) ax.format_ydata = perc set_legend(ax) format_ax(ax)
2,720
def maxRstat(Z, R, i): """ Return the maximum statistic for each non-singleton cluster and its children. Parameters ---------- Z : array_like The hierarchical clustering encoded as a matrix. See `linkage` for more information. R : array_like The inconsistency matrix. i : int The column of `R` to use as the statistic. Returns ------- MR : ndarray Calculates the maximum statistic for the i'th column of the inconsistency matrix `R` for each non-singleton cluster node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where ``Q(j)`` the set of all node ids corresponding to nodes below and including ``j``. See Also -------- linkage : for a description of what a linkage matrix is. inconsistent : for the creation of a inconsistency matrix. Examples -------- >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat >>> from scipy.spatial.distance import pdist Given a data set ``X``, we can apply a clustering method to obtain a linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can be also used to obtain the inconsistency matrix ``R`` associated to this clustering process: >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] >>> Z = median(pdist(X)) >>> R = inconsistent(Z) >>> R array([[1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1. , 0. , 1. , 0. ], [1.05901699, 0.08346263, 2. , 0.70710678], [1.05901699, 0.08346263, 2. , 0.70710678], [1.05901699, 0.08346263, 2. , 0.70710678], [1.05901699, 0.08346263, 2. , 0.70710678], [1.74535599, 1.08655358, 3. , 1.15470054], [1.91202266, 1.37522872, 3. , 1.15470054], [3.25 , 0.25 , 3. , 0. ]]) `scipy.cluster.hierarchy.maxRstat` can be used to compute the maximum value of each column of ``R``, for each non-singleton cluster and its children: >>> maxRstat(Z, R, 0) array([1. , 1. , 1. , 1. , 1.05901699, 1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266, 3.25 ]) >>> maxRstat(Z, R, 1) array([0. , 0. , 0. , 0. , 0.08346263, 0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872, 1.37522872]) >>> maxRstat(Z, R, 3) array([0. , 0. , 0. , 0. , 0.70710678, 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054, 1.15470054]) """ Z = np.asarray(Z, order='c') R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') if type(i) is not int: raise TypeError('The third argument must be an integer.') if i < 0 or i > 3: raise ValueError('i must be an integer between 0 and 3 inclusive.') if Z.shape[0] != R.shape[0]: raise ValueError("The inconsistency matrix and linkage matrix each " "have a different number of rows.") n = Z.shape[0] + 1 MR = np.zeros((n - 1,)) [Z, R] = _copy_arrays_if_base_present([Z, R]) _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i) return MR
2,721
def strip_characters(text): """Strip characters in text.""" t = re.sub('\(|\)|:|,|;|\.|’|”|“|\?|%|>|<', '', text) t = re.sub('/', ' ', t) t = t.replace("'", '') return t
2,722
def get_trained_model(datapath, dataset, image_size, nb_labels): """Recover model weights stored on the file system, and assign them into the `model` structure Parameters ---------- datapath : str Path of the data on the file system dataset : str Name of the dataset image_size : int Image size, in pixels (height=width) nb_labels : int Number of output labels Returns ------- keras.models.Model Convolutional neural network """ K.clear_session() net = SemanticSegmentationNetwork( network_name="semseg_postprocessing", image_size=image_size, nb_labels=nb_labels, dropout=1.0, architecture="unet", ) model = Model(net.X, net.Y) output_folder = utils.prepare_output_folder( datapath, dataset, "semseg" ) checkpoint_filename = "best-model-" + str(image_size) + "-full" + ".h5" checkpoint_full_path = os.path.join(output_folder, checkpoint_filename) if os.path.isfile(checkpoint_full_path): model.load_weights(checkpoint_full_path) logger.info( "Model weights have been recovered from %s" % checkpoint_full_path ) else: logger.info( ( "No available trained model for this image size" " with optimized hyperparameters. The " "inference will be done on an untrained model" ) ) return model
2,723
def make_std_secgroup(name, desc="standard security group"): """ Returns a standarized resource group with rules for ping and ssh access. The returned resource can be further configured with additional rules by the caller. The name parameter is used to form the name of the ResourceGroup, and also provides the name of the SecGroup that is created in the ResourceGroup. """ return ResourceGroup("%s_std_secgroup" % name, group=SecGroup(name, desc), ping_rule=SecGroupRule("ping_rule", ctxt.comp.container.group, ip_protocol="icmp", from_port=-1, to_port=-1), ssh_rule=SecGroupRule("ssh_rule", ctxt.comp.container.group, ip_protocol="tcp", from_port=22, to_port=22), )
2,724
def parse_plot_args(*args, **options): """Parse the args the same way plt.plot does.""" x = None y = None style = None if len(args) == 1: y = args[0] elif len(args) == 2: if isinstance(args[1], str): y, style = args else: x, y = args elif len(args) == 3: x, y, style = args return x, y, style
2,725
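For reference, the call patterns the parse_plot_args record above accepts, mirroring plt.plot (values are illustrative):
parse_plot_args([1, 4, 9])                    # -> (None, [1, 4, 9], None)
parse_plot_args([1, 4, 9], 'r--')             # -> (None, [1, 4, 9], 'r--')
parse_plot_args([0, 1, 2], [1, 4, 9])         # -> ([0, 1, 2], [1, 4, 9], None)
parse_plot_args([0, 1, 2], [1, 4, 9], 'r--')  # -> ([0, 1, 2], [1, 4, 9], 'r--')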
def publications_classification_terms_get(search=None): # noqa: E501 """List of Classification Terms List of Classification Terms # noqa: E501 :param search: search term applied :type search: str :rtype: ApiOptions """ return 'do some magic!'
2,726
def test_empty_latest_listing(): """Test listing a 'backup-list LATEST' on an empty prefix.""" container_name = 'wal-e-test-empty-listing' layout = storage.StorageLayout('wabs://{0}/test-prefix' .format(container_name)) with FreshContainer(container_name) as fb: fb.create() bl = BackupList(fb.conn, layout, False) found = list(bl.find_all('LATEST')) assert len(found) == 0
2,727
def lu_decompose(tri_diagonal): """Decompose a tri-diagonal matrix into LU form. Parameters ---------- tri_diagonal : TriDiagonal Represents the matrix to decompose. """ # WHR Appendix B: perform LU decomposition # # d[0] = hd[0] # b[i] = hu[i] # # Iterative algorithm: # d[i] = hd[i] - hu[i-1] a[i-1] # a[i] = hl[i] / d[i] hd, hu, hl = tri_diagonal b = hu # We want to vectorize the calculation of d and a as much as possible, # instead of using WHR's iterative algorithm directly. # # Substitute a[i-1] into the expression for d[i] to get a recurrence # relation for d: # # d[i] = hd[i] - hu[i-1] a[i-1] # = hd[i] - hu[i-1] * hl[i-1] / d[i-1] # # Let c[i] = hu[i-1] * hl[i-1]. # c[0] = 0, which is meaningless but convenient for the helper. # # d[i] = hd[i] - c[i] / d[i-1] c = np.empty_like(hd) c[0] = 0.0 np.multiply(hu, hl, c[1:]) np.negative(c, c) d = hd.copy() solve_lu_d(c, d) # a[i] = hl[i] / d[i] a = np.divide(hl, d[:-1]) return TriDiagonalLU(d, b, a)
2,728
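The solve_lu_d helper used by the lu_decompose record above is not shown. Given how c and d are prepared (c holds the negated products hu[i-1]*hl[i-1] with c[0] = 0, and d starts as a copy of hd), it has to evaluate the sequential recurrence in place. A plain-Python sketch follows; the real helper is presumably compiled, since this loop cannot be vectorized.
def solve_lu_d(c, d):
    # In-place evaluation of d[i] = hd[i] - hu[i-1]*hl[i-1] / d[i-1];
    # the sign flip is already folded into c by the caller.
    for i in range(1, len(d)):
        d[i] += c[i] / d[i - 1]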
def _card(item): """Handle card entries Returns: title (append " - Card" to the name), username (card brand), password (card number), url (none), notes (including all card info) """ notes = item.get('notes', "") or "" # Add card info to the notes notes = notes + ("\n".join([f"{i}: {j}" for i, j in (item.get('card', {}) or {}).items()])) return f"{item['name']} - Card", \ item.get('card', {}).get('brand', '') or "", \ item.get('card', {}).get('number', "") or "", \ "", \ notes
2,729
def s3(): """Boto3 S3 resource.""" return S3().resource
2,730
def tile2(i): """ This function handles OSC user input at address "/amazon/arduino/norm/baldosa2", with 1 argument: i. Use the pl.OSC decorator to define handlers like this. Multiple scripts can listen to the same address simultaneously. """ print "baldosa2" videoPlayers[1].play()
2,731
def SUE(xmean=None,ymean=None,xstdev=None,ystdev=None,rho=None, \ xskew=None,yskew=None,xmin=None,xmax=None,ymin=None,ymax=None, \ Npt=300,xisln=False,yisln=False): """ SKEWED UNCERTAINTY ELLIPSES (SUE) Function to plot uncertainty SUEs (or 1 sigma contour of a bivariate split-normal distribution). The parameters are the means (xmean,ymean), the standard deviations (xstdev,ystdev), the skewnesses (xskew,yskew) and the correlation coefficients (rho). The optional bounds (xmin,xmax,ymin,ymax) have the effect of truncating the SUEs in case there is a range of parameter space that is forbidden. It is important to notice that the xisln/yisln parameters are not related to the log settings of the axes where we plot the SUE, but are here to indicate that the moments of the variable to plot correspond to the natural logarithm (ln) of the variable we want to display. For instance, for displaying the ellipses of (x,y) where, for x, the moments are those of lnx, we would write: SUE(xmean=mean_of_lnx,ymean=mean_of_y,xstdev=stdev_of_lnx, \ ystdev=stdev_of_y,xskew=skewness_of_lnx,yskew=skewness_of_y, \ rho=correl_coeff_of_lnx_and_y,xisln=True) """ # Rotation angle theta = 1./2 * np.arctan( 2*rho*xstdev*ystdev / (xstdev**2-ystdev**2) ) # Numerically solve for taux and tauy (tau=1.D2 ==> skew=0.99) taugrid = ramp(N=10000,x0=1.E-2,x1=1.E2,log=True) Ax = np.sqrt(np.pi/2) \ * ( (np.cos(theta))**3*xskew*xstdev**3 \ + (np.sin(theta))**3*yskew*ystdev**3 ) \ / ( (np.sin(theta))**6 + (np.cos(theta))**6 ) \ * ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \ / ( (np.cos(theta))**2*xstdev**2 \ - (np.sin(theta))**2*ystdev**2 ) )**1.5 Ay = np.sqrt(np.pi/2) \ * ( (np.cos(theta))**3*yskew*ystdev**3 \ - (np.sin(theta))**3*xskew*xstdev**3 ) \ / ( (np.cos(theta))**6 + (np.sin(theta))**6 ) \ * ( ( (np.cos(theta))**2 - (np.sin(theta))**2 ) \ / ( (np.cos(theta))**2*ystdev**2 \ - (np.sin(theta))**2*xstdev**2 ) )**1.5 taux = np.exp(np.interp(Ax,Ctau(taugrid)/(Btau(taugrid))**1.5, \ np.log(taugrid))) tauy = np.exp(np.interp(Ay,Ctau(taugrid)/(Btau(taugrid))**1.5, \ np.log(taugrid))) if (not np.isfinite(taux) or taux > 1.E2): taux = 1.E2 if (not np.isfinite(tauy) or tauy > 1.E2): tauy = 1.E2 # Rest of the parameters lambdax = np.sqrt( ( (np.cos(theta))**2*xstdev**2 \ - (np.sin(theta))**2*ystdev**2 ) \ / ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(taux) ) lambday = np.sqrt( ( (np.cos(theta))**2*ystdev**2 \ - (np.sin(theta))**2*xstdev**2 ) \ / ( (np.cos(theta))**2 - (np.sin(theta))**2 ) / Btau(tauy) ) x0 = xmean - np.sqrt(2/np.pi) * ( np.cos(theta)*lambdax*(taux-1) \ - np.sin(theta)*lambday*(tauy-1) ) y0 = ymean - np.sqrt(2/np.pi) * ( np.sin(theta)*lambdax*(taux-1) \ + np.cos(theta)*lambday*(tauy-1) ) # Draw the SUE matrot = np.array([ [ np.cos(theta), -np.sin(theta) ], \ [ np.sin(theta), np.cos(theta) ] ]) xell_ax1 = np.zeros(2) yell_ax1 = np.zeros(2) xell_ax2 = np.zeros(2) yell_ax2 = np.zeros(2) for k in np.arange(4): if (k == 0): xell_sub = ramp(N=Npt,x0=-lambdax,x1=0) + x0 rx = 1-(xell_sub-x0)**2/lambdax**2 yell_sub = np.zeros(Npt) yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0 yell_sub[rx < 0] = np.nan elif (k == 1): xell_sub = ramp(N=Npt,x0=0,x1=lambdax*taux) + x0 rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2 yell_sub = np.zeros(Npt) yell_sub[rx >= 0] = -lambday * np.sqrt(rx[rx >= 0]) + y0 yell_sub[rx < 0] = np.nan elif (k == 2): xell_sub = (ramp(N=Npt,x0=0,x1=lambdax*taux))[::-1] + x0 rx = 1-(xell_sub-x0)**2/lambdax**2/taux**2 yell_sub = np.zeros(Npt) yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0 
yell_sub[rx < 0] = np.nan elif (k == 3): xell_sub = (ramp(N=Npt,x0=-lambdax,x1=0))[::-1] + x0 rx = 1-(xell_sub-x0)**2/lambdax**2 yell_sub = np.zeros(Npt) yell_sub[rx >= 0] = lambday*tauy * np.sqrt(rx[rx >= 0]) + y0 yell_sub[rx < 0] = np.nan # Add the limit case (half ellipse) mask = np.logical_and(np.isfinite(yell_sub),np.isfinite(xell_sub)) xell_sub = xell_sub[mask] yell_sub = yell_sub[mask] Nsub = np.count_nonzero(mask) # Rotate the ellipse for j in np.arange(Nsub): vecell = np.matmul(matrot, \ np.array([xell_sub[j]-x0,yell_sub[j]-y0])) xell_sub[j] = vecell[0] + x0 yell_sub[j] = vecell[1] + y0 if (k == 0): xell = xell_sub yell = yell_sub else: xell = np.concatenate((xell,xell_sub)) yell = np.concatenate((yell,yell_sub)) xplot = np.concatenate((xell,[xell[0]])) yplot = np.concatenate((yell,[yell[0]])) # Logs and limits if (xisln): xplot = np.exp(xplot) x0 = np.exp(x0) if (yisln): yplot = np.exp(yplot) y0 = np.exp(y0) if (xmin != None): xplot[xplot < xmin] = xmin if (x0 < xmin): x0 = xmin if (xmax != None): xplot[xplot > xmax] = xmax if (x0 > xmax): x0 = xmax if (ymin != None): yplot[yplot < ymin] = ymin if (y0 < ymin): y0 = ymin if (ymax != None): yplot[yplot > ymax] = ymax if (y0 > ymax): y0 = ymax return(xplot,yplot,x0,y0)
2,732
def create_label(places, size, corners, resolution=0.50, x=(0, 90), y=(-50, 50), z=(-4.5, 5.5), scale=4, min_value=np.array([0., -50., -4.5])): """Create training Labels which satisfy the range of experiment""" x_logical = np.logical_and((places[:, 0] < x[1]), (places[:, 0] >= x[0])) y_logical = np.logical_and((places[:, 1] < y[1]), (places[:, 1] >= y[0])) z_logical = np.logical_and((places[:, 2] + size[:, 0]/2. < z[1]), (places[:, 2] + size[:, 0]/2. >= z[0])) xyz_logical = np.logical_and(x_logical, np.logical_and(y_logical, z_logical)) center = places.copy() center[:, 2] = center[:, 2] + size[:, 0] / 2. # Move bottom to center sphere_center = ((center[xyz_logical] - min_value) / (resolution * scale)).astype(np.int32) train_corners = corners[xyz_logical].copy() anchor_center = sphere_to_center(sphere_center, resolution=resolution, scale=scale, min_value=min_value) #sphere to center for index, (corner, center) in enumerate(zip(corners[xyz_logical], anchor_center)): train_corners[index] = corner - center return sphere_center, train_corners
2,733
def usage(): """Serve the usage page.""" return render_template("meta/access.html")
2,734
def GetCache(name, create=False): """Returns the cache given a cache identifier name. Args: name: The cache name to operate on. May be prefixed by "resource://" for resource cache names or "file://" for persistent file cache names. If only the prefix is specified then the default cache name for that prefix is used. create: If True, creates the persistent cache if it does not exist. Raises: CacheNotFound: If the cache does not exist. Returns: The cache object. """ types = { 'file': file_cache.Cache, 'resource': resource_cache.ResourceCache, } def _OpenCache(cache_class, name): try: return cache_class(name, create=create) except cache_exceptions.Error as e: raise Error(e) if name: for cache_id, cache_class in types.iteritems(): if name.startswith(cache_id + '://'): name = name[len(cache_id) + 3:] if not name: name = None return _OpenCache(cache_class, name) return _OpenCache(resource_cache.Cache, name)
2,735
def rf_local_divide_int(tile_col, scalar): """Divide a Tile by an integral scalar""" return _apply_scalar_to_tile('rf_local_divide_int', tile_col, scalar)
2,736
def or_(*children: Any) -> Dict[str, Any]: """Select devices that match at least one of the given selectors. >>> or_(tag('sports'), tag('business')) {'or': [{'tag': 'sports'}, {'tag': 'business'}]} """ return {"or": [child for child in children]}
2,737
def get_width_and_height_from_size(x): """ Obtains width and height from a int or tuple """ if isinstance(x, int): return x, x if isinstance(x, list) or isinstance(x, tuple): return x else: raise TypeError()
2,738
def remove_stopwords(lista,stopwords): """Function to remove stopwords Args: lista ([list]): list of texts stopwords ([list]): [description] Returns: [list]: List of texts without stopwords """ lista_out = list() for idx, text in enumerate(lista): text = ' '.join([word for word in text.split() if word not in stopwords]) text = text.strip() lista_out.append(text) #print("Len original: {} - Len processed stopwords: {}".format(len(lista),len(lista_out))) return lista_out
2,739
def Skeletonize3D(directory, crop=None, flip='y', dtype=None): """Skeletonize TrailMap results. Parameters ---------- directory : string Path to directory with segmented data. crop : dict (optional, default None) Dictionary with ImageJ-format cropping coordinates ({width:, height:, x:, y:,}) flip : string (optional, default 'y') Option to flip axis, can be any combination of 'xyz'. dtype : numpy dtype (optional, default None results in float32 images) Data type for output image. Set dtype=np.uint16 if you are going to combine with autofluo in Imaris. """ #Load Data: sample = directory.split('/')[-3] print("Started " + time.ctime()) ims = io.ImageCollection(os.path.join(directory, '*.tif'), load_func=io.imread) data = ims.concatenate() #Optionally crop: if crop: rawshape=data.shape data = data[:,crop['y']:crop['y']+crop['height'],crop['x']:crop['x']+crop['width']] print("Cropped data from " + str(rawshape) + " to " + str(data.shape) + " at " + time.ctime()) cat = np.zeros(shape=(data.shape), dtype='float32') #Create output array #Loop through thresholds 0.2 -> 0.9, extract signal, scale, and combine for i in range(2,10,1): print(str(i) + " started at " + time.ctime()) i=i/10 im = (data>i).astype('float32') skel = morphology.skeletonize_3d(im).astype('float32')*i print(str(i) + " completed at " + time.ctime()) cat = cat+skel #Optionally flip along the x, y, or z axis: if flip: if 'y' in flip: cat = np.flip(cat, axis=1) if 'x' in flip: cat = np.flip(cat, axis=2) if 'z' in flip: cat = np.flip(cat, axis=0) if dtype: cat = cat.astype(dtype) #have not tested that this results in same pixel values as changing image type in ImageJ. #Save the result image stack: try: io.imsave(os.path.join(directory, sample + '_ThresholdedSkeleton3D.tif'), cat, check_contrast=False) except PermissionError: print("You do not have write permissions for " + str(directory) + '\n' + "Saving to your home directory instead.") homedir = os.path.expanduser('~/') io.imsave(os.path.join(homedir, sample + '_ThresholdedSkeleton3D.tif'), cat, check_contrast=False) print("Finished " + sample + ' ' + time.ctime()) return cat
2,740
def run_smeagle(package, version, path, libname): """ Run smeagle for a library of interest """ print("Testing %s with smeagle" % libname) out_dir = "/results/{{ tester.name }}/{{ tester.version }}/%s/%s" % (package, version) lib = os.path.join(path, libname) libdir = os.path.dirname(libname) # The path to the lib might have an extra directory result_dir = os.path.join(out_dir, libdir) if not os.path.exists(result_dir): os.makedirs(result_dir) # Smeagle will generate yaml by default, also generate asp run("time -p Smeagle -l %s > %s/%s.json" % (lib, out_dir, libname))
2,741
async def test_PilotBuilder_speed(correct_bulb: wizlight) -> None: """Test speed.""" await correct_bulb.turn_on(PilotBuilder(scene=1, speed=50)) state = await correct_bulb.updateState() assert state and state.get_scene() == SCENES[1] assert state and state.get_speed() == 50
2,742
def get_users_report(valid_users, ibmcloud_account_users): """get_users_report()""" users_report = [] valid_account_users = [] invalid_account_users = [] # use case 1: find users in account not in valid_users for account_user in ibmcloud_account_users: # check if account user is in valid_users is_valid_user=False for valid_user in valid_users: if ( account_user["email"] == valid_user["email"] ): account_user["name"] = valid_user["name"] account_user["identities"] = valid_user["identities"] if "resourceGroups" in valid_user: account_user["resourceGroups"] = valid_user["resourceGroups"] account_user["manager"] = valid_user["manager"] account_user["association"] = valid_user["association"] is_valid_user=True if is_valid_user: valid_account_users.append(account_user) else: invalid_account_users.append(account_user) users_report = { "valid_account_users" : valid_account_users, "invalid_account_users" : invalid_account_users } return users_report
2,743
def insert_bn(names): """Insert bn layer after each conv. Args: names (list): The list of layer names. Returns: list: The list of layer names with bn layers. """ names_bn = [] for name in names: names_bn.append(name) if 'conv' in name: position = name.replace('conv', '') names_bn.append(f'bn{position}') return names_bn
2,744
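Example input and output for the insert_bn record above (layer names are illustrative):
insert_bn(['conv1', 'relu1', 'conv2'])
# -> ['conv1', 'bn1', 'relu1', 'conv2', 'bn2']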
def encrypt(KeyId=None, Plaintext=None, EncryptionContext=None, GrantTokens=None, EncryptionAlgorithm=None): """ Encrypts plaintext into ciphertext by using a customer master key (CMK). The Encrypt operation has two primary use cases: You don\'t need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key. When you encrypt data, you must specify a symmetric or asymmetric CMK to use in the encryption operation. The CMK must have a Key value of ENCRYPT_DECRYPT. To find the Key of a CMK, use the DescribeKey operation. If you use a symmetric CMK, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException . For more information, see Encryption Context in the AWS Key Management Service Developer Guide . If you specify an asymmetric CMK, you must also specify the encryption algorithm. The algorithm must be compatible with the CMK type. The maximum size of the data that you can encrypt varies with the type of CMK and the encryption algorithm that you choose. The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide . To perform this operation on a CMK in a different AWS account, specify the key ARN or alias ARN in the value of the KeyId parameter. See also: AWS API Documentation Exceptions Examples The following example encrypts data with the specified customer master key (CMK). Expected Output: :example: response = client.encrypt( KeyId='string', Plaintext=b'bytes', EncryptionContext={ 'string': 'string' }, GrantTokens=[ 'string', ], EncryptionAlgorithm='SYMMETRIC_DEFAULT'|'RSAES_OAEP_SHA_1'|'RSAES_OAEP_SHA_256' ) :type KeyId: string :param KeyId: [REQUIRED]\nA unique identifier for the customer master key (CMK).\nTo specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with 'alias/' . To specify a CMK in a different AWS account, you must use the key ARN or alias ARN.\nFor example:\n\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab\nKey ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\nAlias name: alias/ExampleAlias\nAlias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias\n\nTo get the key ID and key ARN for a CMK, use ListKeys or DescribeKey . To get the alias name and alias ARN, use ListAliases .\n :type Plaintext: bytes :param Plaintext: [REQUIRED]\nData to be encrypted.\n :type EncryptionContext: dict :param EncryptionContext: Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not support an encryption context.\nAn encryption context is a collection of non-secret key-value pairs that represents additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. 
An encryption context is optional when encrypting with a symmetric CMK, but it is highly recommended.\nFor more information, see Encryption Context in the AWS Key Management Service Developer Guide .\n\n(string) --\n(string) --\n\n\n\n :type GrantTokens: list :param GrantTokens: A list of grant tokens.\nFor more information, see Grant Tokens in the AWS Key Management Service Developer Guide .\n\n(string) --\n\n :type EncryptionAlgorithm: string :param EncryptionAlgorithm: Specifies the encryption algorithm that AWS KMS will use to encrypt the plaintext message. The algorithm must be compatible with the CMK that you specify.\nThis parameter is required only for asymmetric CMKs. The default value, SYMMETRIC_DEFAULT , is the algorithm used for symmetric CMKs. If you are using an asymmetric CMK, we recommend RSAES_OAEP_SHA_256.\n :rtype: dict ReturnsResponse Syntax { 'CiphertextBlob': b'bytes', 'KeyId': 'string', 'EncryptionAlgorithm': 'SYMMETRIC_DEFAULT'|'RSAES_OAEP_SHA_1'|'RSAES_OAEP_SHA_256' } Response Structure (dict) -- CiphertextBlob (bytes) -- The encrypted plaintext. When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. KeyId (string) -- The ID of the key used during encryption. EncryptionAlgorithm (string) -- The encryption algorithm that was used to encrypt the plaintext. Exceptions KMS.Client.exceptions.NotFoundException KMS.Client.exceptions.DisabledException KMS.Client.exceptions.KeyUnavailableException KMS.Client.exceptions.DependencyTimeoutException KMS.Client.exceptions.InvalidKeyUsageException KMS.Client.exceptions.InvalidGrantTokenException KMS.Client.exceptions.KMSInternalException KMS.Client.exceptions.KMSInvalidStateException Examples The following example encrypts data with the specified customer master key (CMK). response = client.encrypt( # The identifier of the CMK to use for encryption. You can use the key ID or Amazon Resource Name (ARN) of the CMK, or the name or ARN of an alias that refers to the CMK. KeyId='1234abcd-12ab-34cd-56ef-1234567890ab', # The data to encrypt. Plaintext='<binary data>', ) print(response) Expected Output: { # The encrypted data (ciphertext). 'CiphertextBlob': '<binary data>', # The ARN of the CMK that was used to encrypt the data. 'KeyId': 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab', 'ResponseMetadata': { '...': '...', }, } :return: { 'CiphertextBlob': b'bytes', 'KeyId': 'string', 'EncryptionAlgorithm': 'SYMMETRIC_DEFAULT'|'RSAES_OAEP_SHA_1'|'RSAES_OAEP_SHA_256' } :returns: Symmetric CMKs SYMMETRIC_DEFAULT : 4096 bytes RSA_2048 RSAES_OAEP_SHA_1 : 214 bytes RSAES_OAEP_SHA_256 : 190 bytes RSA_3072 RSAES_OAEP_SHA_1 : 342 bytes RSAES_OAEP_SHA_256 : 318 bytes RSA_4096 RSAES_OAEP_SHA_1 : 470 bytes RSAES_OAEP_SHA_256 : 446 bytes """ pass
2,745
def convert_format(parameters): """Converts dictionary database type format to serial transmission format""" values = parameters.copy() for key, (index, format, value) in values.items(): if type(format) == type(db.Int): values[key] = (index, 'i', value) # signed 32 bit int (arduino long) elif type(format) == type(db.Int16): values[key] = (index, 'h', value) elif type(format) == type(db.Float): values[key] = (index, 'f', value) elif type(format) == type(db.String32): values[key] = (index, 's', value) elif type(format) == type(db.StringN): values[key] = (index, 's', value) elif type(format) == type(db.Time): values[key] = (index, 'd', value) return values
2,746
def tester_pr3(): """show calpost raster in different projection""" from plotter import calpost_reader as reader import rasterio import cartopy.crs as ccrs with open('../data/tseries_ch4_1min_conc_co_fl.dat') as f: dat = reader.Reader(f, slice(60 * 12, 60 * 12 + 10)) # background b = rasterio.open(bgfile) bext = [b.transform[2], b.transform[2] + b.transform[0] * b.width, b.transform[5] + b.transform[4] * b.height, b.transform[5]] plotter_options = {'extent': bext, 'projection': ccrs.epsg(3857), 'imshow_options': {'origin': 'lower', } } x = dat['x'] * 1000 y = dat['y'] * 1000 p = Plotter(dat['v'], dat['ts'], x=x, y=y, plotter_options=plotter_options) p(outdir / 'test_pr3.png')
2,747
async def test_addon_options_changed( hass, client, addon_installed, addon_running, install_addon, addon_options, start_addon, old_device, new_device, old_s0_legacy_key, new_s0_legacy_key, old_s2_access_control_key, new_s2_access_control_key, old_s2_authenticated_key, new_s2_authenticated_key, old_s2_unauthenticated_key, new_s2_unauthenticated_key, ): """Test update config entry data on entry setup if add-on options changed.""" addon_options["device"] = new_device addon_options["s0_legacy_key"] = new_s0_legacy_key addon_options["s2_access_control_key"] = new_s2_access_control_key addon_options["s2_authenticated_key"] = new_s2_authenticated_key addon_options["s2_unauthenticated_key"] = new_s2_unauthenticated_key entry = MockConfigEntry( domain=DOMAIN, title="Z-Wave JS", data={ "url": "ws://host1:3001", "use_addon": True, "usb_path": old_device, "s0_legacy_key": old_s0_legacy_key, "s2_access_control_key": old_s2_access_control_key, "s2_authenticated_key": old_s2_authenticated_key, "s2_unauthenticated_key": old_s2_unauthenticated_key, }, ) entry.add_to_hass(hass) await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() assert entry.state == ConfigEntryState.LOADED assert entry.data["usb_path"] == new_device assert entry.data["s0_legacy_key"] == new_s0_legacy_key assert entry.data["s2_access_control_key"] == new_s2_access_control_key assert entry.data["s2_authenticated_key"] == new_s2_authenticated_key assert entry.data["s2_unauthenticated_key"] == new_s2_unauthenticated_key assert install_addon.call_count == 0 assert start_addon.call_count == 0
2,748
def example_load_and_plot(filename=None, summary_dir=os.path.join(REPO, '..', 'summary')): """Example demonstrating loading an HDF file into Pandas and plotting.""" import matplotlib.pyplot as plt import pandas as pd if filename is None: filebase = 'merged_tract_4849_1,1_1,2.hdf5' filename = os.path.join(summary_dir, filebase) df = pd.read_hdf(filename) plt.hist2d(df['g_mag']-df['r_mag'], df['r_mag']-df['i_mag'], range=((-1, +2), (-1, +2)), bins=40) plt.colorbar() plt.xlabel('r-i') plt.ylabel('g-r') plt.show()
2,749
def prepare(_config): """ Preparation of the train and validation datasets for the training and initialization of the padertorch trainer, using the configuration dict. Args: _config: Configuration dict of the experiment Returns: 3-Tuple of the prepared datasets and the trainer. trainer: padertorch trainer train_dataset: training_dataset validate_dataset: dataset for validation """ # Extraction needed strings from the config dict train_dataset_name = _config['train_dataset_name'] validate_dataset_name = _config['validate_dataset_name'] database_json = _config['database_json'] # Initialization of the trainer trainer = pt.Trainer.from_config(_config["trainer"]) db = JsonDatabase(json_path=database_json) # Preparation of the datasets train_dataset = prepare_dataset(db, train_dataset_name, _config['batch_size'], prefetch = not _config['debug']) validate_dataset = prepare_dataset(db, validate_dataset_name, _config['batch_size'], prefetch = not _config['debug']) # Print the representations of the two datasets to the console. print(repr(train_dataset_name), repr(validate_dataset_name)) return (trainer, train_dataset, validate_dataset)
2,750
def getproj4(epsg): """ Get projection file (.prj) text for given epsg code from spatialreference.org. See: https://www.epsg-registry.org/ .. deprecated:: 3.2.11 This function will be removed in version 3.3.5. Use :py:class:`flopy.discretization.structuredgrid.StructuredGrid` instead. Parameters ---------- epsg : int epsg code for coordinate system Returns ------- prj : str text for a projection (*.prj) file. """ warnings.warn( "SpatialReference has been deprecated and will be removed in version " "3.3.5. Use StructuredGrid instead.", category=DeprecationWarning, ) return get_spatialreference(epsg, text="proj4")
2,751
def vpn_tunnel_inside_cidr(cidr): """ Property: VpnTunnelOptionsSpecification.TunnelInsideCidr """ reserved_cidrs = [ "169.254.0.0/30", "169.254.1.0/30", "169.254.2.0/30", "169.254.3.0/30", "169.254.4.0/30", "169.254.5.0/30", "169.254.169.252/30", ] cidr_match_re = compile( r"^169\.254\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)" r"\.(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\/30$" ) if cidr in reserved_cidrs: raise ValueError( 'The following CIDR blocks are reserved and cannot be used: "%s"' % (", ".join(reserved_cidrs)) ) elif not cidr_match_re.match(cidr): raise ValueError( "%s is not a valid CIDR." " A size /30 CIDR block from the 169.254.0.0/16 must be specified." % cidr ) return cidr
2,752
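How the vpn_tunnel_inside_cidr record above behaves on a few illustrative inputs:
vpn_tunnel_inside_cidr("169.254.6.0/30")   # accepted: returned unchanged
vpn_tunnel_inside_cidr("169.254.1.0/30")   # ValueError: reserved CIDR block
vpn_tunnel_inside_cidr("10.0.0.0/30")      # ValueError: not a /30 inside 169.254.0.0/16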
def choose_media_type(accept, resource_types): """choose_media_type(accept, resource_types) -> resource type select a media type for the response accept is the Accept header from the request. If there is no Accept header, '*/*' is assumed. If the Accept header cannot be parsed, HTTP400BadRequest is raised. resource_types is an ordered list of available resource types, with the most desirable type first. To find a match, the types in the Accept header are ordered by q value (descending), and each is compared with the available resource types in order. The first matching media type is returned. If not match is found, HTTP406NotAcceptable is raised. """ # This function is exposed in the script dpf_choose_media_type, # so if changes are made here, that script's documentation # should be updated to reflect them. # list of (type, subtype, q) accept_types = [] for part in accept.split(','): part = part.strip() if ';' not in part: mt = part q = 1.0 else: (mt, q) = part.split(';', 1) mt = mt.strip() q = q.strip() if not q.startswith('q='): raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n') try: q = float(q[2:]) except ValueError: raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n') if '/' not in mt: raise HTTP400BadRequest('text/plain', 'Bad Accept header.\n') (type, subtype) = mt.split('/', 1) accept_types.append((type, subtype, q)) accept_types.sort(cmp_accept_type) accept_types.reverse() for (type, subtype, q) in accept_types: for available_type in resource_types: (a_type, a_subtype) = available_type.split('/', 1) if type != '*' and type != a_type: continue if subtype != '*' and subtype != a_subtype: continue return available_type raise HTTP406NotAcceptable()
2,753
def get_border_removal_size(image: Image, border_removal_percentage: float = .04, patch_width: int = 8): """ This function will compute the border removal size. When computing the border removal, the patch size becomes important: the output shape of the image will always be an even factor of the patch size. This allows the later computations to evenly fit the image. :param image: input image to get the dimensions :param border_removal_percentage: how much of the border to remove :param patch_width: the width of the patches in pixels. :return: how many pixels to remove around the border """ w, h = image.size return int(math.ceil(w * border_removal_percentage / patch_width)) * patch_width
2,754
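A worked example for the get_border_removal_size record above, assuming Pillow's Image.new for the input (the 1024x768 size is arbitrary):
from PIL import Image

img = Image.new("RGB", (1024, 768))
get_border_removal_size(img)   # ceil(1024 * 0.04 / 8) * 8 = 6 * 8 = 48 pixels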
def sample(n, ds, model): """ Sample the potential of the given data source n times. """ i = 0 while i < n: random = np.random.rand(2) mock_params = [DataValue(value=random[0], type="float"), DataValue(value=random[1], type="float")] ds.run(model, mock_params) i += 1
2,755
def get_natural_num(msg): """ Get a valid natural number from the user! :param msg: message asking for a natural number :return: a positive integer converted from the user's input. """ valid_enter = False while not valid_enter: given_number = input(msg).strip() if given_number.isdigit(): num = int(given_number) valid_enter = True return num
2,756
def _decode_panoptic_or_depth_map(map_path: str) -> Optional[str]: """Decodes the panoptic or depth map from encoded image file. Args: map_path: Path to the panoptic or depth map image file. Returns: Panoptic or depth map as an encoded int32 numpy array bytes or None if not existing. """ if not tf.io.gfile.exists(map_path): return None with tf.io.gfile.GFile(map_path, 'rb') as f: decoded_map = np.array(Image.open(f)).astype(np.int32) if FLAGS.panoptic_divisor > 0 and map_path.endswith(_LABEL_SUFFIX): semantic_map = decoded_map[:, :, 0] instance_map = ( decoded_map[:, :, 1] * _ENCODED_INSTANCE_LABEL_DIVISOR + decoded_map[:, :, 2]) decoded_map = semantic_map * FLAGS.panoptic_divisor + instance_map return decoded_map.tobytes()
2,757
def splitstr(s, l=25): """ Split string s into chunks shorter than l characters, reserving 5 characters of room for an "(i/n)" suffix. """ words = s.split() out = [] counter = 5 tmp_out = '' for i in xrange(len(words)): if counter + len(words[i]) > l: out.append(tmp_out) tmp_out = words[i] + ' ' else: tmp_out += words[i] + ' ' counter = len(tmp_out) + 5 if tmp_out: out.append(tmp_out) return out
2,758
def SplitRequirementSpecifier(requirement_specifier): """Splits the package name from the other components of a requirement spec. Only supports PEP 508 `name_req` requirement specifiers. Does not support requirement specifiers containing environment markers. Args: requirement_specifier: str, a PEP 508 requirement specifier that does not contain an environment marker. Returns: (string, string), a 2-tuple of the extracted package name and the tail of the requirement specifier which could contain extras and/or a version specifier. Raises: Error: No package name was found in the requirement spec. """ package = requirement_specifier.strip() tail_start_regex = r'(\[|\(|==|>=|!=|<=|<|>|~=|===)' tail_match = re.search(tail_start_regex, requirement_specifier) tail = '' if tail_match: package = requirement_specifier[:tail_match.start()].strip() tail = requirement_specifier[tail_match.start():].strip() if not package: raise Error(r'Missing package name in requirement specifier: \'{}\''.format( requirement_specifier)) return package, tail
2,759
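Example splits produced by the SplitRequirementSpecifier record above (requirement strings are illustrative):
SplitRequirementSpecifier('numpy')                     # -> ('numpy', '')
SplitRequirementSpecifier('requests[security]>=2.20')  # -> ('requests', '[security]>=2.20')
SplitRequirementSpecifier('pandas == 1.3.*')           # -> ('pandas', '== 1.3.*')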
def arctanh(var): """ Wrapper function for atanh """ return atanh(var)
2,760
def plot_roc_per_class(model, test_data, test_truth, labels, title, batch_size=32, prefix='./figures/'): """Plot a per class ROC curve. Arguments: model: The model whose predictions to evaluate. test_data: Input testing data in the shape the model expects. test_truth: The true labels of the testing data labels: dict specifying the class labels. title: the title to display on the plot. batch_size: Size of batches for prediction over the test data. prefix: path specifying where to save the plot. """ fpr, tpr, roc_auc = get_fpr_tpr_roc(model, test_data, test_truth, labels, batch_size) lw = 3 plt.figure(figsize=(28,22)) matplotlib.rcParams.update({'font.size': 34}) for key in labels.keys(): if key in key_colors: color = key_colors[key] else: color = np.random.choice(color_array) plt.plot(fpr[labels[key]], tpr[labels[key]], color=color, lw=lw, label=str(key)+' area under ROC: %0.3f'%roc_auc[labels[key]]) plt.plot([0, 1], [0, 1], 'k:', lw=0.5) plt.xlim([0.0, 1.0]) plt.ylim([-0.02, 1.03]) plt.xlabel(fallout_label) plt.ylabel(recall_label) plt.title('ROC:'+ title + '\n') matplotlib.rcParams.update({'font.size': 56}) plt.legend(loc="lower right") figure_path = prefix+"per_class_roc_"+title+image_ext if not os.path.exists(os.path.dirname(figure_path)): os.makedirs(os.path.dirname(figure_path)) plt.savefig(figure_path) print('Saved figure at:', figure_path)
2,761
def predict_sentiment(txt: str, direc: str = 'models/sentiment/saved_models/model50') -> float: """ Predicts the sentiment of a string. Only use for testing; it is not suitable for large data because the model is loaded on each call. Input is a text string; the optional directory argument selects a different model. Returns a value from -1 to 1: approaching -1 indicates negative sentiment, approaching 1 indicates positive sentiment. """ vals = spacy.load(direc)(txt).cats return vals["pos"] if vals["pos"] > vals["neg"] else -1 * vals["neg"]
2,762
def logfbank(signal, samplerate=16000, winlen=0.025, winstep=0.01, nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97, winfunc=lambda x: numpy.ones((x,))): """Compute log Mel-filterbank energy features from an audio signal. :param signal: the audio signal from which to compute features. Should be an N*1 array :param samplerate: the samplerate of the signal we are working with. :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds) :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds) :param nfilt: the number of filters in the filterbank, default 26. :param nfft: the FFT size. Default is 512. :param lowfreq: lowest band edge of mel filters. In Hz, default is 0. :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2 :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97. :param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. """ feat, energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft, lowfreq, highfreq, preemph, winfunc) return numpy.log(feat)
2,763
def write_logging_statement(): """Writes logging statements""" time.sleep(1) logger.debug('A debug statement') time.sleep(1) logger.info('An info statement') time.sleep(1) logger.warning('A warning statement') time.sleep(1) logger.critical('A critical warning statement') time.sleep(1) logger.error('An error statement\n') time.sleep(3)
2,764
def UpdateBufferStyles(sheet): """Update the style used in all buffers @param sheet: Style sheet to use """ # Only update if the sheet has changed if sheet is None or sheet == Profile_Get('SYNTHEME'): return Profile_Set('SYNTHEME', sheet) for mainw in wx.GetApp().GetMainWindows(): mainw.nb.UpdateTextControls('UpdateAllStyles') mainw.SetStatusText(_("Changed color scheme to %s") % \ sheet, ed_glob.SB_INFO)
2,765
def get_rmsd( pose, second_pose, overhang = 0): """ Get RMSD assuming they are both the same length! """ #id_map = get_mask_for_alignment(pose, second_pose, cdr, overhang) #rms = rms_at_corresponding_atoms_no_super(pose, second_pose, id_map) start = 1 + overhang end = pose.total_residue() - overhang l = Loop(start, end) loops = Loops() loops.push_back(l) rms = loop_rmsd(pose, second_pose, loops, False, True) return rms
2,766
def inverse(a: int, n: int): """ calc the inverse of a in the case of module n, where a and n must be mutually prime. a * x = 1 (mod n) :param a: (int) :param n: (int) :return: (int) x """ assert greatest_common_divisor(a, n) == 1 return greatest_common_divisor_with_coefficient(a, n)[1] % n
2,767
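Two small checks of the inverse record above:
inverse(3, 7)    # -> 5, since 3 * 5 = 15 = 2*7 + 1
inverse(7, 26)   # -> 15, since 7 * 15 = 105 = 4*26 + 1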
def create_app(): """Create the Flask application.""" return app
2,768
def generate_prime_candidate(length): """ Generate a random odd integer. :param length: desired bit length of the number :return: integer """ p = big_int(length) p |= (1 << length - 1) | 1 return p
2,769
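The big_int helper called by the record above is not included; the sketch below is an assumption about what it does (return a random integer of at most `length` bits). The OR with (1 << length - 1) | 1 then forces the top bit, so the candidate really has `length` bits, and the low bit, so it is odd before primality testing.
import random

def big_int(length):
    # Hypothetical helper: a uniformly random integer with up to `length` bits.
    return random.getrandbits(length)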
def test_read_record_member(memberAPI): """Test reading records.""" token = memberAPI.get_token(USER_EMAIL, USER_PASSWORD, REDIRECT_URL, '/read-limited') activities = memberAPI.read_record_member(USER_ORCID, 'activities', token) first_work = activities['works']['group'][0]['work-summary'][0] assert first_work['title'][ 'title']['value'] == WORK_NAME put_code = first_work['put-code'] work = memberAPI.read_record_member(USER_ORCID, 'work', token, put_code) assert work['type'] == u'JOURNAL_ARTICLE'
2,770
def test_find_with_one_cond(mock_devices, generate_data): """ test find method with one cond :param cond: condition to filter devices. like : where("sdk")==19 the details syntax See more at: http:// :type cond: where :return: len of device """ mock_devices.return_value = generate_data s = Selector() s.load() cond = where("sdk") == '19' s = s.find(cond=cond) assert s.count() == 2
2,771
def PathPrefix(vm): """Determines the prefix for a sysbench command based on the operating system. Args: vm: VM on which the sysbench command will be executed. Returns: A string representing the sysbench command prefix. """ if vm.OS_TYPE == os_types.RHEL: return INSTALL_DIR else: return '/usr/'
2,772
def _find_form_xobject_images(pdf, container, contentsinfo): """Find any images that are in Form XObjects in the container The container may be a page, or a parent Form XObject. """ if '/Resources' not in container: return resources = container['/Resources'] if '/XObject' not in resources: return for xobj in resources['/XObject']: candidate = resources['/XObject'][xobj] if candidate['/Subtype'] != '/Form': continue form_xobject = candidate for settings in contentsinfo.xobject_settings: if settings.name != xobj: continue # Find images once for each time this Form XObject is drawn. # This could be optimized to cache the multiple drawing events # but in practice both Form XObjects and multiple drawing of the # same object are both very rare. ctm_shorthand = settings.shorthand yield from _find_images(pdf, form_xobject, ctm_shorthand)
2,773
def get_next_valid_seq_number( address: str, client: SyncClient, ledger_index: Union[str, int] = "current" ) -> int: """ Query the ledger for the next available sequence number for an account. Args: address: the account to query. client: the network client used to make network calls. ledger_index: The ledger index to use for the request. Must be an integer ledger value or "current" (the current working version), "closed" (for the closed-and-proposed version), or "validated" (the most recent version validated by consensus). The default is "current". Returns: The next valid sequence number for the address. """ return asyncio.run(main.get_next_valid_seq_number(address, client, ledger_index))
2,774
def apps_list(api_filter, partial_name, **kwargs): """List all defined applications. If you give an optional command line argument, the apps are filtered by name using this string.""" params = {} if api_filter: params = {"filter": api_filter} rv = okta_manager.call_okta("/apps", REST.get, params=params) # now filter by name, if given if partial_name: matcher = re.compile(partial_name) rv = list(filter(lambda x: matcher.search(x["name"]), rv)) return rv
2,775
def jacquez(s_coords, t_coords, k, permutations=99): """ Jacquez k nearest neighbors test for spatio-temporal interaction. :cite:`Jacquez:1996` Parameters ---------- s_coords : array (n, 2), spatial coordinates. t_coords : array (n, 1), temporal coordinates. k : int the number of nearest neighbors to be searched. permutations : int, optional the number of permutations used to establish pseudo- significance (the default is 99). Returns ------- jacquez_result : dictionary contains the statistic (stat) for the test and the associated p-value (pvalue). stat : float value of the Jacquez k nearest neighbors test for the dataset. pvalue : float p-value associated with the statistic (normally distributed with k-1 df). Examples -------- >>> import numpy as np >>> import libpysal as lps >>> from pointpats import SpaceTimeEvents, jacquez Read in the example data and create an instance of SpaceTimeEvents. >>> path = lps.examples.get_path("burkitt.shp") >>> events = SpaceTimeEvents(path,'T') The Jacquez test counts the number of events that are k nearest neighbors in both time and space. The following runs the Jacquez test on the example data and reports the resulting statistic. In this case, there are 13 instances where events are nearest neighbors in both space and time. # turning off as kdtree changes from scipy < 0.12 return 13 >>> np.random.seed(100) >>> result = jacquez(events.space, events.t ,k=3,permutations=99) >>> print(result['stat']) 13 The significance of this can be assessed by calling the p- value from the results dictionary, as shown below. Again, no space-time interaction is observed. >>> result['pvalue'] < 0.01 False """ time = t_coords space = s_coords n = len(time) # calculate the nearest neighbors in space and time separately knnt = lps.weights.KNN.from_array(time, k) knns = lps.weights.KNN.from_array(space, k) nnt = knnt.neighbors nns = knns.neighbors knn_sum = 0 # determine which events are nearest neighbors in both space and time for i in range(n): t_neighbors = nnt[i] s_neighbors = nns[i] check = set(t_neighbors) inter = check.intersection(s_neighbors) count = len(inter) knn_sum += count stat = knn_sum # return the results (if no inference) if not permutations: return stat # loop for generating a random distribution to assess significance dist = [] for p in range(permutations): j = 0 trand = np.random.permutation(time) knnt = lps.weights.KNN.from_array(trand, k) nnt = knnt.neighbors for i in range(n): t_neighbors = nnt[i] s_neighbors = nns[i] check = set(t_neighbors) inter = check.intersection(s_neighbors) count = len(inter) j += count dist.append(j) # establish the pseudo significance of the observed statistic distribution = np.array(dist) greater = np.ma.masked_greater_equal(distribution, stat) count = np.ma.count_masked(greater) pvalue = (count + 1.0) / (permutations + 1.0) # report the results jacquez_result = {'stat': stat, 'pvalue': pvalue} return jacquez_result
2,776
def array2tensor(array, device='auto'):
    """Convert an ndarray to a float tensor on 'cpu', 'gpu' or 'auto' (CUDA when available)."""
    assert device in ['cpu', 'gpu', 'auto'], "device must be 'cpu', 'gpu' or 'auto'"
    if device == 'auto':
        # pick CUDA when available, otherwise fall back to CPU
        device = 'cuda' if t.cuda.is_available() else 'cpu'
    elif device == 'gpu':
        # torch names GPU devices 'cuda', not 'gpu'
        device = 'cuda'
    return t.tensor(array).float().to(t.device(device))
2,777
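A small usage sketch for `array2tensor`, assuming `t` is the `torch` module (as the function's calls imply) and NumPy is installed.

# Sketch: convert a NumPy array to a float32 tensor on an explicit or auto-selected device.
import numpy as np

arr = np.arange(6).reshape(2, 3)
cpu_tensor = array2tensor(arr, device='cpu')   # always lands on the CPU
auto_tensor = array2tensor(arr)                # CUDA if available, otherwise CPU
print(cpu_tensor.dtype, auto_tensor.device)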
def to_json(graph): """Convert this graph to a Node-Link JSON object. :param BELGraph graph: A BEL graph :return: A Node-Link JSON object representing the given graph :rtype: dict """ graph_json_dict = node_link_data(graph) # Convert annotation list definitions (which are sets) to canonicalized/sorted lists graph_json_dict['graph'][GRAPH_ANNOTATION_LIST] = { keyword: list(sorted(values)) for keyword, values in graph_json_dict['graph'][GRAPH_ANNOTATION_LIST].items() } # Convert set to list graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES] = list(graph_json_dict['graph'][GRAPH_UNCACHED_NAMESPACES]) return graph_json_dict
2,778
def export(df: pd.DataFrame): """ From generated pandas dataframe to xml configuration :param df: computed pandas dataframe :return: """ return df
2,779
def has_same_attributes(link1, link2): """ Return True if the two links have the same attributes for our purposes, ie it is OK to merge them together into one link Parameters: link1 - Link object link2 - Link object Return value: True iff link1 and link2 have compatible attributes """ return (link1.linktype == link2.linktype and abs(link1.B - link2.B) < EPS and abs(link1.power - link2.power) < EPS and abs(link1.capacity - link2.capacity) < EPS)
2,780
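`Link` and `EPS` are defined elsewhere in the original module, so the sketch below stands in a namedtuple carrying the four attributes the comparison reads, plus an assumed tolerance; it is illustrative only.

# Illustrative stand-ins: the real Link class and EPS constant are not shown in this snippet.
from collections import namedtuple

Link = namedtuple('Link', ['linktype', 'B', 'power', 'capacity'])
EPS = 1e-9  # assumed tolerance

a = Link('fiber', 10.0, 1.5, 100.0)
b = Link('fiber', 10.0 + 1e-12, 1.5, 100.0)
print(has_same_attributes(a, b))  # True: every attribute matches within EPS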
def test_signals_creation(test_df, signal_algorithm):
    """Checks signal algorithms can create a signal in a Pandas dataframe."""
    test_df_copy = test_df.copy()
    original_columns = test_df.columns

    # We check if the test series has the columns needed for the rule to calculate.
    required_columns = Api.required_inputs_for_algorithm(signal_algorithm)
    all_present = True
    for ii_requirement in required_columns:
        if ii_requirement not in original_columns:
            all_present = False

    # If columns are missing, we anticipate a KeyError will trigger.
    if not all_present:
        with pytest.raises(KeyError):
            Api.calculate_signal(test_df_copy, signal_algorithm)
        return True

    # Otherwise we expect to parse successfully.
    df_with_signal = Api.calculate_signal(test_df_copy, signal_algorithm)
    if not isinstance(df_with_signal, pd.DataFrame):
        print(df_with_signal)
        print("Type was: ", type(df_with_signal))
        raise TypeError("Bad output format.")

    # Signal algorithms should be adding new columns with float, int or NaN data.
    new_columns = False
    for ii_column_name in df_with_signal.columns:
        if ii_column_name not in original_columns:
            new_columns = True
            for ii_value in df_with_signal[ii_column_name]:
                if not isinstance(ii_value, (float, int)):
                    # Any non-numeric value must be a missing value (NaN).
                    assert pd.isna(ii_value)

    # At least one new column should have been added. Otherwise output is overriding input columns.
    if not new_columns:
        raise AssertionError(
            "No new columns were created by the signal function: ",
            df_with_signal.columns,
            " versus original of ",
            original_columns,
        )
2,781
def com_google_fonts_check_fontv(ttFont): """Check for font-v versioning.""" from fontv.libfv import FontVersion fv = FontVersion(ttFont) if fv.version and (fv.is_development or fv.is_release): yield PASS, "Font version string looks GREAT!" else: yield INFO,\ Message("bad-format", f'Version string is: "{fv.get_name_id5_version_string()}"\n' f'The version string must ideally include a git commit hash' f' and either a "dev" or a "release" suffix such as in the' f' example below:\n' f'"Version 1.3; git-0d08353-release"')
2,782
def test_qplad_integration_af_quantiles():
    """
    Test QPLAD correctly matches adjustment factor and quantiles for lat, dayofyear and for a specific quantile

    The strategy is to bias-correct a Dataset of ones, and then try to
    downscale it to two gridpoints with QPLAD. In one case we take the
    adjustment factors for a single dayofyear and manually change it to
    0.0. Then check for the corresponding change in the output dataset. In
    the other case we take the adjustment factors for one of the two
    latitudes we're downscaling to and manually change it to 0.0. We then
    check for the corresponding change in the output dataset for that
    latitude. To check for a specific quantile, we choose a particular day of year
    with associated quantile from the bias corrected data, manually change the
    adjustment factor for that quantile and day of year, and check that the
    changed adjustment factor has been applied to the bias corrected day value.
    """
    kind = "*"
    lat = [1.0, 1.5]
    time = xr.cftime_range(start="1994-12-17", end="2015-01-15", calendar="noleap")
    variable = "scen"

    data_ref = xr.DataArray(
        np.ones((len(time), len(lat)), dtype="float64"),
        coords={"time": time, "lat": lat},
        attrs={"units": "K"},
        dims=["time", "lat"],
        name=variable,
    ).chunk({"time": -1, "lat": -1})
    data_train = data_ref + 2
    data_train.attrs["units"] = "K"

    ref_fine = data_ref.to_dataset()
    ds_train = data_train.to_dataset()

    # take the mean across space to represent coarse reference data for AFs
    ds_ref_coarse = ref_fine.mean(["lat"], keep_attrs=True)
    ds_train = ds_train.mean(["lat"], keep_attrs=True)

    # tile the fine resolution grid with the coarse resolution ref data
    ref_coarse = ds_ref_coarse.broadcast_like(ref_fine)
    ds_bc = ds_train
    ds_bc[variable].attrs["units"] = "K"

    # this is an integration test between QDM and QPLAD, so use QDM services
    # for bias correction
    target_year = 2005
    qdm_model = train_quantiledeltamapping(
        reference=ds_ref_coarse, historical=ds_train, variable=variable, kind=kind
    )
    biascorrected_coarse = adjust_quantiledeltamapping(
        simulation=ds_bc,
        variable=variable,
        qdm=qdm_model.ds,
        years=[target_year],
        include_quantiles=True,
    )

    # make bias corrected data on the fine resolution grid
    biascorrected_fine = biascorrected_coarse.broadcast_like(
        ref_fine.sel(
            time=slice("{}-01-01".format(target_year), "{}-12-31".format(target_year))
        )
    )

    qplad_model = train_analogdownscaling(
        coarse_reference=ref_coarse,
        fine_reference=ref_fine,
        variable=variable,
        kind=kind,
    )

    # TODO: These probably should be two separate tests with setup fixtures...
    spoiled_time = qplad_model.ds.copy(deep=True)
    spoiled_latitude = qplad_model.ds.copy(deep=True)
    spoiled_quantile = qplad_model.ds.copy(deep=True)

    # Spoil one dayofyear value in adjustment factors (force it to be 0.0)
    # and test that the spoiled value correctly propagates through to output.
    time_idx_to_spoil = 25
    spoiled_time["af"][:, time_idx_to_spoil, :] = 0.0
    qplad_model.ds = spoiled_time
    downscaled = adjust_analogdownscaling(
        simulation=biascorrected_fine.set_coords(
            ["sim_q"]
        ),  # func assumes sim_q is coordinate...
        qplad=qplad_model,
        variable=variable,
    )

    # All but two values should be 1.0...
    assert (downscaled[variable].values == 1.0).sum() == 728
    # We should have 2 `0.0` entries. One in each lat...
    assert (downscaled[variable].values == 0.0).sum() == 2
    # All our 0.0s should be in this dayofyear/time slice in output dataset.
np.testing.assert_array_equal( downscaled[variable].values[time_idx_to_spoil, :], np.array([0.0, 0.0]) ) # Similar to above, spoil one lat value in adjustment factors # (force it to be 0.0) and test that the spoiled value correctly # propagates through to output. latitude_idx_to_spoil = 0 spoiled_latitude["af"][latitude_idx_to_spoil, ...] = 0.0 qplad_model.ds = spoiled_latitude downscaled = adjust_analogdownscaling( simulation=biascorrected_fine.set_coords( ["sim_q"] ), # func assumes sim_q is coordinate... qplad=qplad_model, variable=variable, ) # Half of values in output should be 1.0... assert (downscaled[variable].values == 1.0).sum() == 365 # The other half should be `0.0` due to the spoiled data... assert (downscaled[variable].values == 0.0).sum() == 365 # All our 0.0s should be in this single lat in output dataset. assert all(downscaled[variable].values[:, latitude_idx_to_spoil] == 0.0) # spoil one quantile in adjustment factors for one day of year # force it to be 200 and ensure that a bias corrected day with that # quantile gets the spoiled value after downscaling # pick a day of year doy = 100 # only do this for one lat pt lat_pt = 0 # get the quantile from the bias corrected data for this doy and latitude q_100 = biascorrected_fine.sim_q[doy, lat_pt].values # extract quantiles from afs to get the corresponding quantile index bc_quantiles = qplad_model.ds.af[0, 100, :].quantiles.values # get index of the af for that day q_idx = np.argmin(np.abs(q_100 - bc_quantiles)) # now spoil that doy quantile adjustment factor spoiled_quantile["af"][0, 100, q_idx] = 200 qplad_model.ds = spoiled_quantile downscaled = adjust_analogdownscaling( simulation=biascorrected_fine.set_coords( ["sim_q"] ), # func assumes sim_q is coordinate... qplad=qplad_model, variable=variable, ) # the 100th doy and corresponding quantile should be equal to the spoiled value assert np.max(downscaled[variable].values[:, lat_pt]) == 200 assert np.argmax(downscaled[variable].values[:, lat_pt]) == 100 # check that the adjustment factor did not get applied to any other days of the year assert (downscaled[variable].values[:, lat_pt]).sum() == 564
2,783
def get_prev_day(d): """ Returns the date of the previous day. """ curr = date(*map(int, d.split('-'))) prev = curr - timedelta(days=1) return str(prev)
2,784
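A quick check of the 'YYYY-MM-DD' string format the helper above expects (it needs `date` and `timedelta` from `datetime` in scope).

# Month and year boundaries roll over correctly, including leap days.
print(get_prev_day('2020-03-01'))  # '2020-02-29'
print(get_prev_day('2021-01-01'))  # '2020-12-31'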
def copy_function(old_func, updated_module):
    """Copies a function, updating its globals to point to updated_module."""
    new_func = types.FunctionType(old_func.__code__, updated_module.__dict__,
                                  name=old_func.__name__,
                                  argdefs=old_func.__defaults__,
                                  closure=old_func.__closure__)
    new_func.__dict__.update(old_func.__dict__)
    new_func.__module__ = updated_module.__name__
    return new_func
2,785
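A short sketch of what `copy_function` is for: rebinding a function's global lookups to a different module object. The module built with `types.ModuleType` here is a made-up stand-in for a freshly reloaded module.

# Sketch: the copied function resolves SCALE in the stand-in module, not in the original namespace.
import types

SCALE = 2

def times_scale(x):
    return x * SCALE

fake_module = types.ModuleType("updated_module")
fake_module.SCALE = 10  # hypothetical updated global

new_times_scale = copy_function(times_scale, fake_module)
print(times_scale(3), new_times_scale(3))  # 6 30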
def test_app_sanity_check_fail(app_scaffold, dev_db): # noQA """Create an application and see we don't start if migrations are not run.""" execute_ws_command('pserve', app_scaffold, assert_exit=1)
2,786
def update_diseases(all_diseases: Dict[StateYearPair, StrItemSet], cursor: MySQLdb.cursors.Cursor): """ Fetches rows from cursor and updates all_diseases with the rows Doesn't return anything, but updates all_diseases directly :param all_diseases: dict from place and time to set of diseases at that time and place :param cursor: a database cursor with rows containing Admin1Name, Year, and ConditionName """ current_row = cursor.fetchone() while current_row is not None: place_time = tuple(current_row[:2]) disease = current_row[2] all_diseases[place_time] = all_diseases[place_time].union({disease}) current_row = cursor.fetchone()
2,787
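`update_diseases` only needs an object with `fetchone()`, so the sketch below drives it with a tiny fake cursor instead of a live MySQL connection; a `defaultdict(set)` plays the role of the `StateYearPair` mapping, and the row values are invented examples.

# Fake cursor returning (Admin1Name, Year, ConditionName) rows, then None when exhausted.
from collections import defaultdict

class FakeCursor:
    def __init__(self, rows):
        self._rows = iter(rows)

    def fetchone(self):
        return next(self._rows, None)

all_diseases = defaultdict(set)
update_diseases(all_diseases, FakeCursor([('Ohio', 1950, 'Measles'), ('Ohio', 1950, 'Polio')]))
print(all_diseases[('Ohio', 1950)])  # {'Measles', 'Polio'} (set order may vary)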
def get_random_color():
    """
    Return a random Bootstrap color identifier string.
    :return: a Bootstrap color string
    """
    color_str = [
        'primary',
        'secondary',
        'success',
        'danger',
        'warning',
        'info',
        'dark',
    ]
    return random.choice(color_str)
2,788
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
    """
    Implement a single forward step of the LSTM-cell as described in Figure (4)

    Arguments:
    xt -- your input data at timestep "t", numpy array of shape (n_x, m).
    a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
    c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
    parameters -- python dictionary containing:
                        Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
                        bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
                        Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
                        bi -- Bias of the update gate, numpy array of shape (n_a, 1)
                        Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
                        bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
                        Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
                        bo -- Bias of the output gate, numpy array of shape (n_a, 1)
                        Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                        by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)

    Returns:
    a_next -- next hidden state, of shape (n_a, m)
    c_next -- next memory state, of shape (n_a, m)
    yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
    cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)

    Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilda),
          c stands for the memory value
    """

    # Retrieve parameters from "parameters"
    Wf = parameters["Wf"]
    bf = parameters["bf"]
    Wi = parameters["Wi"]
    bi = parameters["bi"]
    Wc = parameters["Wc"]
    bc = parameters["bc"]
    Wo = parameters["Wo"]
    bo = parameters["bo"]
    Wy = parameters["Wy"]
    by = parameters["by"]

    # Retrieve dimensions from shapes of xt and Wy
    n_x, m = xt.shape
    n_y, n_a = Wy.shape

    # Concatenate a_prev and xt (≈3 lines)
    # concat has shape (n_a + n_x, m): the previous hidden state stacked on top of the current input
    concat = np.zeros((n_x + n_a, m))
    concat[: n_a, :] = a_prev
    concat[n_a:, :] = xt

    # Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
    ft = sigmoid(np.dot(Wf, concat) + bf)
    it = sigmoid(np.dot(Wi, concat) + bi)
    cct = np.tanh(np.dot(Wc, concat) + bc)
    c_next = ft * c_prev + it * cct
    ot = sigmoid(np.dot(Wo, concat) + bo)
    a_next = ot * np.tanh(c_next)

    # Compute prediction of the LSTM cell (≈1 line)
    yt_pred = softmax(np.dot(Wy, a_next) + by)

    # store values needed for backward propagation in cache
    cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    return a_next, c_next, yt_pred, cache
2,789
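A shape-checking sketch for the cell above. `sigmoid` and `softmax` are helpers assumed to exist in the original module, so minimal versions are defined here; all sizes are arbitrary.

# n_x inputs, n_a hidden units, n_y outputs, batch of m examples.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    e = np.exp(z - np.max(z, axis=0, keepdims=True))
    return e / e.sum(axis=0, keepdims=True)

np.random.seed(0)
n_x, n_a, n_y, m = 3, 5, 2, 10
xt = np.random.randn(n_x, m)
a_prev = np.random.randn(n_a, m)
c_prev = np.random.randn(n_a, m)
parameters = {
    "Wf": np.random.randn(n_a, n_a + n_x), "bf": np.zeros((n_a, 1)),
    "Wi": np.random.randn(n_a, n_a + n_x), "bi": np.zeros((n_a, 1)),
    "Wc": np.random.randn(n_a, n_a + n_x), "bc": np.zeros((n_a, 1)),
    "Wo": np.random.randn(n_a, n_a + n_x), "bo": np.zeros((n_a, 1)),
    "Wy": np.random.randn(n_y, n_a), "by": np.zeros((n_y, 1)),
}
a_next, c_next, yt_pred, _ = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print(a_next.shape, c_next.shape, yt_pred.shape)  # (5, 10) (5, 10) (2, 10)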
def save_department_data(department_data, scraper, location): """Write department data to JSON file. Args: department_data: Dictionary of department data. scraper: Base scraper object. location: String location output files. """ filename = department_data['code'] filepath = '{}/departments'.format(location) scraper.write_data(department_data, filename, filepath) scraper.logger.debug('Department data saved')
2,790
def logged_run(cmd, buffer):
    """Run cmd, write its combined stdout/stderr into buffer, and return the exit code."""
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT)
    # communicate() drains the pipe while waiting, avoiding a deadlock on large output
    out, _ = proc.communicate()
    buffer.write(out)
    return proc.returncode
2,791
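A usage sketch for `logged_run`: the buffer must accept bytes, because the child's output is read as bytes, so `io.BytesIO` (or a file opened in binary mode) works. It assumes a `python` executable is on PATH.

# Capture the combined stdout/stderr of a command into an in-memory bytes buffer.
import io

buf = io.BytesIO()
exit_code = logged_run(['python', '--version'], buf)
print(exit_code, buf.getvalue())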
def download_file(project, bucket, orig_file_name, temp_file_name): """Download a file stored in a Google Cloud Storage bucket to the disk""" client = storage.Client(project=project) bucket = client.get_bucket(bucket) blob = bucket.blob(orig_file_name) blob.download_to_filename(temp_file_name) print('file {} downloaded'.format(orig_file_name))
2,792
def createPListFile(mapname,image):
    """ Create an XML plist file from the sprite sheet image.
        :param mapname: the name of the file with information about the animation frames.
        :param image: the generated sprite sheet image.
    """
    if __debug__ :
        print "Creating Plist file"
        print "Loading support file in :",path+mapname+fileMaptype
    mapf=open(path+mapname+fileMaptype, 'r')
    riga=mapf.readline()
    # dictPList maps each plist key to its corresponding value, mirroring the plist structure.
    # See https://docs.python.org/2.7/library/plistlib.html#plistlib.dump and https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man5/plist.5.html
    dictPList={}
    dictAllFrames={}
    # Invio="\n"
    while len(riga)!=0:
        if __debug__ :
            print "ROW value read from file =",riga
        val=""
        # dictionary of the current frame
        dictFrame={}
        riga=riga.replace("\n","")
        listaSplitsubImage=riga.split(" ")
        # extract the key of the current frame
        chiave=listaSplitsubImage[0]
        if(len(listaSplitsubImage)<7): # 6 is the number of tokens in one row of the map file, e.g. "frame = 0 0 100 100"
            if __debug__ :
                print "Len of List split :",len(listaSplitsubImage)
            # build the frame tag as "{{x,y},{w,h}}"
            val+="{"+listaSplitsubImage[2]+","+listaSplitsubImage[3]+"},{"+listaSplitsubImage[4]+","+listaSplitsubImage[5]+"}"
            val="{"+val+"}" #+Invio
            dictFrame[CurrentFrame]=val
            dictFrame[Offset]="{0,0}"
            dictFrame[Rotated]=False
            # sourceColorRect uses the same "{{x,y},{w,h}}" layout
            dictFrame[SourceColorRect]="{{0,0},{"+listaSplitsubImage[4]+","+listaSplitsubImage[5]+"}}" #+Invio
            dictFrame[SourceSize]="{"+listaSplitsubImage[4]+","+listaSplitsubImage[5]+"}" #+Invio
            if __debug__ :
                print "dictionary frame :",dictFrame
        else:
            raise NameError,"Error: map file format not recognized."
        # add the current frame to dictAllFrames under its key
        dictAllFrames[chiave]=dictFrame
        riga=mapf.readline() # next line from the file.
    #End while
    # close the map file
    mapf.close()
    # Create the metadata dictionary used to build the animation frames
    filename=os.path.basename(image.filename)
    if __debug__:
        print "Texture file name :",filename
    dictmeta={}
    dictmeta[Format]=2
    dictmeta[RealTextureFileName]=filename # image.filename holds the complete path and file name
    dictmeta[Size]="{"+str(image.width)+","+str(image.height)+"}" #+Invio
    dictmeta[SmartUpdate]="Created with GIMP "+time.strftime("%d/%m/%Y %H:%M:%S")
    dictmeta[TextureFileName]=filename # image.filename holds the complete path and file name
    dictPList[Frames]=dictAllFrames
    dictPList[MetaData]=dictmeta
    # write the plist file with the data collected in dictPList;
    # mapname is also used as the plist file name
    writePlist(dictPList, path+mapname+filePlisttype)
    if __debug__ :
        print "End create Plist file"
    return
2,793
def _decode_and_format_b64_string(b64encoded_string, item_prefix=None, current_depth=1, current_index=1): """Decode string and return displayable content plus list of decoded artifacts.""" # Check if we recognize this as a known file type (_, f_type) = _is_known_b64_prefix(b64encoded_string) _debug_print_trace('Found type: ', f_type) output_files = _decode_b64_binary(b64encoded_string, f_type) if not output_files: return b64encoded_string, None if len(output_files) == 1: # get the first (only) item out_name, out_record = list(output_files.items())[0] _debug_print_trace('_decode_b64_binary returned a single record') _debug_print_trace('record:', out_record) # Build display string # If a string, include the decoded item in the output if out_record.encoding_type in ['utf-8', 'utf-16']: display_string = f'<decoded type=\'string\' name=\'{out_name}\' ' +\ f'index=\'{item_prefix}{current_index}\' ' +\ f'depth=\'{current_depth}\'>' +\ f'{out_record.decoded_string}</decoded>' return display_string, [out_record] else: # if a binary just record its presence display_string = f'<decoded value=\'binary\' name=\'{out_name}\' ' +\ f'type=\'{out_record.file_type}\' ' +\ f'index=\'{item_prefix}{current_index}\' ' +\ f'depth=\'{current_depth}\'/>' return display_string, [out_record] else: # Build header display string display_header = f'<decoded value=\'multiple binary\' type=\'multiple\' ' +\ f' index=\'{item_prefix}{current_index}\'>' child_display_strings = [] child_index = 1 child_depth = current_depth + 1 _debug_print_trace('_decode_b64_binary returned multiple records') # Build child display strings for child_name, child_rec in output_files.items(): _debug_print_trace('Child_decode: ', child_rec) child_index_string = f'{item_prefix}{current_index}.{child_index}' if child_rec.encoding_type in ['utf-8', 'utf-16']: # If a string, include the decoded item in the output child_display_string = f'<decoded type=\'string\' name=\'{child_name}\' ' +\ f'index=\'{child_index_string}\' ' +\ f'depth=\'{child_depth}\'>' +\ f'{child_rec.decoded_string}</decoded>' else: # if a binary just record its presence child_display_string = f'<decoded type=\'{child_rec.file_type}\' ' +\ f'name=\'{child_name}\' ' +\ f'index=\'{child_index_string}\' ' +\ f'depth=\'{child_depth}\'/>' child_display_strings.append(child_display_string) child_index += 1 display_string = display_header + ''.join(child_display_strings) + '</decoded>' return display_string, output_files.values()
2,794
def buildDMG():
    """
    Create DMG containing the rootDir
    """
    outdir = os.path.join(WORKDIR, 'diskimage')
    if os.path.exists(outdir):
        shutil.rmtree(outdir)

    imagepath = os.path.join(outdir,
                    'python-%s-macosx'%(getFullVersion(),))
    if INCLUDE_TIMESTAMP:
        imagepath = imagepath + '%04d-%02d-%02d'%(time.localtime()[:3])
    imagepath = imagepath + '.dmg'

    os.mkdir(outdir)
    runCommand("hdiutil create -volname 'Universal MacPython %s' -srcfolder %s %s"%(
        getFullVersion(),
        shellQuote(os.path.join(WORKDIR, 'installer')),
        shellQuote(imagepath)))

    return imagepath
2,795
def create_iam_role(iam_client):
    """Create an IAM role for the Redshift cluster to have read only access to S3.

    Arguments:
    iam_client (boto3.client) - IAM client

    Returns:
    role_arn (str) - ARN for the IAM Role
    """
    # Create the role if it doesn't already exist.
    try:
        print('Creating IAM Role...')
        redshift_role = iam_client.create_role(
            Path="/",
            RoleName=IAM_ROLE_NAME,
            Description="Allows Redshift clusters to call AWS services",
            AssumeRolePolicyDocument=json.dumps(
                {
                    'Statement': [
                        {
                            'Action': 'sts:AssumeRole',
                            'Effect': 'Allow',
                            'Principal': {'Service': 'redshift.amazonaws.com'}
                        }
                    ],
                    'Version': '2012-10-17'
                }
            )
        )
    except Exception as e:
        print(e)

    # Attach the policy (the managed policy name is case-sensitive).
    try:
        iam_client.attach_role_policy(
            RoleName=IAM_ROLE_NAME,
            PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
        )
    except Exception as e:
        print(e)

    # Return the Role ARN.
    role_arn = iam_client.get_role(RoleName=IAM_ROLE_NAME)['Role']['Arn']
    print('Role ARN: %s' % role_arn)

    return role_arn
2,796
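A hedged call sketch for the IAM helper above: it assumes valid AWS credentials are configured and that the module-level `IAM_ROLE_NAME` constant and `json` import the function relies on are defined.

# Sketch only: creating real AWS resources requires appropriate permissions.
import boto3

iam_client = boto3.client('iam')
role_arn = create_iam_role(iam_client)
print(role_arn)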
def pony(var, wrapper, message): """Toss a magical pony into the air and see what happens!""" wrapper.send(messages["pony_toss"].format(wrapper.source)) # 59/29/7/5 split rnd = random.random() if rnd < 0.59: pony = messages.get("pony_land", 0) elif rnd < 0.88: pony = messages.get("pony_land", 1) elif rnd < 0.95: pony = messages.get("pony_land", 2) else: pony = messages.get("pony_land", 3) wrapper.send(pony.format(nick=wrapper.source))
2,797
def cone_face_to_span(F): """ Compute the span matrix F^S of the face matrix F, that is, a matrix such that {F x <= 0} if and only if {x = F^S z, z >= 0}. """ b, A = zeros((F.shape[0], 1)), -F # H-representation: A x + b >= 0 F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE) F_cdd.rep_type = RepType.INEQUALITY P = Polyhedron(F_cdd) V = array(P.get_generators()) for i in xrange(V.shape[0]): if V[i, 0] != 0: # 1 = vertex, 0 = ray raise NotConeFace(F) return V[:, 1:].T
2,798
def show_progress_bar(progress, start_time, msg):
    """
    Well, it's a fancy progress bar, it looks like this:
        Msg: 50% [=========================> ] in 0.9s
    :param progress: completion percentage, in the range 0 to 100
    :param start_time: start timestamp, as returned by time.time()
    :param msg: label to show (truncated to fit the label field)
    :return: None
    """
    screen_width = os.get_terminal_size().columns // 10 * 10 - 40
    bar_width = int(progress * screen_width / 100)
    progress_bar = (msg + ": " + " " * 10)[:9] + \
                   (" " * 4 + str(int(progress)) + "%")[-6:] + \
                   (" [" + bar_width * "=" + ">" + " " * int(screen_width - bar_width) + "]") + \
                   (" in " + str(round(time() - start_time, 1)) + "s")
    sys.stdout.write(progress_bar + "\r")
    sys.stdout.flush()
2,799
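A small driver loop for the progress bar above. It assumes the module did `from time import time` and `import os, sys` (the bar uses them directly) and that it runs in a real terminal, since `os.get_terminal_size()` fails without one.

# Drive the bar from 0 to 100; each call redraws the same terminal line via "\r".
from time import sleep, time

start = time()
for step in range(101):
    show_progress_bar(step, start, "Demo")
    sleep(0.02)
print()  # move past the progress line when done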