code (string, 4–4.48k) | docstring (string, 1–6.45k) | _id (string, 24)
---|---|---|
def test_connect(self): <NEW_LINE> <INDENT> response = requests.get('https://www.materialsproject.org/rest/v1/api_check', {'API_KEY': API_KEY}) <NEW_LINE> self.assertTrue(response.ok) | Check the connection with the MP api_check | 625941ba9c8ee82313fbb607 |
def parse(zipcode,filter=None): <NEW_LINE> <INDENT> if filter=="newest": <NEW_LINE> <INDENT> url = "https://www.zillow.com/homes/for_sale/{0}/0_singlestory/days_sort".format(zipcode) <NEW_LINE> <DEDENT> elif filter == "cheapest": <NEW_LINE> <INDENT> url = "https://www.zillow.com/homes/for_sale/{0}/0_singlestory/pricea_sort/".format(zipcode) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = "https://www.zillow.com/homes/for_sale/{0}_rb/?fromHomePage=true&shouldFireSellPageImplicitClaimGA=false&fromHomePageTab=buy".format(zipcode) <NEW_LINE> <DEDENT> for i in range(5): <NEW_LINE> <INDENT> session = requests.Session() <NEW_LINE> response = session.get(url,headers=HEADERS,cookies=cookies) <NEW_LINE> print(response.status_code) <NEW_LINE> parser = html.fromstring(response.text) <NEW_LINE> search_results = parser.xpath("//div[@id='search-results']//article") <NEW_LINE> properties_list = [] <NEW_LINE> for properties in search_results: <NEW_LINE> <INDENT> raw_address = properties.xpath(".//span[@itemprop='address']//span[@itemprop='streetAddress']//text()") <NEW_LINE> raw_city = properties.xpath(".//span[@itemprop='address']//span[@itemprop='addressLocality']//text()") <NEW_LINE> raw_state= properties.xpath(".//span[@itemprop='address']//span[@itemprop='addressRegion']//text()") <NEW_LINE> raw_postal_code= properties.xpath(".//span[@itemprop='address']//span[@itemprop='postalCode']//text()") <NEW_LINE> raw_price = properties.xpath(".//span[@class='zsg-photo-card-price']//text()") <NEW_LINE> raw_info = properties.xpath(".//span[@class='zsg-photo-card-info']//text()") <NEW_LINE> raw_broker_name = properties.xpath(".//span[@class='zsg-photo-card-broker-name']//text()") <NEW_LINE> url = properties.xpath(".//a[contains(@class,'overlay-link')]/@href") <NEW_LINE> raw_title = properties.xpath(".//h4//text()") <NEW_LINE> address = ' '.join(' '.join(raw_address).split()) if raw_address else None <NEW_LINE> city = ''.join(raw_city).strip() if raw_city else None <NEW_LINE> state = ''.join(raw_state).strip() if raw_state else None <NEW_LINE> postal_code = ''.join(raw_postal_code).strip() if raw_postal_code else None <NEW_LINE> price = ''.join(raw_price).strip() if raw_price else None <NEW_LINE> info = ' '.join(' '.join(raw_info).split()).replace(u"\xb7",',') <NEW_LINE> broker = ''.join(raw_broker_name).strip() if raw_broker_name else None <NEW_LINE> title = ''.join(raw_title) if raw_title else None <NEW_LINE> property_url = "https://www.zillow.com"+url[0] if url else None <NEW_LINE> is_forsale = properties.xpath('.//span[@class="zsg-icon-for-sale"]') <NEW_LINE> properties = { 'address':address, 'city':city, 'state':state, 'postal_code':postal_code, 'price':price, 'facts and features':info, 'real estate provider':broker, 'url':property_url, 'title':title } <NEW_LINE> if is_forsale: <NEW_LINE> <INDENT> properties_list.append(properties) <NEW_LINE> <DEDENT> <DEDENT> return properties_list | Do the scraping. | 625941ba94891a1f4081b93b |
def RgbImageF(imageR, imageG, imageB, mapping): <NEW_LINE> <INDENT> return _RgbImageF(imageR, imageG, imageB, mapping) | !\deprecated Legacy API | 625941ba925a0f43d2549d06 |
def coinChange(self, coins, amount): <NEW_LINE> <INDENT> max_val = amount + 1 <NEW_LINE> dp = [max_val] * max_val <NEW_LINE> dp[0] = 0 <NEW_LINE> for i in range(1, amount + 1): <NEW_LINE> <INDENT> for coin in coins: <NEW_LINE> <INDENT> if coin <= i: <NEW_LINE> <INDENT> dp[i] = min(dp[i], dp[i-coin] + 1) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return dp[amount] if dp[amount] <= amount else -1 | :type coins: List[int]
:type amount: int
:rtype: int | 625941ba3539df3088e2e1de |
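The coinChange row above is the classic bottom-up DP. A minimal standalone sketch of the same recurrence, with the Solution wrapper dropped and two small inputs checked:

```python
def coin_change(coins, amount):
    INF = amount + 1                      # sentinel larger than any valid answer
    dp = [INF] * (amount + 1)             # dp[i] = fewest coins summing to i
    dp[0] = 0
    for i in range(1, amount + 1):
        for coin in coins:
            if coin <= i:
                dp[i] = min(dp[i], dp[i - coin] + 1)
    return dp[amount] if dp[amount] <= amount else -1

assert coin_change([1, 2, 5], 11) == 3    # 5 + 5 + 1
assert coin_change([2], 3) == -1          # unreachable amount
```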
def test_validate_xml(self): <NEW_LINE> <INDENT> tree = etree.XML(self.xml) <NEW_LINE> self.xmlschema.assertValid(tree) | Assert the XML is valid according to schema | 625941ba8da39b475bd64e0a |
def raise_mask(dqarr, bitmask): <NEW_LINE> <INDENT> assert isinstance(bitmask, int) <NEW_LINE> return dqarr | bitmask |
Function that raises (sets) all the bits in 'dqarr' contained
in the bitmask.
:Parameters:
dqarr: numpy array or integer
numpy array which represents a dq plane (or part of it).
The function also works when dqarr is a scalar integer.
bitmask: integer
A bit mask specifying all the bits to be logically "raised"
in dqarr. For example,
* bitmask=1 = 2**0 will raise bit 0.
* bitmask=5 = 2**0 + 2**2 will raise bits 0 and 2.
:Returns:
newdqarr: numpy array or integer
Returns array 'dqarr' with the specified bits raised in all
elements (pixels). | 625941ba66656f66f7cbc03d |
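A runnable version of raise_mask, with the docstring's two bitmask examples checked on a small numpy dq plane:

```python
import numpy as np

def raise_mask(dqarr, bitmask):
    # OR-in the requested bits; works for scalar ints and numpy arrays alike
    assert isinstance(bitmask, int)
    return dqarr | bitmask

dq = np.array([[0, 2], [4, 6]])
print(raise_mask(dq, 1))   # bit 0 raised everywhere -> [[1 3] [5 7]]
print(raise_mask(dq, 5))   # bits 0 and 2 raised     -> [[5 7] [5 7]]
```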
def get_openssl_error(): <NEW_LINE> <INDENT> error_code = libcrypto.ERR_get_error() <NEW_LINE> error_string = create_string_buffer(ERROR_STRING_BUFFER_SIZE) <NEW_LINE> libcrypto.ERR_error_string_n(error_code, error_string, ERROR_STRING_BUFFER_SIZE) <NEW_LINE> return OpenSSLError(error_string.value) | Read the OpenSSL error queue and return an exception. | 625941baadb09d7d5db6c625 |
def __init__(__self__, __name__, __opts__=None, domain=None, user_pool_id=None): <NEW_LINE> <INDENT> if not __name__: <NEW_LINE> <INDENT> raise TypeError('Missing resource name argument (for URN creation)') <NEW_LINE> <DEDENT> if not isinstance(__name__, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected resource name to be a string') <NEW_LINE> <DEDENT> if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): <NEW_LINE> <INDENT> raise TypeError('Expected resource options to be a ResourceOptions instance') <NEW_LINE> <DEDENT> __props__ = dict() <NEW_LINE> if not domain: <NEW_LINE> <INDENT> raise TypeError('Missing required property domain') <NEW_LINE> <DEDENT> elif not isinstance(domain, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected property domain to be a basestring') <NEW_LINE> <DEDENT> __self__.domain = domain <NEW_LINE> __props__['domain'] = domain <NEW_LINE> if not user_pool_id: <NEW_LINE> <INDENT> raise TypeError('Missing required property user_pool_id') <NEW_LINE> <DEDENT> elif not isinstance(user_pool_id, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected property user_pool_id to be a basestring') <NEW_LINE> <DEDENT> __self__.user_pool_id = user_pool_id <NEW_LINE> __props__['userPoolId'] = user_pool_id <NEW_LINE> __self__.aws_account_id = pulumi.runtime.UNKNOWN <NEW_LINE> __self__.cloudfront_distribution_arn = pulumi.runtime.UNKNOWN <NEW_LINE> __self__.s3_bucket = pulumi.runtime.UNKNOWN <NEW_LINE> __self__.version = pulumi.runtime.UNKNOWN <NEW_LINE> super(UserPoolDomain, __self__).__init__( 'aws:cognito/userPoolDomain:UserPoolDomain', __name__, __props__, __opts__) | Create a UserPoolDomain resource with the given unique name, props, and options. | 625941ba7047854f462a12a0 |
def getUpdate(self, updateName, forcedBindings): <NEW_LINE> <INDENT> self.logger.debug("=== JSAPObject::getUpdate invoked ===") <NEW_LINE> return self.getSparql(False, updateName, forcedBindings) | Method used to get the final update text | 625941ba851cf427c661a3a5 |
def get_tempo(artist, title): <NEW_LINE> <INDENT> artist = artist.replace(u'\n', u' ').strip() <NEW_LINE> title = title.replace(u'\n', u' ').strip() <NEW_LINE> if not artist or not title: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> for i in range(RETRIES): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> results = pyechonest.song.search( artist=artist, title=title, results=1, buckets=['audio_summary'] ) <NEW_LINE> <DEDENT> except pyechonest.util.EchoNestAPIError as e: <NEW_LINE> <INDENT> if e.code == 3: <NEW_LINE> <INDENT> time.sleep(RETRY_INTERVAL) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.warn(u'echonest_tempo: {0}'.format(e.args[0][0])) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> except (pyechonest.util.EchoNestIOError, socket.error) as e: <NEW_LINE> <INDENT> log.debug(u'echonest_tempo: IO error: {0}'.format(e)) <NEW_LINE> time.sleep(RETRY_INTERVAL) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> log.debug(u'echonest_tempo: exceeded retries') <NEW_LINE> return None <NEW_LINE> <DEDENT> for result in results: <NEW_LINE> <INDENT> if result.artist_name == artist and result.title == title: <NEW_LINE> <INDENT> return results[0].audio_summary['tempo'] | Get the tempo for a song. | 625941baeab8aa0e5d26d9f1 |
def __init__(self, matrix: Array) -> None: <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> from .algebra import compute_condition_number <NEW_LINE> self._condition = compute_condition_number(matrix) | Compute condition number of the matrix. | 625941ba5f7d997b8717492e |
def __init__(self, width, height): <NEW_LINE> <INDENT> Transformer.__init__(self, width, height) <NEW_LINE> self.code = 16 <NEW_LINE> k = random.randrange(3, min(width + 1, 15), step=2) <NEW_LINE> self.params = [k] | Constructor
:param width: image width
:param height: image height | 625941ba29b78933be1e554c |
@stats_api_bp.route("/countries/<country_id_or_name>/stats") <NEW_LINE> def country_stats(country_id_or_name): <NEW_LINE> <INDENT> country_res = get_country_by_id_or_name(country_id_or_name) <NEW_LINE> res_dict = {"country_id": country_res.country_id} <NEW_LINE> indicators_query = Indicator.query.filter( Indicator.country_id == country_res.country_id ).order_by(Indicator.year) <NEW_LINE> years = [int(i.year) for i in indicators_query.distinct(Indicator.year).all()] <NEW_LINE> res_dict["years"] = years <NEW_LINE> years_num_dict: Dict[int, int] = {v: k for k, v in enumerate(years)} <NEW_LINE> indicators = indicators_query.all() <NEW_LINE> indicators_dict: DefaultDict[str, List[Any]] = defaultdict( lambda: [None] * len(years) ) <NEW_LINE> for ind in indicators: <NEW_LINE> <INDENT> year_index: int = years_num_dict[ind.year] <NEW_LINE> indicators_dict[ind.indicator_id][year_index] = float(ind.indicator_value) <NEW_LINE> <DEDENT> res_dict["indicator_values"] = indicators_dict <NEW_LINE> return res_dict | stats for a country
indicator_ids - get parameter to specify list of indicators
(if not specified - all indicators) | 625941bab57a9660fec33713 |
def test_arg_coherency_boot_event(self): <NEW_LINE> <INDENT> sys.argv = ["cloubed", "boot", "--domain", "toto", "--event", "toto"] <NEW_LINE> parser = CloubedArgumentParser(u"test_description") <NEW_LINE> parser.add_args() <NEW_LINE> parser.parse_args() <NEW_LINE> self.assertRaisesRegexp(CloubedArgumentException, "--event is not compatible with boot action", parser.check_optionals) | Raises CloubedArgumentException because event nonsense with boot
action | 625941ba3317a56b86939afc |
def test_get_mean_and_stddevs(self): <NEW_LINE> <INDENT> gsim = GMPETable(gmpe_table=self.TABLE_FILE) <NEW_LINE> rctx = RuptureContext() <NEW_LINE> rctx.mag = 6.0 <NEW_LINE> dctx = DistancesContext() <NEW_LINE> dctx.rjb = np.array([0.5, 1.0, 10.0, 100.0, 500.0]) <NEW_LINE> sctx = SitesContext() <NEW_LINE> stddevs = [const.StdDev.TOTAL] <NEW_LINE> expected_mean = np.array([2.0, 2.0, 1.0, 0.5, 1.0E-20]) <NEW_LINE> mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt_module.PGA(), stddevs) <NEW_LINE> np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5) <NEW_LINE> np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5) <NEW_LINE> mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt_module.SA(1.0), stddevs) <NEW_LINE> np.testing.assert_array_almost_equal(np.exp(mean), expected_mean, 5) <NEW_LINE> np.testing.assert_array_almost_equal(sigma[0], 0.8 * np.ones(5), 5) <NEW_LINE> mean, sigma = gsim.get_mean_and_stddevs(sctx, rctx, dctx, imt_module.PGV(), stddevs) <NEW_LINE> np.testing.assert_array_almost_equal(np.exp(mean), 10. * expected_mean, 5) <NEW_LINE> np.testing.assert_array_almost_equal(sigma[0], 0.5 * np.ones(5), 5) | Tests mean and standard deviations without amplification | 625941ba091ae35668666df8 |
def setsockopt_mon(self, opt, value): <NEW_LINE> <INDENT> self._mon_sockopts.append((opt, value)) | Enqueue setsockopt(opt, value) for mon_socket
See zmq.Socket.setsockopt for details. | 625941bab7558d58953c4dae |
def padding(self, input_id, input_mask, segment_id): <NEW_LINE> <INDENT> if len(input_id) < self.sequence_length: <NEW_LINE> <INDENT> pad_input_id = input_id + [0] * (self.sequence_length - len(input_id)) <NEW_LINE> pad_input_mask = input_mask + [0] * (self.sequence_length - len(input_mask)) <NEW_LINE> pad_segment_id = segment_id + [0] * (self.sequence_length - len(segment_id)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pad_input_id = input_id[:self.sequence_length] <NEW_LINE> pad_input_mask = input_mask[:self.sequence_length] <NEW_LINE> pad_segment_id = segment_id[:self.sequence_length] <NEW_LINE> <DEDENT> return pad_input_id, pad_input_mask, pad_segment_id | Pad or truncate the sequences to the fixed length
:param input_id:
:param input_mask:
:param segment_id:
:return: | 625941bae1aae11d1e749b47 |
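The same pad-or-truncate logic as a self-contained sketch; `SEQ_LEN` and the id values are illustrative, not from the source:

```python
def pad_to(seq, length, pad=0):
    # right-pad with `pad`, or truncate, so len(result) == length
    return seq + [pad] * (length - len(seq)) if len(seq) < length else seq[:length]

SEQ_LEN = 8  # hypothetical fixed sequence length
input_id = [101, 2023, 2003, 102]
print(pad_to(input_id, SEQ_LEN))          # [101, 2023, 2003, 102, 0, 0, 0, 0]
print(pad_to(list(range(12)), SEQ_LEN))   # truncated to the first 8 ids
```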
def plotResiduals(self,x=None,y=None,clf=True,logplot='',**kwargs): <NEW_LINE> <INDENT> from .utils import mpl_context <NEW_LINE> if x is None: <NEW_LINE> <INDENT> if self.data is None: <NEW_LINE> <INDENT> raise ValueError("No xdata provided and no fitted data present - can't plot residuals") <NEW_LINE> <DEDENT> x = self.data[0] <NEW_LINE> <DEDENT> if y is None: <NEW_LINE> <INDENT> if self.data is None: <NEW_LINE> <INDENT> raise ValueError("No ydata provided and no fitted data present - can't plot residuals") <NEW_LINE> <DEDENT> y = self.data[1] <NEW_LINE> <DEDENT> with mpl_context(clf=clf) as plt: <NEW_LINE> <INDENT> if 'x' in logplot and 'y' in logplot: <NEW_LINE> <INDENT> plt.loglog() <NEW_LINE> <DEDENT> elif 'x' in logplot: <NEW_LINE> <INDENT> plt.semilogx() <NEW_LINE> <DEDENT> elif 'y' in logplot: <NEW_LINE> <INDENT> plt.semilogy() <NEW_LINE> <DEDENT> ym = self(x) <NEW_LINE> kwargs.setdefault('c','r') <NEW_LINE> plt.scatter(x,y-ym,**kwargs) | Plots the residuals of the provided data (:math:`y-y_{mod}`) against this
model.
:param x:
The x data to plot the residuals for, or None to get it from any
fitted data.
:type x: array-like or None
:param y:
The y data to plot the residuals for, or None to get it from any
fitted data.
:type y: array-like or None
:param bool clf: If True, the plot will be cleared first.
:param logplot: Sets which axes are logarithmic
:type logplot: '','x','y', or 'xy' string
Additional arguments and keywords are passed into
:func:`matplotlib.pyplot.scatter`. | 625941ba3cc13d1c6d3c7217 |
def load_params(path): <NEW_LINE> <INDENT> with open(path, 'r') as f: <NEW_LINE> <INDENT> return json.load(f) | Load parameters from file | 625941ba462c4b4f79d1d564 |
def tokenize(file_obj: IO[bytes]) -> Iterator[Any]: <NEW_LINE> <INDENT> status = subprocess.run([str(esprima_bin)], check=True, stdin=file_obj, stdout=subprocess.PIPE) <NEW_LINE> return json.loads(status.stdout.decode('UTF-8')) | Tokenizes a (real!) bytes file using Esprima. | 625941ba23849d37ff7b2f25 |
def _createSessionObject(self, request): <NEW_LINE> <INDENT> user = endpoints.get_current_user() <NEW_LINE> if not user: <NEW_LINE> <INDENT> raise endpoints.UnauthorizedException('Authorization required') <NEW_LINE> <DEDENT> user_id = user.email() <NEW_LINE> if not request.name: <NEW_LINE> <INDENT> raise endpoints.BadRequestException("Session 'name' field required") <NEW_LINE> <DEDENT> if not request.speaker: <NEW_LINE> <INDENT> raise endpoints.BadRequestException("Session 'speaker' field required") <NEW_LINE> <DEDENT> data = {field.name: getattr(request, field.name) for field in request.all_fields()} <NEW_LINE> data['conference'] = ndb.Key(urlsafe=request.websafeConferenceKey) <NEW_LINE> del data['websafeConferenceKey'] <NEW_LINE> del data['sessionKey'] <NEW_LINE> if data['startDate']: <NEW_LINE> <INDENT> data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date() <NEW_LINE> <DEDENT> new_key = Session(**data).put() <NEW_LINE> taskqueue.add(params={'conferenceKey': request.websafeConferenceKey, 'speaker': data['speaker']}, url='/tasks/get_featured_speaker') <NEW_LINE> request.sessionKey = new_key.urlsafe() <NEW_LINE> return self._copySessionToForm(request) | Create or update Session object, returning SessionForm/request. | 625941baab23a570cc250013 |
def gen_gain_func(self, entropy_func): <NEW_LINE> <INDENT> def gains_func( features: pd.DataFrame, target: pd.Series, weights: pd.Series ): <NEW_LINE> <INDENT> targ_ent = entropy_func(target, weights) <NEW_LINE> sum_weights = np.sum(weights) <NEW_LINE> col_gains = {} <NEW_LINE> for column in features.columns: <NEW_LINE> <INDENT> col_gain = targ_ent <NEW_LINE> if features[column].dtype == "int64": <NEW_LINE> <INDENT> median = np.median(features[column]) <NEW_LINE> filt = features[column] > median <NEW_LINE> top_targ = target[filt].to_numpy() <NEW_LINE> bot_targ = target[np.invert(filt)].to_numpy() <NEW_LINE> top_wght = weights[filt].to_numpy() <NEW_LINE> bot_wght = weights[np.invert(filt)].to_numpy() <NEW_LINE> top_prob = sum(top_wght) / sum_weights <NEW_LINE> top_entr = entropy_func(top_targ, top_wght) <NEW_LINE> top_entr *= top_prob <NEW_LINE> bot_prob = sum(bot_wght) / sum_weights <NEW_LINE> bot_entr = entropy_func(bot_targ, bot_wght) <NEW_LINE> bot_entr *= bot_prob <NEW_LINE> col_gain -= (bot_entr + top_entr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> unique_elements = np.unique(features[column]) <NEW_LINE> col_gain = targ_ent <NEW_LINE> for e in unique_elements: <NEW_LINE> <INDENT> filt = features[column] == e <NEW_LINE> sub_targ = target [filt].to_numpy() <NEW_LINE> sub_wght = weights [filt].to_numpy() <NEW_LINE> prob = sum(sub_wght) / sum_weights <NEW_LINE> sub_ent = entropy_func( sub_targ, sub_wght ) <NEW_LINE> sub_ent *= prob <NEW_LINE> col_gain -= sub_ent <NEW_LINE> <DEDENT> <DEDENT> col_gains[column] = col_gain <NEW_LINE> <DEDENT> return col_gains <NEW_LINE> <DEDENT> return gains_func | Determine the gain of each column in the dataset S. Assumes the last
column is the target.
Parameters:
* entropy_func (func(vector): float): any function that inputs a
vector of data and outputs a floating point value between zero and
one representing how organized that data is.
Return:
* output (func(DataFrame, Series): dict): a function that takes in a
DataFrame of features and Series of targets, and outputs a dic-
tionary with the potential information gain of each feature column.
| 625941ba0c0af96317bb807d |
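gen_gain_func accepts any entropy function over (values, weights). A minimal weighted Shannon entropy that could be plugged in; this helper is a sketch of an assumption, not part of the source:

```python
import numpy as np

def shannon_entropy(values, weights):
    # Weighted Shannon entropy, in bits, of a label vector.
    values = np.asarray(values)
    weights = np.asarray(weights, dtype=float)
    total = weights.sum()
    probs = np.array([weights[values == v].sum() / total for v in np.unique(values)])
    return float(-(probs * np.log2(probs)).sum() + 0.0)  # +0.0 folds -0.0 to 0.0

print(shannon_entropy(['y', 'y', 'n', 'n'], [1, 1, 1, 1]))  # 1.0 (maximally mixed)
print(shannon_entropy(['y', 'y', 'y', 'y'], [1, 1, 1, 1]))  # 0.0 (a pure node)
```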
def sigmoid_focal_loss(inputs, targets, num_boxes,ignore, alpha: float = 0.25, gamma: float = 2): <NEW_LINE> <INDENT> prob = inputs.sigmoid() <NEW_LINE> ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") <NEW_LINE> p_t = prob * targets + (1 - prob) * (1 - targets) <NEW_LINE> loss = ce_loss * ((1 - p_t) ** gamma) <NEW_LINE> if alpha >= 0: <NEW_LINE> <INDENT> alpha_t = alpha * targets + (1 - alpha) * (1 - targets) <NEW_LINE> loss = alpha_t * loss <NEW_LINE> <DEDENT> ignore_factor = torch.ones_like(loss) <NEW_LINE> if len(ignore[0]) > 0: <NEW_LINE> <INDENT> ignore_factor[ignore] = 0 <NEW_LINE> <DEDENT> loss *= ignore_factor <NEW_LINE> return loss.mean(1).sum() / num_boxes | Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
ignore: tuple (batch_idx, corresponding ignore tensor idx)
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha: (optional) Weighting factor in range (0,1) to balance
positive vs negative examples. Default = -1 (no weighting).
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
We perform the normalization by the number of assigned anchors,
not total anchors, since the vast majority of anchors are easy negatives and receive negligible loss values under the focal loss;
hence, after mean(1), we multiply again by the number of anchors.
Loss tensor | 625941baa219f33f34628808 |
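For intuition, a toy run of the same focal-loss modulation with the ignore mask omitted; `focal_loss` below is a simplified stand-in (PyTorch assumed), not the row's actual function:

```python
import torch
import torch.nn.functional as F

def focal_loss(inputs, targets, alpha=0.25, gamma=2.0):
    # standard sigmoid focal loss, mean-reduced, without the ignore mechanism
    prob = inputs.sigmoid()
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce * ((1 - p_t) ** gamma)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * loss).mean()

logits = torch.tensor([3.0, -3.0, 0.0])   # confident pos, confident neg, unsure
targets = torch.tensor([1.0, 0.0, 1.0])
print(focal_loss(logits, targets))        # easy examples contribute almost nothing
```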
def alcubierre(self): <NEW_LINE> <INDENT> xs = symbols('x_s')(x0) <NEW_LINE> vs = xs.diff(x0) <NEW_LINE> rs = sqrt((x1-xs)**2 + x2**2 + x3**2) <NEW_LINE> fs = (tanh(s * (rs + R)) - tanh(s * (rs - R))) / (2 * tanh( s * R )) <NEW_LINE> alcubierre_spacetime = Matrix([ [ (vs**2 * fs**2 - 1), -2*vs*fs, -2*vs*fs, -2*vs*fs ], [ -2*vs*fs, -1, 0, 0 ], [ -2*vs*fs, 0, -1, 0 ], [ -2*vs*fs, 0, 0, -1 ] ]) <NEW_LINE> return alcubierre_spacetime | Description
===========
Returns the famous Alcubierre 'warp-drive' metric solution.
Examples
========
>>> from sympy import *
>>> print(Solution().alcubierre())
>>>
LaTeX representation
==================== | 625941ba2eb69b55b151c73f |
def index(self, pointer, index, dest_pointer_type=None): <NEW_LINE> <INDENT> if dest_pointer_type: <NEW_LINE> <INDENT> return self.index_multiple(pointer, [index], dest_pointer_type) <NEW_LINE> <DEDENT> return SingleIndexNode(self.pos, pointer.type.base_type, pointer, index) | Index a pointer with the given index node.
:param dest_pointer_type: if given, cast the result (*after* adding
the index) to the destination type and
dereference. | 625941bad4950a0f3b08c1e5 |
def get_fields_from_model(self, model, fields): <NEW_LINE> <INDENT> model_fields = {} <NEW_LINE> for field in fields: <NEW_LINE> <INDENT> if isinstance(field, tuple) and len(field) == 2: <NEW_LINE> <INDENT> field, verbose_name = field[0], field[1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> model_field = get_fields_from_path(model, field)[-1] <NEW_LINE> verbose_name = model_field.verbose_name <NEW_LINE> <DEDENT> except (FieldDoesNotExist, IndexError, TypeError) as e: <NEW_LINE> <INDENT> logger.warn("AdvancedFilterForm: skip invalid field " "- %s", e) <NEW_LINE> continue <NEW_LINE> <DEDENT> <DEDENT> model_fields[field] = verbose_name <NEW_LINE> <DEDENT> return model_fields | Iterate over given <field> names (in "orm query" notation) and find
the actual field given the initial <model>.
If <field> is a tuple of the format ('field_name', 'Verbose name'),
overwrite the field's verbose name with the given name for display
purposes. | 625941ba31939e2706e4cd03 |
def _d_ik(i, W, B): <NEW_LINE> <INDENT> Di = W[i,:].reshape((W.shape[1],1)) - B <NEW_LINE> Di = Di*Di <NEW_LINE> Di = Di.sum(axis = 0) <NEW_LINE> assigned_cluster = Di.argmin() <NEW_LINE> return assigned_cluster | The data cluster matrix A is updated using formula 10 from Li (2005) which is the same as
formula 2.3 in Li & Zhu (2005). The formula uses the squared distance between ith point and
the kth cluster. The point is then assigned to the closest cluster. The squared distance
between point i and data cluster k is computed by summing over the element-wise differences
between the i-th row and k-th row of W and B, respectively:
d[i,k] = SUM_{j in features} (W[i,j] - B[k,j])^2
Parameters
----------
i : int
index of the data point
W : np.array
binary data matrix
B : np.array
feature cluster assignment matrix
Returns
-------
int
index of assigned cluster | 625941ba4d74a7450ccd4056 |
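A numpy sketch of the squared-distance assignment that _d_ik computes for one point, broadcasting the row of W against B directly; shapes assumed: W is points x features, B is clusters x features:

```python
import numpy as np

def nearest_cluster(i, W, B):
    # squared distance from row i of W to every row of B, then argmin
    diff = W[i, :] - B                 # broadcasts to (n_clusters, n_features)
    return int((diff * diff).sum(axis=1).argmin())

W = np.array([[1, 0, 1], [0, 1, 1]], dtype=float)   # two data points
B = np.array([[1, 0, 0], [0, 1, 1]], dtype=float)   # two cluster centroids
print(nearest_cluster(0, W, B))   # 0: point 0 is closer to the first centroid
print(nearest_cluster(1, W, B))   # 1: point 1 matches the second centroid exactly
```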
def setUp(self): <NEW_LINE> <INDENT> self.i = indexer.Indexer('dbase') | create an object of the Indexer class
| 625941ba21bff66bcd6847e9 |
def parse_xgen_file(filepath): <NEW_LINE> <INDENT> modules = [] <NEW_LINE> with open(filepath) as f: <NEW_LINE> <INDENT> module = [] <NEW_LINE> for line in f: <NEW_LINE> <INDENT> line = line.strip(' \t\r\n') <NEW_LINE> if not line: <NEW_LINE> <INDENT> if module: <NEW_LINE> <INDENT> modules.append(module) <NEW_LINE> module = [] <NEW_LINE> <DEDENT> continue <NEW_LINE> <DEDENT> if line.startswith("#") or line.startswith('FileVersion'): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> module.append(line) <NEW_LINE> <DEDENT> <DEDENT> parsed_modules = collections.defaultdict(list) <NEW_LINE> for module in modules: <NEW_LINE> <INDENT> parsed_module = {} <NEW_LINE> module_type = module.pop(0).strip(' \t\r\n') <NEW_LINE> for line in module: <NEW_LINE> <INDENT> parts = [v.strip() for v in line.split(None, 1)] <NEW_LINE> attr_name = parts.pop(0) <NEW_LINE> attr_value = parts[0] if parts else "" <NEW_LINE> parsed_module[attr_name] = attr_value <NEW_LINE> <DEDENT> parsed_modules[module_type].append(parsed_module) <NEW_LINE> <DEDENT> return dict(parsed_modules) | TODO(LWS): I hope we can get rid of this ASAP if/when xgen provides an api to read .xgen files
Crude .xgen file parser for reading palette files.
Return a dictionary where the key is the module type, and the value is a list of modules of that
type. Each module is a dictionary of data, where the key is the property/attribute name, and the
value is the raw value of the property.
example input file content:
Palette
name robert_xgen_coll
parent
xgDataPath /test/robert/collections/robert_xgen_coll
xgProjectPath /test/shot_100/lighting/
xgDogTag
endAttrs
example output:
{"Palette": [
{
'name': 'robert_xgen_coll',
'parent': '',
'xgDataPath': '/test/robert/collections/robert_xgen_coll',
'xgDogTag': '',
'xgProjectPath': '/test/shot_100/lighting/',
'endAttrs': '',
}
]} | 625941ba85dfad0860c3aced |
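A quick round-trip of the parser using the docstring's example content, assuming `parse_xgen_file` (and its `collections` import) from the row above is in scope:

```python
import tempfile

# The parser flushes a module only when it hits a blank line,
# so the sample ends with one.
sample = """Palette
name robert_xgen_coll
parent
xgDataPath /test/robert/collections/robert_xgen_coll
xgProjectPath /test/shot_100/lighting/
xgDogTag
endAttrs

"""

with tempfile.NamedTemporaryFile("w", suffix=".xgen", delete=False) as f:
    f.write(sample)

palettes = parse_xgen_file(f.name)
print(palettes["Palette"][0]["name"])           # robert_xgen_coll
print(palettes["Palette"][0]["xgProjectPath"])  # /test/shot_100/lighting/
```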
def _wii_b(self, data, xx): <NEW_LINE> <INDENT> address, f, b = data <NEW_LINE> self.b = b and True or False | Callback method that registers the B button | 625941ba507cdc57c6306b67 |
def file_read_symlink(self, path): <NEW_LINE> <INDENT> return self.request( "file-read-symlink", { 'path': [ path, 'path', [ basestring, 'None' ], False ], }, { 'symlink': [ basestring, False ], } ) | Read the contents of a symlink.
:param path: Path of the symlink file to read.
The value is expected to begin with /vol/<volumename>. | 625941ba63b5f9789fde6f79 |
def _init_sample_imgs_inputs(self): <NEW_LINE> <INDENT> self.sample_row_n = FLAGS.batch_size // FLAGS.sample_col_n <NEW_LINE> self.sample_style_ids = np.repeat(np.arange(0, FLAGS.style_ids_n), self.char_embedding_n)[:FLAGS.batch_size] <NEW_LINE> self.sample_char_ids = np.tile(np.arange(0, self.char_embedding_n), FLAGS.style_ids_n)[:FLAGS.batch_size] | Initialize inputs for generating sample images
Sample images are generated once every FLAGS.sample_imgs_interval times.
These inputs are given by this method. | 625941ba293b9510aa2c312c
def handle_lut(self, pkt): <NEW_LINE> <INDENT> self.logger.debug("handle lut") <NEW_LINE> if pkt.subunit & COMMAND: <NEW_LINE> <INDENT> data_type = str(pkt.nbytes / 2) + 'h' <NEW_LINE> line = pkt.datain.read(pkt.nbytes) <NEW_LINE> n = len(line) <NEW_LINE> if (n < pkt.nbytes): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> x = struct.unpack(data_type, line) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.logger.error("Error unpacking struct: %s" % (str(e))) <NEW_LINE> return <NEW_LINE> <DEDENT> if len(x) < 14: <NEW_LINE> <INDENT> y = [] <NEW_LINE> for i in range(14): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> y.append(x[i]) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> y.append(0) <NEW_LINE> <DEDENT> <DEDENT> x = y <NEW_LINE> del(y) <NEW_LINE> <DEDENT> if len(x) == 14: <NEW_LINE> <INDENT> z = int(x[0]) <NEW_LINE> self.frame = self.decode_frameno(z) - 1 <NEW_LINE> if (self.frame > MAX_FRAMES): <NEW_LINE> <INDENT> self.logger.error("attempt to select non existing frame.") <NEW_LINE> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.server.controller.get_frame(self.frame) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.server.controller.init_frame(self.frame) <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> self.logger.error("unable to select a frame.") <NEW_LINE> return <NEW_LINE> <DEDENT> self.logger.error("what shall I do?") | This part of the protocol is used by IRAF to set the frame number.
| 625941ba236d856c2ad44672 |
def rob(self, root): <NEW_LINE> <INDENT> return max(self.find_max(root)) | :type root: TreeNode
:rtype: int | 625941ba8a349b6b435e8008 |
def activation(self, x, w, b): <NEW_LINE> <INDENT> if self.activation_fnc in [0, 'logistic']: <NEW_LINE> <INDENT> return self.logistic(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [8, 'sigmoid']: <NEW_LINE> <INDENT> return self.sigmoid(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [1, 'linear']: <NEW_LINE> <INDENT> return self.linear(self.processInput(x, w.transpose(), b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [2, 'relu']: <NEW_LINE> <INDENT> return self.relu(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [3, 'tanH']: <NEW_LINE> <INDENT> return self.tanH(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [4, 'relu']: <NEW_LINE> <INDENT> return self.relu(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [5, 'softplus']: <NEW_LINE> <INDENT> return self.softplus(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [6, 'arctan']: <NEW_LINE> <INDENT> return self.garctan(self.processInput(x,w.transpose(),b)) <NEW_LINE> <DEDENT> elif self.activation_fnc in [7, 'perceptron']: <NEW_LINE> <INDENT> if self.verbose > 1: <NEW_LINE> <INDENT> print('perception:') <NEW_LINE> print('activation function {}'.format(self.activation_fnc)) <NEW_LINE> <DEDENT> return self.perceptron(self.processInput(x,w.transpose(),b)) | Performs the activation function calculation selected when neuron
was instantiated
{'sigmoid':8, 'linear':1, 'relu':2, 'tanH':3, 'softplus':5, 'arctan':6, 'perceptron':7, 'logistic':0,}
:param x: input value numpy array from inputing neurons
:param w: weights on inputs
:return: | 625941bab545ff76a8913cb2 |
def escape_nl(msg): <NEW_LINE> <INDENT> if msg != '' and msg[-1] == "\n": <NEW_LINE> <INDENT> return msg[:-1] + "\\n" <NEW_LINE> <DEDENT> return msg | It's nice to know if we actually sent a complete line ending in
"\n", so escape it for display.
| 625941ba01c39578d7e74cd7 |
def marvinPrinciple(row, asList=None, asStr=None): <NEW_LINE> <INDENT> if row.intersection(['principle', 'princip', 'principer']): <NEW_LINE> <INDENT> principles = getString("principle") <NEW_LINE> key = row.intersection(list(principles.keys())) <NEW_LINE> if key: <NEW_LINE> <INDENT> return principles[key.pop()] <NEW_LINE> <DEDENT> return principles[random.choice(list(principles.keys()))] | Display one selected software principle, or provide one as random | 625941ba1b99ca400220a945 |
def test_uboot(self): <NEW_LINE> <INDENT> uboot_url = ('https://acc.dl.osdn.jp/users/23/23888/u-boot.bin.gz') <NEW_LINE> uboot_hash = '9b78dbd43b40b2526848c0b1ce9de02c24f4dcdb' <NEW_LINE> uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) <NEW_LINE> uboot_path = archive.uncompress(uboot_path, self.workdir) <NEW_LINE> self.vm.set_console() <NEW_LINE> self.vm.add_args('-bios', uboot_path, '-no-reboot') <NEW_LINE> self.vm.launch() <NEW_LINE> uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty' <NEW_LINE> wait_for_console_pattern(self, uboot_version) <NEW_LINE> gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)' | Boots U-Boot and checks that the console is operational.
:avocado: tags=arch:rx
:avocado: tags=machine:gdbsim-r5f562n8
:avocado: tags=endian:little | 625941ba566aa707497f440d |
def test_leading_unknown_messge(): <NEW_LINE> <INDENT> rawmessage = bytearray([0x02, 0x00, 0x15, 0x02, 0x50, 0x46, 0xd0, 0xe6, 0x43, 0x6c, 0x15, 0x40, 0x11, 0x01]) <NEW_LINE> msg, buffer = insteonplm.messages.create(rawmessage) <NEW_LINE> assert isinstance(msg, StandardReceive) <NEW_LINE> assert msg.cmd1 == 0x11 <NEW_LINE> assert msg.cmd2 == 0x01 | Test leading unknown message.
def xls(self, filename): <NEW_LINE> <INDENT> name = os.path.basename(filename).split('.')[0] + '.pdf' <NEW_LINE> exportfile = os.path.join(self._export_folder, name) <NEW_LINE> xlApp = DispatchEx("Excel.Application") <NEW_LINE> xlApp.Visible = False <NEW_LINE> xlApp.DisplayAlerts = 0 <NEW_LINE> books = xlApp.Workbooks.Open(filename,False) <NEW_LINE> books.ExportAsFixedFormat(0, exportfile) <NEW_LINE> books.Close(False) <NEW_LINE> print('保存 PDF 文件:', exportfile) <NEW_LINE> xlApp.Quit() | Convert xls and xlsx files to PDF
def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> self.failfast = kwargs.get("failfast") <NEW_LINE> self.verbosity = int(kwargs.get("verbosity")) <NEW_LINE> if not os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS',""): <NEW_LINE> <INDENT> os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = "localhost:9000-9999" <NEW_LINE> <DEDENT> return super(KALiteTestRunner, self).__init__(*args, **kwargs) | Force setting up live server test. Adding to kwargs doesn't work, need to go to env.
Dependent on how Django works here. | 625941ba96565a6dacc8f569 |
def compute_topk_scores_and_seq(sequences, scores, scores_to_gather, flags, beam_dim, prefix="default"): <NEW_LINE> <INDENT> unused_batch_dim, old_beam_dim, unused_length_dim = sequences.shape.dims <NEW_LINE> _, topk_indices = mtf.top_k(scores, old_beam_dim, k_dim=beam_dim) <NEW_LINE> selector = mtf.one_hot(topk_indices, old_beam_dim, dtype=tf.float32) <NEW_LINE> def gather(tensor, name): <NEW_LINE> <INDENT> with tf.name_scope(prefix + name): <NEW_LINE> <INDENT> output_shape = mtf.Shape( [beam_dim if d == old_beam_dim else d for d in tensor.shape.dims]) <NEW_LINE> return mtf.gather( tensor, topk_indices, old_beam_dim, output_shape=output_shape) <NEW_LINE> <DEDENT> <DEDENT> topk_seq = gather(sequences, "_seq") <NEW_LINE> topk_flags = gather(flags, "_flags") <NEW_LINE> topk_gathered_scores = gather(scores_to_gather, "_scores") <NEW_LINE> return topk_seq, topk_gathered_scores, topk_flags, selector | Given sequences and scores, will gather the top k=beam size sequences.
This function is used to grow alive, and finished. It takes sequences,
scores, and flags, and returns the top k from sequences, scores_to_gather,
and flags based on the values in scores.
This method permits easy introspection using tfdbg. It adds two named ops
that are prefixed by `prefix`:
- _topk_seq: the tensor for topk_seq returned by this method.
- _topk_flags: the tensor for topk_finished_flags returned by this method.
Args:
sequences: Tensor of sequences that we need to gather from.
[batch_size, beam_size, seq_length]
scores: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will use these to compute the topk.
scores_to_gather: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will return the gathered scores from here.
Scores to gather is different from scores because for grow_alive, we will
need to return log_probs, while for grow_finished, we will need to return
the length penalized scores.
flags: Tensor of bools for sequences that say whether a sequence has reached
EOS or not
beam_dim: mtf.Dimension
prefix: an optional string
Returns:
Tuple of
(topk_seq [batch_size, beam_size, decode_length],
topk_gathered_scores [batch_size, beam_size],
topk_finished_flags[batch_size, beam_size],
selector) | 625941ba63f4b57ef0000fb5 |
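The Mesh-TensorFlow gather above is hard to run standalone; below is a plain numpy sketch of the same pick-top-k-then-gather pattern over a beam dimension (names are illustrative, not the row's API):

```python
import numpy as np

def topk_gather(sequences, scores, k):
    # pick the k best beams per batch row, then gather their sequences
    topk_idx = np.argsort(-scores, axis=1)[:, :k]        # (batch, k)
    batch_idx = np.arange(scores.shape[0])[:, None]      # (batch, 1)
    return sequences[batch_idx, topk_idx], scores[batch_idx, topk_idx]

seqs = np.arange(2 * 4 * 3).reshape(2, 4, 3)   # (batch=2, beams=4, length=3)
scores = np.array([[0.1, 0.9, 0.5, 0.2],
                   [0.7, 0.3, 0.8, 0.1]])
best_seqs, best_scores = topk_gather(seqs, scores, k=2)
print(best_scores)   # [[0.9 0.5] [0.8 0.7]]
```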
def Get(self, request, global_params=None): <NEW_LINE> <INDENT> config = self.GetMethodConfig('Get') <NEW_LINE> return self._RunMethod( config, request, global_params=global_params) | Retrieves the settings that control the specified consumer project's usage
of the service.
Args:
request: (ServicemanagementServicesProjectSettingsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ProjectSettings) The response message.
| 625941bad10714528d5ffb74 |
def quotient_char_p(I, p): <NEW_LINE> <INDENT> if not I.is_integral(): <NEW_LINE> <INDENT> raise ValueError("I must be an integral ideal.") <NEW_LINE> <DEDENT> K = I.number_field() <NEW_LINE> OK = K.maximal_order() <NEW_LINE> M_OK = OK.free_module() <NEW_LINE> M_I = I.free_module() <NEW_LINE> B_I = M_I.basis() <NEW_LINE> M_OK_mat = M_OK.basis_matrix() <NEW_LINE> M_OK_change = M_OK_mat**(-1) <NEW_LINE> B_I_in_terms_of_M = M_I.basis_matrix() * M_OK_change <NEW_LINE> n = K.absolute_degree() <NEW_LINE> k = FiniteField(p) <NEW_LINE> M_OK_modp = k**n <NEW_LINE> B_mod = B_I_in_terms_of_M.change_ring(k) <NEW_LINE> M_I_modp = M_OK_modp.span(B_mod.row_space()) <NEW_LINE> Q = M_OK_modp.quotient(M_I_modp) <NEW_LINE> K_to_Q = QuotientMap(K, M_OK_change, Q, I) <NEW_LINE> Q_to_OK = LiftMap(OK, M_OK_mat, Q, I) <NEW_LINE> return Q, K_to_Q, Q_to_OK | Given an integral ideal `I` that contains a prime number `p`, compute
a vector space `V = (O_K \mod p) / (I \mod p)`, along with a
homomorphism `O_K \to V` and a section `V \to O_K`.
EXAMPLES::
sage: from sage.rings.number_field.number_field_ideal import quotient_char_p
sage: K.<i> = NumberField(x^2 + 1); O = K.maximal_order(); I = K.fractional_ideal(15)
sage: quotient_char_p(I, 5)[0]
Vector space quotient V/W of dimension 2 over Finite Field of size 5 where
V: Vector space of dimension 2 over Finite Field of size 5
W: Vector space of degree 2 and dimension 0 over Finite Field of size 5
Basis matrix:
[]
sage: quotient_char_p(I, 3)[0]
Vector space quotient V/W of dimension 2 over Finite Field of size 3 where
V: Vector space of dimension 2 over Finite Field of size 3
W: Vector space of degree 2 and dimension 0 over Finite Field of size 3
Basis matrix:
[]
sage: I = K.factor(13)[0][0]; I
Fractional ideal (-3*i - 2)
sage: I.residue_class_degree()
1
sage: quotient_char_p(I, 13)[0]
Vector space quotient V/W of dimension 1 over Finite Field of size 13 where
V: Vector space of dimension 2 over Finite Field of size 13
W: Vector space of degree 2 and dimension 1 over Finite Field of size 13
Basis matrix:
[1 8] | 625941bac4546d3d9de728c5 |
def GetJoinBlob(self): <NEW_LINE> <INDENT> l_request = skypekit.XCallRequest("ZR\022\030", 18, 24) <NEW_LINE> l_request.add_parm('O', 0, self) <NEW_LINE> l_response = self.transport.xcall(l_request) <NEW_LINE> l_result = l_response.get(1, '') <NEW_LINE> return l_result | Retrieves a binary join blob for joining public conversations, which are always of type CONFERENCE. If called for a dialog, the blob argument will contain the empty string. The best way to create a Public Chat is to first create a fresh conversation with Skype class CreateConference, then minimally apply the public chat options OPT_JOINING_ENABLED and OPT_ENTRY_LEVEL_RANK - options, like this (C++):
::
C->SetOption(Conversation::OPT_JOINING_ENABLED, true);
When that is done, you can call GetJoinBlob to retrieve the blob string. Use the blob string to generate and post an HTML link whose href attribute looks like this: href="skype:?chat&blob=_BLOB_GOES_HERE" A person running Skype desktop client can click this link to join the conversation and have that conversation opened in his UI. Note that the conversation host (creator) needs to be online for new joiners-via-link to start participating in the Public Chat.
B{Return values:}
- B{blob} - Returns the public conversation join blob. | 625941bae64d504609d746d5 |
def decode(self, emission, mask): <NEW_LINE> <INDENT> emission_shape = emission.size() <NEW_LINE> mask = mask.unsqueeze(dim=1) <NEW_LINE> mask = mask.repeat(1, emission_shape[1], 1) <NEW_LINE> mask = mask.reshape([-1, mask.size(2)]) <NEW_LINE> emission = emission.reshape([-1, emission_shape[2], emission.size(3)]) <NEW_LINE> result = self.crf.decode(emission, mask) <NEW_LINE> result = result.reshape([-1, emission_shape[1], mask.size(1)]) <NEW_LINE> result = result.tolist() <NEW_LINE> return result | emission: B T L F | 625941ba090684286d50eb75 |
def __getattr__(self, key): <NEW_LINE> <INDENT> return self.get(key) | Get the environment variable. | 625941ba92d797404e30401d |
def to_knx(self, value: Sequence[int]) -> DPTArray: <NEW_LINE> <INDENT> if not isinstance(value, (list, tuple)): <NEW_LINE> <INDENT> raise ConversionError( "Could not serialize RemoteValueColorRGBW (wrong type, expecting list of 4-6 bytes))", value=value, type=type(value), ) <NEW_LINE> <DEDENT> if len(value) < 4 or len(value) > 6: <NEW_LINE> <INDENT> raise ConversionError( "Could not serialize value to DPT 251.600 (wrong length, expecting list of 4-6 bytes)", value=value, type=type(value), ) <NEW_LINE> <DEDENT> rgbw = value[:4] <NEW_LINE> if ( any(not isinstance(color, int) for color in rgbw) or any(color < 0 for color in rgbw) or any(color > 255 for color in rgbw) ): <NEW_LINE> <INDENT> raise ConversionError( "Could not serialize DPT 251.600 (wrong RGBW values)", value=value ) <NEW_LINE> <DEDENT> if len(value) < 5: <NEW_LINE> <INDENT> return DPTArray(list(rgbw) + [0x00, 0x0F]) <NEW_LINE> <DEDENT> if len(value) < 6: <NEW_LINE> <INDENT> return DPTArray(list(rgbw) + [0x00] + list(value[4:])) <NEW_LINE> <DEDENT> return DPTArray(value) | Convert value (4-6 bytes) to payload (6 bytes).
* Structure of DPT 251.600
** Byte 0: R value
** Byte 1: G value
** Byte 2: B value
** Byte 3: W value
** Byte 4: 0x00 (reserved)
** Byte 5:
*** Bit 0: W value valid?
*** Bit 1: B value valid?
*** Bit 2: G value valid?
*** Bit 3: R value valid?
*** Bit 4-7: 0
In case we receive
* > 6 bytes: error
* 6 bytes: all bytes are passed through
* 5 bytes: 0x00?? fill up to 6 bytes
* 4 bytes: 0x000f right padding to 6 bytes
* < 4 bytes: error | 625941ba377c676e9127203e |
def __len__(self): <NEW_LINE> <INDENT> return len(self.list) | Return the size of the OrderedSet
| 625941ba0fa83653e4656e51 |
def new_url(**kwargs): <NEW_LINE> <INDENT> url_base = "/axapi/v3/debug/tcp" <NEW_LINE> f_dict = {} <NEW_LINE> return url_base.format(**f_dict) | Return the URL for creating a resource | 625941ba44b2445a33931f34 |
def __init__(self): <NEW_LINE> <INDENT> super().__init__("rollback") <NEW_LINE> self.snap = FormattedParameter("--snap={}") <NEW_LINE> self.epc = FormattedParameter("--epc={}") | Create a daos container rollback command object. | 625941ba60cbc95b062c63dd |
def keypoints_heatmap(kpts, heat_size, stride, var): <NEW_LINE> <INDENT> kpts = kpts.reshape(-1, 14, 3) <NEW_LINE> heat_one_img = [] <NEW_LINE> for kpt in kpts: <NEW_LINE> <INDENT> heat_one_joint = [] <NEW_LINE> for joint in kpt: <NEW_LINE> <INDENT> x, y, v = joint <NEW_LINE> x, y = (x//stride, y//stride) <NEW_LINE> if v < 1.5: <NEW_LINE> <INDENT> heat = heatmap_one_kpt(x, y, heat_size, var) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> heat = np.zeros(heat_size, dtype=np.float32) <NEW_LINE> <DEDENT> heat_one_joint.append(heat) <NEW_LINE> <DEDENT> heat_one_joint = np.stack(heat_one_joint, axis=0) <NEW_LINE> heat_one_img.append(heat_one_joint) <NEW_LINE> <DEDENT> heat_one_img = np.stack(heat_one_img, axis=0) <NEW_LINE> heat_one_img = np.amax(heat_one_img, axis=0) <NEW_LINE> return heat_one_img | Generate heatmap of all keypoints, shape [num_kpts, h, w].
Params:
kpts: keypoint coordinates of all humans in one img, [num_human*14, 3]
heat_size: feature map size of network output
stride: downsample ratio
var: Gaussian variance
Returns:
heatmap of one img, [14, h, w] | 625941baa17c0f6771cbdee8 |
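`heatmap_one_kpt` is called but not shown in this row; a plausible implementation is sketched below as an assumption (an unnormalized Gaussian bump), not the dataset's actual helper:

```python
import numpy as np

def heatmap_one_kpt(x, y, heat_size, var):
    # Assumed helper: unnormalized Gaussian centred at (x, y) on an (h, w) grid.
    h, w = heat_size
    ys, xs = np.mgrid[0:h, 0:w]
    return np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * var)).astype(np.float32)

heat = heatmap_one_kpt(3, 2, (8, 8), var=1.5)
print(heat.argmax() // 8, heat.argmax() % 8)   # (2, 3): peak at row y, column x
```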
@njit('(float32[:,:,:], float32[:,:,:], int32[:,:], int32)') <NEW_LINE> def steps2D(p, dP, inds, niter): <NEW_LINE> <INDENT> shape = p.shape[1:] <NEW_LINE> for t in range(niter): <NEW_LINE> <INDENT> for j in range(inds.shape[0]): <NEW_LINE> <INDENT> y = inds[j,0] <NEW_LINE> x = inds[j,1] <NEW_LINE> p0, p1 = int(p[0,y,x]), int(p[1,y,x]) <NEW_LINE> p[0,y,x] = min(shape[0]-1, max(0, p[0,y,x] - dP[0,p0,p1])) <NEW_LINE> p[1,y,x] = min(shape[1]-1, max(0, p[1,y,x] - dP[1,p0,p1])) <NEW_LINE> <DEDENT> <DEDENT> return p | run dynamics of pixels to recover masks in 2D
Euler integration of dynamics dP for niter steps
Parameters
----------------
p: float32, 3D array
pixel locations [axis x Ly x Lx] (start at initial meshgrid)
dP: float32, 3D array
flows [axis x Ly x Lx]
inds: int32, 2D array
non-zero pixels to run dynamics on [npixels x 2]
niter: int32
number of iterations of dynamics to run
Returns
---------------
p: float32, 3D array
final locations of each pixel after dynamics | 625941ba6aa9bd52df036c37 |
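A minimal numpy analogue of the same Euler integration, vectorized over the full grid instead of a pixel index list; illustrative only, since the original runs under numba on a subset of pixels:

```python
import numpy as np

def euler_steps(p, dP, niter):
    # follow the (negative) flow field for niter steps, clamped to the grid
    h, w = p.shape[1:]
    for _ in range(niter):
        y, x = p[0].astype(int), p[1].astype(int)
        p[0] = np.clip(p[0] - dP[0, y, x], 0, h - 1)
        p[1] = np.clip(p[1] - dP[1, y, x], 0, w - 1)
    return p

h = w = 5
p = np.stack(np.meshgrid(np.arange(h), np.arange(w), indexing="ij")).astype(np.float32)
dP = np.ones((2, h, w), dtype=np.float32)     # constant flow toward (0, 0)
print(euler_steps(p, dP, niter=3)[:, 4, 4])   # [1. 1.]: pixel (4,4) moved 3 steps
```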
def _break_dictionary(data): <NEW_LINE> <INDENT> if not isinstance(data, dict): <NEW_LINE> <INDENT> raise ParameterValueFormatError( f"expected data to be in dictionary format, instead got '{type(data).__name__}'" ) <NEW_LINE> <DEDENT> indexes, values = zip(*data.items()) <NEW_LINE> return list(indexes), np.array(values) | Converts {"index": value} style dictionary into (list(indexes), numpy.ndarray(values)) tuple. | 625941ba851cf427c661a3a6 |
def __init__(self, api, area): <NEW_LINE> <INDENT> self._area_id = area['id'] <NEW_LINE> self._name = area['name'] <NEW_LINE> self._state = _get_alarm_state(area['mode']) <NEW_LINE> if self._state == STATE_ALARM_DISARMED: <NEW_LINE> <INDENT> self._changed_by = area.get('last_unset_user_name', 'unknown') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._changed_by = area.get('last_set_user_name', 'unknown') <NEW_LINE> <DEDENT> self._api = api | Initialize the SPC alarm panel. | 625941bab5575c28eb68de92 |
def decomposed_energy(context, parm, NRG_UNIT=u.kilocalories_per_mole): <NEW_LINE> <INDENT> energies = {} <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.BOND_FORCE_GROUP) <NEW_LINE> energies['bond'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.ANGLE_FORCE_GROUP) <NEW_LINE> energies['angle'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.DIHEDRAL_FORCE_GROUP) <NEW_LINE> energies['dihedral'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.NONBONDED_FORCE_GROUP) <NEW_LINE> energies['nonbond'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> if isinstance(parm, ChamberParm): <NEW_LINE> <INDENT> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.UREY_BRADLEY_FORCE_GROUP) <NEW_LINE> energies['urey'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.IMPROPER_FORCE_GROUP) <NEW_LINE> energies['improper']=s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> s = context.getState(getEnergy=True, enforcePeriodicBox=parm.ptr('ifbox')>0, groups=2**parm.CMAP_FORCE_GROUP) <NEW_LINE> energies['cmap'] = s.getPotentialEnergy().value_in_unit(NRG_UNIT) <NEW_LINE> <DEDENT> return energies | Gets a decomposed energy for a given system | 625941ba50485f2cf553cc2d |
def _check_access_granted(self,input_file): <NEW_LINE> <INDENT> _,found_ext=os.path.splitext(input_file) <NEW_LINE> if found_ext != '.nxs': <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if not h5py_installed: <NEW_LINE> <INDENT> self.reducer.prop_man.log ('*** Can not verify if file is accessible. Install h5py to be able to check file access in waiting mode', 'notice') <NEW_LINE> return <NEW_LINE> <DEDENT> ic=0 <NEW_LINE> try: <NEW_LINE> <INDENT> f = h5py.File(input_file,'r') <NEW_LINE> ok = True <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> ok = False <NEW_LINE> while not ok: <NEW_LINE> <INDENT> self.reducer.prop_man.log ('*** File found but access can not be gained. Waiting for 10 sec','notice') <NEW_LINE> time.sleep(10) <NEW_LINE> ic = ic+1 <NEW_LINE> try: <NEW_LINE> <INDENT> f = h5py.File(input_file,'r') <NEW_LINE> ok = True <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> ok = False <NEW_LINE> if ic>24: <NEW_LINE> <INDENT> raise IOError ("Can not get read access to input file: "+input_file+" after 4 min of trying") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if ok: <NEW_LINE> <INDENT> f.close() | Check if the access to the found nxs file is granted
Created to fix an issue on the ISIS archive, where a file
is copied through the network for ~2 min and becomes available
2 minutes after it has been found. | 625941bad7e4931a7ee9ddb0 |
def welcome(self): <NEW_LINE> <INDENT> self.set_rgb(0x00, 0x40, 0x00) <NEW_LINE> time.sleep(0.5) <NEW_LINE> self.set_rgb(0x00, 0x00, 0x00) <NEW_LINE> time.sleep(0.2) <NEW_LINE> self.set_rgb(0x00, 0x40, 0x00) <NEW_LINE> time.sleep(0.5) <NEW_LINE> self.set_rgb(0x00, 0x00, 0x00) | Some visual feedback that the device is operational. | 625941ba30bbd722463cbc57 |
def reload_napp(self, username, napp_name): <NEW_LINE> <INDENT> self.unload_napp(username, napp_name) <NEW_LINE> try: <NEW_LINE> <INDENT> self.reload_napp_module(username, napp_name, 'settings') <NEW_LINE> self.reload_napp_module(username, napp_name, 'main') <NEW_LINE> <DEDENT> except (ModuleNotFoundError, ImportError): <NEW_LINE> <INDENT> return 400 <NEW_LINE> <DEDENT> self.log.info("NApp '%s/%s' successfully reloaded", username, napp_name) <NEW_LINE> self.load_napp(username, napp_name) <NEW_LINE> return 200 | Reload a NApp. | 625941ba046cf37aa974cbdf |
def getSuccessors(self, state): <NEW_LINE> <INDENT> successors = [] <NEW_LINE> for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]: <NEW_LINE> <INDENT> x,y = state[0] <NEW_LINE> dx, dy = Actions.directionToVector(action) <NEW_LINE> nextx, nexty = int(x + dx), int(y + dy) <NEW_LINE> nextState = (nextx, nexty) <NEW_LINE> hitsWall = self.walls[nextx][nexty] <NEW_LINE> if not hitsWall: <NEW_LINE> <INDENT> newTuple = None <NEW_LINE> if nextState in self.corners: <NEW_LINE> <INDENT> print("in big block") <NEW_LINE> if nextState == self.corners[0]: <NEW_LINE> <INDENT> newTuple = (nextState, action, 1, True, state[4], state[5], state[6]) <NEW_LINE> <DEDENT> elif nextState == self.corners[1]: <NEW_LINE> <INDENT> newTuple = (nextState, action, 1, state[3], True, state[5], state[6]) <NEW_LINE> <DEDENT> elif nextState == self.corners[2]: <NEW_LINE> <INDENT> newTuple = (nextState, action, 1, state[3], state[4], True, state[6]) <NEW_LINE> <DEDENT> elif nextState == self.corners[3]: <NEW_LINE> <INDENT> newTuple = (nextState, action, 1, state[3], state[4], state[5], True) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print("in small block") <NEW_LINE> newTuple = (nextState, action, 1, state[3], state[4], state[5], state[6]) <NEW_LINE> <DEDENT> print(newTuple) <NEW_LINE> successors.append(newTuple) <NEW_LINE> <DEDENT> <DEDENT> self._expanded += 1 <NEW_LINE> return successors | Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost'
is the incremental cost of expanding to that successor | 625941ba38b623060ff0ac83 |
def sendGreeting(self, destination): <NEW_LINE> <INDENT> self.time_greeting_sent = time.time() <NEW_LINE> spore = Spore() <NEW_LINE> spore.dataType = spore.OURP <NEW_LINE> spore.castType = spore.BROADCAST <NEW_LINE> spore.hash = self._generateHash() <NEW_LINE> spore.ourpData.type = OurpData.GREETING <NEW_LINE> spore.ourpData.ipAddress = self.potator.config['IP_ADDRESS'] <NEW_LINE> spore.ourpData.onionUrl = self.potator.server.tor_launcher.port.getHost( ).onion_uri <NEW_LINE> if self.potator.config.get('NETWORK_PASSWORD'): <NEW_LINE> <INDENT> payload = { 'password': self.potator.config.get('NETWORK_PASSWORD') } <NEW_LINE> spore.ourpData.payload = json.dumps(payload) <NEW_LINE> <DEDENT> def looper(): <NEW_LINE> <INDENT> spore.hash = self._generateHash() <NEW_LINE> log.msg('[OURP] GREETING to %s' % destination) <NEW_LINE> self.potator.server.sendSpore( destination, spore.SerializeToString()) <NEW_LINE> self.potator.network_dispatcher.hash_cache.append(spore.hash) <NEW_LINE> <DEDENT> l = task.LoopingCall(looper) <NEW_LINE> l.start(5.0) <NEW_LINE> self.greeting_loop = l | Sends an OURP greeting to destination
:param str destination: The onion URL to send the greeting to
The OURP greeting packet is crafted and sent to the onion URL by calling :func:`potator.tor.server.Server.sendSpore`.
The OURP greeting is sent in a loop until Potator receives an OURP greeting acknowledge. | 625941ba7d847024c06be155 |
def calculate_percent_of_parent(self): <NEW_LINE> <INDENT> if self.__f_parent_timer is None: <NEW_LINE> <INDENT> return "N/A" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.__f_parent_timer.__f_elapsed != 0: <NEW_LINE> <INDENT> return str(round((self.__f_elapsed / self.__f_parent_timer.__f_elapsed) * 100, 3)) + "%" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "N/A" | Calculate what percentage of the parent method's run time was spent on this timer's method
def pad5(ar,N,axis=3,mode='mirror',c=0.0): <NEW_LINE> <INDENT> ar = make_nd(ar,5) <NEW_LINE> if N == 0: <NEW_LINE> <INDENT> return ar <NEW_LINE> <DEDENT> N1,N2 = T.cast((N/2), 'int32'),T.cast(N-(N/2), 'int32') <NEW_LINE> Nint = T.cast(N, 'int32') <NEW_LINE> if mode == 'mirror': <NEW_LINE> <INDENT> if axis == 1: <NEW_LINE> <INDENT> return T.concatenate([ar[:,Nint:0:-1,:,:,:],ar],axis=1) <NEW_LINE> <DEDENT> if axis == 3: <NEW_LINE> <INDENT> return T.concatenate([ar[:,:,:,N1:0:-1,:],ar,ar[:,:,:,-1:-(N2+1):-1,:]],axis=3) <NEW_LINE> <DEDENT> if axis ==4: <NEW_LINE> <INDENT> return T.concatenate([ar[:,:,:,:,N1:0:-1],ar,ar[:,:,:,:,-1:-(N2+1):-1]],axis=4) <NEW_LINE> <DEDENT> <DEDENT> if mode == 'border': <NEW_LINE> <INDENT> if axis == 1: <NEW_LINE> <INDENT> return T.concatenate([ar[:,:1,:,:,:]]*Nint+[ar],axis=1) <NEW_LINE> <DEDENT> if axis == 3: <NEW_LINE> <INDENT> return T.concatenate([ar[:,:,:,:1,:]]*N1+[ar]+[ar[:,:,:,-1:,:]]*N2,axis=3) <NEW_LINE> <DEDENT> if axis ==4: <NEW_LINE> <INDENT> return T.concatenate([ar[:,:,:,:,:1]]*N1+[ar]+[ar[:,:,:,:,-1:]]*N2,axis=4) <NEW_LINE> <DEDENT> <DEDENT> if mode == 'const': <NEW_LINE> <INDENT> if axis == 1: <NEW_LINE> <INDENT> return T.concatenate([c*T.ones_like(ar[:,Nint:0:-1,:,:,:]),ar],axis=1) <NEW_LINE> <DEDENT> if axis == 3: <NEW_LINE> <INDENT> return T.concatenate([c*T.ones_like(ar[:,:,:,N1:0:-1,:]),ar,c*T.ones_like(ar[:,:,:,-1:-(N2+1):-1,:])],axis=3) <NEW_LINE> <DEDENT> if axis ==4: <NEW_LINE> <INDENT> return T.concatenate([c*T.ones_like(ar[:,:,:,:,N1:0:-1]),ar,c*T.ones_like(ar[:,:,:,:,-1:-(N2+1):-1])],axis=4) | Pads a 5-dimensional tensor with `N` additional values.
If the tensor has less than 5 dimensions, it will be extended.
Returns a 5 dimensional tensor.
Axis 0 and 2 are ignored.
Usage:
pad5 pads one axis at a time with one of the following modes:
mode = 'mirror' (default)
the image is mirrored at the edges, such that image statistics are similar
mode = 'border'
the border pixels are repeated
mode = 'const'
the padded area is filled with a single value (default 0.0)
It can also be a theano variable such as the mean of the tensor that is to be padded.
For axis 1 (time), the padding happens exclusively at the front,
for axes 3 and 4 (x and y) the amount is split into `N/2` and `N-(N/2)` (this can be asymmetric!).
The total amount padded is always `N`.
For convenience, `pad5_txy` can pad time, x and y with the same mode simultaneously.
`pad3` and `pad2` return 3 tensors and matrices after padding. | 625941ba1f037a2d8b946093 |
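The three pad5 modes correspond to numpy's pad modes; a 1-D numpy sketch of the asymmetric `N/2` / `N-(N/2)` split used for the spatial axes:

```python
import numpy as np

N = 5
n1, n2 = N // 2, N - N // 2          # asymmetric split: 2 in front, 3 behind
x = np.arange(4)                     # [0 1 2 3]
print(np.pad(x, (n1, n2), mode="reflect"))   # mirror:  [2 1 0 1 2 3 2 1 0]
print(np.pad(x, (n1, n2), mode="edge"))      # border:  [0 0 0 1 2 3 3 3 3]
print(np.pad(x, (n1, n2), mode="constant"))  # const:   [0 0 0 1 2 3 0 0 0]
```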
def __get_in_progress_order_or_create(self, client): <NEW_LINE> <INDENT> in_progress_order = self.filter( client=client, status=OrderChoices.IN_PROGRESS) <NEW_LINE> if in_progress_order.count(): <NEW_LINE> <INDENT> return in_progress_order.get() <NEW_LINE> <DEDENT> return self.create(client=client) | Get the client's in progress order or create a new one | 625941bafff4ab517eb2f2ce |
def OnTreeSelectionChange(self, event): <NEW_LINE> <INDENT> hosts = self.getHostsFromTreeByEvent(event) <NEW_LINE> self.current_tree_hosts = hosts <NEW_LINE> self.updateBtnStatus(hosts) <NEW_LINE> if not hosts or (hosts not in self.hostses and hosts not in self.origin_hostses and hosts not in self.common_hostses): <NEW_LINE> <INDENT> return event.Veto() <NEW_LINE> <DEDENT> if hosts and hosts != self.current_showing_hosts: <NEW_LINE> <INDENT> if hosts.is_origin: <NEW_LINE> <INDENT> hosts.getContent() <NEW_LINE> <DEDENT> self.showHosts(hosts) | Triggered when a node in the tree view on the left is clicked
def set_clear_color(color='black', alpha=None): <NEW_LINE> <INDENT> glir = get_current_glir_queue() <NEW_LINE> glir.command('FUNC', 'glClearColor', *Color(color, alpha).rgba) | Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options. | 625941ba7b25080760e392ef |
def _new_game_board(self) -> [[Piece]]: <NEW_LINE> <INDENT> board = [] <NEW_LINE> if self._top_left_disc_color == 'B': <NEW_LINE> <INDENT> diagonal1, diagonal2 = BLACK, WHITE <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> diagonal1, diagonal2 = WHITE, BLACK <NEW_LINE> <DEDENT> turns = 0 <NEW_LINE> for row in range(self.rows): <NEW_LINE> <INDENT> board.append([]) <NEW_LINE> for col in range(self.columns): <NEW_LINE> <INDENT> condition1 = self.columns / 2 - col <NEW_LINE> condition2 = self.rows / 2 - row <NEW_LINE> if 0 <= condition1 <= 1 and 0 <= condition2 <= 1: <NEW_LINE> <INDENT> if turns == 0 or turns == 3: <NEW_LINE> <INDENT> board[-1].append(Piece(diagonal1, (row, col))) <NEW_LINE> <DEDENT> elif turns == 1 or turns == 2: <NEW_LINE> <INDENT> board[-1].append(Piece(diagonal2, (row, col))) <NEW_LINE> <DEDENT> turns += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> board[-1].append(Piece(NONE, (row, col))) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return board | Create a new game board using list
:return: The board as a list of rows, each a list of Piece objects with their colors | 625941ba3346ee7daa2b2bfe
def to_bytes(self, data): <NEW_LINE> <INDENT> if isinstance(data, (bool, int, float, complex)): <NEW_LINE> <INDENT> data = str(data) <NEW_LINE> <DEDENT> elif isinstance(data, bytes): <NEW_LINE> <INDENT> return data <NEW_LINE> <DEDENT> elif isinstance(data, bytearray): <NEW_LINE> <INDENT> return bytes(data) <NEW_LINE> <DEDENT> elif isinstance(data, memoryview): <NEW_LINE> <INDENT> return data.tobytes() <NEW_LINE> <DEDENT> encoding = 'utf-8' if self._chinese else self._codepage.name <NEW_LINE> try: <NEW_LINE> <INDENT> return bytes(data, encoding, errors='replace') <NEW_LINE> <DEDENT> except LookupError: <NEW_LINE> <INDENT> encoding = CodePageConverted[self._codepage.name].value <NEW_LINE> return bytes(data, encoding, errors='replace') | Convert data before sending to the printer. | 625941ba8e71fb1e9831d642 |
def _choose_image(self, *args): <NEW_LINE> <INDENT> if self.separator_image: <NEW_LINE> <INDENT> return self.separator_image <NEW_LINE> <DEDENT> if self._main_above: <NEW_LINE> <INDENT> return 'resources/images/navigationdrawer_gradient_rtol.png' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 'resources/images/navigationdrawer_gradient_ltor.png' | Chooses which image to display as the main/side separator, based on
_main_above. | 625941baf9cc0f698b14049a |
def _render_stimulus(self, stimulus): <NEW_LINE> <INDENT> stimulus.draw() <NEW_LINE> self.win.flip() <NEW_LINE> core.wait(STIM_DUR) | Render the stimulus on the window | 625941ba460517430c394022
def describesensor(): <NEW_LINE> <INDENT> pass | Make a request to the SOS; returns an XML document wrapped in a
Python file object. | 625941ba851cf427c661a3a7 |
def create_alien(ai_settings, screen, aliens, alien_number, row_number): <NEW_LINE> <INDENT> alien = Alien(ai_settings, screen) <NEW_LINE> alien_width = alien.rect.width <NEW_LINE> alien_height = alien.rect.height <NEW_LINE> alien.x = alien_width + 2 * alien_width * alien_number <NEW_LINE> alien.rect.x = alien.x <NEW_LINE> alien.rect.y = alien_height + 2 * alien_height * row_number <NEW_LINE> aliens.add(alien) | Create an alien and place it in the current row | 625941ba099cdd3c635f0af1
def merge_pdfs(source_folder, output_file, rotate_deg): <NEW_LINE> <INDENT> output = PdfFileWriter() <NEW_LINE> filenames = glob.glob(os.path.join(source_folder, '*.pdf')) <NEW_LINE> for filename in filenames: <NEW_LINE> <INDENT> pdf_reader = PdfFileReader(open(filename, 'rb')) <NEW_LINE> for pagenum in range(pdf_reader.numPages): <NEW_LINE> <INDENT> page = pdf_reader.getPage(pagenum) <NEW_LINE> if rotate_deg is not None: <NEW_LINE> <INDENT> page.rotateClockwise(int(rotate_deg)) <NEW_LINE> <DEDENT> output.addPage(page) <NEW_LINE> <DEDENT> <DEDENT> outputStream = open(output_file, "wb") <NEW_LINE> output.write(outputStream) <NEW_LINE> outputStream.close() | Function for merging multiple pdf files into one with
optional page rotation.
:param source_folder: Folder containing separate pdf files
:param output_file: Merged file
:param rotate_deg: rotation of page (CW in degree)
:return: None | 625941ba50812a4eaa59c1ba |
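A hypothetical call (folder and file names are invented); the row above relies on PdfFileReader/PdfFileWriter from PyPDF2 (pre-3.0 API) plus the standard-library glob and os:

merge_pdfs('scans', 'merged.pdf', rotate_deg=90)           # rotate every page 90 degrees clockwise
merge_pdfs('reports', 'all_reports.pdf', rotate_deg=None)  # merge without rotation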
def test_pickling(self): <NEW_LINE> <INDENT> pc1 = cPickle.loads(cPickle.dumps(self.pc)) <NEW_LINE> self.pwm.pwmscale = 3 <NEW_LINE> pc2 = cPickle.loads(cPickle.dumps(self.pc)) <NEW_LINE> self.assertEqual('mypwm', pc1.peakwidthmodel.type()) <NEW_LINE> self.assertEqual(1.5, pc1.peakwidthmodel.pwmscale) <NEW_LINE> self.assertEqual(1.5, pc1.pwmscale) <NEW_LINE> self.assertEqual('mypwm', pc2.peakwidthmodel.type()) <NEW_LINE> self.assertEqual(3, pc2.peakwidthmodel.pwmscale) <NEW_LINE> self.assertEqual(3, pc2.pwmscale) <NEW_LINE> return | Check pickling of an owned PeakWidthModel instance.
| 625941ba63d6d428bbe44384 |
def _norm_scale(self, arr): <NEW_LINE> <INDENT> arr = arr.astype('float') <NEW_LINE> for i in range(3): <NEW_LINE> <INDENT> minval = arr[...,i].min() <NEW_LINE> maxval = arr[...,i].max() <NEW_LINE> if minval != maxval: <NEW_LINE> <INDENT> arr[...,i] -= minval <NEW_LINE> arr[...,i] *= (255.0/(maxval-minval)) <NEW_LINE> <DEDENT> <DEDENT> return arr | Pixel intensity normalization w/ min/max
http://en.wikipedia.org/wiki/Normalization_%28image_processing%29 | 625941babe383301e01b5321 |
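A standalone NumPy sketch of the same per-channel min/max rescaling (the toy pixel values are illustrative):

import numpy as np

rgb = np.array([[[10, 100, 50], [20, 200, 50]]], dtype=float)
for i in range(3):
    lo, hi = rgb[..., i].min(), rgb[..., i].max()
    if lo != hi:  # skip constant channels, as the method above does
        rgb[..., i] = (rgb[..., i] - lo) * (255.0 / (hi - lo))
print(rgb[..., 0])  # [[0. 255.]] -- channel 0 stretched to the full range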
def test_users_id_delete(self): <NEW_LINE> <INDENT> pass | Test case for users_id_delete
Delete a user | 625941ba3346ee7daa2b2bff
def _fraction_visited(source, sink, waypoint, tprob, for_committors, cond_committors): <NEW_LINE> <INDENT> fraction_visited = (np.float(tprob[source, :].dot(cond_committors)) / np.float(tprob[source, :].dot(for_committors))) <NEW_LINE> return fraction_visited | Calculate the fraction of times a walker on `tprob` going from `sources`
to `sinks` will travel through the set of states `waypoints` en route.
Computes the conditional committors q^{ABC^+} and uses them to find the
fraction of paths mentioned above.
Note that in the notation of Dickson et al. this computes h_c(A,B), with
sources = A
sinks = B
waypoint = C
Parameters
----------
source : int
The index of the source state
sink : int
The index of the sink state
waypoint : int
The index of the intermediate state
tprob : np.ndarray
Transition matrix
for_committors : np.ndarray
The forward committors for the reaction sources -> sinks
cond_committors : np.ndarray
Conditional committors, i.e. the probability of visiting
a waypoint when on a path between source and sink.
Returns
-------
fraction_visited : float
The fraction of times a walker going from `sources` -> `sinks` stops
by `waypoints` on its way.
See Also
--------
msmbuilder.tpt.conditional_committors
Calculate the probability of visiting a waypoint while on a path
between a source and sink.
msmbuilder.tpt.hub_scores : function
Compute the 'hub score', the weighted fraction of visits for an
entire network.
References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. | 625941ba16aa5153ce36230d |
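A toy NumPy evaluation of the ratio computed above; the transition matrix and committor vectors are invented for illustration, not computed from a real MSM:

import numpy as np

tprob = np.array([[0.5, 0.3, 0.2],
                  [0.1, 0.8, 0.1],
                  [0.2, 0.2, 0.6]])
for_committors = np.array([0.0, 0.4, 1.0])   # assumed forward committors
cond_committors = np.array([0.0, 0.3, 0.2])  # assumed conditional committors
source = 0
frac = tprob[source].dot(cond_committors) / tprob[source].dot(for_committors)
print(frac)  # (0.3*0.3 + 0.2*0.2) / (0.3*0.4 + 0.2*1.0) = 0.40625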
def rename_item(self, what, object_id, newname, token=None): <NEW_LINE> <INDENT> self._log("rename_item(%s)" % what, object_id=object_id, token=token) <NEW_LINE> obj = self.__get_object(object_id) <NEW_LINE> self.api.rename_item(what, obj, newname) <NEW_LINE> return True | Renames an object specified by object_id to a new name.
:param what: The type of object which shall be renamed to a new name.
:param object_id: The id which refers to the object.
:param newname: The new name for the object.
:param token: The API-token obtained via the login() method.
:return: True if the action succeeded. | 625941ba24f1403a926009fe |
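The signature resembles Cobbler's remote API; a hedged XML-RPC sketch with an invented server URL, credentials, and object names:

import xmlrpc.client

server = xmlrpc.client.ServerProxy('http://cobbler.example.com/cobbler_api')
token = server.login('user', 'password')               # hypothetical credentials
handle = server.get_system_handle('old-name', token)   # object_id for the system
server.rename_item('system', handle, 'new-name', token)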
def test_airline_get_passengers_from_terminal(self): <NEW_LINE> <INDENT> self.assertEqual(len(self.airline_dummy.get_passengers_from_terminal(self.terminal_2)), 6) <NEW_LINE> self.assertEqual(len(self.airline_dummy.get_passengers_from_terminal(self.terminal_1)), 0) | Get all the passengers that are flying from a terminal | 625941bab57a9660fec33715 |
def write_char(self, c): <NEW_LINE> <INDENT> if type(c) not in int_types: <NEW_LINE> <INDENT> raise TypeError('expected an int (got:%r)' % (type(c),)) <NEW_LINE> <DEDENT> if not -128 <= c <= 127: <NEW_LINE> <INDENT> raise OverflowError("Not in range, %d" % c) <NEW_LINE> <DEDENT> self.write(struct.pack("b", c)) | Write a C{char} to the stream.
@param c: char
@type c: C{int}
@raise TypeError: Unexpected type for int C{c}.
@raise OverflowError: Not in range. | 625941ba30dc7b76659017ff |
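What the struct call above produces; the 'b' format packs one signed byte in the -128..127 range:

import struct

print(struct.pack('b', 65))   # b'A'
print(struct.pack('b', -1))   # b'\xff'
# struct.pack('b', 200) would raise struct.error: value out of range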
def removeDuplicates(self, nums): <NEW_LINE> <INDENT> if not nums: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> last = 0 <NEW_LINE> duplicate = float("inf") <NEW_LINE> for num in nums[1:]: <NEW_LINE> <INDENT> if nums[last] != num: <NEW_LINE> <INDENT> last += 1 <NEW_LINE> nums[last] = num <NEW_LINE> <DEDENT> elif num != duplicate: <NEW_LINE> <INDENT> last += 1 <NEW_LINE> nums[last] = num <NEW_LINE> duplicate = num <NEW_LINE> <DEDENT> <DEDENT> print(nums[:last + 1]) <NEW_LINE> return last + 1 | :type nums: List[int]
:rtype: int | 625941ba99cbb53fe6792a7c |
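Expected behavior, assuming the usual LeetCode-style Solution class around this method (each value kept at most twice, compaction done in place):

nums = [1, 1, 1, 2, 2, 3]
k = Solution().removeDuplicates(nums)  # the Solution wrapper is assumed
print(k, nums[:k])  # 5 [1, 1, 2, 2, 3]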
def test_lower_the_maximized_window_works(self): <NEW_LINE> <INDENT> calc_win = self.open_new_application_window("Calculator") <NEW_LINE> text_win = self.open_new_application_window("Text Editor", maximized=True) <NEW_LINE> self.assertProperty(text_win, is_focused=True) <NEW_LINE> self.assertProperty(calc_win, is_focused=False) <NEW_LINE> self.mouse.click_object(self.panel.grab_area, button=2) <NEW_LINE> self.assertProperty(calc_win, is_focused=True) | Middle-clicking on the panel grab area must lower a maximized window. | 625941babe7bc26dc91cd49a |
def __init__(self): <NEW_LINE> <INDENT> self.__property_name = None <NEW_LINE> self.__property_type = None | Constructor | 625941ba5fcc89381b1e1559 |
def hidden_frame(func, posargs, kwargs): <NEW_LINE> <INDENT> func(*posargs, **kwargs) | this is just an extra wrapper method for a new thread, so `func` can be called with unpacked positional and keyword arguments | 625941ba925a0f43d2549d09
def test_admin_add_category_empty_name(client, authorize_admin): <NEW_LINE> <INDENT> headers = authorize_admin <NEW_LINE> expected_result = 'please provide a category name' <NEW_LINE> response = client.post('/api/v2/categories', data=json.dumps(CATEGORIES['category6']), headers=headers) <NEW_LINE> data = response.json <NEW_LINE> assert response.status_code == 400 <NEW_LINE> assert data['message'] == expected_result | admin should not be able to add category with empty name | 625941ba2c8b7c6e89b35658 |
def name(): <NEW_LINE> <INDENT> return "rc" | Return the module name | 625941ba0c0af96317bb807e
def getLUTPathForFlipbook(self, flipbook, lut): <NEW_LINE> <INDENT> pass | Return the path for the given flipbook and lut. May return an empty string if none registered.
@param flipbook: The unique name of the flipbook
@param lut: The unique name for the LUT, e.g. 'sRGB' and 'rec709' | 625941bab7558d58953c4db0 |
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, V1alpha1Workflow): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.to_dict() == other.to_dict() | Returns true if both objects are equal | 625941bad53ae8145f87a10b |
def safeStr(self, o): <NEW_LINE> <INDENT> if isinstance(o, unicode): <NEW_LINE> <INDENT> return o.encode('UTF-8', 'ignore') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return str(o) | Return a safe string
:param o: the string to be checked | 625941ba4f6381625f1148da |
def test_move(self): <NEW_LINE> <INDENT> start = self.vimiv.get_pos() <NEW_LINE> self.thumb.move_direction("l") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[start + 1], self.vimiv.get_pos(True)) <NEW_LINE> self.thumb.move_direction("h") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[start], self.vimiv.get_pos(True)) <NEW_LINE> self.thumb.move_direction("L") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[-1], self.vimiv.get_pos(True)) <NEW_LINE> self.thumb.move_direction("H") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[0], self.vimiv.get_pos(True)) <NEW_LINE> self.vimiv["main_window"].scroll("L") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[-1], self.vimiv.get_pos(True)) <NEW_LINE> self.vimiv["main_window"].scroll("H") <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[0], self.vimiv.get_pos(True)) <NEW_LINE> last = len(self.vimiv.get_paths()) - 1 <NEW_LINE> rows = self.thumb.get_item_row(Gtk.TreePath(last)) + 1 <NEW_LINE> if rows > 1: <NEW_LINE> <INDENT> self.fail("Implementation not done for more than one row.") <NEW_LINE> <DEDENT> for direction in "jkJK": <NEW_LINE> <INDENT> self.thumb.move_direction(direction) <NEW_LINE> self.assertEqual(self.vimiv.get_paths()[0], self.vimiv.get_pos(True)) | Move in thumbnail mode. | 625941bad18da76e23532367 |
def _loadToPositionGroupBox(self, inPmGroupBox): <NEW_LINE> <INDENT> self.toPositionspinboxes = PM_CoordinateSpinBoxes(inPmGroupBox) <NEW_LINE> self.moveXSpinBox = self.toPositionspinboxes.xSpinBox <NEW_LINE> self.moveYSpinBox = self.toPositionspinboxes.ySpinBox <NEW_LINE> self.moveZSpinBox = self.toPositionspinboxes.zSpinBox <NEW_LINE> self.moveAbsoluteButton = PM_PushButton( inPmGroupBox, label = "", text = "Move Selection", spanWidth = True ) | Load widgets in the Translate To a given Position group box, which is
present within the Translate groupbox.
@param inPmGroupBox: Translate To Position group box in the Translate
group box.
@type inPmGroupBox: L{PM_GroupBox} | 625941ba9f2886367277a726 |
def split_commutative_parts(e): <NEW_LINE> <INDENT> c_part, nc_part = e.args_cnc() <NEW_LINE> c_part = list(c_part) <NEW_LINE> return c_part, nc_part | Split into commutative and non-commutative parts. | 625941ba5166f23b2e1a4fee |
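A small SymPy illustration of the split (the exact ordering of the commutative part may vary between versions):

from sympy import symbols

x, y = symbols('x y')                     # commutative by default
A, B = symbols('A B', commutative=False)
c, nc = (2 * x * A * B * y).args_cnc()
print(c, nc)  # [2, x, y] [A, B]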
def clearReferencesToDependentCameras(self): <NEW_LINE> <INDENT> return _osgUtil.SceneView_clearReferencesToDependentCameras(self) | clearReferencesToDependentCameras(SceneView self) | 625941ba462c4b4f79d1d565 |
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, ListResponseOfContactDto): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.__dict__ == other.__dict__ | Returns true if both objects are equal | 625941ba167d2b6e31218a31 |
def delete_file(file_path): <NEW_LINE> <INDENT> os.remove(file_path) | Delete file by specified path.
:param file_path: path to file
:type file_path: str | 625941ba3617ad0b5ed67d95 |
def cmd_exists(cmd): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open(os.devnull, 'w') as FNULL: <NEW_LINE> <INDENT> subprocess.check_call(['which', cmd], stdout=FNULL) <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True | Determine if a given command is available. Requires "which". | 625941ba91af0d3eaac9b8aa |
def generate(battery_dest=None,battery_repo=None,experiment_repo=None,experiments=None,config=None,make_config=True,warning=True,time=30): <NEW_LINE> <INDENT> if battery_dest == None: <NEW_LINE> <INDENT> battery_dest = tempfile.mkdtemp() <NEW_LINE> shutil.rmtree(battery_dest) <NEW_LINE> <DEDENT> if not os.path.exists(battery_dest): <NEW_LINE> <INDENT> base = generate_base(battery_dest=battery_dest, tasks=experiments, experiment_repo=experiment_repo, battery_repo=battery_repo, warning=warning, add_surveys=False) <NEW_LINE> custom_variables = dict() <NEW_LINE> custom_variables["load"] = [("[SUB_TOTALTIME_SUB]",time)] <NEW_LINE> template_experiments(battery_dest=battery_dest, battery_repo=base["battery_repo"], valid_experiments=base["experiments"], custom_variables=custom_variables) <NEW_LINE> if make_config: <NEW_LINE> <INDENT> if config == None: <NEW_LINE> <INDENT> config = dict() <NEW_LINE> <DEDENT> generate_config(battery_dest,config) <NEW_LINE> <DEDENT> return battery_dest <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Folder exists at %s, cannot generate." %(battery_dest)) | generate
will create a battery from a template and list of experiments
:param battery_dest: is the output folder for your battery. This folder MUST NOT EXIST. If not specified, a temp folder is created
:param battery_repo: location of psiturk-battery repo to use as a template. If not specified, will be downloaded to a temporary directory
:param experiment_repo: location of a expfactory-experiments repo to check for valid experiments. If not specified, will be downloaded to a temporary directory
:param experiments: a list of experiments, meaning the "exp_id" variable in the config.json, to include. This variable also coincides with the experiment folder name.
:param config: A dictionary with keys that coincide with parameters in the config.txt file for an expfactory experiment. If not provided, a dummy config will be generated.
:param make_config: A boolean (default True) to control generation of the config. If there is a config generated before calling this function, this should be set to False.
:param warning: Show config.json warnings when validating experiments. Default is True
:param time: maximum amount of time (default 30 minutes) the battery should take; used to select experiments | 625941ba1d351010ab8559b3
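A hypothetical call (the experiment ids are illustrative and must exist in the experiment repo):

battery_path = generate(experiments=['stroop', 'flanker'], time=45)
print(battery_path)  # temp folder holding the generated psiturk battery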
def get_loc_week_counts(query, total_weeks=6): <NEW_LINE> <INDENT> list_of_weeks = get_list_of_weeks(total_weeks) <NEW_LINE> pipeline = [ {'$match': { 'posted_week': {'$in': list_of_weeks}, '$text': {'$search': query}} }, {'$project': { '_id': 0, 'query_loc': 1, 'posted_week': 1} } ] <NEW_LINE> pipeline.append( { '$group': { '_id': {'loc': '$query_loc', 'week_num': '$posted_week'}, 'count': {'$sum': 1} } } ) <NEW_LINE> cursor = db.posts.aggregate(pipeline) <NEW_LINE> locs = [l.lower() for l in QueryLoc.objects.values_list('query', flat=True)] <NEW_LINE> loc_posts = [] <NEW_LINE> for loc in locs: <NEW_LINE> <INDENT> loc_posts.append({'loc': loc, 'posts': []}) <NEW_LINE> <DEDENT> for d in cursor: <NEW_LINE> <INDENT> loc = d['_id']['loc'] <NEW_LINE> week = d['_id']['week_num'] <NEW_LINE> posts = d['count'] <NEW_LINE> loc_post = list(filter(lambda loc_post: loc_post['loc'] == loc, loc_posts))[0] <NEW_LINE> loc_post[week] = posts <NEW_LINE> <DEDENT> for loc_post in loc_posts: <NEW_LINE> <INDENT> for week in range(min(list_of_weeks), max(list_of_weeks)+1): <NEW_LINE> <INDENT> post_count = loc_post.get(week, 0) <NEW_LINE> loc_post['posts'].append(post_count) <NEW_LINE> <DEDENT> loc_post['total'] = sum(loc_post['posts']) <NEW_LINE> <DEDENT> loc_posts = sorted(loc_posts, key=itemgetter('total'), reverse=True) <NEW_LINE> return loc_posts | Gets counts of posts by location and week
Return:
List of dicts with 'loc' string and 'posts' list of plot values | 625941ba67a9b606de4a7d52 |
def export_to_string(self): <NEW_LINE> <INDENT> self.validate() <NEW_LINE> aci = "" <NEW_LINE> for t, v in sorted(self.target.items()): <NEW_LINE> <INDENT> op = v['operator'] <NEW_LINE> if type(v['expression']) in (tuple, list): <NEW_LINE> <INDENT> target = "" <NEW_LINE> for l in v['expression']: <NEW_LINE> <INDENT> target = target + l + " || " <NEW_LINE> <DEDENT> target = target[:-4] <NEW_LINE> aci = aci + "(%s %s \"%s\")" % (t, op, target) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> aci = aci + "(%s %s \"%s\")" % (t, op, v['expression']) <NEW_LINE> <DEDENT> <DEDENT> aci = aci + "(version 3.0;acl \"%s\";%s (%s) %s %s \"%s\"" % (self.name, self.action, ",".join(self.permissions), self.bindrule['keyword'], self.bindrule['operator'], self.bindrule['expression']) + ";)" <NEW_LINE> return aci | Output a Directory Server-compatible ACI string | 625941baf7d966606f6a9e9e |
def replace(self, i: int, v: Val) -> 'ImmTup': <NEW_LINE> <INDENT> return ImmTup(tuple(v if i == j else x for j, x in enumerate(self))) | Creates a new copy | 625941babf627c535bc1306c |